diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -122,15 +122,15 @@ } llvm::FunctionType *Ty = - cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType())); + cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType())); return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false); } /// Emit the conversions required to turn the given value into an /// integer of the given size. -static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V, - QualType T, llvm::IntegerType *IntType) { +static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, + llvm::IntegerType *IntType) { V = CGF.EmitToMemory(V, T); if (V->getType()->isPointerTy()) @@ -140,8 +140,8 @@ return V; } -static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, - QualType T, llvm::Type *ResultType) { +static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, + llvm::Type *ResultType) { V = CGF.EmitFromMemory(V, T); if (ResultType->isPointerTy()) @@ -159,16 +159,15 @@ QualType T = E->getType(); assert(E->getArg(0)->getType()->isPointerType()); - assert(CGF.getContext().hasSameUnqualifiedType(T, - E->getArg(0)->getType()->getPointeeType())); + assert(CGF.getContext().hasSameUnqualifiedType( + T, E->getArg(0)->getType()->getPointeeType())); assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); - llvm::IntegerType *IntType = - llvm::IntegerType::get(CGF.getLLVMContext(), - CGF.getContext().getTypeSize(T)); + llvm::IntegerType *IntType = llvm::IntegerType::get( + CGF.getLLVMContext(), CGF.getContext().getTypeSize(T)); llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); llvm::Value *Args[2]; @@ -177,8 +176,8 @@ llvm::Type *ValueType = Args[1]->getType(); Args[1] = EmitToInt(CGF, Args[1], T, IntType); - llvm::Value *Result = CGF.Builder.CreateAtomicRMW( - Kind, Args[0], Args[1], Ordering); + llvm::Value *Result = + CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1], Ordering); return EmitFromInt(CGF, Result, T, ValueType); } @@ -216,21 +215,19 @@ /// operation. static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, - const CallExpr *E, - Instruction::BinaryOps Op, + const CallExpr *E, Instruction::BinaryOps Op, bool Invert = false) { QualType T = E->getType(); assert(E->getArg(0)->getType()->isPointerType()); - assert(CGF.getContext().hasSameUnqualifiedType(T, - E->getArg(0)->getType()->getPointeeType())); + assert(CGF.getContext().hasSameUnqualifiedType( + T, E->getArg(0)->getType()->getPointeeType())); assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType())); llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0)); unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace(); - llvm::IntegerType *IntType = - llvm::IntegerType::get(CGF.getLLVMContext(), - CGF.getContext().getTypeSize(T)); + llvm::IntegerType *IntType = llvm::IntegerType::get( + CGF.getLLVMContext(), CGF.getContext().getTypeSize(T)); llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace); llvm::Value *Args[2]; @@ -307,8 +304,8 @@ /// function MakeAtomicCmpXchgValue since it expects the arguments to be /// already swapped.
-static -Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, +static Value *EmitAtomicCmpXchgForMSIntrin( + CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) { assert(E->getArg(0)->getType()->isPointerType()); assert(CGF.getContext().hasSameUnqualifiedType( @@ -323,17 +320,16 @@ auto *Exchange = CGF.EmitScalarExpr(E->getArg(1)); // For Release ordering, the failure ordering should be Monotonic. - auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ? - AtomicOrdering::Monotonic : - SuccessOrdering; + auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release + ? AtomicOrdering::Monotonic + : SuccessOrdering; // The atomic instruction is marked volatile for consistency with MSVC. This // blocks the few atomics optimizations that LLVM has. If we want to optimize // _Interlocked* operations in the future, we will have to remove the volatile // marker. auto *Result = CGF.Builder.CreateAtomicCmpXchg( - Destination, Comparand, Exchange, - SuccessOrdering, FailureOrdering); + Destination, Comparand, Exchange, SuccessOrdering, FailureOrdering); Result->setVolatile(true); return CGF.Builder.CreateExtractValue(Result, 0); } @@ -400,29 +396,27 @@ return CGF.Builder.CreateZExt(Success, CGF.Int8Ty); } -static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E, +static Value *EmitAtomicIncrementValue( + CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { assert(E->getArg(0)->getType()->isPointerType()); auto *IntTy = CGF.ConvertType(E->getType()); auto *Result = CGF.Builder.CreateAtomicRMW( - AtomicRMWInst::Add, - CGF.EmitScalarExpr(E->getArg(0)), - ConstantInt::get(IntTy, 1), - Ordering); + AtomicRMWInst::Add, CGF.EmitScalarExpr(E->getArg(0)), + ConstantInt::get(IntTy, 1), Ordering); return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1)); } -static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E, +static Value *EmitAtomicDecrementValue( + CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) { assert(E->getArg(0)->getType()->isPointerType()); auto *IntTy = CGF.ConvertType(E->getType()); auto *Result = CGF.Builder.CreateAtomicRMW( - AtomicRMWInst::Sub, - CGF.EmitScalarExpr(E->getArg(0)), - ConstantInt::get(IntTy, 1), - Ordering); + AtomicRMWInst::Sub, CGF.EmitScalarExpr(E->getArg(0)), + ConstantInt::get(IntTy, 1), Ordering); return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1)); } @@ -457,15 +451,16 @@ // Emit a simple mangled intrinsic that has 1 argument and a return type // matching the argument type. Depending on mode, this may be a constrained // floating-point intrinsic. 
-static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, - const CallExpr *E, unsigned IntrinsicID, - unsigned ConstrainedIntrinsicID) { +static Value * +emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, + unsigned IntrinsicID, + unsigned ConstrainedIntrinsicID) { llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); if (CGF.Builder.getIsFPConstrained()) { CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); - return CGF.Builder.CreateConstrainedFPCall(F, { Src0 }); + return CGF.Builder.CreateConstrainedFPCall(F, {Src0}); } else { Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); return CGF.Builder.CreateCall(F, Src0); @@ -474,27 +469,29 @@ // Emit an intrinsic that has 2 operands of the same type as its result. // Depending on mode, this may be a constrained floating-point intrinsic. -static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, - const CallExpr *E, unsigned IntrinsicID, - unsigned ConstrainedIntrinsicID) { +static Value * +emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, + unsigned IntrinsicID, + unsigned ConstrainedIntrinsicID) { llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); if (CGF.Builder.getIsFPConstrained()) { CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); - return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 }); + return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1}); } else { Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); - return CGF.Builder.CreateCall(F, { Src0, Src1 }); + return CGF.Builder.CreateCall(F, {Src0, Src1}); } } // Emit an intrinsic that has 3 operands of the same type as its result. // Depending on mode, this may be a constrained floating-point intrinsic. -static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, - const CallExpr *E, unsigned IntrinsicID, - unsigned ConstrainedIntrinsicID) { +static Value * +emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, + unsigned IntrinsicID, + unsigned ConstrainedIntrinsicID) { llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2)); @@ -502,10 +499,10 @@ if (CGF.Builder.getIsFPConstrained()) { CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType()); - return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 }); + return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1, Src2}); } else { Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); - return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 }); + return CGF.Builder.CreateCall(F, {Src0, Src1, Src2}); } } @@ -540,31 +537,28 @@ } // Emit an intrinsic that has 2 operands of the same type as its result. 
-static Value *emitBinaryBuiltin(CodeGenFunction &CGF, - const CallExpr *E, +static Value *emitBinaryBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID) { llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); - return CGF.Builder.CreateCall(F, { Src0, Src1 }); + return CGF.Builder.CreateCall(F, {Src0, Src1}); } // Emit an intrinsic that has 3 operands of the same type as its result. -static Value *emitTernaryBuiltin(CodeGenFunction &CGF, - const CallExpr *E, +static Value *emitTernaryBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID) { llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2)); Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType()); - return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 }); + return CGF.Builder.CreateCall(F, {Src0, Src1, Src2}); } // Emit an intrinsic that has 1 float or double operand, and 1 integer. -static Value *emitFPIntBuiltin(CodeGenFunction &CGF, - const CallExpr *E, +static Value *emitFPIntBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID) { llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0)); llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1)); @@ -662,23 +656,22 @@ return CGF.Builder.CreateExtractValue(Tmp, 0); } -static Value *emitRangedBuiltin(CodeGenFunction &CGF, - unsigned IntrinsicID, +static Value *emitRangedBuiltin(CodeGenFunction &CGF, unsigned IntrinsicID, int low, int high) { - llvm::MDBuilder MDHelper(CGF.getLLVMContext()); - llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high)); - Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {}); - llvm::Instruction *Call = CGF.Builder.CreateCall(F); - Call->setMetadata(llvm::LLVMContext::MD_range, RNode); - return Call; + llvm::MDBuilder MDHelper(CGF.getLLVMContext()); + llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high)); + Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {}); + llvm::Instruction *Call = CGF.Builder.CreateCall(F); + Call->setMetadata(llvm::LLVMContext::MD_range, RNode); + return Call; } namespace { - struct WidthAndSignedness { - unsigned Width; - bool Signed; - }; -} +struct WidthAndSignedness { + unsigned Width; + bool Signed; +}; +} // namespace static WidthAndSignedness getIntegerWidthAndSignedness(const clang::ASTContext &context, @@ -743,11 +736,9 @@ return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true); } -llvm::Value * -CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type, - llvm::IntegerType *ResType, - llvm::Value *EmittedE, - bool IsDynamic) { +llvm::Value *CodeGenFunction::evaluateOrEmitBuiltinObjectSize( + const Expr *E, unsigned Type, llvm::IntegerType *ResType, + llvm::Value *EmittedE, bool IsDynamic) { uint64_t ObjectSize; if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type)) return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic); @@ -763,10 +754,11 @@ /// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null /// and we wouldn't otherwise try to reference a pass_object_size parameter, /// we'll call @llvm.objectsize on EmittedE, rather than emitting E. 
-llvm::Value * -CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type, - llvm::IntegerType *ResType, - llvm::Value *EmittedE, bool IsDynamic) { +llvm::Value *CodeGenFunction::emitBuiltinObjectSize(const Expr *E, + unsigned Type, + llvm::IntegerType *ResType, + llvm::Value *EmittedE, + bool IsDynamic) { // We need to reference an argument if the pointer is a parameter with the // pass_object_size attribute. if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) { @@ -876,16 +868,19 @@ static char bitActionToX86BTCode(BitTest::ActionKind A) { switch (A) { - case BitTest::TestOnly: return '\0'; - case BitTest::Complement: return 'c'; - case BitTest::Reset: return 'r'; - case BitTest::Set: return 's'; + case BitTest::TestOnly: + return '\0'; + case BitTest::Complement: + return 'c'; + case BitTest::Reset: + return 'r'; + case BitTest::Set: + return 's'; } llvm_unreachable("invalid action"); } -static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF, - BitTest BT, +static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF, BitTest BT, const CallExpr *E, Value *BitBase, Value *BitPos) { char Action = bitActionToX86BTCode(BT.Action); @@ -923,11 +918,16 @@ static llvm::AtomicOrdering getBitTestAtomicOrdering(BitTest::InterlockingKind I) { switch (I) { - case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic; - case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent; - case BitTest::Acquire: return llvm::AtomicOrdering::Acquire; - case BitTest::Release: return llvm::AtomicOrdering::Release; - case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic; + case BitTest::Unlocked: + return llvm::AtomicOrdering::NotAtomic; + case BitTest::Sequential: + return llvm::AtomicOrdering::SequentiallyConsistent; + case BitTest::Acquire: + return llvm::AtomicOrdering::Acquire; + case BitTest::Release: + return llvm::AtomicOrdering::Release; + case BitTest::NoFence: + return llvm::AtomicOrdering::Monotonic; } llvm_unreachable("invalid interlocking"); } @@ -1065,11 +1065,7 @@ } namespace { -enum class MSVCSetJmpKind { - _setjmpex, - _setjmp3, - _setjmp -}; +enum class MSVCSetJmpKind { _setjmpex, _setjmp3, _setjmp }; } /// MSVC handles setjmp a bit differently on different platforms. On every @@ -1681,12 +1677,12 @@ CGF.EmitARCIntrinsicUse(object); } }; -} +} // namespace Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind) { - assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) - && "Unsupported builtin check kind"); + assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) && + "Unsupported builtin check kind"); Value *ArgValue = EmitScalarExpr(E); if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef()) @@ -1881,8 +1877,8 @@ unsigned ArgValSize = CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType()); - llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(), - ArgValSize); + llvm::IntegerType *IntTy = + llvm::Type::getIntNTy(getLLVMContext(), ArgValSize); ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy); CanQualType ArgTy = getOSLogArgType(Ctx, Size); // If ArgVal has type x86_fp80, zero-extend ArgVal.
@@ -2017,8 +2013,7 @@ IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult)); Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow); if (ResultInfo.Width < OpWidth) { - auto IntMax = - llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth); + auto IntMax = llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth); llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT( UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax)); Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow); @@ -2087,75 +2082,75 @@ // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same. unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl; Function *F = CGM.getIntrinsic(IID, Ty); - return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt })); + return RValue::get(Builder.CreateCall(F, {Src, Src, ShiftAmt})); } // Map math builtins for long-double to f128 version. static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) { switch (BuiltinID) { -#define MUTATE_LDBL(func) \ - case Builtin::BI__builtin_##func##l: \ +#define MUTATE_LDBL(func) \ + case Builtin::BI__builtin_##func##l: \ return Builtin::BI__builtin_##func##f128; - MUTATE_LDBL(sqrt) - MUTATE_LDBL(cbrt) - MUTATE_LDBL(fabs) - MUTATE_LDBL(log) - MUTATE_LDBL(log2) - MUTATE_LDBL(log10) - MUTATE_LDBL(log1p) - MUTATE_LDBL(logb) - MUTATE_LDBL(exp) - MUTATE_LDBL(exp2) - MUTATE_LDBL(expm1) - MUTATE_LDBL(fdim) - MUTATE_LDBL(hypot) - MUTATE_LDBL(ilogb) - MUTATE_LDBL(pow) - MUTATE_LDBL(fmin) - MUTATE_LDBL(fmax) - MUTATE_LDBL(ceil) - MUTATE_LDBL(trunc) - MUTATE_LDBL(rint) - MUTATE_LDBL(nearbyint) - MUTATE_LDBL(round) - MUTATE_LDBL(floor) - MUTATE_LDBL(lround) - MUTATE_LDBL(llround) - MUTATE_LDBL(lrint) - MUTATE_LDBL(llrint) - MUTATE_LDBL(fmod) - MUTATE_LDBL(modf) - MUTATE_LDBL(nan) - MUTATE_LDBL(nans) - MUTATE_LDBL(inf) - MUTATE_LDBL(fma) - MUTATE_LDBL(sin) - MUTATE_LDBL(cos) - MUTATE_LDBL(tan) - MUTATE_LDBL(sinh) - MUTATE_LDBL(cosh) - MUTATE_LDBL(tanh) - MUTATE_LDBL(asin) - MUTATE_LDBL(acos) - MUTATE_LDBL(atan) - MUTATE_LDBL(asinh) - MUTATE_LDBL(acosh) - MUTATE_LDBL(atanh) - MUTATE_LDBL(atan2) - MUTATE_LDBL(erf) - MUTATE_LDBL(erfc) - MUTATE_LDBL(ldexp) - MUTATE_LDBL(frexp) - MUTATE_LDBL(huge_val) - MUTATE_LDBL(copysign) - MUTATE_LDBL(nextafter) - MUTATE_LDBL(nexttoward) - MUTATE_LDBL(remainder) - MUTATE_LDBL(remquo) - MUTATE_LDBL(scalbln) - MUTATE_LDBL(scalbn) - MUTATE_LDBL(tgamma) - MUTATE_LDBL(lgamma) + MUTATE_LDBL(sqrt) + MUTATE_LDBL(cbrt) + MUTATE_LDBL(fabs) + MUTATE_LDBL(log) + MUTATE_LDBL(log2) + MUTATE_LDBL(log10) + MUTATE_LDBL(log1p) + MUTATE_LDBL(logb) + MUTATE_LDBL(exp) + MUTATE_LDBL(exp2) + MUTATE_LDBL(expm1) + MUTATE_LDBL(fdim) + MUTATE_LDBL(hypot) + MUTATE_LDBL(ilogb) + MUTATE_LDBL(pow) + MUTATE_LDBL(fmin) + MUTATE_LDBL(fmax) + MUTATE_LDBL(ceil) + MUTATE_LDBL(trunc) + MUTATE_LDBL(rint) + MUTATE_LDBL(nearbyint) + MUTATE_LDBL(round) + MUTATE_LDBL(floor) + MUTATE_LDBL(lround) + MUTATE_LDBL(llround) + MUTATE_LDBL(lrint) + MUTATE_LDBL(llrint) + MUTATE_LDBL(fmod) + MUTATE_LDBL(modf) + MUTATE_LDBL(nan) + MUTATE_LDBL(nans) + MUTATE_LDBL(inf) + MUTATE_LDBL(fma) + MUTATE_LDBL(sin) + MUTATE_LDBL(cos) + MUTATE_LDBL(tan) + MUTATE_LDBL(sinh) + MUTATE_LDBL(cosh) + MUTATE_LDBL(tanh) + MUTATE_LDBL(asin) + MUTATE_LDBL(acos) + MUTATE_LDBL(atan) + MUTATE_LDBL(asinh) + MUTATE_LDBL(acosh) + MUTATE_LDBL(atanh) + MUTATE_LDBL(atan2) + MUTATE_LDBL(erf) + MUTATE_LDBL(erfc) + MUTATE_LDBL(ldexp) + MUTATE_LDBL(frexp) + MUTATE_LDBL(huge_val) + MUTATE_LDBL(copysign) + MUTATE_LDBL(nextafter) + MUTATE_LDBL(nexttoward) + MUTATE_LDBL(remainder) + 
MUTATE_LDBL(remquo) + MUTATE_LDBL(scalbln) + MUTATE_LDBL(scalbn) + MUTATE_LDBL(tgamma) + MUTATE_LDBL(lgamma) #undef MUTATE_LDBL default: return BuiltinID; @@ -2172,11 +2167,11 @@ if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) && !Result.hasSideEffects()) { if (Result.Val.isInt()) - return RValue::get(llvm::ConstantInt::get(getLLVMContext(), - Result.Val.getInt())); + return RValue::get( + llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt())); if (Result.Val.isFloat()) - return RValue::get(llvm::ConstantFP::get(getLLVMContext(), - Result.Val.getFloat())); + return RValue::get( + llvm::ConstantFP::get(getLLVMContext(), Result.Val.getFloat())); } // If current long-double semantics is IEEE 128-bit, replace math builtins @@ -2217,9 +2212,8 @@ case Builtin::BI__builtin_ceilf16: case Builtin::BI__builtin_ceill: case Builtin::BI__builtin_ceilf128: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, - Intrinsic::ceil, - Intrinsic::experimental_constrained_ceil)); + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( + *this, E, Intrinsic::ceil, Intrinsic::experimental_constrained_ceil)); case Builtin::BIcopysign: case Builtin::BIcopysignf: @@ -2239,9 +2233,8 @@ case Builtin::BI__builtin_cosf16: case Builtin::BI__builtin_cosl: case Builtin::BI__builtin_cosf128: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, - Intrinsic::cos, - Intrinsic::experimental_constrained_cos)); + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( + *this, E, Intrinsic::cos, Intrinsic::experimental_constrained_cos)); case Builtin::BIexp: case Builtin::BIexpf: @@ -2251,9 +2244,8 @@ case Builtin::BI__builtin_expf16: case Builtin::BI__builtin_expl: case Builtin::BI__builtin_expf128: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, - Intrinsic::exp, - Intrinsic::experimental_constrained_exp)); + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( + *this, E, Intrinsic::exp, Intrinsic::experimental_constrained_exp)); case Builtin::BIexp2: case Builtin::BIexp2f: @@ -2263,9 +2255,8 @@ case Builtin::BI__builtin_exp2f16: case Builtin::BI__builtin_exp2l: case Builtin::BI__builtin_exp2f128: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, - Intrinsic::exp2, - Intrinsic::experimental_constrained_exp2)); + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( + *this, E, Intrinsic::exp2, Intrinsic::experimental_constrained_exp2)); case Builtin::BIfabs: case Builtin::BIfabsf: @@ -2285,9 +2276,9 @@ case Builtin::BI__builtin_floorf16: case Builtin::BI__builtin_floorl: case Builtin::BI__builtin_floorf128: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, - Intrinsic::floor, - Intrinsic::experimental_constrained_floor)); + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( + *this, E, Intrinsic::floor, + Intrinsic::experimental_constrained_floor)); case Builtin::BIfma: case Builtin::BIfmaf: @@ -2297,9 +2288,8 @@ case Builtin::BI__builtin_fmaf16: case Builtin::BI__builtin_fmal: case Builtin::BI__builtin_fmaf128: - return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E, - Intrinsic::fma, - Intrinsic::experimental_constrained_fma)); + return RValue::get(emitTernaryMaybeConstrainedFPBuiltin( + *this, E, Intrinsic::fma, Intrinsic::experimental_constrained_fma)); case Builtin::BIfmax: case Builtin::BIfmaxf: @@ -2309,9 +2299,9 @@ case Builtin::BI__builtin_fmaxf16: case Builtin::BI__builtin_fmaxl: case Builtin::BI__builtin_fmaxf128: - return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, - 
Intrinsic::maxnum, - Intrinsic::experimental_constrained_maxnum)); + return RValue::get(emitBinaryMaybeConstrainedFPBuiltin( + *this, E, Intrinsic::maxnum, + Intrinsic::experimental_constrained_maxnum)); case Builtin::BIfmin: case Builtin::BIfminf: @@ -2321,9 +2311,9 @@ case Builtin::BI__builtin_fminf16: case Builtin::BI__builtin_fminl: case Builtin::BI__builtin_fminf128: - return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, - Intrinsic::minnum, - Intrinsic::experimental_constrained_minnum)); + return RValue::get(emitBinaryMaybeConstrainedFPBuiltin( + *this, E, Intrinsic::minnum, + Intrinsic::experimental_constrained_minnum)); // fmod() is a special-case. It maps to the frem instruction rather than an // LLVM intrinsic. @@ -2349,9 +2339,8 @@ case Builtin::BI__builtin_logf16: case Builtin::BI__builtin_logl: case Builtin::BI__builtin_logf128: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, - Intrinsic::log, - Intrinsic::experimental_constrained_log)); + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( + *this, E, Intrinsic::log, Intrinsic::experimental_constrained_log)); case Builtin::BIlog10: case Builtin::BIlog10f: @@ -2361,9 +2350,9 @@ case Builtin::BI__builtin_log10f16: case Builtin::BI__builtin_log10l: case Builtin::BI__builtin_log10f128: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, - Intrinsic::log10, - Intrinsic::experimental_constrained_log10)); + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( + *this, E, Intrinsic::log10, + Intrinsic::experimental_constrained_log10)); case Builtin::BIlog2: case Builtin::BIlog2f: @@ -2373,9 +2362,8 @@ case Builtin::BI__builtin_log2f16: case Builtin::BI__builtin_log2l: case Builtin::BI__builtin_log2f128: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, - Intrinsic::log2, - Intrinsic::experimental_constrained_log2)); + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( + *this, E, Intrinsic::log2, Intrinsic::experimental_constrained_log2)); case Builtin::BInearbyint: case Builtin::BInearbyintf: @@ -2384,9 +2372,9 @@ case Builtin::BI__builtin_nearbyintf: case Builtin::BI__builtin_nearbyintl: case Builtin::BI__builtin_nearbyintf128: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, - Intrinsic::nearbyint, - Intrinsic::experimental_constrained_nearbyint)); + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( + *this, E, Intrinsic::nearbyint, + Intrinsic::experimental_constrained_nearbyint)); case Builtin::BIpow: case Builtin::BIpowf: @@ -2396,9 +2384,8 @@ case Builtin::BI__builtin_powf16: case Builtin::BI__builtin_powl: case Builtin::BI__builtin_powf128: - return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E, - Intrinsic::pow, - Intrinsic::experimental_constrained_pow)); + return RValue::get(emitBinaryMaybeConstrainedFPBuiltin( + *this, E, Intrinsic::pow, Intrinsic::experimental_constrained_pow)); case Builtin::BIrint: case Builtin::BIrintf: @@ -2408,9 +2395,8 @@ case Builtin::BI__builtin_rintf16: case Builtin::BI__builtin_rintl: case Builtin::BI__builtin_rintf128: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, - Intrinsic::rint, - Intrinsic::experimental_constrained_rint)); + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( + *this, E, Intrinsic::rint, Intrinsic::experimental_constrained_rint)); case Builtin::BIround: case Builtin::BIroundf: @@ -2420,9 +2406,9 @@ case Builtin::BI__builtin_roundf16: case Builtin::BI__builtin_roundl: case Builtin::BI__builtin_roundf128: - return 
RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, - Intrinsic::round, - Intrinsic::experimental_constrained_round)); + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( + *this, E, Intrinsic::round, + Intrinsic::experimental_constrained_round)); case Builtin::BIsin: case Builtin::BIsinf: @@ -2432,9 +2418,8 @@ case Builtin::BI__builtin_sinf16: case Builtin::BI__builtin_sinl: case Builtin::BI__builtin_sinf128: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, - Intrinsic::sin, - Intrinsic::experimental_constrained_sin)); + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( + *this, E, Intrinsic::sin, Intrinsic::experimental_constrained_sin)); case Builtin::BIsqrt: case Builtin::BIsqrtf: @@ -2444,9 +2429,8 @@ case Builtin::BI__builtin_sqrtf16: case Builtin::BI__builtin_sqrtl: case Builtin::BI__builtin_sqrtf128: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, - Intrinsic::sqrt, - Intrinsic::experimental_constrained_sqrt)); + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( + *this, E, Intrinsic::sqrt, Intrinsic::experimental_constrained_sqrt)); case Builtin::BItrunc: case Builtin::BItruncf: @@ -2456,9 +2440,9 @@ case Builtin::BI__builtin_truncf16: case Builtin::BI__builtin_truncl: case Builtin::BI__builtin_truncf128: - return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E, - Intrinsic::trunc, - Intrinsic::experimental_constrained_trunc)); + return RValue::get(emitUnaryMaybeConstrainedFPBuiltin( + *this, E, Intrinsic::trunc, + Intrinsic::experimental_constrained_trunc)); case Builtin::BIlround: case Builtin::BIlroundf: @@ -2510,7 +2494,8 @@ } switch (BuiltinIDIfNoAsmLabel) { - default: break; + default: + break; case Builtin::BI__builtin___CFStringMakeConstantString: case Builtin::BI__builtin___NSStringMakeConstantString: return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType())); @@ -2578,13 +2563,15 @@ // is available as debuginfo is needed to preserve user-level // access pattern. 
if (!getDebugInfo()) { - CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g"); + CGM.Error(E->getExprLoc(), + "using builtin_preserve_access_index() without -g"); return RValue::get(EmitScalarExpr(E->getArg(0))); } // Nested builtin_preserve_access_index() not supported if (IsInPreservedAIRegion) { - CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported"); + CGM.Error(E->getExprLoc(), + "nested builtin_preserve_access_index() not supported"); return RValue::get(EmitScalarExpr(E->getArg(0))); } @@ -2620,8 +2607,8 @@ Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue); Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()}); Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1)); - Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, - "cast"); + Result = + Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast"); return RValue::get(Result); } case Builtin::BI__builtin_ctzs: @@ -2637,8 +2624,8 @@ Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); if (Result->getType() != ResultType) - Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, - "cast"); + Result = + Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast"); return RValue::get(Result); } case Builtin::BI__builtin_clzs: @@ -2654,8 +2641,8 @@ Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef()); Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef}); if (Result->getType() != ResultType) - Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, - "cast"); + Result = + Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast"); return RValue::get(Result); } case Builtin::BI__builtin_ffs: @@ -2675,8 +2662,8 @@ Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero"); Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs"); if (Result->getType() != ResultType) - Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, - "cast"); + Result = + Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast"); return RValue::get(Result); } case Builtin::BI__builtin_parity: @@ -2692,8 +2679,8 @@ Value *Tmp = Builder.CreateCall(F, ArgValue); Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1)); if (Result->getType() != ResultType) - Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, - "cast"); + Result = + Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast"); return RValue::get(Result); } case Builtin::BI__lzcnt16: @@ -2707,8 +2694,8 @@ llvm::Type *ResultType = ConvertType(E->getType()); Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()}); if (Result->getType() != ResultType) - Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, - "cast"); + Result = + Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast"); return RValue::get(Result); } case Builtin::BI__popcnt16: @@ -2725,8 +2712,8 @@ llvm::Type *ResultType = ConvertType(E->getType()); Value *Result = Builder.CreateCall(F, ArgValue); if (Result->getType() != ResultType) - Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, - "cast"); + Result = + Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast"); return RValue::get(Result); } case Builtin::BI__builtin_unpredictable: { @@ -2784,7 +2771,7 @@ if (PtrValue->getType() != VoidPtrTy) PtrValue = 
EmitCastToVoidPtr(PtrValue); Value *OffsetValue = - (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr; + (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr; Value *AlignmentValue = EmitScalarExpr(E->getArg(1)); ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue); @@ -2901,7 +2888,7 @@ CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType)); Value *Result = Builder.CreateCall(F, ArgValue); if (Result->getType() != ResultType) - Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false); + Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false); return RValue::get(Result); } case Builtin::BI__builtin_dynamic_object_size: @@ -2919,10 +2906,10 @@ case Builtin::BI__builtin_prefetch: { Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0)); // FIXME: Technically these constants should of type 'int', yes? - RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) : - llvm::ConstantInt::get(Int32Ty, 0); - Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : - llvm::ConstantInt::get(Int32Ty, 3); + RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) + : llvm::ConstantInt::get(Int32Ty, 0); + Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) + : llvm::ConstantInt::get(Int32Ty, 3); Value *Data = llvm::ConstantInt::get(Int32Ty, 1); Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType()); return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data})); @@ -2960,12 +2947,12 @@ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E); Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi, Src0->getType()); - return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 })); + return RValue::get(Builder.CreateConstrainedFPCall(F, {Src0, Src1})); } - Function *F = CGM.getIntrinsic(Intrinsic::powi, - { Src0->getType(), Src1->getType() }); - return RValue::get(Builder.CreateCall(F, { Src0, Src1 })); + Function *F = + CGM.getIntrinsic(Intrinsic::powi, {Src0->getType(), Src1->getType()}); + return RValue::get(Builder.CreateCall(F, {Src0, Src1})); } case Builtin::BI__builtin_isgreater: case Builtin::BI__builtin_isgreaterequal: @@ -2980,7 +2967,8 @@ Value *RHS = EmitScalarExpr(E->getArg(1)); switch (BuiltinID) { - default: llvm_unreachable("Unknown ordered comparison"); + default: + llvm_unreachable("Unknown ordered comparison"); case Builtin::BI__builtin_isgreater: LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp"); break; @@ -3057,7 +3045,7 @@ emitUnaryBuiltin(*this, E, llvm::Intrinsic::ceil, "elt.ceil")); case Builtin::BI__builtin_elementwise_cos: return RValue::get( - emitUnaryBuiltin(*this, E, llvm::Intrinsic::cos, "elt.cos")); + emitUnaryBuiltin(*this, E, llvm::Intrinsic::cos, "elt.cos")); case Builtin::BI__builtin_elementwise_floor: return RValue::get( emitUnaryBuiltin(*this, E, llvm::Intrinsic::floor, "elt.floor")); @@ -3066,8 +3054,8 @@ "elt.roundeven")); case Builtin::BI__builtin_elementwise_sin: return RValue::get( - emitUnaryBuiltin(*this, E, llvm::Intrinsic::sin, "elt.sin")); - + emitUnaryBuiltin(*this, E, llvm::Intrinsic::sin, "elt.sin")); + case Builtin::BI__builtin_elementwise_trunc: return RValue::get( emitUnaryBuiltin(*this, E, llvm::Intrinsic::trunc, "elt.trunc")); @@ -3195,8 +3183,7 @@ Value *Result = MB.CreateColumnMajorLoad( Src.getElementType(), Src.getPointer(), Align(Src.getAlignment().getQuantity()), Stride, IsVolatile, - ResultTy->getNumRows(), ResultTy->getNumColumns(), - "matrix"); + ResultTy->getNumRows(), ResultTy->getNumColumns(), "matrix"); return
RValue::get(Result); } @@ -3292,13 +3279,12 @@ Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq"); Value *Abs = EmitFAbs(*this, V); - Value *IsLessThanInf = - Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf"); + Value *IsLessThanInf = Builder.CreateFCmpULT( + Abs, ConstantFP::getInfinity(V->getType()), "isinf"); APFloat Smallest = APFloat::getSmallestNormalized( - getContext().getFloatTypeSemantics(E->getArg(0)->getType())); - Value *IsNormal = - Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest), - "isnormal"); + getContext().getFloatTypeSemantics(E->getArg(0)->getType())); + Value *IsNormal = Builder.CreateFCmpUGE( + Abs, ConstantFP::get(V->getContext(), Smallest), "isnormal"); V = Builder.CreateAnd(Eq, IsLessThanInf, "and"); V = Builder.CreateAnd(V, IsNormal, "and"); return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()))); @@ -3310,8 +3296,8 @@ llvm::Type *ResultType = ConvertType(E->getType()); Value *Result = Builder.CreateCall(F); if (Result->getType() != ResultType) - Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, - "cast"); + Result = + Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast"); return RValue::get(Result); } @@ -3325,14 +3311,13 @@ BasicBlock *Begin = Builder.GetInsertBlock(); BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn); Builder.SetInsertPoint(End); - PHINode *Result = - Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4, - "fpclassify_result"); + PHINode *Result = Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4, + "fpclassify_result"); // if (V==0) return FP_ZERO Builder.SetInsertPoint(Begin); - Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty), - "iszero"); + Value *IsZero = + Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty), "iszero"); Value *ZeroLiteral = EmitScalarExpr(E->getArg(4)); BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn); Builder.CreateCondBr(IsZero, End, NotZero); @@ -3349,9 +3334,8 @@ // if (fabs(V) == infinity) return FP_INFINITY Builder.SetInsertPoint(NotNan); Value *VAbs = EmitFAbs(*this, V); - Value *IsInf = - Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()), - "isinf"); + Value *IsInf = Builder.CreateFCmpOEQ( + VAbs, ConstantFP::getInfinity(V->getType()), "isinf"); Value *InfLiteral = EmitScalarExpr(E->getArg(1)); BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn); Builder.CreateCondBr(IsInf, End, NotInf); @@ -3361,12 +3345,10 @@ Builder.SetInsertPoint(NotInf); APFloat Smallest = APFloat::getSmallestNormalized( getContext().getFloatTypeSemantics(E->getArg(5)->getType())); - Value *IsNormal = - Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest), - "isnormal"); - Value *NormalResult = - Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)), - EmitScalarExpr(E->getArg(3))); + Value *IsNormal = Builder.CreateFCmpUGE( + VAbs, ConstantFP::get(V->getContext(), Smallest), "isnormal"); + Value *NormalResult = Builder.CreateSelect( + IsNormal, EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3))); Builder.CreateBr(End); Result->addIncoming(NormalResult, NotInf); @@ -3475,8 +3457,8 @@ Address DestAddr = EmitPointerWithAlignment(E->getArg(0)); Address SrcAddr = EmitPointerWithAlignment(E->getArg(1)); Value *SizeVal = EmitScalarExpr(E->getArg(2)); - CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, - DestAddr, SrcAddr, SizeVal); + CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestAddr, SrcAddr, + SizeVal); 
return RValue::get(DestAddr.getPointer()); } @@ -3512,8 +3494,8 @@ case Builtin::BImemset: case Builtin::BI__builtin_memset: { Address Dest = EmitPointerWithAlignment(E->getArg(0)); - Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), - Builder.getInt8Ty()); + Value *ByteVal = + Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty()); Value *SizeVal = EmitScalarExpr(E->getArg(2)); EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD, 0); @@ -3542,8 +3524,8 @@ if (Size.ugt(DstSize)) break; Address Dest = EmitPointerWithAlignment(E->getArg(0)); - Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), - Builder.getInt8Ty()); + Value *ByteVal = + Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty()); Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size); Builder.CreateMemSet(Dest, ByteVal, SizeVal, false); return RValue::get(Dest.getPointer()); @@ -3663,12 +3645,12 @@ int32_t Offset = 0; Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa); - return RValue::get(Builder.CreateCall(F, - llvm::ConstantInt::get(Int32Ty, Offset))); + return RValue::get( + Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, Offset))); } case Builtin::BI__builtin_return_address: { - Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0), - getContext().UnsignedIntTy); + Value *Depth = ConstantEmitter(*this).emitAbstract( + E->getArg(0), getContext().UnsignedIntTy); Function *F = CGM.getIntrinsic(Intrinsic::returnaddress); return RValue::get(Builder.CreateCall(F, Depth)); } @@ -3677,8 +3659,8 @@ return RValue::get(Builder.CreateCall(F, Builder.getInt32(0))); } case Builtin::BI__builtin_frame_address: { - Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0), - getContext().UnsignedIntTy); + Value *Depth = ConstantEmitter(*this).emitAbstract( + E->getArg(0), getContext().UnsignedIntTy); Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy); return RValue::get(Builder.CreateCall(F, Depth)); } @@ -3693,8 +3675,7 @@ return RValue::get(Result); } case Builtin::BI__builtin_dwarf_sp_column: { - llvm::IntegerType *Ty - = cast<llvm::IntegerType>(ConvertType(E->getType())); + llvm::IntegerType *Ty = cast<llvm::IntegerType>(ConvertType(E->getType())); int Column = getTargetHooks().getDwarfEHStackPointer(CGM); if (Column == -1) { CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column"); @@ -3944,12 +3925,11 @@ Value *Ptr = EmitScalarExpr(E->getArg(0)); QualType ElTy = E->getArg(0)->getType()->getPointeeType(); CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy); - llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(), - StoreSize.getQuantity() * 8); + llvm::Type *ITy = + llvm::IntegerType::get(getLLVMContext(), StoreSize.getQuantity() * 8); Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo()); - llvm::StoreInst *Store = - Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr, - StoreSize); + llvm::StoreInst *Store = Builder.CreateAlignedStore( + llvm::Constant::getNullValue(ITy), Ptr, StoreSize); Store->setAtomic(llvm::AtomicOrdering::Release); return RValue::get(nullptr); } @@ -3989,8 +3969,8 @@ CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args); llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo); llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName); - return EmitCall(FuncInfo, CGCallee::forDirect(Func), - ReturnValueSlot(), Args); + return EmitCall(FuncInfo, CGCallee::forDirect(Func), ReturnValueSlot(), + Args); } case
Builtin::BI__atomic_test_and_set: { @@ -4041,12 +4021,9 @@ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); llvm::BasicBlock *BBs[5] = { - createBasicBlock("monotonic", CurFn), - createBasicBlock("acquire", CurFn), - createBasicBlock("release", CurFn), - createBasicBlock("acqrel", CurFn), - createBasicBlock("seqcst", CurFn) - }; + createBasicBlock("monotonic", CurFn), + createBasicBlock("acquire", CurFn), createBasicBlock("release", CurFn), + createBasicBlock("acqrel", CurFn), createBasicBlock("seqcst", CurFn)}; llvm::AtomicOrdering Orders[5] = { llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire, llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease, @@ -4095,10 +4072,10 @@ default: // invalid order Store->setOrdering(llvm::AtomicOrdering::Monotonic); break; - case 3: // memory_order_release + case 3: // memory_order_release Store->setOrdering(llvm::AtomicOrdering::Release); break; - case 5: // memory_order_seq_cst + case 5: // memory_order_seq_cst Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent); break; } @@ -4107,11 +4084,9 @@ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); - llvm::BasicBlock *BBs[3] = { - createBasicBlock("monotonic", CurFn), - createBasicBlock("release", CurFn), - createBasicBlock("seqcst", CurFn) - }; + llvm::BasicBlock *BBs[3] = {createBasicBlock("monotonic", CurFn), + createBasicBlock("release", CurFn), + createBasicBlock("seqcst", CurFn)}; llvm::AtomicOrdering Orders[3] = { llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release, llvm::AtomicOrdering::SequentiallyConsistent}; @@ -4151,17 +4126,17 @@ case 0: // memory_order_relaxed default: // invalid order break; - case 1: // memory_order_consume - case 2: // memory_order_acquire + case 1: // memory_order_consume + case 2: // memory_order_acquire Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID); break; - case 3: // memory_order_release + case 3: // memory_order_release Builder.CreateFence(llvm::AtomicOrdering::Release, SSID); break; - case 4: // memory_order_acq_rel + case 4: // memory_order_acq_rel Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID); break; - case 5: // memory_order_seq_cst + case 5: // memory_order_seq_cst Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID); break; } @@ -4237,8 +4212,8 @@ } case Builtin::BI__builtin_annotation: { llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0)); - llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation, - AnnVal->getType()); + llvm::Function *F = + CGM.getIntrinsic(llvm::Intrinsic::annotation, AnnVal->getType()); // Get the annotation string, go through casts. Sema requires this to be a // non-wide string literal, potentially casted, so the cast<> is safe. @@ -4284,7 +4259,8 @@ // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow. llvm::Intrinsic::ID IntrinsicId; switch (BuiltinID) { - default: llvm_unreachable("Unknown multiprecision builtin id."); + default: + llvm_unreachable("Unknown multiprecision builtin id."); case Builtin::BI__builtin_addcb: case Builtin::BI__builtin_addcs: case Builtin::BI__builtin_addc: @@ -4303,13 +4279,12 @@ // Construct our resulting LLVM IR expression. 
llvm::Value *Carry1; - llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId, - X, Y, Carry1); + llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry1); llvm::Value *Carry2; - llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId, - Sum1, Carryin, Carry2); - llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2), - X->getType()); + llvm::Value *Sum2 = + EmitOverflowIntrinsic(*this, IntrinsicId, Sum1, Carryin, Carry2); + llvm::Value *CarryOut = + Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2), X->getType()); Builder.CreateStore(CarryOut, CarryOutPtr); return RValue::get(Sum2); } @@ -4403,7 +4378,7 @@ // Finally, store the result using the pointer. bool isVolatile = - ResultArg->getType()->getPointeeType().isVolatileQualified(); + ResultArg->getType()->getPointeeType().isVolatileQualified(); Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile); return RValue::get(Overflow); @@ -4438,7 +4413,8 @@ // Decide which of the overflow intrinsics we are lowering to: llvm::Intrinsic::ID IntrinsicId; switch (BuiltinID) { - default: llvm_unreachable("Unknown overflow builtin id."); + default: + llvm_unreachable("Unknown overflow builtin id."); case Builtin::BI__builtin_uadd_overflow: case Builtin::BI__builtin_uaddl_overflow: case Builtin::BI__builtin_uaddll_overflow: @@ -4471,7 +4447,6 @@ break; } - llvm::Value *Carry; llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry); Builder.CreateStore(Sum, SumOutPtr); @@ -4505,9 +4480,8 @@ case Builtin::BI__builtin_call_with_static_chain: { const CallExpr *Call = cast<CallExpr>(E->getArg(0)); const Expr *Chain = E->getArg(1); - return EmitCall(Call->getCallee()->getType(), - EmitCallee(Call->getCallee()), Call, ReturnValue, - EmitScalarExpr(Chain)); + return EmitCall(Call->getCallee()->getType(), EmitCallee(Call->getCallee()), + Call, ReturnValue, EmitScalarExpr(Chain)); } case Builtin::BI_InterlockedExchange8: case Builtin::BI_InterlockedExchange16: @@ -4518,32 +4492,31 @@ case Builtin::BI_InterlockedCompareExchangePointer: case Builtin::BI_InterlockedCompareExchangePointer_nf: { llvm::Type *RTy; - llvm::IntegerType *IntType = - IntegerType::get(getLLVMContext(), - getContext().getTypeSize(E->getType())); + llvm::IntegerType *IntType = IntegerType::get( + getLLVMContext(), getContext().getTypeSize(E->getType())); llvm::Type *IntPtrType = IntType->getPointerTo(); llvm::Value *Destination = - Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType); + Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType); llvm::Value *Exchange = EmitScalarExpr(E->getArg(1)); RTy = Exchange->getType(); Exchange = Builder.CreatePtrToInt(Exchange, IntType); llvm::Value *Comparand = - Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType); + Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType); auto Ordering = - BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ? - AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent; + BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf + ?
AtomicOrdering::Monotonic + : AtomicOrdering::SequentiallyConsistent; auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange, Ordering, Ordering); Result->setVolatile(true); - return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result, - 0), - RTy)); + return RValue::get( + Builder.CreateIntToPtr(Builder.CreateExtractValue(Result, 0), RTy)); } case Builtin::BI_InterlockedCompareExchange8: case Builtin::BI_InterlockedCompareExchange16: @@ -4718,8 +4691,8 @@ const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4" : "__write_pipe_4"; - llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy, - Int32Ty, Int32Ty}; + llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, + I8PTy, Int32Ty, Int32Ty}; Value *Arg2 = EmitScalarExpr(E->getArg(2)), *Arg3 = EmitScalarExpr(E->getArg(3)); llvm::FunctionType *FTy = llvm::FunctionType::get( @@ -4842,11 +4815,11 @@ case Builtin::BIto_local: case Builtin::BIto_private: { auto Arg0 = EmitScalarExpr(E->getArg(0)); - auto NewArgT = llvm::PointerType::get(Int8Ty, - CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic)); - auto NewRetT = llvm::PointerType::get(Int8Ty, - CGM.getContext().getTargetAddressSpace( - E->getType()->getPointeeType().getAddressSpace())); + auto NewArgT = llvm::PointerType::get( + Int8Ty, CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic)); + auto NewRetT = llvm::PointerType::get( + Int8Ty, CGM.getContext().getTargetAddressSpace( + E->getType()->getPointeeType().getAddressSpace())); auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false); llvm::Value *NewArg; if (Arg0->getType()->getPointerAddressSpace() != @@ -4857,8 +4830,8 @@ auto NewName = std::string("__") + E->getDirectCallee()->getName().str(); auto NewCall = EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg}); - return RValue::get(Builder.CreateBitOrPointerCast(NewCall, - ConvertType(E->getType()))); + return RValue::get( + Builder.CreateBitOrPointerCast(NewCall, ConvertType(E->getType()))); } // OpenCL v2.0, s6.13.17 - Enqueue kernel function. @@ -4924,8 +4897,8 @@ auto *Zero = llvm::ConstantInt::get(IntTy, 0); for (unsigned I = First; I < NumArgs; ++I) { auto *Index = llvm::ConstantInt::get(IntTy, I - First); - auto *GEP = Builder.CreateGEP(Tmp.getElementType(), TmpPtr, - {Zero, Index}); + auto *GEP = + Builder.CreateGEP(Tmp.getElementType(), TmpPtr, {Zero, Index}); if (I == First) ElemPtr = GEP; auto *V = @@ -4983,8 +4956,8 @@ EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy); } else { EventWaitList = E->getArg(4)->getType()->isArrayType() - ? EmitArrayToPointerDecay(E->getArg(4)).getPointer() - : EmitScalarExpr(E->getArg(4)); + ? EmitArrayToPointerDecay(E->getArg(4)).getPointer() + : EmitScalarExpr(E->getArg(4)); // Convert to generic address space. EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy); } @@ -5258,8 +5231,8 @@ // If this is a predefined lib function (e.g. malloc), emit the call // using exactly the normal call path. if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID)) - return emitLibraryCall(*this, FD, E, - cast<llvm::Constant>(EmitScalarExpr(E->getCallee()))); + return emitLibraryCall( + *this, FD, E, cast<llvm::Constant>(EmitScalarExpr(E->getCallee()))); // Check that a call to a target specific builtin has the correct target // features. @@ -5268,7 +5241,8 @@ // can move this up to the beginning of the function.
checkTargetFeatures(E, FD); - if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID)) + if (unsigned VectorWidth = + getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID)) LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth); // See if we have a target specific intrinsic. @@ -5286,7 +5260,7 @@ } if (IntrinsicID != Intrinsic::not_intrinsic) { - SmallVector<Value*, 16> Args; + SmallVector<Value *, 16> Args; // Find out if any arguments are required to be integer constant // expressions. @@ -5320,8 +5294,8 @@ if (PtrTy->getAddressSpace() != ArgValue->getType()->getPointerAddressSpace()) { ArgValue = Builder.CreateAddrSpaceCast( - ArgValue, - ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace())); + ArgValue, + ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace())); } } @@ -5349,9 +5323,10 @@ if (RetTy != V->getType()) { // XXX - vector of pointers? if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) { - if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) { + if (PtrTy->getAddressSpace() != + V->getType()->getPointerAddressSpace()) { V = Builder.CreateAddrSpaceCast( - V, V->getType()->getPointerTo(PtrTy->getAddressSpace())); + V, V->getType()->getPointerTo(PtrTy->getAddressSpace())); } } @@ -5474,7 +5449,8 @@ return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); case NeonTypeFlags::BFloat16: if (AllowBFloatArgsAndRet) - return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad)); + return llvm::FixedVectorType::get(CGF->BFloatTy, + V1Ty ? 1 : (4 << IsQuad)); else return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad)); case NeonTypeFlags::Float16: @@ -5526,9 +5502,9 @@ return EmitNeonSplat(V, C, EC); } -Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops, - const char *name, - unsigned shift, bool rightshift) { +Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value *> &Ops, + const char *name, unsigned shift, + bool rightshift) { unsigned j = 0; for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end(); ai != ae; ++ai, ++j) { @@ -5624,781 +5600,883 @@ }; } // end anonymous namespace -#define NEONMAP0(NameBase) \ - { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 } - -#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ - { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \ - Intrinsic::LLVMIntrinsic, 0, TypeModifier } - -#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \ - { #NameBase, NEON:: BI__builtin_neon_ ## NameBase, \ - Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \ - TypeModifier } - -static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = { - NEONMAP1(__a32_vcvt_bf16_v, arm_neon_vcvtfp2bf, 0), - NEONMAP0(splat_lane_v), - NEONMAP0(splat_laneq_v), - NEONMAP0(splatq_lane_v), - NEONMAP0(splatq_laneq_v), - NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts), - NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts), - NEONMAP1(vabs_v, arm_neon_vabs, 0), - NEONMAP1(vabsq_v, arm_neon_vabs, 0), - NEONMAP0(vadd_v), - NEONMAP0(vaddhn_v), - NEONMAP0(vaddq_v), - NEONMAP1(vaesdq_v, arm_neon_aesd, 0), - NEONMAP1(vaeseq_v, arm_neon_aese, 0), - NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0), - NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0), - NEONMAP1(vbfdot_v, arm_neon_bfdot, 0), - NEONMAP1(vbfdotq_v, arm_neon_bfdot, 0), - NEONMAP1(vbfmlalbq_v, arm_neon_bfmlalb, 0), - NEONMAP1(vbfmlaltq_v, arm_neon_bfmlalt, 0), - NEONMAP1(vbfmmlaq_v, arm_neon_bfmmla, 0), - NEONMAP1(vbsl_v,
arm_neon_vbsl, AddRetType), - NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType), - NEONMAP1(vcadd_rot270_v, arm_neon_vcadd_rot270, Add1ArgType), - NEONMAP1(vcadd_rot90_v, arm_neon_vcadd_rot90, Add1ArgType), - NEONMAP1(vcaddq_rot270_v, arm_neon_vcadd_rot270, Add1ArgType), - NEONMAP1(vcaddq_rot90_v, arm_neon_vcadd_rot90, Add1ArgType), - NEONMAP1(vcage_v, arm_neon_vacge, 0), - NEONMAP1(vcageq_v, arm_neon_vacge, 0), - NEONMAP1(vcagt_v, arm_neon_vacgt, 0), - NEONMAP1(vcagtq_v, arm_neon_vacgt, 0), - NEONMAP1(vcale_v, arm_neon_vacge, 0), - NEONMAP1(vcaleq_v, arm_neon_vacge, 0), - NEONMAP1(vcalt_v, arm_neon_vacgt, 0), - NEONMAP1(vcaltq_v, arm_neon_vacgt, 0), - NEONMAP0(vceqz_v), - NEONMAP0(vceqzq_v), - NEONMAP0(vcgez_v), - NEONMAP0(vcgezq_v), - NEONMAP0(vcgtz_v), - NEONMAP0(vcgtzq_v), - NEONMAP0(vclez_v), - NEONMAP0(vclezq_v), - NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType), - NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType), - NEONMAP0(vcltz_v), - NEONMAP0(vcltzq_v), - NEONMAP1(vclz_v, ctlz, Add1ArgType), - NEONMAP1(vclzq_v, ctlz, Add1ArgType), - NEONMAP1(vcnt_v, ctpop, Add1ArgType), - NEONMAP1(vcntq_v, ctpop, Add1ArgType), - NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0), - NEONMAP0(vcvt_f16_v), - NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0), - NEONMAP0(vcvt_f32_v), - NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), - NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), - NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0), - NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0), - NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0), - NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0), - NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0), - NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0), - NEONMAP0(vcvt_s16_v), - NEONMAP0(vcvt_s32_v), - NEONMAP0(vcvt_s64_v), - NEONMAP0(vcvt_u16_v), - NEONMAP0(vcvt_u32_v), - NEONMAP0(vcvt_u64_v), - NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0), - NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0), - NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0), - NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0), - NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0), - NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0), - NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0), - NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0), - NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0), - NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0), - NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0), - NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0), - NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0), - NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0), - NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0), - NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0), - NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0), - NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0), - NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0), - NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0), - NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0), - NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0), - NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0), - NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0), - NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0), - NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0), - NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0), - NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0), - NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0), - NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0), - NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0), - NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0), - NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0), - NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0), - NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0), - NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0), - NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0), - 
NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0), - NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0), - NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0), - NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0), - NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0), - NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0), - NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0), - NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0), - NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0), - NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0), - NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0), - NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0), - NEONMAP0(vcvtq_f16_v), - NEONMAP0(vcvtq_f32_v), - NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), - NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), - NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0), - NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0), - NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0), - NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0), - NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0), - NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0), - NEONMAP0(vcvtq_s16_v), - NEONMAP0(vcvtq_s32_v), - NEONMAP0(vcvtq_s64_v), - NEONMAP0(vcvtq_u16_v), - NEONMAP0(vcvtq_u32_v), - NEONMAP0(vcvtq_u64_v), - NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0), - NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0), - NEONMAP0(vext_v), - NEONMAP0(vextq_v), - NEONMAP0(vfma_v), - NEONMAP0(vfmaq_v), - NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts), - NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts), - NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts), - NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts), - NEONMAP0(vld1_dup_v), - NEONMAP1(vld1_v, arm_neon_vld1, 0), - NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0), - NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0), - NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0), - NEONMAP0(vld1q_dup_v), - NEONMAP1(vld1q_v, arm_neon_vld1, 0), - NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0), - NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0), - NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0), - NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0), - NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0), - NEONMAP1(vld2_v, arm_neon_vld2, 0), - NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0), - NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0), - NEONMAP1(vld2q_v, arm_neon_vld2, 0), - NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0), - NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0), - NEONMAP1(vld3_v, arm_neon_vld3, 0), - NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0), - NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0), - NEONMAP1(vld3q_v, arm_neon_vld3, 0), - NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0), - NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0), - NEONMAP1(vld4_v, arm_neon_vld4, 0), - NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0), - NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0), - NEONMAP1(vld4q_v, arm_neon_vld4, 0), - NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts), - NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType), - NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType), - NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts), - NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts), - NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType), - NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType), - NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts), - NEONMAP2(vmmlaq_v, arm_neon_ummla, arm_neon_smmla, 0), - NEONMAP0(vmovl_v), - NEONMAP0(vmovn_v), - NEONMAP1(vmul_v, 
arm_neon_vmulp, Add1ArgType), - NEONMAP0(vmull_v), - NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType), - NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts), - NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts), - NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType), - NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts), - NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts), - NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType), - NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts), - NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts), - NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType), - NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType), - NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts), - NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts), - NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0), - NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0), - NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType), - NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType), - NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType), - NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts), - NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType), - NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType), - NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType), - NEONMAP1(vqrdmlah_v, arm_neon_vqrdmlah, Add1ArgType), - NEONMAP1(vqrdmlahq_v, arm_neon_vqrdmlah, Add1ArgType), - NEONMAP1(vqrdmlsh_v, arm_neon_vqrdmlsh, Add1ArgType), - NEONMAP1(vqrdmlshq_v, arm_neon_vqrdmlsh, Add1ArgType), - NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType), - NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType), - NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts), - NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts), - NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts), - NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts), - NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts), - NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts), - NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0), - NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0), - NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts), - NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts), - NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType), - NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0), - NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0), - NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType), - NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType), - NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts), - NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts), - NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType), - NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType), - NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType), - NEONMAP0(vrndi_v), - NEONMAP0(vrndiq_v), - NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType), - NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType), - NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType), - NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType), - NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType), - NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType), - NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType), - NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType), - 
NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType), - NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts), - NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts), - NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts), - NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts), - NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0), - NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0), - NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType), - NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType), - NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType), - NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0), - NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0), - NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0), - NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0), - NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0), - NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0), - NEONMAP0(vshl_n_v), - NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts), - NEONMAP0(vshll_n_v), - NEONMAP0(vshlq_n_v), - NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts), - NEONMAP0(vshr_n_v), - NEONMAP0(vshrn_n_v), - NEONMAP0(vshrq_n_v), - NEONMAP1(vst1_v, arm_neon_vst1, 0), - NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0), - NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0), - NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0), - NEONMAP1(vst1q_v, arm_neon_vst1, 0), - NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0), - NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0), - NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0), - NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0), - NEONMAP1(vst2_v, arm_neon_vst2, 0), - NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0), - NEONMAP1(vst2q_v, arm_neon_vst2, 0), - NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0), - NEONMAP1(vst3_v, arm_neon_vst3, 0), - NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0), - NEONMAP1(vst3q_v, arm_neon_vst3, 0), - NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0), - NEONMAP1(vst4_v, arm_neon_vst4, 0), - NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0), - NEONMAP1(vst4q_v, arm_neon_vst4, 0), - NEONMAP0(vsubhn_v), - NEONMAP0(vtrn_v), - NEONMAP0(vtrnq_v), - NEONMAP0(vtst_v), - NEONMAP0(vtstq_v), - NEONMAP1(vusdot_v, arm_neon_usdot, 0), - NEONMAP1(vusdotq_v, arm_neon_usdot, 0), - NEONMAP1(vusmmlaq_v, arm_neon_usmmla, 0), - NEONMAP0(vuzp_v), - NEONMAP0(vuzpq_v), - NEONMAP0(vzip_v), - NEONMAP0(vzipq_v) -}; +#define NEONMAP0(NameBase) \ + { #NameBase, NEON::BI__builtin_neon_##NameBase, 0, 0, 0 } + +#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ + { \ +#NameBase, NEON::BI__builtin_neon_##NameBase, Intrinsic::LLVMIntrinsic, 0, \ + TypeModifier \ + } + +#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \ + { \ +#NameBase, NEON::BI__builtin_neon_##NameBase, Intrinsic::LLVMIntrinsic, \ + Intrinsic::AltLLVMIntrinsic, TypeModifier \ + } + +static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap[] = { + NEONMAP1(__a32_vcvt_bf16_v, arm_neon_vcvtfp2bf, 0), + NEONMAP0(splat_lane_v), + NEONMAP0(splat_laneq_v), + NEONMAP0(splatq_lane_v), + NEONMAP0(splatq_laneq_v), + NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, + Add1ArgType | UnsignedAlts), + NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, + Add1ArgType | UnsignedAlts), + NEONMAP1(vabs_v, arm_neon_vabs, 0), + NEONMAP1(vabsq_v, arm_neon_vabs, 0), + NEONMAP0(vadd_v), + NEONMAP0(vaddhn_v), + NEONMAP0(vaddq_v), + NEONMAP1(vaesdq_v, arm_neon_aesd, 0), + NEONMAP1(vaeseq_v, arm_neon_aese, 0), + NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0), + 
NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0), + NEONMAP1(vbfdot_v, arm_neon_bfdot, 0), + NEONMAP1(vbfdotq_v, arm_neon_bfdot, 0), + NEONMAP1(vbfmlalbq_v, arm_neon_bfmlalb, 0), + NEONMAP1(vbfmlaltq_v, arm_neon_bfmlalt, 0), + NEONMAP1(vbfmmlaq_v, arm_neon_bfmmla, 0), + NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType), + NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType), + NEONMAP1(vcadd_rot270_v, arm_neon_vcadd_rot270, Add1ArgType), + NEONMAP1(vcadd_rot90_v, arm_neon_vcadd_rot90, Add1ArgType), + NEONMAP1(vcaddq_rot270_v, arm_neon_vcadd_rot270, Add1ArgType), + NEONMAP1(vcaddq_rot90_v, arm_neon_vcadd_rot90, Add1ArgType), + NEONMAP1(vcage_v, arm_neon_vacge, 0), + NEONMAP1(vcageq_v, arm_neon_vacge, 0), + NEONMAP1(vcagt_v, arm_neon_vacgt, 0), + NEONMAP1(vcagtq_v, arm_neon_vacgt, 0), + NEONMAP1(vcale_v, arm_neon_vacge, 0), + NEONMAP1(vcaleq_v, arm_neon_vacge, 0), + NEONMAP1(vcalt_v, arm_neon_vacgt, 0), + NEONMAP1(vcaltq_v, arm_neon_vacgt, 0), + NEONMAP0(vceqz_v), + NEONMAP0(vceqzq_v), + NEONMAP0(vcgez_v), + NEONMAP0(vcgezq_v), + NEONMAP0(vcgtz_v), + NEONMAP0(vcgtzq_v), + NEONMAP0(vclez_v), + NEONMAP0(vclezq_v), + NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType), + NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType), + NEONMAP0(vcltz_v), + NEONMAP0(vcltzq_v), + NEONMAP1(vclz_v, ctlz, Add1ArgType), + NEONMAP1(vclzq_v, ctlz, Add1ArgType), + NEONMAP1(vcnt_v, ctpop, Add1ArgType), + NEONMAP1(vcntq_v, ctpop, Add1ArgType), + NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0), + NEONMAP0(vcvt_f16_v), + NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0), + NEONMAP0(vcvt_f32_v), + NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), + NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), + NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0), + NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0), + NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0), + NEONMAP0(vcvt_s16_v), + NEONMAP0(vcvt_s32_v), + NEONMAP0(vcvt_s64_v), + NEONMAP0(vcvt_u16_v), + NEONMAP0(vcvt_u32_v), + NEONMAP0(vcvt_u64_v), + NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0), + NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0), + NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0), + NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0), + NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0), + NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0), + NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0), + NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0), + NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0), + NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0), + NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0), + NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0), + NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0), + NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0), + NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0), + NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0), + NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0), + NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0), + NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0), + NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0), + NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0), + NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0), + NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0), + NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0), + NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0), + NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0), + NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0), + NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0), + NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0), + NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0), + NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0), + 
NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0), + NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0), + NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0), + NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0), + NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0), + NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0), + NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0), + NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0), + NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0), + NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0), + NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0), + NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0), + NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0), + NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0), + NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0), + NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0), + NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0), + NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0), + NEONMAP0(vcvtq_f16_v), + NEONMAP0(vcvtq_f32_v), + NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), + NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0), + NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0), + NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0), + NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0), + NEONMAP0(vcvtq_s16_v), + NEONMAP0(vcvtq_s32_v), + NEONMAP0(vcvtq_s64_v), + NEONMAP0(vcvtq_u16_v), + NEONMAP0(vcvtq_u32_v), + NEONMAP0(vcvtq_u64_v), + NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0), + NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0), + NEONMAP0(vext_v), + NEONMAP0(vextq_v), + NEONMAP0(vfma_v), + NEONMAP0(vfmaq_v), + NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, + Add1ArgType | UnsignedAlts), + NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, + Add1ArgType | UnsignedAlts), + NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, + Add1ArgType | UnsignedAlts), + NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, + Add1ArgType | UnsignedAlts), + NEONMAP0(vld1_dup_v), + NEONMAP1(vld1_v, arm_neon_vld1, 0), + NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0), + NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0), + NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0), + NEONMAP0(vld1q_dup_v), + NEONMAP1(vld1q_v, arm_neon_vld1, 0), + NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0), + NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0), + NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0), + NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0), + NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0), + NEONMAP1(vld2_v, arm_neon_vld2, 0), + NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0), + NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0), + NEONMAP1(vld2q_v, arm_neon_vld2, 0), + NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0), + NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0), + NEONMAP1(vld3_v, arm_neon_vld3, 0), + NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0), + NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0), + NEONMAP1(vld3q_v, arm_neon_vld3, 0), + NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0), + NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0), + NEONMAP1(vld4_v, arm_neon_vld4, 0), + NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0), + NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0), + NEONMAP1(vld4q_v, arm_neon_vld4, 0), + NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, + Add1ArgType | UnsignedAlts), + NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType), + NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType), + NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, + Add1ArgType | UnsignedAlts), + NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, + Add1ArgType | UnsignedAlts), + NEONMAP1(vminnm_v, 
arm_neon_vminnm, Add1ArgType), + NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType), + NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, + Add1ArgType | UnsignedAlts), + NEONMAP2(vmmlaq_v, arm_neon_ummla, arm_neon_smmla, 0), + NEONMAP0(vmovl_v), + NEONMAP0(vmovn_v), + NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType), + NEONMAP0(vmull_v), + NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType), + NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts), + NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts), + NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType), + NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts), + NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts), + NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType), + NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, + Add1ArgType | UnsignedAlts), + NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, + Add1ArgType | UnsignedAlts), + NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType), + NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType), + NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts), + NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts), + NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0), + NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0), + NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType), + NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType), + NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType), + NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, + Add1ArgType | UnsignedAlts), + NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType), + NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType), + NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType), + NEONMAP1(vqrdmlah_v, arm_neon_vqrdmlah, Add1ArgType), + NEONMAP1(vqrdmlahq_v, arm_neon_vqrdmlah, Add1ArgType), + NEONMAP1(vqrdmlsh_v, arm_neon_vqrdmlsh, Add1ArgType), + NEONMAP1(vqrdmlshq_v, arm_neon_vqrdmlsh, Add1ArgType), + NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType), + NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType), + NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, + Add1ArgType | UnsignedAlts), + NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, + Add1ArgType | UnsignedAlts), + NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts), + NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, + Add1ArgType | UnsignedAlts), + NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts), + NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, + Add1ArgType | UnsignedAlts), + NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0), + NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0), + NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts), + NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts), + NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType), + NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0), + NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0), + NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType), + NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType), + NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, + Add1ArgType | UnsignedAlts), + NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, + Add1ArgType | UnsignedAlts), + NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType), + NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType), + NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType), + NEONMAP0(vrndi_v), + NEONMAP0(vrndiq_v), + NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType), + NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType), + 
NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType), + NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType), + NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType), + NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType), + NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType), + NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType), + NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType), + NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, + Add1ArgType | UnsignedAlts), + NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, + Add1ArgType | UnsignedAlts), + NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts), + NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts), + NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0), + NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0), + NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType), + NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType), + NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType), + NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0), + NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0), + NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0), + NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0), + NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0), + NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0), + NEONMAP0(vshl_n_v), + NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, + Add1ArgType | UnsignedAlts), + NEONMAP0(vshll_n_v), + NEONMAP0(vshlq_n_v), + NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, + Add1ArgType | UnsignedAlts), + NEONMAP0(vshr_n_v), + NEONMAP0(vshrn_n_v), + NEONMAP0(vshrq_n_v), + NEONMAP1(vst1_v, arm_neon_vst1, 0), + NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0), + NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0), + NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0), + NEONMAP1(vst1q_v, arm_neon_vst1, 0), + NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0), + NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0), + NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0), + NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0), + NEONMAP1(vst2_v, arm_neon_vst2, 0), + NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0), + NEONMAP1(vst2q_v, arm_neon_vst2, 0), + NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0), + NEONMAP1(vst3_v, arm_neon_vst3, 0), + NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0), + NEONMAP1(vst3q_v, arm_neon_vst3, 0), + NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0), + NEONMAP1(vst4_v, arm_neon_vst4, 0), + NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0), + NEONMAP1(vst4q_v, arm_neon_vst4, 0), + NEONMAP0(vsubhn_v), + NEONMAP0(vtrn_v), + NEONMAP0(vtrnq_v), + NEONMAP0(vtst_v), + NEONMAP0(vtstq_v), + NEONMAP1(vusdot_v, arm_neon_usdot, 0), + NEONMAP1(vusdotq_v, arm_neon_usdot, 0), + NEONMAP1(vusmmlaq_v, arm_neon_usmmla, 0), + NEONMAP0(vuzp_v), + NEONMAP0(vuzpq_v), + NEONMAP0(vzip_v), + NEONMAP0(vzipq_v)}; static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = { - NEONMAP1(__a64_vcvtq_low_bf16_v, aarch64_neon_bfcvtn, 0), - NEONMAP0(splat_lane_v), - NEONMAP0(splat_laneq_v), - NEONMAP0(splatq_lane_v), - NEONMAP0(splatq_laneq_v), - NEONMAP1(vabs_v, aarch64_neon_abs, 0), - NEONMAP1(vabsq_v, aarch64_neon_abs, 0), - NEONMAP0(vadd_v), - NEONMAP0(vaddhn_v), - NEONMAP0(vaddq_p128), - NEONMAP0(vaddq_v), - NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0), - NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0), - NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0), - NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0), - NEONMAP2(vbcaxq_v, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts), - NEONMAP1(vbfdot_v, aarch64_neon_bfdot, 0), - NEONMAP1(vbfdotq_v, aarch64_neon_bfdot, 0), - NEONMAP1(vbfmlalbq_v, 
aarch64_neon_bfmlalb, 0), - NEONMAP1(vbfmlaltq_v, aarch64_neon_bfmlalt, 0), - NEONMAP1(vbfmmlaq_v, aarch64_neon_bfmmla, 0), - NEONMAP1(vcadd_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType), - NEONMAP1(vcadd_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType), - NEONMAP1(vcaddq_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType), - NEONMAP1(vcaddq_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType), - NEONMAP1(vcage_v, aarch64_neon_facge, 0), - NEONMAP1(vcageq_v, aarch64_neon_facge, 0), - NEONMAP1(vcagt_v, aarch64_neon_facgt, 0), - NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0), - NEONMAP1(vcale_v, aarch64_neon_facge, 0), - NEONMAP1(vcaleq_v, aarch64_neon_facge, 0), - NEONMAP1(vcalt_v, aarch64_neon_facgt, 0), - NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0), - NEONMAP0(vceqz_v), - NEONMAP0(vceqzq_v), - NEONMAP0(vcgez_v), - NEONMAP0(vcgezq_v), - NEONMAP0(vcgtz_v), - NEONMAP0(vcgtzq_v), - NEONMAP0(vclez_v), - NEONMAP0(vclezq_v), - NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType), - NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType), - NEONMAP0(vcltz_v), - NEONMAP0(vcltzq_v), - NEONMAP1(vclz_v, ctlz, Add1ArgType), - NEONMAP1(vclzq_v, ctlz, Add1ArgType), - NEONMAP1(vcmla_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType), - NEONMAP1(vcmla_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType), - NEONMAP1(vcmla_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType), - NEONMAP1(vcmla_v, aarch64_neon_vcmla_rot0, Add1ArgType), - NEONMAP1(vcmlaq_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType), - NEONMAP1(vcmlaq_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType), - NEONMAP1(vcmlaq_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType), - NEONMAP1(vcmlaq_v, aarch64_neon_vcmla_rot0, Add1ArgType), - NEONMAP1(vcnt_v, ctpop, Add1ArgType), - NEONMAP1(vcntq_v, ctpop, Add1ArgType), - NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0), - NEONMAP0(vcvt_f16_v), - NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0), - NEONMAP0(vcvt_f32_v), - NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), - NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), - NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), - NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0), - NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), - NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), - NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0), - NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), - NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), - NEONMAP0(vcvtq_f16_v), - NEONMAP0(vcvtq_f32_v), - NEONMAP1(vcvtq_high_bf16_v, aarch64_neon_bfcvtn2, 0), - NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), - NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), - NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), - NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0), - NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), - NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), - NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0), - NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), - NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), - NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType), - NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0), - NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0), - NEONMAP2(veor3q_v, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts), - NEONMAP0(vext_v), - NEONMAP0(vextq_v), - NEONMAP0(vfma_v), - NEONMAP0(vfmaq_v), - NEONMAP1(vfmlal_high_v, 
aarch64_neon_fmlal2, 0), - NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0), - NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0), - NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0), - NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0), - NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0), - NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0), - NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0), - NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts), - NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts), - NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts), - NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts), - NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0), - NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0), - NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0), - NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0), - NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0), - NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0), - NEONMAP2(vmmlaq_v, aarch64_neon_ummla, aarch64_neon_smmla, 0), - NEONMAP0(vmovl_v), - NEONMAP0(vmovn_v), - NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType), - NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType), - NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType), - NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), - NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), - NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType), - NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType), - NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType), - NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts), - NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts), - NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0), - NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0), - NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0), - NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0), - NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType), - NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0), - NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0), - NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType), - NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType), - NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts), - NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType), - NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType), - NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType), - NEONMAP1(vqrdmlah_v, aarch64_neon_sqrdmlah, Add1ArgType), - NEONMAP1(vqrdmlahq_v, aarch64_neon_sqrdmlah, Add1ArgType), - NEONMAP1(vqrdmlsh_v, aarch64_neon_sqrdmlsh, Add1ArgType), - NEONMAP1(vqrdmlshq_v, aarch64_neon_sqrdmlsh, Add1ArgType), - NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0), - NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0), - NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType), - NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0), - NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0), - NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType), - NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts), - NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts), - NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts), - NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts), - 
NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl,UnsignedAlts), - NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts), - NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0), - NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0), - NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts), - NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts), - NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType), - NEONMAP1(vrax1q_v, aarch64_crypto_rax1, 0), - NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), - NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), - NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType), - NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType), - NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts), - NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts), - NEONMAP1(vrnd32x_v, aarch64_neon_frint32x, Add1ArgType), - NEONMAP1(vrnd32xq_v, aarch64_neon_frint32x, Add1ArgType), - NEONMAP1(vrnd32z_v, aarch64_neon_frint32z, Add1ArgType), - NEONMAP1(vrnd32zq_v, aarch64_neon_frint32z, Add1ArgType), - NEONMAP1(vrnd64x_v, aarch64_neon_frint64x, Add1ArgType), - NEONMAP1(vrnd64xq_v, aarch64_neon_frint64x, Add1ArgType), - NEONMAP1(vrnd64z_v, aarch64_neon_frint64z, Add1ArgType), - NEONMAP1(vrnd64zq_v, aarch64_neon_frint64z, Add1ArgType), - NEONMAP0(vrndi_v), - NEONMAP0(vrndiq_v), - NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts), - NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts), - NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts), - NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts), - NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), - NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), - NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType), - NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType), - NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType), - NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0), - NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0), - NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0), - NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0), - NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0), - NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0), - NEONMAP1(vsha512h2q_v, aarch64_crypto_sha512h2, 0), - NEONMAP1(vsha512hq_v, aarch64_crypto_sha512h, 0), - NEONMAP1(vsha512su0q_v, aarch64_crypto_sha512su0, 0), - NEONMAP1(vsha512su1q_v, aarch64_crypto_sha512su1, 0), - NEONMAP0(vshl_n_v), - NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts), - NEONMAP0(vshll_n_v), - NEONMAP0(vshlq_n_v), - NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts), - NEONMAP0(vshr_n_v), - NEONMAP0(vshrn_n_v), - NEONMAP0(vshrq_n_v), - NEONMAP1(vsm3partw1q_v, aarch64_crypto_sm3partw1, 0), - NEONMAP1(vsm3partw2q_v, aarch64_crypto_sm3partw2, 0), - NEONMAP1(vsm3ss1q_v, aarch64_crypto_sm3ss1, 0), - NEONMAP1(vsm3tt1aq_v, aarch64_crypto_sm3tt1a, 0), - NEONMAP1(vsm3tt1bq_v, aarch64_crypto_sm3tt1b, 0), - NEONMAP1(vsm3tt2aq_v, aarch64_crypto_sm3tt2a, 0), - NEONMAP1(vsm3tt2bq_v, aarch64_crypto_sm3tt2b, 0), - NEONMAP1(vsm4ekeyq_v, aarch64_crypto_sm4ekey, 0), - NEONMAP1(vsm4eq_v, aarch64_crypto_sm4e, 0), - NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0), - NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0), 
- NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0), - NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0), - NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0), - NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0), - NEONMAP0(vsubhn_v), - NEONMAP0(vtst_v), - NEONMAP0(vtstq_v), - NEONMAP1(vusdot_v, aarch64_neon_usdot, 0), - NEONMAP1(vusdotq_v, aarch64_neon_usdot, 0), - NEONMAP1(vusmmlaq_v, aarch64_neon_usmmla, 0), - NEONMAP1(vxarq_v, aarch64_crypto_xar, 0), + NEONMAP1(__a64_vcvtq_low_bf16_v, aarch64_neon_bfcvtn, 0), + NEONMAP0(splat_lane_v), + NEONMAP0(splat_laneq_v), + NEONMAP0(splatq_lane_v), + NEONMAP0(splatq_laneq_v), + NEONMAP1(vabs_v, aarch64_neon_abs, 0), + NEONMAP1(vabsq_v, aarch64_neon_abs, 0), + NEONMAP0(vadd_v), + NEONMAP0(vaddhn_v), + NEONMAP0(vaddq_p128), + NEONMAP0(vaddq_v), + NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0), + NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0), + NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0), + NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0), + NEONMAP2(vbcaxq_v, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, + Add1ArgType | UnsignedAlts), + NEONMAP1(vbfdot_v, aarch64_neon_bfdot, 0), + NEONMAP1(vbfdotq_v, aarch64_neon_bfdot, 0), + NEONMAP1(vbfmlalbq_v, aarch64_neon_bfmlalb, 0), + NEONMAP1(vbfmlaltq_v, aarch64_neon_bfmlalt, 0), + NEONMAP1(vbfmmlaq_v, aarch64_neon_bfmmla, 0), + NEONMAP1(vcadd_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType), + NEONMAP1(vcadd_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType), + NEONMAP1(vcaddq_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType), + NEONMAP1(vcaddq_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType), + NEONMAP1(vcage_v, aarch64_neon_facge, 0), + NEONMAP1(vcageq_v, aarch64_neon_facge, 0), + NEONMAP1(vcagt_v, aarch64_neon_facgt, 0), + NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0), + NEONMAP1(vcale_v, aarch64_neon_facge, 0), + NEONMAP1(vcaleq_v, aarch64_neon_facge, 0), + NEONMAP1(vcalt_v, aarch64_neon_facgt, 0), + NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0), + NEONMAP0(vceqz_v), + NEONMAP0(vceqzq_v), + NEONMAP0(vcgez_v), + NEONMAP0(vcgezq_v), + NEONMAP0(vcgtz_v), + NEONMAP0(vcgtzq_v), + NEONMAP0(vclez_v), + NEONMAP0(vclezq_v), + NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType), + NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType), + NEONMAP0(vcltz_v), + NEONMAP0(vcltzq_v), + NEONMAP1(vclz_v, ctlz, Add1ArgType), + NEONMAP1(vclzq_v, ctlz, Add1ArgType), + NEONMAP1(vcmla_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType), + NEONMAP1(vcmla_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType), + NEONMAP1(vcmla_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType), + NEONMAP1(vcmla_v, aarch64_neon_vcmla_rot0, Add1ArgType), + NEONMAP1(vcmlaq_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType), + NEONMAP1(vcmlaq_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType), + NEONMAP1(vcmlaq_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType), + NEONMAP1(vcmlaq_v, aarch64_neon_vcmla_rot0, Add1ArgType), + NEONMAP1(vcnt_v, ctpop, Add1ArgType), + NEONMAP1(vcntq_v, ctpop, Add1ArgType), + NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0), + NEONMAP0(vcvt_f16_v), + NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0), + NEONMAP0(vcvt_f32_v), + NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), + NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), + NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0), + NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0), + 
NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), + NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), + NEONMAP0(vcvtq_f16_v), + NEONMAP0(vcvtq_f32_v), + NEONMAP1(vcvtq_high_bf16_v, aarch64_neon_bfcvtn2, 0), + NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, + 0), + NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, + 0), + NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, + 0), + NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0), + NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0), + NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0), + NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0), + NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType), + NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0), + NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0), + NEONMAP2(veor3q_v, aarch64_crypto_eor3u, aarch64_crypto_eor3s, + Add1ArgType | UnsignedAlts), + NEONMAP0(vext_v), + NEONMAP0(vextq_v), + NEONMAP0(vfma_v), + NEONMAP0(vfmaq_v), + NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0), + NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0), + NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0), + NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0), + NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0), + NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0), + NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0), + NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0), + NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, + Add1ArgType | UnsignedAlts), + NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, + Add1ArgType | UnsignedAlts), + NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, + Add1ArgType | UnsignedAlts), + NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, + Add1ArgType | UnsignedAlts), + NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0), + NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0), + NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0), + NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0), + NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0), + NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0), + NEONMAP2(vmmlaq_v, aarch64_neon_ummla, aarch64_neon_smmla, 0), + NEONMAP0(vmovl_v), + NEONMAP0(vmovn_v), + NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType), + NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType), + NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType), + NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), + NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts), + NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType), + NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType), + NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType), + NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, + Add1ArgType | UnsignedAlts), + NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, + Add1ArgType | UnsignedAlts), + NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0), + NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0), + NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0), + NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0), + NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType), + NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0), + NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0), + NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType), + NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType), + NEONMAP2(vqmovn_v, 
aarch64_neon_uqxtn, aarch64_neon_sqxtn, + Add1ArgType | UnsignedAlts), + NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType), + NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType), + NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType), + NEONMAP1(vqrdmlah_v, aarch64_neon_sqrdmlah, Add1ArgType), + NEONMAP1(vqrdmlahq_v, aarch64_neon_sqrdmlah, Add1ArgType), + NEONMAP1(vqrdmlsh_v, aarch64_neon_sqrdmlsh, Add1ArgType), + NEONMAP1(vqrdmlshq_v, aarch64_neon_sqrdmlsh, Add1ArgType), + NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0), + NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0), + NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType), + NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0), + NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0), + NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType), + NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, + Add1ArgType | UnsignedAlts), + NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, + Add1ArgType | UnsignedAlts), + NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts), + NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, + Add1ArgType | UnsignedAlts), + NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts), + NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, + Add1ArgType | UnsignedAlts), + NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0), + NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0), + NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, + Add1ArgType | UnsignedAlts), + NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, + Add1ArgType | UnsignedAlts), + NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType), + NEONMAP1(vrax1q_v, aarch64_crypto_rax1, 0), + NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), + NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0), + NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType), + NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType), + NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, + Add1ArgType | UnsignedAlts), + NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, + Add1ArgType | UnsignedAlts), + NEONMAP1(vrnd32x_v, aarch64_neon_frint32x, Add1ArgType), + NEONMAP1(vrnd32xq_v, aarch64_neon_frint32x, Add1ArgType), + NEONMAP1(vrnd32z_v, aarch64_neon_frint32z, Add1ArgType), + NEONMAP1(vrnd32zq_v, aarch64_neon_frint32z, Add1ArgType), + NEONMAP1(vrnd64x_v, aarch64_neon_frint64x, Add1ArgType), + NEONMAP1(vrnd64xq_v, aarch64_neon_frint64x, Add1ArgType), + NEONMAP1(vrnd64z_v, aarch64_neon_frint64z, Add1ArgType), + NEONMAP1(vrnd64zq_v, aarch64_neon_frint64z, Add1ArgType), + NEONMAP0(vrndi_v), + NEONMAP0(vrndiq_v), + NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, + Add1ArgType | UnsignedAlts), + NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, + Add1ArgType | UnsignedAlts), + NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts), + NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts), + NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), + NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0), + NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType), + NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType), + NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType), + NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0), + NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0), + NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0), + NEONMAP1(vsha256hq_v, 
aarch64_crypto_sha256h, 0), + NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0), + NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0), + NEONMAP1(vsha512h2q_v, aarch64_crypto_sha512h2, 0), + NEONMAP1(vsha512hq_v, aarch64_crypto_sha512h, 0), + NEONMAP1(vsha512su0q_v, aarch64_crypto_sha512su0, 0), + NEONMAP1(vsha512su1q_v, aarch64_crypto_sha512su1, 0), + NEONMAP0(vshl_n_v), + NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, + Add1ArgType | UnsignedAlts), + NEONMAP0(vshll_n_v), + NEONMAP0(vshlq_n_v), + NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, + Add1ArgType | UnsignedAlts), + NEONMAP0(vshr_n_v), + NEONMAP0(vshrn_n_v), + NEONMAP0(vshrq_n_v), + NEONMAP1(vsm3partw1q_v, aarch64_crypto_sm3partw1, 0), + NEONMAP1(vsm3partw2q_v, aarch64_crypto_sm3partw2, 0), + NEONMAP1(vsm3ss1q_v, aarch64_crypto_sm3ss1, 0), + NEONMAP1(vsm3tt1aq_v, aarch64_crypto_sm3tt1a, 0), + NEONMAP1(vsm3tt1bq_v, aarch64_crypto_sm3tt1b, 0), + NEONMAP1(vsm3tt2aq_v, aarch64_crypto_sm3tt2a, 0), + NEONMAP1(vsm3tt2bq_v, aarch64_crypto_sm3tt2b, 0), + NEONMAP1(vsm4ekeyq_v, aarch64_crypto_sm4ekey, 0), + NEONMAP1(vsm4eq_v, aarch64_crypto_sm4e, 0), + NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0), + NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0), + NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0), + NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0), + NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0), + NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0), + NEONMAP0(vsubhn_v), + NEONMAP0(vtst_v), + NEONMAP0(vtstq_v), + NEONMAP1(vusdot_v, aarch64_neon_usdot, 0), + NEONMAP1(vusdotq_v, aarch64_neon_usdot, 0), + NEONMAP1(vusmmlaq_v, aarch64_neon_usmmla, 0), + NEONMAP1(vxarq_v, aarch64_crypto_xar, 0), }; static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = { - NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType), - NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType), - NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType), - NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), - NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), - NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), - NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), - NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), - NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), - NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), - NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), - NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType), - NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), - NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType), - NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), - NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), - NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType), - NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType), - NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), - NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), - NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType), - NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType), - NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), - NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), - NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType), - NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType), - NEONMAP1(vcvtas_s32_f32, 
aarch64_neon_fcvtas, AddRetType | Add1ArgType), - NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType), - NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), - NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), - NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), - NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), - NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), - NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), - NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0), - NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType), - NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), - NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType), - NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), - NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType), - NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), - NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType), - NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), - NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType), - NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), - NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType), - NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), - NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), - NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), - NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), - NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), - NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), - NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), - NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0), - NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), - NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), - NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), - NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), - NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), - NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), - NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), - NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), - NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), - NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), - NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), - NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), - NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), - NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), - NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), - NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), - NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), - NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), - NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), - NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), - NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0), - NEONMAP1(vmulxd_f64, 
aarch64_neon_fmulx, Add1ArgType), - NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType), - NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType), - NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), - NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), - NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), - NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), - NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), - NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), - NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), - NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), - NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), - NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType), - NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType), - NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType), - NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType), - NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType), - NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType), - NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType), - NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors), - NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0), - NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType), - NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType), - NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), - NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), - NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), - NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), - NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType), - NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), - NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), - NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType), - NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType), - NEONMAP1(vqrdmlahh_s16, aarch64_neon_sqrdmlah, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqrdmlahs_s32, aarch64_neon_sqrdmlah, Add1ArgType), - NEONMAP1(vqrdmlshh_s16, aarch64_neon_sqrdmlsh, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqrdmlshs_s32, aarch64_neon_sqrdmlsh, Add1ArgType), - NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType), - NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType), - 
NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType), - NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType), - NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType), - NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType), - NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType), - NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), - NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), - NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), - NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), - NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType), - NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors), - NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors), - NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType), - NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType), - NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType), - NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType), - NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType), - NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType), - NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType), - NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType), - NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType), - NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), - NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), - NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), - NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), - NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType), - NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), - NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), - NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType), - NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType), - NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType), - NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType), - NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType), - NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType), - 
NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType), - NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType), - NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType), - NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType), - NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType), - NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType), - NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType), - NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType), - NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0), - NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0), - NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0), - NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0), - NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType), - NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType), - NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType), - NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType), - NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType), - NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType), - NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType), - NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType), - NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType), - NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors), - NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType), - // FP16 scalar intrinisics go here. - NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType), - NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType), - NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType), - NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType), - NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType), - NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), - NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType), - NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), - NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType), - NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), - NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType), - NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), - NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType), - NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), - NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), - NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), - NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), - NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType), - NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType), - NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), - NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), - NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType), - NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType), - NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), - NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), - 
NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType), - NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType), - NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), - NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), - NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType), - NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType), - NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType), - NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType), - NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType), + NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType), + NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType), + NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType), + NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), + NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), + NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType), + NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType), + NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), + NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), + NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), + NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType), + NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType), + NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType), + NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType), + NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType), + NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), + NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType), + NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType), + NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), + NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), + NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType), + NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType), + NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType), + NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType), + NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType), + NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType), + NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType), + NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType), + NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, + AddRetType | Add1ArgType), + NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, + AddRetType | Add1ArgType), + NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, + AddRetType | Add1ArgType), + NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, + AddRetType | Add1ArgType), + NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), + NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), + NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0), + NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType), + NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), + NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType), + NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), + NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType), + NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), + NEONMAP1(vcvtns_s32_f32, 
aarch64_neon_fcvtns, AddRetType | Add1ArgType), + NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), + NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType), + NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), + NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType), + NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), + NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, + AddRetType | Add1ArgType), + NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, + AddRetType | Add1ArgType), + NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, + AddRetType | Add1ArgType), + NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, + AddRetType | Add1ArgType), + NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), + NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), + NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0), + NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), + NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), + NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), + NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), + NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), + NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), + NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), + NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), + NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType), + NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType), + NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), + NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), + NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), + NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), + NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), + NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), + NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), + NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), + NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType), + NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType), + NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0), + NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType), + NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType), + NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType), + NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType), + NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), + NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType), + NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType), + NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType), + NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType), + NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType), + NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType), + NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType), + NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType), + NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType), + NEONMAP1(vqaddb_s8, 
aarch64_neon_sqadd, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType), + NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType), + NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType), + NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType), + NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType), + NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors), + NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0), + NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType), + NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType), + NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), + NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), + NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors), + NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors), + NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, + AddRetType | Add1ArgType), + NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), + NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors), + NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType), + NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType), + NEONMAP1(vqrdmlahh_s16, aarch64_neon_sqrdmlah, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrdmlahs_s32, aarch64_neon_sqrdmlah, Add1ArgType), + NEONMAP1(vqrdmlshh_s16, aarch64_neon_sqrdmlsh, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrdmlshs_s32, aarch64_neon_sqrdmlsh, Add1ArgType), + NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType), + NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType), + NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType), + NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType), + NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType), + NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType), + NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType), + NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType), + NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, + VectorRet | Use64BitVectors), + NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, + VectorRet | Use64BitVectors), + NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, + Vectorize1ArgType | 
Use64BitVectors), + NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType), + NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType), + NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType), + NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType), + NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType), + NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType), + NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType), + NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType), + NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType), + NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors), + NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType), + NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), + NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors), + NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType), + NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType), + NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType), + NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType), + NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType), + NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType), + NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType), + NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType), + NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType), + NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType), + NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType), + NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType), + NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType), + NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType), + NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0), + NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0), + NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0), + NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0), + NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType), + NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType), + NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType), + NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType), + NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, 
Add1ArgType), + NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType), + NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType), + NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType), + NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType), + NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, + Vectorize1ArgType | Use64BitVectors), + NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType), + // FP16 scalar intrinisics go here. + NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType), + NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType), + NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType), + NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType), + NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType), + NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, + AddRetType | Add1ArgType), + NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, + AddRetType | Add1ArgType), + NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, + AddRetType | Add1ArgType), + NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, + AddRetType | Add1ArgType), + NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, + AddRetType | Add1ArgType), + NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, + AddRetType | Add1ArgType), + NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, + AddRetType | Add1ArgType), + NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, + AddRetType | Add1ArgType), + NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), + NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType), + NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), + NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType), + NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType), + NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType), + NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), + NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType), + NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType), + NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType), + NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), + NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType), + NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType), + NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType), + NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), + NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType), + NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType), + NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType), + NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType), + NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType), + NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType), }; #undef NEONMAP0 @@ -6407,7 +6485,7 @@ #define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier) \ { \ - #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \ +#NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0, \ TypeModifier \ } @@ -6415,8 +6493,8 @@ { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier } static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = { #define 
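// The NEONMAP1 entries above pair each scalar NEON builtin with an LLVM
// intrinsic ID and a set of type-modifier flags (AddRetType, Add1ArgType,
// VectorRet, Vectorize1ArgType, Use64BitVectors, ...). Below is a minimal
// standalone sketch of that shape and of the sorted-table lookup it enables;
// the struct, field, and function names here are simplified stand-ins for
// illustration, not the real ARMVectorIntrinsicInfo definition or lookup.
#include <algorithm>
#include <cstddef>

struct MapEntry {
  const char *NameHint;   // builtin spelling, e.g. "vqaddd_s64"
  unsigned BuiltinID;     // NEON::BI__builtin_neon_* value
  unsigned LLVMIntrinsic; // Intrinsic::aarch64_neon_* value
  unsigned TypeModifier;  // AddRetType | Add1ArgType | ... bit flags
};

// Entries stay sorted by BuiltinID, so a lookup can binary-search the table.
static const MapEntry *findEntry(const MapEntry *Map, std::size_t N,
                                 unsigned BuiltinID) {
  const MapEntry *End = Map + N;
  const MapEntry *It = std::lower_bound(
      Map, End, BuiltinID,
      [](const MapEntry &E, unsigned ID) { return E.BuiltinID < ID; });
  return (It != End && It->BuiltinID == BuiltinID) ? It : nullptr;
}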
GET_SVE_LLVM_INTRINSIC_MAP -#include "clang/Basic/arm_sve_builtin_cg.inc" #include "clang/Basic/BuiltinsAArch64NeonSVEBridge_cg.def" +#include "clang/Basic/arm_sve_builtin_cg.inc" #undef GET_SVE_LLVM_INTRINSIC_MAP }; @@ -6529,7 +6607,7 @@ ai != ae; ++ai, ++j) { llvm::Type *ArgTy = ai->getType(); if (Ops[j]->getType()->getPrimitiveSizeInBits() == - ArgTy->getPrimitiveSizeInBits()) + ArgTy->getPrimitiveSizeInBits()) continue; assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy()); @@ -6576,7 +6654,7 @@ if (!Ty) return nullptr; - auto getAlignmentValue32 = [&](Address addr) -> Value* { + auto getAlignmentValue32 = [&](Address addr) -> Value * { return Builder.getInt32(addr.getAlignment().getQuantity()); }; @@ -6585,7 +6663,8 @@ Int = AltLLVMIntrinsic; switch (BuiltinID) { - default: break; + default: + break; case NEON::BI__builtin_neon_splat_lane_v: case NEON::BI__builtin_neon_splat_laneq_v: case NEON::BI__builtin_neon_splatq_lane_v: @@ -6616,7 +6695,7 @@ llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, Quad ? 16 : 8); Ops[0] = Builder.CreateBitCast(Ops[0], VTy); Ops[1] = Builder.CreateBitCast(Ops[1], VTy); - Ops[0] = Builder.CreateXor(Ops[0], Ops[1]); + Ops[0] = Builder.CreateXor(Ops[0], Ops[1]); return Builder.CreateBitCast(Ops[0], Ty); } case NEON::BI__builtin_neon_vaddhn_v: { @@ -6648,7 +6727,8 @@ case NEON::BI__builtin_neon_vcagtq_v: { llvm::Type *Ty; switch (VTy->getScalarSizeInBits()) { - default: llvm_unreachable("unexpected type"); + default: + llvm_unreachable("unexpected type"); case 32: Ty = FloatTy; break; @@ -6660,7 +6740,7 @@ break; } auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements()); - llvm::Type *Tys[] = { VTy, VecFlt }; + llvm::Type *Tys[] = {VTy, VecFlt}; Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); return EmitNeonCall(F, Ops, NameHint); } @@ -6710,7 +6790,7 @@ case NEON::BI__builtin_neon_vcvtq_n_f16_v: case NEON::BI__builtin_neon_vcvtq_n_f32_v: case NEON::BI__builtin_neon_vcvtq_n_f64_v: { - llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty }; + llvm::Type *Tys[2] = {GetFloatNeonType(this, Type), Ty}; Int = Usgn ? 
LLVMIntrinsic : AltLLVMIntrinsic; Function *F = CGM.getIntrinsic(Int, Tys); return EmitNeonCall(F, Ops, "vcvt_n"); @@ -6727,7 +6807,7 @@ case NEON::BI__builtin_neon_vcvtq_n_u32_v: case NEON::BI__builtin_neon_vcvtq_n_s64_v: case NEON::BI__builtin_neon_vcvtq_n_u64_v: { - llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; + llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)}; Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); return EmitNeonCall(F, Ops, "vcvt_n"); } @@ -6795,20 +6875,19 @@ case NEON::BI__builtin_neon_vcvtmq_u16_v: case NEON::BI__builtin_neon_vcvtmq_u32_v: case NEON::BI__builtin_neon_vcvtmq_u64_v: { - llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; + llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)}; return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint); } case NEON::BI__builtin_neon_vcvtx_f32_v: { - llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty}; + llvm::Type *Tys[2] = {VTy->getTruncatedElementVectorType(VTy), Ty}; return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint); - } case NEON::BI__builtin_neon_vext_v: case NEON::BI__builtin_neon_vextq_v: { int CV = cast(Ops[2])->getSExtValue(); SmallVector Indices; for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) - Indices.push_back(i+CV); + Indices.push_back(i + CV); Ops[0] = Builder.CreateBitCast(Ops[0], Ty); Ops[1] = Builder.CreateBitCast(Ops[1], Ty); @@ -6839,7 +6918,7 @@ case NEON::BI__builtin_neon_vld1q_x4_v: { llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType()); Ops[1] = Builder.CreateBitCast(Ops[1], PTy); - llvm::Type *Tys[2] = { VTy, PTy }; + llvm::Type *Tys[2] = {VTy, PTy}; Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys); Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN"); Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); @@ -6918,11 +6997,10 @@ case NEON::BI__builtin_neon_vpadalq_v: { // The source operand type has twice as many elements of half the size. 
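// Standalone illustration of the comment above (a sketch, not CodeGen code):
// for the pairwise add-and-accumulate builtins, the source vector has twice as
// many lanes at half the width, so a <4 x i32> result consumes an <8 x i16>
// source. This mirrors the EltBits / 2 and NumElements * 2 computation below.
#include <cassert>

struct VecShape {
  unsigned EltBits;
  unsigned NumElts;
};

static VecShape narrowSourceShape(VecShape Result) {
  // Same overall bit width: twice the lanes, each half as wide.
  return {Result.EltBits / 2, Result.NumElts * 2};
}

static void vpadalExample() {
  VecShape Src = narrowSourceShape({32, 4});     // result type <4 x i32>
  assert(Src.EltBits == 16 && Src.NumElts == 8); // source type <8 x i16>
  (void)Src;
}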
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits(); - llvm::Type *EltTy = - llvm::IntegerType::get(getLLVMContext(), EltBits / 2); + llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2); auto *NarrowTy = llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2); - llvm::Type *Tys[2] = { Ty, NarrowTy }; + llvm::Type *Tys[2] = {Ty, NarrowTy}; return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint); } case NEON::BI__builtin_neon_vpaddl_v: @@ -6932,7 +7010,7 @@ llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2); auto *NarrowTy = llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2); - llvm::Type *Tys[2] = { Ty, NarrowTy }; + llvm::Type *Tys[2] = {Ty, NarrowTy}; return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl"); } case NEON::BI__builtin_neon_vqdmlal_v: @@ -6968,12 +7046,10 @@ } case NEON::BI__builtin_neon_vqshl_n_v: case NEON::BI__builtin_neon_vqshlq_n_v: - return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n", - 1, false); + return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n", 1, false); case NEON::BI__builtin_neon_vqshlu_n_v: case NEON::BI__builtin_neon_vqshluq_n_v: - return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n", - 1, false); + return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n", 1, false); case NEON::BI__builtin_neon_vrecpe_v: case NEON::BI__builtin_neon_vrecpeq_v: case NEON::BI__builtin_neon_vrsqrte_v: @@ -6988,8 +7064,7 @@ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint); case NEON::BI__builtin_neon_vrshr_n_v: case NEON::BI__builtin_neon_vrshrq_n_v: - return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", - 1, true); + return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true); case NEON::BI__builtin_neon_vsha512hq_v: case NEON::BI__builtin_neon_vsha512h2q_v: case NEON::BI__builtin_neon_vsha512su0q_v: @@ -7000,7 +7075,7 @@ case NEON::BI__builtin_neon_vshl_n_v: case NEON::BI__builtin_neon_vshlq_n_v: Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false); - return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1], + return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1], "vshl_n"); case NEON::BI__builtin_neon_vshll_n_v: { llvm::FixedVectorType *SrcTy = @@ -7072,11 +7147,11 @@ // in AArch64 it comes last. We may want to stick to one or another. 
if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be || Arch == llvm::Triple::aarch64_32) { - llvm::Type *Tys[2] = { VTy, PTy }; + llvm::Type *Tys[2] = {VTy, PTy}; std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, ""); } - llvm::Type *Tys[2] = { PTy, VTy }; + llvm::Type *Tys[2] = {PTy, VTy}; return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, ""); } case NEON::BI__builtin_neon_vsubhn_v: { @@ -7106,8 +7181,8 @@ for (unsigned vi = 0; vi != 2; ++vi) { SmallVector Indices; for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { - Indices.push_back(i+vi); - Indices.push_back(i+e+vi); + Indices.push_back(i + vi); + Indices.push_back(i + e + vi); } Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn"); @@ -7134,7 +7209,7 @@ for (unsigned vi = 0; vi != 2; ++vi) { SmallVector Indices; for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) - Indices.push_back(2*i+vi); + Indices.push_back(2 * i + vi); Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp"); @@ -7157,8 +7232,8 @@ for (unsigned vi = 0; vi != 2; ++vi) { SmallVector Indices; for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { - Indices.push_back((i + vi*e) >> 1); - Indices.push_back(((i + vi*e) >> 1)+e); + Indices.push_back((i + vi * e) >> 1); + Indices.push_back(((i + vi * e) >> 1) + e); } Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip"); @@ -7170,7 +7245,7 @@ case NEON::BI__builtin_neon_vdotq_v: { auto *InputTy = llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); - llvm::Type *Tys[2] = { Ty, InputTy }; + llvm::Type *Tys[2] = {Ty, InputTy}; Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic; return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot"); } @@ -7178,63 +7253,62 @@ case NEON::BI__builtin_neon_vfmlalq_low_v: { auto *InputTy = llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); - llvm::Type *Tys[2] = { Ty, InputTy }; + llvm::Type *Tys[2] = {Ty, InputTy}; return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low"); } case NEON::BI__builtin_neon_vfmlsl_low_v: case NEON::BI__builtin_neon_vfmlslq_low_v: { auto *InputTy = llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); - llvm::Type *Tys[2] = { Ty, InputTy }; + llvm::Type *Tys[2] = {Ty, InputTy}; return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low"); } case NEON::BI__builtin_neon_vfmlal_high_v: case NEON::BI__builtin_neon_vfmlalq_high_v: { auto *InputTy = llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); - llvm::Type *Tys[2] = { Ty, InputTy }; + llvm::Type *Tys[2] = {Ty, InputTy}; return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high"); } case NEON::BI__builtin_neon_vfmlsl_high_v: case NEON::BI__builtin_neon_vfmlslq_high_v: { auto *InputTy = llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16); - llvm::Type *Tys[2] = { Ty, InputTy }; + llvm::Type *Tys[2] = {Ty, InputTy}; return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high"); } case NEON::BI__builtin_neon_vmmlaq_v: { auto *InputTy = llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); - llvm::Type *Tys[2] = { Ty, InputTy }; + llvm::Type *Tys[2] = {Ty, InputTy}; Int = Usgn ? 
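// A standalone sketch (not CodeGen code) of the shufflevector index patterns
// built for vtrn/vuzp/vzip above, evaluated for a 4-element vector so the
// transpose/unzip/zip structure of the indices is visible.
#include <cstdio>
#include <vector>

int main() {
  const unsigned e = 4; // number of elements per input vector
  for (unsigned vi = 0; vi != 2; ++vi) {
    std::vector<unsigned> Trn, Uzp, Zip;
    for (unsigned i = 0; i != e; i += 2) { // vtrn: {0,4,2,6} then {1,5,3,7}
      Trn.push_back(i + vi);
      Trn.push_back(i + e + vi);
    }
    for (unsigned i = 0; i != e; ++i)      // vuzp: {0,2,4,6} then {1,3,5,7}
      Uzp.push_back(2 * i + vi);
    for (unsigned i = 0; i != e; i += 2) { // vzip: {0,4,1,5} then {2,6,3,7}
      Zip.push_back((i + vi * e) >> 1);
      Zip.push_back(((i + vi * e) >> 1) + e);
    }
    std::printf("vi=%u trn:", vi);
    for (unsigned I : Trn) std::printf(" %u", I);
    std::printf("  uzp:");
    for (unsigned I : Uzp) std::printf(" %u", I);
    std::printf("  zip:");
    for (unsigned I : Zip) std::printf(" %u", I);
    std::printf("\n");
  }
}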
LLVMIntrinsic : AltLLVMIntrinsic; return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmmla"); } case NEON::BI__builtin_neon_vusmmlaq_v: { auto *InputTy = llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); - llvm::Type *Tys[2] = { Ty, InputTy }; + llvm::Type *Tys[2] = {Ty, InputTy}; return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla"); } case NEON::BI__builtin_neon_vusdot_v: case NEON::BI__builtin_neon_vusdotq_v: { auto *InputTy = llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8); - llvm::Type *Tys[2] = { Ty, InputTy }; + llvm::Type *Tys[2] = {Ty, InputTy}; return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot"); } case NEON::BI__builtin_neon_vbfdot_v: case NEON::BI__builtin_neon_vbfdotq_v: { llvm::Type *InputTy = llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16); - llvm::Type *Tys[2] = { Ty, InputTy }; + llvm::Type *Tys[2] = {Ty, InputTy}; return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot"); } case NEON::BI__builtin_neon___a32_vcvt_bf16_v: { - llvm::Type *Tys[1] = { Ty }; + llvm::Type *Tys[1] = {Ty}; Function *F = CGM.getIntrinsic(Int, Tys); return EmitNeonCall(F, Ops, "vcvtfp2bf"); } - } assert(Int && "Expected valid intrinsic number"); @@ -7285,15 +7359,14 @@ SmallVector Indices; auto *TblTy = cast(Ops[0]->getType()); for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) { - Indices.push_back(2*i); - Indices.push_back(2*i+1); + Indices.push_back(2 * i); + Indices.push_back(2 * i + 1); } int PairPos = 0, End = Ops.size() - 1; while (PairPos < End) { - TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos], - Ops[PairPos+1], Indices, - Name)); + TblOps.push_back(CGF.Builder.CreateShuffleVector( + Ops[PairPos], Ops[PairPos + 1], Indices, Name)); PairPos += 2; } @@ -7301,8 +7374,8 @@ // of the 128-bit lookup table with zero. if (PairPos == End) { Value *ZeroTbl = ConstantAggregateZero::get(TblTy); - TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos], - ZeroTbl, Indices, Name)); + TblOps.push_back( + CGF.Builder.CreateShuffleVector(Ops[PairPos], ZeroTbl, Indices, Name)); } Function *TblF; @@ -7362,8 +7435,8 @@ SpecialRegisterAccessKind AccessKind, StringRef SysReg = "") { // write and register intrinsics only support 32 and 64 bit operations. - assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64)) - && "Unsupported size for register."); + assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64)) && + "Unsupported size for register."); CodeGen::CGBuilderTy &Builder = CGF.Builder; CodeGen::CodeGenModule &CGM = CGF.CGM; @@ -7374,15 +7447,15 @@ SysReg = cast(SysRegStrExpr)->getString(); } - llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) }; + llvm::Metadata *Ops[] = {llvm::MDString::get(Context, SysReg)}; llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops); llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName); - llvm::Type *Types[] = { RegisterType }; + llvm::Type *Types[] = {RegisterType}; bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32); - assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64)) - && "Can't fit 64-bit value in 32-bit register"); + assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64)) && + "Can't fit 64-bit value in 32-bit register"); if (AccessKind != Write) { assert(AccessKind == NormalRead || AccessKind == VolatileRead); @@ -7408,23 +7481,24 @@ if (MixedTypes) { // Extend 32 bit write value to 64 bit to pass to write. 
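// A standalone sketch (not the CodeGen helper itself) of the mixed 32/64-bit
// rule used for the read/write_register builtins in this section: a 32-bit
// value written to a 64-bit system register is zero-extended first, a 64-bit
// register read into a 32-bit result is truncated afterwards, and a 64-bit
// value in a 32-bit register is rejected up front by the assert above.
#include <cassert>
#include <cstdint>

static uint64_t widenForWrite(uint32_t Value) {
  return static_cast<uint64_t>(Value); // zero-extend, as with CreateZExt
}

static uint32_t narrowAfterRead(uint64_t RegValue) {
  return static_cast<uint32_t>(RegValue); // truncate, as with CreateTrunc
}

static void registerAccessExample() {
  assert(widenForWrite(0xFFFFFFFFu) == 0x00000000FFFFFFFFull);
  assert(narrowAfterRead(0x1122334455667788ull) == 0x55667788u);
}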
ArgValue = Builder.CreateZExt(ArgValue, RegisterType); - return Builder.CreateCall(F, { Metadata, ArgValue }); + return Builder.CreateCall(F, {Metadata, ArgValue}); } if (ValueType->isPointerTy()) { // Have VoidPtrTy ArgValue but want to return an i32/i64. ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType); - return Builder.CreateCall(F, { Metadata, ArgValue }); + return Builder.CreateCall(F, {Metadata, ArgValue}); } - return Builder.CreateCall(F, { Metadata, ArgValue }); + return Builder.CreateCall(F, {Metadata, ArgValue}); } /// Return true if BuiltinID is an overloaded Neon intrinsic with an extra /// argument that specifies the vector type. static bool HasExtraNeonArgument(unsigned BuiltinID) { switch (BuiltinID) { - default: break; + default: + break; case NEON::BI__builtin_neon_vget_lane_i8: case NEON::BI__builtin_neon_vget_lane_i16: case NEON::BI__builtin_neon_vget_lane_bf16: @@ -7498,8 +7572,8 @@ if (BuiltinID == clang::ARM::BI__builtin_arm_prefetch) { Value *Address = EmitScalarExpr(E->getArg(0)); - Value *RW = EmitScalarExpr(E->getArg(1)); - Value *IsData = EmitScalarExpr(E->getArg(2)); + Value *RW = EmitScalarExpr(E->getArg(1)); + Value *IsData = EmitScalarExpr(E->getArg(2)); // Locality is not supported on ARM target Value *Locality = llvm::ConstantInt::get(Int32Ty, 3); @@ -7541,7 +7615,8 @@ Function *F; switch (BuiltinID) { - default: llvm_unreachable("unexpected builtin"); + default: + llvm_unreachable("unexpected builtin"); case clang::ARM::BI__builtin_arm_mcrr: F = CGM.getIntrinsic(Intrinsic::arm_mcrr); break; @@ -7575,7 +7650,8 @@ Function *F; switch (BuiltinID) { - default: llvm_unreachable("unexpected builtin"); + default: + llvm_unreachable("unexpected builtin"); case clang::ARM::BI__builtin_arm_mrrc: F = CGM.getIntrinsic(Intrinsic::arm_mrrc); break; @@ -7586,7 +7662,7 @@ Value *Coproc = EmitScalarExpr(E->getArg(0)); Value *Opc1 = EmitScalarExpr(E->getArg(1)); - Value *CRm = EmitScalarExpr(E->getArg(2)); + Value *CRm = EmitScalarExpr(E->getArg(2)); Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm}); // Returns an unsigned 64 bit integer, represented @@ -7612,7 +7688,8 @@ Function *F; switch (BuiltinID) { - default: llvm_unreachable("unexpected builtin"); + default: + llvm_unreachable("unexpected builtin"); case clang::ARM::BI__builtin_arm_ldaex: F = CGM.getIntrinsic(Intrinsic::arm_ldaexd); break; @@ -7685,7 +7762,8 @@ Value *Arg0 = Builder.CreateExtractValue(Val, 0); Value *Arg1 = Builder.CreateExtractValue(Val, 1); - Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy); + Value *StPtr = + Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy); return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd"); } @@ -7695,8 +7773,8 @@ Value *StoreAddr = EmitScalarExpr(E->getArg(1)); QualType Ty = E->getArg(0)->getType(); - llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(), - getContext().getTypeSize(Ty)); + llvm::Type *StoreTy = + llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty)); StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo()); if (StoreVal->getType()->isPointerTy()) @@ -7729,19 +7807,25 @@ Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic; switch (BuiltinID) { case clang::ARM::BI__builtin_arm_crc32b: - CRCIntrinsicID = Intrinsic::arm_crc32b; break; + CRCIntrinsicID = Intrinsic::arm_crc32b; + break; case clang::ARM::BI__builtin_arm_crc32cb: - CRCIntrinsicID = Intrinsic::arm_crc32cb; break; + CRCIntrinsicID = Intrinsic::arm_crc32cb; + break; case 
clang::ARM::BI__builtin_arm_crc32h: - CRCIntrinsicID = Intrinsic::arm_crc32h; break; + CRCIntrinsicID = Intrinsic::arm_crc32h; + break; case clang::ARM::BI__builtin_arm_crc32ch: - CRCIntrinsicID = Intrinsic::arm_crc32ch; break; + CRCIntrinsicID = Intrinsic::arm_crc32ch; + break; case clang::ARM::BI__builtin_arm_crc32w: case clang::ARM::BI__builtin_arm_crc32d: - CRCIntrinsicID = Intrinsic::arm_crc32w; break; + CRCIntrinsicID = Intrinsic::arm_crc32w; + break; case clang::ARM::BI__builtin_arm_crc32cw: case clang::ARM::BI__builtin_arm_crc32cd: - CRCIntrinsicID = Intrinsic::arm_crc32cw; break; + CRCIntrinsicID = Intrinsic::arm_crc32cw; + break; } if (CRCIntrinsicID != Intrinsic::not_intrinsic) { @@ -7826,13 +7910,13 @@ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); assert(Error == ASTContext::GE_None && "Should not codegen an error"); - auto getAlignmentValue32 = [&](Address addr) -> Value* { + auto getAlignmentValue32 = [&](Address addr) -> Value * { return Builder.getInt32(addr.getAlignment().getQuantity()); }; Address PtrOp0 = Address::invalid(); Address PtrOp1 = Address::invalid(); - SmallVector Ops; + SmallVector Ops; bool HasExtraArg = HasExtraNeonArgument(BuiltinID); unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0); for (unsigned i = 0, e = NumArgs; i != e; i++) { @@ -7907,7 +7991,8 @@ } switch (BuiltinID) { - default: break; + default: + break; case NEON::BI__builtin_neon_vget_lane_i8: case NEON::BI__builtin_neon_vget_lane_i16: @@ -7929,7 +8014,8 @@ Value *Arg = EmitScalarExpr(E->getArg(0)); llvm::Type *Tys[] = {Arg->getType()}; Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys); - return Builder.CreateCall(F, {Arg}, "vrndn"); } + return Builder.CreateCall(F, {Arg}, "vrndn"); + } case NEON::BI__builtin_neon_vset_lane_i8: case NEON::BI__builtin_neon_vset_lane_i16: @@ -7970,14 +8056,14 @@ Function *F = CGM.getIntrinsic(BuiltinID == clang::ARM::BI_MoveToCoprocessor ? Intrinsic::arm_mcr : Intrinsic::arm_mcr2); - return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0], - Ops[3], Ops[4], Ops[5]}); + return Builder.CreateCall(F, + {Ops[1], Ops[2], Ops[0], Ops[3], Ops[4], Ops[5]}); } } // Get the last argument, which specifies the vector type. assert(HasExtraArg); - const Expr *Arg = E->getArg(E->getNumArgs()-1); + const Expr *Arg = E->getArg(E->getNumArgs() - 1); Optional Result = Arg->getIntegerConstantExpr(getContext()); if (!Result) return nullptr; @@ -8024,7 +8110,8 @@ unsigned Int; switch (BuiltinID) { - default: return nullptr; + default: + return nullptr; case NEON::BI__builtin_neon_vld1q_lane_v: // Handle 64-bit integer elements as a special case. Use shuffles of // one-element vectors to avoid poor code for i64 in the backend. @@ -8032,7 +8119,8 @@ // Extract the other lane. Ops[1] = Builder.CreateBitCast(Ops[1], Ty); int Lane = cast(Ops[2])->getZExtValue(); - Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane)); + Value *SV = + llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1 - Lane)); Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV); // Load the value as a one-element vector. Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1); @@ -8053,26 +8141,24 @@ } case NEON::BI__builtin_neon_vqrshrn_n_v: Int = - usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns; - return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n", - 1, true); + usgn ? 
Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns; + return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n", 1, true); case NEON::BI__builtin_neon_vqrshrun_n_v: return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty), Ops, "vqrshrun_n", 1, true); case NEON::BI__builtin_neon_vqshrn_n_v: Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns; - return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n", - 1, true); + return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n", 1, true); case NEON::BI__builtin_neon_vqshrun_n_v: return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty), Ops, "vqshrun_n", 1, true); case NEON::BI__builtin_neon_vrecpe_v: case NEON::BI__builtin_neon_vrecpeq_v: - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty), - Ops, "vrecpe"); + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty), Ops, + "vrecpe"); case NEON::BI__builtin_neon_vrshrn_n_v: - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty), - Ops, "vrshrn_n", 1, true); + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty), Ops, + "vrshrn_n", 1, true); case NEON::BI__builtin_neon_vrsra_n_v: case NEON::BI__builtin_neon_vrsraq_n_v: Ops[0] = Builder.CreateBitCast(Ops[0], Ty); @@ -8104,8 +8190,8 @@ Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV); Ops[2] = getAlignmentValue32(PtrOp0); llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()}; - return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, - Tys), Ops); + return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Tys), + Ops); } [[fallthrough]]; case NEON::BI__builtin_neon_vst1_lane_v: { @@ -8116,33 +8202,33 @@ return St; } case NEON::BI__builtin_neon_vtbl1_v: - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1), - Ops, "vtbl1"); + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1), Ops, + "vtbl1"); case NEON::BI__builtin_neon_vtbl2_v: - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2), - Ops, "vtbl2"); + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2), Ops, + "vtbl2"); case NEON::BI__builtin_neon_vtbl3_v: - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3), - Ops, "vtbl3"); + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3), Ops, + "vtbl3"); case NEON::BI__builtin_neon_vtbl4_v: - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4), - Ops, "vtbl4"); + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4), Ops, + "vtbl4"); case NEON::BI__builtin_neon_vtbx1_v: - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1), - Ops, "vtbx1"); + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1), Ops, + "vtbx1"); case NEON::BI__builtin_neon_vtbx2_v: - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2), - Ops, "vtbx2"); + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2), Ops, + "vtbx2"); case NEON::BI__builtin_neon_vtbx3_v: - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3), - Ops, "vtbx3"); + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3), Ops, + "vtbx3"); case NEON::BI__builtin_neon_vtbx4_v: - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4), - Ops, "vtbx4"); + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4), Ops, + "vtbx4"); } } -template +template static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) { return 
E->getIntegerConstantExpr(Context)->getExtValue(); } @@ -8210,7 +8296,8 @@ } } -static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) { +static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, + bool Odd) { // Make a shufflevector that extracts every other element of a vector (evens // or odds, as desired). SmallVector Indices; @@ -8235,7 +8322,7 @@ return Builder.CreateShuffleVector(V0, V1, Indices); } -template +template static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) { // MVE-specific helper function to make a vector splat of a constant such as // UINT_MAX or INT_MIN, in which all bits below the highest one are equal. @@ -8272,7 +8359,7 @@ // Code autogenerated by Tablegen will handle all the simple builtins. switch (BuiltinID) { - #include "clang/Basic/arm_mve_builtin_cg.inc" +#include "clang/Basic/arm_mve_builtin_cg.inc" // If we didn't match an MVE builtin id at all, go back to the // main EmitARMBuiltinExpr. @@ -8374,10 +8461,10 @@ } } -static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID, - const CallExpr *E, - SmallVectorImpl &Ops, - llvm::Triple::ArchType Arch) { +static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, + unsigned BuiltinID, const CallExpr *E, + SmallVectorImpl &Ops, + llvm::Triple::ArchType Arch) { unsigned int Int = 0; const char *s = nullptr; @@ -8475,8 +8562,7 @@ Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2"); llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24); - Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4], - TwentyFourV); + Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4], TwentyFourV); CmpRes = Builder.CreateSExt(CmpRes, Ty); Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]); @@ -8490,28 +8576,44 @@ } case NEON::BI__builtin_neon_vqtbl1_v: case NEON::BI__builtin_neon_vqtbl1q_v: - Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break; + Int = Intrinsic::aarch64_neon_tbl1; + s = "vtbl1"; + break; case NEON::BI__builtin_neon_vqtbl2_v: case NEON::BI__builtin_neon_vqtbl2q_v: { - Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break; + Int = Intrinsic::aarch64_neon_tbl2; + s = "vtbl2"; + break; case NEON::BI__builtin_neon_vqtbl3_v: case NEON::BI__builtin_neon_vqtbl3q_v: - Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break; + Int = Intrinsic::aarch64_neon_tbl3; + s = "vtbl3"; + break; case NEON::BI__builtin_neon_vqtbl4_v: case NEON::BI__builtin_neon_vqtbl4q_v: - Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break; + Int = Intrinsic::aarch64_neon_tbl4; + s = "vtbl4"; + break; case NEON::BI__builtin_neon_vqtbx1_v: case NEON::BI__builtin_neon_vqtbx1q_v: - Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break; + Int = Intrinsic::aarch64_neon_tbx1; + s = "vtbx1"; + break; case NEON::BI__builtin_neon_vqtbx2_v: case NEON::BI__builtin_neon_vqtbx2q_v: - Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break; + Int = Intrinsic::aarch64_neon_tbx2; + s = "vtbx2"; + break; case NEON::BI__builtin_neon_vqtbx3_v: case NEON::BI__builtin_neon_vqtbx3q_v: - Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break; + Int = Intrinsic::aarch64_neon_tbx3; + s = "vtbx3"; + break; case NEON::BI__builtin_neon_vqtbx4_v: case NEON::BI__builtin_neon_vqtbx4q_v: - Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break; + Int = Intrinsic::aarch64_neon_tbx4; + s = "vtbx4"; + break; } } @@ -8587,7 +8689,8 @@ llvm::ScalableVectorType * CodeGenFunction::getSVEPredType(const SVETypeFlags &TypeFlags) { switch (TypeFlags.getEltType()) { - default: 
llvm_unreachable("Unhandled SVETypeFlag!"); + default: + llvm_unreachable("Unhandled SVETypeFlag!"); case SVETypeFlags::EltTyInt8: return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); @@ -8840,7 +8943,7 @@ } Value *CodeGenFunction::EmitSVEStructLoad(const SVETypeFlags &TypeFlags, - SmallVectorImpl &Ops, + SmallVectorImpl &Ops, unsigned IntID) { llvm::ScalableVectorType *VTy = getSVEType(TypeFlags); auto VecPtrTy = llvm::PointerType::getUnqual(VTy); @@ -8860,11 +8963,11 @@ default: llvm_unreachable("unknown intrinsic!"); } - auto RetTy = llvm::VectorType::get(VTy->getElementType(), - VTy->getElementCount() * N); + auto RetTy = + llvm::VectorType::get(VTy->getElementType(), VTy->getElementCount() * N); - Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy); - Value *BasePtr= Builder.CreateBitCast(Ops[1], VecPtrTy); + Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy); + Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy); Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0); BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset); BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy); @@ -8881,7 +8984,7 @@ } Value *CodeGenFunction::EmitSVEStructStore(const SVETypeFlags &TypeFlags, - SmallVectorImpl &Ops, + SmallVectorImpl &Ops, unsigned IntID) { llvm::ScalableVectorType *VTy = getSVEType(TypeFlags); auto VecPtrTy = llvm::PointerType::getUnqual(VTy); @@ -8911,7 +9014,7 @@ // The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we // need to break up the tuple vector. - SmallVector Operands; + SmallVector Operands; unsigned MinElts = VTy->getElementCount().getKnownMinValue(); for (unsigned I = 0; I < N; ++I) { Value *Idx = ConstantInt::get(CGM.Int64Ty, I * MinElts); @@ -8919,7 +9022,7 @@ } Operands.append({Predicate, BasePtr}); - Function *F = CGM.getIntrinsic(IntID, { VTy }); + Function *F = CGM.getIntrinsic(IntID, {VTy}); return Builder.CreateCall(F, Operands); } @@ -8968,7 +9071,8 @@ } // Prefetch intriniscs always expect an i8* - BasePtr = Builder.CreateBitCast(BasePtr, llvm::PointerType::getUnqual(Int8Ty)); + BasePtr = + Builder.CreateBitCast(BasePtr, llvm::PointerType::getUnqual(Int8Ty)); Value *PrfOp = Ops.back(); Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType()); @@ -9002,7 +9106,7 @@ CGM.DecorateInstructionWithTBAA(Load, TBAAInfo); return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy) - : Builder.CreateSExt(Load, VectorTy); + : Builder.CreateSExt(Load, VectorTy); } Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E, @@ -9041,7 +9145,7 @@ return Builder.CreateCall(F, Scalar); } -Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) { +Value *CodeGenFunction::EmitSVEDupX(Value *Scalar) { return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType())); } @@ -9097,9 +9201,9 @@ unsigned I = cast(Ops[1])->getSExtValue(); auto *SingleVecTy = dyn_cast( - TypeFlags.isTupleSet() ? Ops[2]->getType() : Ty); - Value *Idx = ConstantInt::get(CGM.Int64Ty, - I * SingleVecTy->getMinNumElements()); + TypeFlags.isTupleSet() ? 
Ops[2]->getType() : Ty); + Value *Idx = + ConstantInt::get(CGM.Int64Ty, I * SingleVecTy->getMinNumElements()); if (TypeFlags.isTupleSet()) return Builder.CreateInsertVector(Ty, Ops[0], Ops[2], Idx); @@ -9107,8 +9211,8 @@ } Value *CodeGenFunction::EmitSVETupleCreate(const SVETypeFlags &TypeFlags, - llvm::Type *Ty, - ArrayRef Ops) { + llvm::Type *Ty, + ArrayRef Ops) { assert(TypeFlags.isTupleCreate() && "Expects TypleFlag isTupleCreate"); auto *SrcTy = dyn_cast(Ops[0]->getType()); @@ -9172,14 +9276,14 @@ return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic); else if (TypeFlags.isGatherPrefetch()) return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic); - else if (TypeFlags.isStructLoad()) - return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic); - else if (TypeFlags.isStructStore()) - return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic); + else if (TypeFlags.isStructLoad()) + return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic); + else if (TypeFlags.isStructStore()) + return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic); else if (TypeFlags.isTupleSet() || TypeFlags.isTupleGet()) - return EmitSVETupleSetOrGet(TypeFlags, Ty, Ops); + return EmitSVETupleSetOrGet(TypeFlags, Ty, Ops); else if (TypeFlags.isTupleCreate()) - return EmitSVETupleCreate(TypeFlags, Ty, Ops); + return EmitSVETupleCreate(TypeFlags, Ty, Ops); else if (TypeFlags.isUndef()) return UndefValue::get(Ty); else if (Builtin->LLVMIntrinsic != 0) { @@ -9241,7 +9345,7 @@ case SVE::BI__builtin_sve_svmov_b_z: { // svmov_b_z(pg, op) <=> svand_b_z(pg, op, op) SVETypeFlags TypeFlags(Builtin->TypeModifier); - llvm::Type* OverloadedTy = getSVEType(TypeFlags); + llvm::Type *OverloadedTy = getSVEType(TypeFlags); Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy); return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]}); } @@ -9249,7 +9353,7 @@ case SVE::BI__builtin_sve_svnot_b_z: { // svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg) SVETypeFlags TypeFlags(Builtin->TypeModifier); - llvm::Type* OverloadedTy = getSVEType(TypeFlags); + llvm::Type *OverloadedTy = getSVEType(TypeFlags); Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy); return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]}); } @@ -9313,8 +9417,8 @@ case SVE::BI__builtin_sve_svdupq_n_u32: case SVE::BI__builtin_sve_svdupq_n_f32: case SVE::BI__builtin_sve_svdupq_n_s32: { - // These builtins are implemented by storing each element to an array and using - // ld1rq to materialize a vector. + // These builtins are implemented by storing each element to an array and + // using ld1rq to materialize a vector. 
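// Standalone sketch (not the actual lowering) of what the svdupq_n_* builtins
// described in the comment above compute: the scalar arguments form one
// 128-bit quadword, and that quadword is replicated across the whole scalable
// destination. Shown for 16 x i8 lanes with an assumed vector length of VL
// bytes (for SVE, always a multiple of 16).
#include <cstdint>
#include <vector>

static std::vector<uint8_t> dupq_n_s8(const uint8_t (&Lanes)[16], unsigned VL) {
  std::vector<uint8_t> Out(VL);
  for (unsigned i = 0; i != VL; ++i)
    Out[i] = Lanes[i % 16]; // repeat the 16-byte (128-bit) quadword
  return Out;
}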
unsigned NumOpnds = Ops.size(); bool IsBoolTy = @@ -9329,7 +9433,7 @@ SmallVector VecOps; for (unsigned I = 0; I < NumOpnds; ++I) - VecOps.push_back(Builder.CreateZExt(Ops[I], EltTy)); + VecOps.push_back(Builder.CreateZExt(Ops[I], EltTy)); Value *Vec = BuildVector(VecOps); SVETypeFlags TypeFlags(Builtin->TypeModifier); @@ -9465,7 +9569,8 @@ unsigned HintID = static_cast(-1); switch (BuiltinID) { - default: break; + default: + break; case clang::AArch64::BI__builtin_arm_nop: HintID = 0; break; @@ -9497,17 +9602,17 @@ } if (BuiltinID == clang::AArch64::BI__builtin_arm_prefetch) { - Value *Address = EmitScalarExpr(E->getArg(0)); - Value *RW = EmitScalarExpr(E->getArg(1)); - Value *CacheLevel = EmitScalarExpr(E->getArg(2)); + Value *Address = EmitScalarExpr(E->getArg(0)); + Value *RW = EmitScalarExpr(E->getArg(1)); + Value *CacheLevel = EmitScalarExpr(E->getArg(2)); Value *RetentionPolicy = EmitScalarExpr(E->getArg(3)); - Value *IsData = EmitScalarExpr(E->getArg(4)); + Value *IsData = EmitScalarExpr(E->getArg(4)); Value *Locality = nullptr; if (cast(RetentionPolicy)->isZero()) { // Temporal fetch, needs to convert cache level to locality. - Locality = llvm::ConstantInt::get(Int32Ty, - -cast(CacheLevel)->getValue() + 3); + Locality = llvm::ConstantInt::get( + Int32Ty, -cast(CacheLevel)->getValue() + 3); } else { // Streaming fetch. Locality = llvm::ConstantInt::get(Int32Ty, 0); @@ -9581,8 +9686,8 @@ assert((getContext().getTypeSize(E->getType()) == 32) && "__jcvt of unusual size!"); llvm::Value *Arg = EmitScalarExpr(E->getArg(0)); - return Builder.CreateCall( - CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg); + return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), + Arg); } if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b || @@ -9667,8 +9772,8 @@ : Intrinsic::aarch64_ldxp); Value *LdPtr = EmitScalarExpr(E->getArg(0)); - Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy), - "ldxp"); + Value *Val = + Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy), "ldxp"); Value *Val0 = Builder.CreateExtractValue(Val, 1); Value *Val1 = Builder.CreateExtractValue(Val, 0); @@ -9726,8 +9831,8 @@ Value *Arg0 = Builder.CreateExtractValue(Val, 0); Value *Arg1 = Builder.CreateExtractValue(Val, 1); - Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), - Int8PtrTy); + Value *StPtr = + Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy); return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp"); } @@ -9737,8 +9842,8 @@ Value *StoreAddr = EmitScalarExpr(E->getArg(1)); QualType Ty = E->getArg(0)->getType(); - llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(), - getContext().getTypeSize(Ty)); + llvm::Type *StoreTy = + llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty)); StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo()); if (StoreVal->getType()->isPointerTy()) @@ -9802,21 +9907,29 @@ Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic; switch (BuiltinID) { case clang::AArch64::BI__builtin_arm_crc32b: - CRCIntrinsicID = Intrinsic::aarch64_crc32b; break; + CRCIntrinsicID = Intrinsic::aarch64_crc32b; + break; case clang::AArch64::BI__builtin_arm_crc32cb: - CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break; + CRCIntrinsicID = Intrinsic::aarch64_crc32cb; + break; case clang::AArch64::BI__builtin_arm_crc32h: - CRCIntrinsicID = Intrinsic::aarch64_crc32h; break; + CRCIntrinsicID = Intrinsic::aarch64_crc32h; + break; case clang::AArch64::BI__builtin_arm_crc32ch: - CRCIntrinsicID = 
Intrinsic::aarch64_crc32ch; break; + CRCIntrinsicID = Intrinsic::aarch64_crc32ch; + break; case clang::AArch64::BI__builtin_arm_crc32w: - CRCIntrinsicID = Intrinsic::aarch64_crc32w; break; + CRCIntrinsicID = Intrinsic::aarch64_crc32w; + break; case clang::AArch64::BI__builtin_arm_crc32cw: - CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break; + CRCIntrinsicID = Intrinsic::aarch64_crc32cw; + break; case clang::AArch64::BI__builtin_arm_crc32d: - CRCIntrinsicID = Intrinsic::aarch64_crc32x; break; + CRCIntrinsicID = Intrinsic::aarch64_crc32x; + break; case clang::AArch64::BI__builtin_arm_crc32cd: - CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break; + CRCIntrinsicID = Intrinsic::aarch64_crc32cx; + break; } if (CRCIntrinsicID != Intrinsic::not_intrinsic) { @@ -9846,17 +9959,23 @@ Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic; switch (BuiltinID) { case clang::AArch64::BI__builtin_arm_irg: - MTEIntrinsicID = Intrinsic::aarch64_irg; break; + MTEIntrinsicID = Intrinsic::aarch64_irg; + break; case clang::AArch64::BI__builtin_arm_addg: - MTEIntrinsicID = Intrinsic::aarch64_addg; break; + MTEIntrinsicID = Intrinsic::aarch64_addg; + break; case clang::AArch64::BI__builtin_arm_gmi: - MTEIntrinsicID = Intrinsic::aarch64_gmi; break; + MTEIntrinsicID = Intrinsic::aarch64_gmi; + break; case clang::AArch64::BI__builtin_arm_ldg: - MTEIntrinsicID = Intrinsic::aarch64_ldg; break; + MTEIntrinsicID = Intrinsic::aarch64_ldg; + break; case clang::AArch64::BI__builtin_arm_stg: - MTEIntrinsicID = Intrinsic::aarch64_stg; break; + MTEIntrinsicID = Intrinsic::aarch64_stg; + break; case clang::AArch64::BI__builtin_arm_subp: - MTEIntrinsicID = Intrinsic::aarch64_subp; break; + MTEIntrinsicID = Intrinsic::aarch64_subp; + break; } if (MTEIntrinsicID != Intrinsic::not_intrinsic) { @@ -9868,9 +9987,9 @@ Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy); Mask = Builder.CreateZExt(Mask, Int64Ty); - Value *RV = Builder.CreateCall( - CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask}); - return Builder.CreatePointerCast(RV, T); + Value *RV = + Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask}); + return Builder.CreatePointerCast(RV, T); } if (MTEIntrinsicID == Intrinsic::aarch64_addg) { Value *Pointer = EmitScalarExpr(E->getArg(0)); @@ -9878,8 +9997,8 @@ Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy); TagOffset = Builder.CreateZExt(TagOffset, Int64Ty); - Value *RV = Builder.CreateCall( - CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset}); + Value *RV = Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID), + {Pointer, TagOffset}); return Builder.CreatePointerCast(RV, T); } if (MTEIntrinsicID == Intrinsic::aarch64_gmi) { @@ -9888,8 +10007,8 @@ ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty); Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy); - return Builder.CreateCall( - CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask}); + return Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID), + {Pointer, ExcludedMask}); } // Although it is possible to supply a different return // address (first arg) to this intrinsic, for now we set @@ -9897,26 +10016,26 @@ if (MTEIntrinsicID == Intrinsic::aarch64_ldg) { Value *TagAddress = EmitScalarExpr(E->getArg(0)); TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy); - Value *RV = Builder.CreateCall( - CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress}); + Value *RV = Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID), + {TagAddress, TagAddress}); return Builder.CreatePointerCast(RV, T); } // Although it is possible 
to supply a different tag (to set) // to this intrinsic (as first arg), for now we supply // the tag that is in input address arg (common use case). if (MTEIntrinsicID == Intrinsic::aarch64_stg) { - Value *TagAddress = EmitScalarExpr(E->getArg(0)); - TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy); - return Builder.CreateCall( - CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress}); + Value *TagAddress = EmitScalarExpr(E->getArg(0)); + TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy); + return Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID), + {TagAddress, TagAddress}); } if (MTEIntrinsicID == Intrinsic::aarch64_subp) { Value *PointerA = EmitScalarExpr(E->getArg(0)); Value *PointerB = EmitScalarExpr(E->getArg(1)); PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy); PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy); - return Builder.CreateCall( - CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB}); + return Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID), + {PointerA, PointerB}); } } @@ -9958,33 +10077,33 @@ LLVMContext &Context = CGM.getLLVMContext(); unsigned SysReg = - E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue(); + E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue(); std::string SysRegStr; - llvm::raw_string_ostream(SysRegStr) << - ((1 << 1) | ((SysReg >> 14) & 1)) << ":" << - ((SysReg >> 11) & 7) << ":" << - ((SysReg >> 7) & 15) << ":" << - ((SysReg >> 3) & 15) << ":" << - ( SysReg & 7); - - llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) }; + llvm::raw_string_ostream(SysRegStr) + << ((1 << 1) | ((SysReg >> 14) & 1)) << ":" << ((SysReg >> 11) & 7) + << ":" << ((SysReg >> 7) & 15) << ":" << ((SysReg >> 3) & 15) << ":" + << (SysReg & 7); + + llvm::Metadata *Ops[] = {llvm::MDString::get(Context, SysRegStr)}; llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops); llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName); llvm::Type *RegisterType = Int64Ty; - llvm::Type *Types[] = { RegisterType }; + llvm::Type *Types[] = {RegisterType}; if (BuiltinID == clang::AArch64::BI_ReadStatusReg) { - llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types); + llvm::Function *F = + CGM.getIntrinsic(llvm::Intrinsic::read_register, Types); return Builder.CreateCall(F, Metadata); } - llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types); + llvm::Function *F = + CGM.getIntrinsic(llvm::Intrinsic::write_register, Types); llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1)); - return Builder.CreateCall(F, { Metadata, ArgValue }); + return Builder.CreateCall(F, {Metadata, ArgValue}); } if (BuiltinID == clang::AArch64::BI_AddressOfReturnAddress) { @@ -10083,7 +10202,7 @@ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments); assert(Error == ASTContext::GE_None && "Should not codegen an error"); - llvm::SmallVector Ops; + llvm::SmallVector Ops; Address PtrOp0 = Address::invalid(); for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) { if (i == 0) { @@ -10127,7 +10246,7 @@ return Result; } - const Expr *Arg = E->getArg(E->getNumArgs()-1); + const Expr *Arg = E->getArg(E->getNumArgs() - 1); NeonTypeFlags Type(0); if (Optional Result = Arg->getIntegerConstantExpr(getContext())) // Determine the type of this overloaded NEON intrinsic. @@ -10138,7 +10257,8 @@ // Handle non-overloaded intrinsics first. 
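On the _ReadStatusReg/_WriteStatusReg handling a little further back: the string being built unpacks the MSVC-style 16-bit system-register immediate into the "op0:op1:CRn:CRm:op2" form expected by the read_register/write_register intrinsics, with op0 encoded as 2 plus the immediate's top bit. A stand-alone sketch of the same decoding (field names are the usual MSR/MRS ones; the helper itself is illustrative only):

#include <cstdio>

// Decode the packed system-register immediate used by _ReadStatusReg:
// bit 14 -> low bit of op0 (op0 is always 2 or 3), bits 13:11 -> op1,
// bits 10:7 -> CRn, bits 6:3 -> CRm, bits 2:0 -> op2.
static void print_sysreg_name(unsigned SysReg) {
  unsigned Op0 = (1u << 1) | ((SysReg >> 14) & 1);
  unsigned Op1 = (SysReg >> 11) & 7;
  unsigned CRn = (SysReg >> 7) & 15;
  unsigned CRm = (SysReg >> 3) & 15;
  unsigned Op2 = SysReg & 7;
  std::printf("%u:%u:%u:%u:%u\n", Op0, Op1, CRn, CRm, Op2);
}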
switch (BuiltinID) { - default: break; + default: + break; case NEON::BI__builtin_neon_vabsh_f16: Ops.push_back(EmitScalarExpr(E->getArg(0))); return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs"); @@ -10147,7 +10267,7 @@ Ops.push_back(EmitScalarExpr(E->getArg(1))); Ops[0] = Builder.CreateBitCast(Ops[0], Ty); Ops[1] = Builder.CreateBitCast(Ops[1], Ty); - Ops[0] = Builder.CreateXor(Ops[0], Ops[1]); + Ops[0] = Builder.CreateXor(Ops[0], Ops[1]); llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128); return Builder.CreateBitCast(Ops[0], Int128Ty); } @@ -10211,32 +10331,43 @@ case NEON::BI__builtin_neon_vcvtph_s16_f16: case NEON::BI__builtin_neon_vcvth_s16_f16: { unsigned Int; - llvm::Type* InTy = Int32Ty; - llvm::Type* FTy = HalfTy; + llvm::Type *InTy = Int32Ty; + llvm::Type *FTy = HalfTy; llvm::Type *Tys[2] = {InTy, FTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); switch (BuiltinID) { - default: llvm_unreachable("missing builtin ID in switch!"); + default: + llvm_unreachable("missing builtin ID in switch!"); case NEON::BI__builtin_neon_vcvtah_u16_f16: - Int = Intrinsic::aarch64_neon_fcvtau; break; + Int = Intrinsic::aarch64_neon_fcvtau; + break; case NEON::BI__builtin_neon_vcvtmh_u16_f16: - Int = Intrinsic::aarch64_neon_fcvtmu; break; + Int = Intrinsic::aarch64_neon_fcvtmu; + break; case NEON::BI__builtin_neon_vcvtnh_u16_f16: - Int = Intrinsic::aarch64_neon_fcvtnu; break; + Int = Intrinsic::aarch64_neon_fcvtnu; + break; case NEON::BI__builtin_neon_vcvtph_u16_f16: - Int = Intrinsic::aarch64_neon_fcvtpu; break; + Int = Intrinsic::aarch64_neon_fcvtpu; + break; case NEON::BI__builtin_neon_vcvth_u16_f16: - Int = Intrinsic::aarch64_neon_fcvtzu; break; + Int = Intrinsic::aarch64_neon_fcvtzu; + break; case NEON::BI__builtin_neon_vcvtah_s16_f16: - Int = Intrinsic::aarch64_neon_fcvtas; break; + Int = Intrinsic::aarch64_neon_fcvtas; + break; case NEON::BI__builtin_neon_vcvtmh_s16_f16: - Int = Intrinsic::aarch64_neon_fcvtms; break; + Int = Intrinsic::aarch64_neon_fcvtms; + break; case NEON::BI__builtin_neon_vcvtnh_s16_f16: - Int = Intrinsic::aarch64_neon_fcvtns; break; + Int = Intrinsic::aarch64_neon_fcvtns; + break; case NEON::BI__builtin_neon_vcvtph_s16_f16: - Int = Intrinsic::aarch64_neon_fcvtps; break; + Int = Intrinsic::aarch64_neon_fcvtps; + break; case NEON::BI__builtin_neon_vcvth_s16_f16: - Int = Intrinsic::aarch64_neon_fcvtzs; break; + Int = Intrinsic::aarch64_neon_fcvtzs; + break; } Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt"); return Builder.CreateTrunc(Ops[0], Int16Ty); @@ -10246,20 +10377,27 @@ case NEON::BI__builtin_neon_vcageh_f16: case NEON::BI__builtin_neon_vcagth_f16: { unsigned Int; - llvm::Type* InTy = Int32Ty; - llvm::Type* FTy = HalfTy; + llvm::Type *InTy = Int32Ty; + llvm::Type *FTy = HalfTy; llvm::Type *Tys[2] = {InTy, FTy}; Ops.push_back(EmitScalarExpr(E->getArg(1))); switch (BuiltinID) { - default: llvm_unreachable("missing builtin ID in switch!"); + default: + llvm_unreachable("missing builtin ID in switch!"); case NEON::BI__builtin_neon_vcageh_f16: - Int = Intrinsic::aarch64_neon_facge; break; + Int = Intrinsic::aarch64_neon_facge; + break; case NEON::BI__builtin_neon_vcagth_f16: - Int = Intrinsic::aarch64_neon_facgt; break; + Int = Intrinsic::aarch64_neon_facgt; + break; case NEON::BI__builtin_neon_vcaleh_f16: - Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break; + Int = Intrinsic::aarch64_neon_facge; + std::swap(Ops[0], Ops[1]); + break; case NEON::BI__builtin_neon_vcalth_f16: - Int = 
Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break; + Int = Intrinsic::aarch64_neon_facgt; + std::swap(Ops[0], Ops[1]); + break; } Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg"); return Builder.CreateTrunc(Ops[0], Int16Ty); @@ -10267,16 +10405,19 @@ case NEON::BI__builtin_neon_vcvth_n_s16_f16: case NEON::BI__builtin_neon_vcvth_n_u16_f16: { unsigned Int; - llvm::Type* InTy = Int32Ty; - llvm::Type* FTy = HalfTy; + llvm::Type *InTy = Int32Ty; + llvm::Type *FTy = HalfTy; llvm::Type *Tys[2] = {InTy, FTy}; Ops.push_back(EmitScalarExpr(E->getArg(1))); switch (BuiltinID) { - default: llvm_unreachable("missing builtin ID in switch!"); + default: + llvm_unreachable("missing builtin ID in switch!"); case NEON::BI__builtin_neon_vcvth_n_s16_f16: - Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break; + Int = Intrinsic::aarch64_neon_vcvtfp2fxs; + break; case NEON::BI__builtin_neon_vcvth_n_u16_f16: - Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break; + Int = Intrinsic::aarch64_neon_vcvtfp2fxu; + break; } Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n"); return Builder.CreateTrunc(Ops[0], Int16Ty); @@ -10284,12 +10425,13 @@ case NEON::BI__builtin_neon_vcvth_n_f16_s16: case NEON::BI__builtin_neon_vcvth_n_f16_u16: { unsigned Int; - llvm::Type* FTy = HalfTy; - llvm::Type* InTy = Int32Ty; + llvm::Type *FTy = HalfTy; + llvm::Type *InTy = Int32Ty; llvm::Type *Tys[2] = {FTy, InTy}; Ops.push_back(EmitScalarExpr(E->getArg(1))); switch (BuiltinID) { - default: llvm_unreachable("missing builtin ID in switch!"); + default: + llvm_unreachable("missing builtin ID in switch!"); case NEON::BI__builtin_neon_vcvth_n_f16_s16: Int = Intrinsic::aarch64_neon_vcvtfxs2fp; Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext"); @@ -10392,12 +10534,23 @@ case NEON::BI__builtin_neon_vcgtd_f64: { llvm::CmpInst::Predicate P; switch (BuiltinID) { - default: llvm_unreachable("missing builtin ID in switch!"); - case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break; - case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break; - case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break; - case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break; - case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break; + default: + llvm_unreachable("missing builtin ID in switch!"); + case NEON::BI__builtin_neon_vceqd_f64: + P = llvm::FCmpInst::FCMP_OEQ; + break; + case NEON::BI__builtin_neon_vcled_f64: + P = llvm::FCmpInst::FCMP_OLE; + break; + case NEON::BI__builtin_neon_vcltd_f64: + P = llvm::FCmpInst::FCMP_OLT; + break; + case NEON::BI__builtin_neon_vcged_f64: + P = llvm::FCmpInst::FCMP_OGE; + break; + case NEON::BI__builtin_neon_vcgtd_f64: + P = llvm::FCmpInst::FCMP_OGT; + break; } Ops.push_back(EmitScalarExpr(E->getArg(1))); Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy); @@ -10415,12 +10568,23 @@ case NEON::BI__builtin_neon_vcgts_f32: { llvm::CmpInst::Predicate P; switch (BuiltinID) { - default: llvm_unreachable("missing builtin ID in switch!"); - case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break; - case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break; - case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break; - case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break; - case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break; + default: + llvm_unreachable("missing builtin ID in switch!"); + case 
NEON::BI__builtin_neon_vceqs_f32: + P = llvm::FCmpInst::FCMP_OEQ; + break; + case NEON::BI__builtin_neon_vcles_f32: + P = llvm::FCmpInst::FCMP_OLE; + break; + case NEON::BI__builtin_neon_vclts_f32: + P = llvm::FCmpInst::FCMP_OLT; + break; + case NEON::BI__builtin_neon_vcges_f32: + P = llvm::FCmpInst::FCMP_OGE; + break; + case NEON::BI__builtin_neon_vcgts_f32: + P = llvm::FCmpInst::FCMP_OGT; + break; } Ops.push_back(EmitScalarExpr(E->getArg(1))); Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy); @@ -10438,12 +10602,23 @@ case NEON::BI__builtin_neon_vcgth_f16: { llvm::CmpInst::Predicate P; switch (BuiltinID) { - default: llvm_unreachable("missing builtin ID in switch!"); - case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break; - case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break; - case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break; - case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break; - case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break; + default: + llvm_unreachable("missing builtin ID in switch!"); + case NEON::BI__builtin_neon_vceqh_f16: + P = llvm::FCmpInst::FCMP_OEQ; + break; + case NEON::BI__builtin_neon_vcleh_f16: + P = llvm::FCmpInst::FCMP_OLE; + break; + case NEON::BI__builtin_neon_vclth_f16: + P = llvm::FCmpInst::FCMP_OLT; + break; + case NEON::BI__builtin_neon_vcgeh_f16: + P = llvm::FCmpInst::FCMP_OGE; + break; + case NEON::BI__builtin_neon_vcgth_f16: + P = llvm::FCmpInst::FCMP_OGT; + break; } Ops.push_back(EmitScalarExpr(E->getArg(1))); Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy); @@ -10466,17 +10641,36 @@ case NEON::BI__builtin_neon_vcled_s64: { llvm::CmpInst::Predicate P; switch (BuiltinID) { - default: llvm_unreachable("missing builtin ID in switch!"); + default: + llvm_unreachable("missing builtin ID in switch!"); case NEON::BI__builtin_neon_vceqd_s64: - case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break; - case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break; - case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break; - case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break; - case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break; - case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break; - case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break; - case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break; - case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break; + case NEON::BI__builtin_neon_vceqd_u64: + P = llvm::ICmpInst::ICMP_EQ; + break; + case NEON::BI__builtin_neon_vcgtd_s64: + P = llvm::ICmpInst::ICMP_SGT; + break; + case NEON::BI__builtin_neon_vcgtd_u64: + P = llvm::ICmpInst::ICMP_UGT; + break; + case NEON::BI__builtin_neon_vcltd_s64: + P = llvm::ICmpInst::ICMP_SLT; + break; + case NEON::BI__builtin_neon_vcltd_u64: + P = llvm::ICmpInst::ICMP_ULT; + break; + case NEON::BI__builtin_neon_vcged_u64: + P = llvm::ICmpInst::ICMP_UGE; + break; + case NEON::BI__builtin_neon_vcged_s64: + P = llvm::ICmpInst::ICMP_SGE; + break; + case NEON::BI__builtin_neon_vcled_u64: + P = llvm::ICmpInst::ICMP_ULE; + break; + case NEON::BI__builtin_neon_vcled_s64: + P = llvm::ICmpInst::ICMP_SLE; + break; } Ops.push_back(EmitScalarExpr(E->getArg(1))); Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty); @@ -10621,7 +10815,8 @@ case NEON::BI__builtin_neon_vfmsh_f16: { // FIXME: This should be an fneg instruction: Value 
*Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy); - Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh"); + Value *Sub = + Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh"); // NEON intrinsic puts accumulator first, unlike the LLVM fma. return emitCallMaybeConstrainedFPBuiltin( @@ -10640,27 +10835,29 @@ ProductOps.push_back(vectorWrapScalar16(Ops[1])); ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2)))); auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4); - Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy), - ProductOps, "vqdmlXl"); + Ops[1] = + EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy), + ProductOps, "vqdmlXl"); Constant *CI = ConstantInt::get(SizeTy, 0); Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0"); unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16 - ? Intrinsic::aarch64_neon_sqadd - : Intrinsic::aarch64_neon_sqsub; + ? Intrinsic::aarch64_neon_sqadd + : Intrinsic::aarch64_neon_sqsub; return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl"); } case NEON::BI__builtin_neon_vqshlud_n_s64: { Ops.push_back(EmitScalarExpr(E->getArg(1))); Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty); - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty), - Ops, "vqshlu_n"); + return EmitNeonCall( + CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty), Ops, + "vqshlu_n"); } case NEON::BI__builtin_neon_vqshld_n_u64: case NEON::BI__builtin_neon_vqshld_n_s64: { unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64 - ? Intrinsic::aarch64_neon_uqshl - : Intrinsic::aarch64_neon_sqshl; + ? Intrinsic::aarch64_neon_uqshl + : Intrinsic::aarch64_neon_sqshl; Ops.push_back(EmitScalarExpr(E->getArg(1))); Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty); return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n"); @@ -10668,8 +10865,8 @@ case NEON::BI__builtin_neon_vrshrd_n_u64: case NEON::BI__builtin_neon_vrshrd_n_s64: { unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64 - ? Intrinsic::aarch64_neon_urshl - : Intrinsic::aarch64_neon_srshl; + ? Intrinsic::aarch64_neon_urshl + : Intrinsic::aarch64_neon_srshl; Ops.push_back(EmitScalarExpr(E->getArg(1))); int SV = cast(Ops[1])->getSExtValue(); Ops[1] = ConstantInt::get(Int64Ty, -SV); @@ -10678,8 +10875,8 @@ case NEON::BI__builtin_neon_vrsrad_n_u64: case NEON::BI__builtin_neon_vrsrad_n_s64: { unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64 - ? Intrinsic::aarch64_neon_urshl - : Intrinsic::aarch64_neon_srshl; + ? 
Intrinsic::aarch64_neon_urshl + : Intrinsic::aarch64_neon_srshl; Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty); Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2)))); Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty), @@ -10695,8 +10892,9 @@ case NEON::BI__builtin_neon_vshrd_n_s64: { llvm::ConstantInt *Amt = cast(EmitScalarExpr(E->getArg(1))); return Builder.CreateAShr( - Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast(63), - Amt->getZExtValue())), + Ops[0], + ConstantInt::get( + Int64Ty, std::min(static_cast(63), Amt->getZExtValue())), "shrd_n"); } case NEON::BI__builtin_neon_vshrd_n_u64: { @@ -10711,8 +10909,9 @@ case NEON::BI__builtin_neon_vsrad_n_s64: { llvm::ConstantInt *Amt = cast(EmitScalarExpr(E->getArg(2))); Ops[1] = Builder.CreateAShr( - Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast(63), - Amt->getZExtValue())), + Ops[1], + ConstantInt::get( + Int64Ty, std::min(static_cast(63), Amt->getZExtValue())), "shrd_n"); return Builder.CreateAdd(Ops[0], Ops[1]); } @@ -10737,8 +10936,9 @@ ProductOps.push_back(vectorWrapScalar16(Ops[1])); ProductOps.push_back(vectorWrapScalar16(Ops[2])); auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4); - Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy), - ProductOps, "vqdmlXl"); + Ops[1] = + EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy), + ProductOps, "vqdmlXl"); Constant *CI = ConstantInt::get(SizeTy, 0); Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0"); Ops.pop_back(); @@ -10759,8 +10959,8 @@ ProductOps, "vqdmlXl"); unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32 - ? Intrinsic::aarch64_neon_sqadd - : Intrinsic::aarch64_neon_sqsub; + ? Intrinsic::aarch64_neon_sqadd + : Intrinsic::aarch64_neon_sqsub; return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl"); } case NEON::BI__builtin_neon_vqdmlals_lane_s32: @@ -10799,9 +10999,9 @@ case clang::AArch64::BI_InterlockedAdd: { Value *Arg0 = EmitScalarExpr(E->getArg(0)); Value *Arg1 = EmitScalarExpr(E->getArg(1)); - AtomicRMWInst *RMWI = Builder.CreateAtomicRMW( - AtomicRMWInst::Add, Arg0, Arg1, - llvm::AtomicOrdering::SequentiallyConsistent); + AtomicRMWInst *RMWI = + Builder.CreateAtomicRMW(AtomicRMWInst::Add, Arg0, Arg1, + llvm::AtomicOrdering::SequentiallyConsistent); return Builder.CreateAdd(RMWI, Arg1); } } @@ -10827,7 +11027,8 @@ unsigned Int; switch (BuiltinID) { - default: return nullptr; + default: + return nullptr; case NEON::BI__builtin_neon_vbsl_v: case NEON::BI__builtin_neon_vbslq_v: { llvm::Type *BitTy = llvm::VectorType::getInteger(VTy); @@ -10922,13 +11123,15 @@ case NEON::BI__builtin_neon_vmull_v: // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull; - if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull; + if (Type.isPoly()) + Int = Intrinsic::aarch64_neon_pmull; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull"); case NEON::BI__builtin_neon_vmax_v: case NEON::BI__builtin_neon_vmaxq_v: // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. Int = usgn ? 
Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax; - if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax; + if (Ty->isFPOrFPVectorTy()) + Int = Intrinsic::aarch64_neon_fmax; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax"); case NEON::BI__builtin_neon_vmaxh_f16: { Ops.push_back(EmitScalarExpr(E->getArg(1))); @@ -10939,7 +11142,8 @@ case NEON::BI__builtin_neon_vminq_v: // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin; - if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin; + if (Ty->isFPOrFPVectorTy()) + Int = Intrinsic::aarch64_neon_fmin; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin"); case NEON::BI__builtin_neon_vminh_f16: { Ops.push_back(EmitScalarExpr(E->getArg(1))); @@ -10950,7 +11154,8 @@ case NEON::BI__builtin_neon_vabdq_v: // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd; - if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd; + if (Ty->isFPOrFPVectorTy()) + Int = Intrinsic::aarch64_neon_fabd; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd"); case NEON::BI__builtin_neon_vpadal_v: case NEON::BI__builtin_neon_vpadalq_v: { @@ -10959,9 +11164,10 @@ unsigned BitWidth = EltTy->getBitWidth(); auto *ArgTy = llvm::FixedVectorType::get( llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts); - llvm::Type* Tys[2] = { VTy, ArgTy }; - Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp; - SmallVector TmpOps; + llvm::Type *Tys[2] = {VTy, ArgTy}; + Int = + usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp; + SmallVector TmpOps; TmpOps.push_back(Ops[1]); Function *F = CGM.getIntrinsic(Int, Tys); llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal"); @@ -10972,13 +11178,15 @@ case NEON::BI__builtin_neon_vpminq_v: // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp; - if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp; + if (Ty->isFPOrFPVectorTy()) + Int = Intrinsic::aarch64_neon_fminp; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin"); case NEON::BI__builtin_neon_vpmax_v: case NEON::BI__builtin_neon_vpmaxq_v: // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics. Int = usgn ? 
Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp; - if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp; + if (Ty->isFPOrFPVectorTy()) + Int = Intrinsic::aarch64_neon_fmaxp; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax"); case NEON::BI__builtin_neon_vminnm_v: case NEON::BI__builtin_neon_vminnmq_v: @@ -10998,17 +11206,20 @@ return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm"); case NEON::BI__builtin_neon_vrecpss_f32: { Ops.push_back(EmitScalarExpr(E->getArg(1))); - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy), - Ops, "vrecps"); + return EmitNeonCall( + CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy), Ops, + "vrecps"); } case NEON::BI__builtin_neon_vrecpsd_f64: Ops.push_back(EmitScalarExpr(E->getArg(1))); - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy), - Ops, "vrecps"); + return EmitNeonCall( + CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy), Ops, + "vrecps"); case NEON::BI__builtin_neon_vrecpsh_f16: Ops.push_back(EmitScalarExpr(E->getArg(1))); - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy), - Ops, "vrecps"); + return EmitNeonCall( + CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy), Ops, + "vrecps"); case NEON::BI__builtin_neon_vqshrun_n_v: Int = Intrinsic::aarch64_neon_sqshrun; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n"); @@ -11016,13 +11227,15 @@ Int = Intrinsic::aarch64_neon_sqrshrun; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n"); case NEON::BI__builtin_neon_vqshrn_n_v: - Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn; + Int = + usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n"); case NEON::BI__builtin_neon_vrshrn_n_v: Int = Intrinsic::aarch64_neon_rshrn; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n"); case NEON::BI__builtin_neon_vqrshrn_n_v: - Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn; + Int = usgn ? Intrinsic::aarch64_neon_uqrshrn + : Intrinsic::aarch64_neon_sqrshrn; return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n"); case NEON::BI__builtin_neon_vrndah_f16: { Ops.push_back(EmitScalarExpr(E->getArg(0))); @@ -11197,8 +11410,9 @@ case NEON::BI__builtin_neon_vcvtaq_s64_v: case NEON::BI__builtin_neon_vcvta_u64_v: case NEON::BI__builtin_neon_vcvtaq_u64_v: { - Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas; - llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; + Int = + usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas; + llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)}; return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta"); } case NEON::BI__builtin_neon_vcvtm_s16_v: @@ -11213,8 +11427,9 @@ case NEON::BI__builtin_neon_vcvtmq_s64_v: case NEON::BI__builtin_neon_vcvtm_u64_v: case NEON::BI__builtin_neon_vcvtmq_u64_v: { - Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms; - llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; + Int = + usgn ? 
Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms; + llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)}; return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm"); } case NEON::BI__builtin_neon_vcvtn_s16_v: @@ -11229,8 +11444,9 @@ case NEON::BI__builtin_neon_vcvtnq_s64_v: case NEON::BI__builtin_neon_vcvtn_u64_v: case NEON::BI__builtin_neon_vcvtnq_u64_v: { - Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns; - llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; + Int = + usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns; + llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)}; return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn"); } case NEON::BI__builtin_neon_vcvtp_s16_v: @@ -11245,8 +11461,9 @@ case NEON::BI__builtin_neon_vcvtpq_s64_v: case NEON::BI__builtin_neon_vcvtp_u64_v: case NEON::BI__builtin_neon_vcvtpq_u64_v: { - Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps; - llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) }; + Int = + usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps; + llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)}; return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp"); } case NEON::BI__builtin_neon_vmulx_v: @@ -11320,7 +11537,7 @@ Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int8Ty, 8); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); return Builder.CreateTrunc(Ops[0], Int8Ty); @@ -11332,7 +11549,7 @@ Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int16Ty, 4); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); return Builder.CreateTrunc(Ops[0], Int16Ty); @@ -11344,7 +11561,7 @@ Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int8Ty, 16); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); return Builder.CreateTrunc(Ops[0], Int8Ty); @@ -11356,7 +11573,7 @@ Int = usgn ? 
Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int16Ty, 8); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv"); return Builder.CreateTrunc(Ops[0], Int16Ty); @@ -11365,7 +11582,7 @@ Int = Intrinsic::aarch64_neon_umaxv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int8Ty, 8); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); return Builder.CreateTrunc(Ops[0], Int8Ty); @@ -11374,7 +11591,7 @@ Int = Intrinsic::aarch64_neon_umaxv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int16Ty, 4); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); return Builder.CreateTrunc(Ops[0], Int16Ty); @@ -11383,7 +11600,7 @@ Int = Intrinsic::aarch64_neon_umaxv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int8Ty, 16); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); return Builder.CreateTrunc(Ops[0], Int8Ty); @@ -11392,7 +11609,7 @@ Int = Intrinsic::aarch64_neon_umaxv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int16Ty, 8); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); return Builder.CreateTrunc(Ops[0], Int16Ty); @@ -11401,7 +11618,7 @@ Int = Intrinsic::aarch64_neon_smaxv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int8Ty, 8); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); return Builder.CreateTrunc(Ops[0], Int8Ty); @@ -11410,7 +11627,7 @@ Int = Intrinsic::aarch64_neon_smaxv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int16Ty, 4); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); return Builder.CreateTrunc(Ops[0], Int16Ty); @@ -11419,7 +11636,7 @@ Int = Intrinsic::aarch64_neon_smaxv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int8Ty, 16); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); return Builder.CreateTrunc(Ops[0], Int8Ty); @@ -11428,7 +11645,7 @@ Int = Intrinsic::aarch64_neon_smaxv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int16Ty, 8); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); return Builder.CreateTrunc(Ops[0], Int16Ty); @@ -11437,7 +11654,7 @@ Int = Intrinsic::aarch64_neon_fmaxv; Ty = HalfTy; VTy = llvm::FixedVectorType::get(HalfTy, 4); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); return Builder.CreateTrunc(Ops[0], HalfTy); @@ -11446,7 +11663,7 @@ Int = Intrinsic::aarch64_neon_fmaxv; Ty = HalfTy; VTy = 
llvm::FixedVectorType::get(HalfTy, 8); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv"); return Builder.CreateTrunc(Ops[0], HalfTy); @@ -11455,7 +11672,7 @@ Int = Intrinsic::aarch64_neon_uminv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int8Ty, 8); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); return Builder.CreateTrunc(Ops[0], Int8Ty); @@ -11464,7 +11681,7 @@ Int = Intrinsic::aarch64_neon_uminv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int16Ty, 4); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); return Builder.CreateTrunc(Ops[0], Int16Ty); @@ -11473,7 +11690,7 @@ Int = Intrinsic::aarch64_neon_uminv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int8Ty, 16); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); return Builder.CreateTrunc(Ops[0], Int8Ty); @@ -11482,7 +11699,7 @@ Int = Intrinsic::aarch64_neon_uminv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int16Ty, 8); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); return Builder.CreateTrunc(Ops[0], Int16Ty); @@ -11491,7 +11708,7 @@ Int = Intrinsic::aarch64_neon_sminv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int8Ty, 8); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); return Builder.CreateTrunc(Ops[0], Int8Ty); @@ -11500,7 +11717,7 @@ Int = Intrinsic::aarch64_neon_sminv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int16Ty, 4); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); return Builder.CreateTrunc(Ops[0], Int16Ty); @@ -11509,7 +11726,7 @@ Int = Intrinsic::aarch64_neon_sminv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int8Ty, 16); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); return Builder.CreateTrunc(Ops[0], Int8Ty); @@ -11518,7 +11735,7 @@ Int = Intrinsic::aarch64_neon_sminv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int16Ty, 8); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); return Builder.CreateTrunc(Ops[0], Int16Ty); @@ -11527,7 +11744,7 @@ Int = Intrinsic::aarch64_neon_fminv; Ty = HalfTy; VTy = llvm::FixedVectorType::get(HalfTy, 4); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); return Builder.CreateTrunc(Ops[0], HalfTy); @@ -11536,7 +11753,7 @@ Int = Intrinsic::aarch64_neon_fminv; Ty = HalfTy; VTy = llvm::FixedVectorType::get(HalfTy, 8); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, 
VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv"); return Builder.CreateTrunc(Ops[0], HalfTy); @@ -11545,7 +11762,7 @@ Int = Intrinsic::aarch64_neon_fmaxnmv; Ty = HalfTy; VTy = llvm::FixedVectorType::get(HalfTy, 4); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv"); return Builder.CreateTrunc(Ops[0], HalfTy); @@ -11554,7 +11771,7 @@ Int = Intrinsic::aarch64_neon_fmaxnmv; Ty = HalfTy; VTy = llvm::FixedVectorType::get(HalfTy, 8); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv"); return Builder.CreateTrunc(Ops[0], HalfTy); @@ -11563,7 +11780,7 @@ Int = Intrinsic::aarch64_neon_fminnmv; Ty = HalfTy; VTy = llvm::FixedVectorType::get(HalfTy, 4); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv"); return Builder.CreateTrunc(Ops[0], HalfTy); @@ -11572,7 +11789,7 @@ Int = Intrinsic::aarch64_neon_fminnmv; Ty = HalfTy; VTy = llvm::FixedVectorType::get(HalfTy, 8); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv"); return Builder.CreateTrunc(Ops[0], HalfTy); @@ -11586,7 +11803,7 @@ Int = Intrinsic::aarch64_neon_uaddlv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int8Ty, 8); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); return Builder.CreateTrunc(Ops[0], Int16Ty); @@ -11595,7 +11812,7 @@ Int = Intrinsic::aarch64_neon_uaddlv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int16Ty, 4); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); } @@ -11603,7 +11820,7 @@ Int = Intrinsic::aarch64_neon_uaddlv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int8Ty, 16); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); return Builder.CreateTrunc(Ops[0], Int16Ty); @@ -11612,7 +11829,7 @@ Int = Intrinsic::aarch64_neon_uaddlv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int16Ty, 8); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); } @@ -11620,7 +11837,7 @@ Int = Intrinsic::aarch64_neon_saddlv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int8Ty, 8); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); return Builder.CreateTrunc(Ops[0], Int16Ty); @@ -11629,7 +11846,7 @@ Int = Intrinsic::aarch64_neon_saddlv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int16Ty, 4); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); } @@ -11637,7 +11854,7 @@ Int = 
Intrinsic::aarch64_neon_saddlv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int8Ty, 16); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); return Builder.CreateTrunc(Ops[0], Int16Ty); @@ -11646,7 +11863,7 @@ Int = Intrinsic::aarch64_neon_saddlv; Ty = Int32Ty; VTy = llvm::FixedVectorType::get(Int16Ty, 8); - llvm::Type *Tys[2] = { Ty, VTy }; + llvm::Type *Tys[2] = {Ty, VTy}; Ops.push_back(EmitScalarExpr(E->getArg(0))); return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv"); } @@ -11670,10 +11887,10 @@ case NEON::BI__builtin_neon_vrsra_n_v: case NEON::BI__builtin_neon_vrsraq_n_v: { Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl; - SmallVector TmpOps; + SmallVector TmpOps; TmpOps.push_back(Ops[1]); TmpOps.push_back(Ops[2]); - Function* F = CGM.getIntrinsic(Int, Ty); + Function *F = CGM.getIntrinsic(Int, Ty); llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true); Ops[0] = Builder.CreateBitCast(Ops[0], VTy); return Builder.CreateAdd(Ops[0], tmp); @@ -11719,74 +11936,71 @@ case NEON::BI__builtin_neon_vld2q_v: { llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); Ops[1] = Builder.CreateBitCast(Ops[1], PTy); - llvm::Type *Tys[2] = { VTy, PTy }; + llvm::Type *Tys[2] = {VTy, PTy}; Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys); Ops[1] = Builder.CreateCall(F, Ops[1], "vld2"); - Ops[0] = Builder.CreateBitCast(Ops[0], - llvm::PointerType::getUnqual(Ops[1]->getType())); + Ops[0] = Builder.CreateBitCast( + Ops[0], llvm::PointerType::getUnqual(Ops[1]->getType())); return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); } case NEON::BI__builtin_neon_vld3_v: case NEON::BI__builtin_neon_vld3q_v: { llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); Ops[1] = Builder.CreateBitCast(Ops[1], PTy); - llvm::Type *Tys[2] = { VTy, PTy }; + llvm::Type *Tys[2] = {VTy, PTy}; Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys); Ops[1] = Builder.CreateCall(F, Ops[1], "vld3"); - Ops[0] = Builder.CreateBitCast(Ops[0], - llvm::PointerType::getUnqual(Ops[1]->getType())); + Ops[0] = Builder.CreateBitCast( + Ops[0], llvm::PointerType::getUnqual(Ops[1]->getType())); return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); } case NEON::BI__builtin_neon_vld4_v: case NEON::BI__builtin_neon_vld4q_v: { llvm::Type *PTy = llvm::PointerType::getUnqual(VTy); Ops[1] = Builder.CreateBitCast(Ops[1], PTy); - llvm::Type *Tys[2] = { VTy, PTy }; + llvm::Type *Tys[2] = {VTy, PTy}; Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys); Ops[1] = Builder.CreateCall(F, Ops[1], "vld4"); - Ops[0] = Builder.CreateBitCast(Ops[0], - llvm::PointerType::getUnqual(Ops[1]->getType())); + Ops[0] = Builder.CreateBitCast( + Ops[0], llvm::PointerType::getUnqual(Ops[1]->getType())); return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); } case NEON::BI__builtin_neon_vld2_dup_v: case NEON::BI__builtin_neon_vld2q_dup_v: { - llvm::Type *PTy = - llvm::PointerType::getUnqual(VTy->getElementType()); + llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType()); Ops[1] = Builder.CreateBitCast(Ops[1], PTy); - llvm::Type *Tys[2] = { VTy, PTy }; + llvm::Type *Tys[2] = {VTy, PTy}; Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys); Ops[1] = Builder.CreateCall(F, Ops[1], "vld2"); - Ops[0] = Builder.CreateBitCast(Ops[0], - llvm::PointerType::getUnqual(Ops[1]->getType())); + Ops[0] = Builder.CreateBitCast( + Ops[0], 
llvm::PointerType::getUnqual(Ops[1]->getType())); return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); } case NEON::BI__builtin_neon_vld3_dup_v: case NEON::BI__builtin_neon_vld3q_dup_v: { - llvm::Type *PTy = - llvm::PointerType::getUnqual(VTy->getElementType()); + llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType()); Ops[1] = Builder.CreateBitCast(Ops[1], PTy); - llvm::Type *Tys[2] = { VTy, PTy }; + llvm::Type *Tys[2] = {VTy, PTy}; Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys); Ops[1] = Builder.CreateCall(F, Ops[1], "vld3"); - Ops[0] = Builder.CreateBitCast(Ops[0], - llvm::PointerType::getUnqual(Ops[1]->getType())); + Ops[0] = Builder.CreateBitCast( + Ops[0], llvm::PointerType::getUnqual(Ops[1]->getType())); return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); } case NEON::BI__builtin_neon_vld4_dup_v: case NEON::BI__builtin_neon_vld4q_dup_v: { - llvm::Type *PTy = - llvm::PointerType::getUnqual(VTy->getElementType()); + llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType()); Ops[1] = Builder.CreateBitCast(Ops[1], PTy); - llvm::Type *Tys[2] = { VTy, PTy }; + llvm::Type *Tys[2] = {VTy, PTy}; Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys); Ops[1] = Builder.CreateCall(F, Ops[1], "vld4"); - Ops[0] = Builder.CreateBitCast(Ops[0], - llvm::PointerType::getUnqual(Ops[1]->getType())); + Ops[0] = Builder.CreateBitCast( + Ops[0], llvm::PointerType::getUnqual(Ops[1]->getType())); return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]); } case NEON::BI__builtin_neon_vld2_lane_v: case NEON::BI__builtin_neon_vld2q_lane_v: { - llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; + llvm::Type *Tys[2] = {VTy, Ops[1]->getType()}; Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys); std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end()); Ops[1] = Builder.CreateBitCast(Ops[1], Ty); @@ -11799,7 +12013,7 @@ } case NEON::BI__builtin_neon_vld3_lane_v: case NEON::BI__builtin_neon_vld3q_lane_v: { - llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; + llvm::Type *Tys[2] = {VTy, Ops[1]->getType()}; Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys); std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end()); Ops[1] = Builder.CreateBitCast(Ops[1], Ty); @@ -11813,7 +12027,7 @@ } case NEON::BI__builtin_neon_vld4_lane_v: case NEON::BI__builtin_neon_vld4q_lane_v: { - llvm::Type *Tys[2] = { VTy, Ops[1]->getType() }; + llvm::Type *Tys[2] = {VTy, Ops[1]->getType()}; Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys); std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end()); Ops[1] = Builder.CreateBitCast(Ops[1], Ty); @@ -11829,45 +12043,45 @@ case NEON::BI__builtin_neon_vst2_v: case NEON::BI__builtin_neon_vst2q_v: { std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); - llvm::Type *Tys[2] = { VTy, Ops[2]->getType() }; - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys), - Ops, ""); + llvm::Type *Tys[2] = {VTy, Ops[2]->getType()}; + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys), Ops, + ""); } case NEON::BI__builtin_neon_vst2_lane_v: case NEON::BI__builtin_neon_vst2q_lane_v: { std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty); - llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; + llvm::Type *Tys[2] = {VTy, Ops[3]->getType()}; return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys), Ops, ""); } case NEON::BI__builtin_neon_vst3_v: case NEON::BI__builtin_neon_vst3q_v: { std::rotate(Ops.begin(), 
Ops.begin() + 1, Ops.end()); - llvm::Type *Tys[2] = { VTy, Ops[3]->getType() }; - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys), - Ops, ""); + llvm::Type *Tys[2] = {VTy, Ops[3]->getType()}; + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys), Ops, + ""); } case NEON::BI__builtin_neon_vst3_lane_v: case NEON::BI__builtin_neon_vst3q_lane_v: { std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty); - llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; + llvm::Type *Tys[2] = {VTy, Ops[4]->getType()}; return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys), Ops, ""); } case NEON::BI__builtin_neon_vst4_v: case NEON::BI__builtin_neon_vst4q_v: { std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); - llvm::Type *Tys[2] = { VTy, Ops[4]->getType() }; - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys), - Ops, ""); + llvm::Type *Tys[2] = {VTy, Ops[4]->getType()}; + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys), Ops, + ""); } case NEON::BI__builtin_neon_vst4_lane_v: case NEON::BI__builtin_neon_vst4q_lane_v: { std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end()); Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty); - llvm::Type *Tys[2] = { VTy, Ops[5]->getType() }; + llvm::Type *Tys[2] = {VTy, Ops[5]->getType()}; return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys), Ops, ""); } @@ -11881,8 +12095,8 @@ for (unsigned vi = 0; vi != 2; ++vi) { SmallVector Indices; for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { - Indices.push_back(i+vi); - Indices.push_back(i+e+vi); + Indices.push_back(i + vi); + Indices.push_back(i + e + vi); } Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn"); @@ -11900,7 +12114,7 @@ for (unsigned vi = 0; vi != 2; ++vi) { SmallVector Indices; for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) - Indices.push_back(2*i+vi); + Indices.push_back(2 * i + vi); Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp"); @@ -11918,8 +12132,8 @@ for (unsigned vi = 0; vi != 2; ++vi) { SmallVector Indices; for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) { - Indices.push_back((i + vi*e) >> 1); - Indices.push_back(((i + vi*e) >> 1)+e); + Indices.push_back((i + vi * e) >> 1); + Indices.push_back(((i + vi * e) >> 1) + e); } Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi); SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip"); @@ -11928,36 +12142,36 @@ return SV; } case NEON::BI__builtin_neon_vqtbl1q_v: { - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty), - Ops, "vtbl1"); + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty), Ops, + "vtbl1"); } case NEON::BI__builtin_neon_vqtbl2q_v: { - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty), - Ops, "vtbl2"); + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty), Ops, + "vtbl2"); } case NEON::BI__builtin_neon_vqtbl3q_v: { - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty), - Ops, "vtbl3"); + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty), Ops, + "vtbl3"); } case NEON::BI__builtin_neon_vqtbl4q_v: { - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty), - Ops, "vtbl4"); + return 
EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty), Ops, + "vtbl4"); } case NEON::BI__builtin_neon_vqtbx1q_v: { - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty), - Ops, "vtbx1"); + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty), Ops, + "vtbx1"); } case NEON::BI__builtin_neon_vqtbx2q_v: { - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty), - Ops, "vtbx2"); + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty), Ops, + "vtbx2"); } case NEON::BI__builtin_neon_vqtbx3q_v: { - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty), - Ops, "vtbx3"); + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty), Ops, + "vtbx3"); } case NEON::BI__builtin_neon_vqtbx4q_v: { - return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty), - Ops, "vtbx4"); + return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty), Ops, + "vtbx4"); } case NEON::BI__builtin_neon_vsqadd_v: case NEON::BI__builtin_neon_vsqaddq_v: { @@ -12080,8 +12294,7 @@ } } -llvm::Value *CodeGenFunction:: -BuildVector(ArrayRef Ops) { +llvm::Value *CodeGenFunction::BuildVector(ArrayRef Ops) { assert((Ops.size() & (Ops.size() - 1)) == 0 && "Not a power-of-two sized vector!"); bool AllConstants = true; @@ -12090,7 +12303,7 @@ // If this is a constant vector, create a ConstantVector. if (AllConstants) { - SmallVector CstOps; + SmallVector CstOps; for (unsigned i = 0, e = Ops.size(); i != e; ++i) CstOps.push_back(cast(Ops[i])); return llvm::ConstantVector::get(CstOps); @@ -12121,9 +12334,8 @@ int Indices[4]; for (unsigned i = 0; i != NumElts; ++i) Indices[i] = i; - MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec, - makeArrayRef(Indices, NumElts), - "extract"); + MaskVec = CGF.Builder.CreateShuffleVector( + MaskVec, MaskVec, makeArrayRef(Indices, NumElts), "extract"); } return MaskVec; } @@ -12131,8 +12343,8 @@ static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef Ops, Align Alignment) { // Cast the pointer to right type. - Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], - llvm::PointerType::getUnqual(Ops[1]->getType())); + Value *Ptr = CGF.Builder.CreateBitCast( + Ops[0], llvm::PointerType::getUnqual(Ops[1]->getType())); Value *MaskVec = getMaskVecValue( CGF, Ops[2], @@ -12154,25 +12366,23 @@ return CGF.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, MaskVec, Ops[1]); } -static Value *EmitX86ExpandLoad(CodeGenFunction &CGF, - ArrayRef Ops) { +static Value *EmitX86ExpandLoad(CodeGenFunction &CGF, ArrayRef Ops) { auto *ResultTy = cast(Ops[1]->getType()); llvm::Type *PtrTy = ResultTy->getElementType(); // Cast the pointer to element type. - Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], - llvm::PointerType::getUnqual(PtrTy)); + Value *Ptr = + CGF.Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(PtrTy)); Value *MaskVec = getMaskVecValue( CGF, Ops[2], cast(ResultTy)->getNumElements()); - llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload, - ResultTy); - return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] }); + llvm::Function *F = + CGF.CGM.getIntrinsic(Intrinsic::masked_expandload, ResultTy); + return CGF.Builder.CreateCall(F, {Ptr, MaskVec, Ops[1]}); } -static Value *EmitX86CompressExpand(CodeGenFunction &CGF, - ArrayRef Ops, +static Value *EmitX86CompressExpand(CodeGenFunction &CGF, ArrayRef Ops, bool IsCompress) { auto *ResultTy = cast(Ops[1]->getType()); @@ -12181,7 +12391,7 @@ Intrinsic::ID IID = IsCompress ? 
Intrinsic::x86_avx512_mask_compress : Intrinsic::x86_avx512_mask_expand; llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy); - return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec }); + return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], MaskVec}); } static Value *EmitX86CompressStore(CodeGenFunction &CGF, @@ -12190,19 +12400,18 @@ llvm::Type *PtrTy = ResultTy->getElementType(); // Cast the pointer to element type. - Value *Ptr = CGF.Builder.CreateBitCast(Ops[0], - llvm::PointerType::getUnqual(PtrTy)); + Value *Ptr = + CGF.Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(PtrTy)); Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements()); - llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore, - ResultTy); - return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec }); + llvm::Function *F = + CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore, ResultTy); + return CGF.Builder.CreateCall(F, {Ops[1], Ptr, MaskVec}); } static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc, - ArrayRef Ops, - bool InvertLHS = false) { + ArrayRef Ops, bool InvertLHS = false) { unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts); Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts); @@ -12272,8 +12481,8 @@ return Res; } -static Value *EmitX86Select(CodeGenFunction &CGF, - Value *Mask, Value *Op0, Value *Op1) { +static Value *EmitX86Select(CodeGenFunction &CGF, Value *Mask, Value *Op0, + Value *Op1) { // If the mask is all ones just return first argument. if (const auto *C = dyn_cast(Mask)) @@ -12286,8 +12495,8 @@ return CGF.Builder.CreateSelect(Mask, Op0, Op1); } -static Value *EmitX86ScalarSelect(CodeGenFunction &CGF, - Value *Mask, Value *Op0, Value *Op1) { +static Value *EmitX86ScalarSelect(CodeGenFunction &CGF, Value *Mask, Value *Op0, + Value *Op1) { // If the mask is all ones just return first argument. if (const auto *C = dyn_cast(Mask)) if (C->isAllOnesValue()) @@ -12318,9 +12527,8 @@ Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices); } - return CGF.Builder.CreateBitCast(Cmp, - IntegerType::get(CGF.getLLVMContext(), - std::max(NumElts, 8U))); + return CGF.Builder.CreateBitCast( + Cmp, IntegerType::get(CGF.getLLVMContext(), std::max(NumElts, 8U))); } static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC, @@ -12340,13 +12548,26 @@ } else { ICmpInst::Predicate Pred; switch (CC) { - default: llvm_unreachable("Unknown condition code"); - case 0: Pred = ICmpInst::ICMP_EQ; break; - case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break; - case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break; - case 4: Pred = ICmpInst::ICMP_NE; break; - case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break; - case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break; + default: + llvm_unreachable("Unknown condition code"); + case 0: + Pred = ICmpInst::ICMP_EQ; + break; + case 1: + Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; + break; + case 2: + Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; + break; + case 4: + Pred = ICmpInst::ICMP_NE; + break; + case 5: + Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; + break; + case 6: + Pred = Signed ? 
ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; + break; } Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]); } @@ -12360,7 +12581,7 @@ static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) { Value *Zero = Constant::getNullValue(In->getType()); - return EmitX86MaskedCompare(CGF, 1, true, { In, Zero }); + return EmitX86MaskedCompare(CGF, 1, true, {In, Zero}); } static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF, const CallExpr *E, @@ -12372,8 +12593,8 @@ if (Rnd != 4) { Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round : Intrinsic::x86_avx512_uitofp_round; - Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() }); - Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] }); + Function *F = CGF.CGM.getIntrinsic(IID, {Ty, Ops[0]->getType()}); + Res = CGF.Builder.CreateCall(F, {Ops[0], Ops[3]}); } else { CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E); Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty) @@ -12391,7 +12612,8 @@ bool Subtract = false; Intrinsic::ID IID = Intrinsic::not_intrinsic; switch (BuiltinID) { - default: break; + default: + break; case clang::X86::BI__builtin_ia32_vfmsubph512_mask3: Subtract = true; [[fallthrough]]; @@ -12414,14 +12636,16 @@ case clang::X86::BI__builtin_ia32_vfmaddps512_mask: case clang::X86::BI__builtin_ia32_vfmaddps512_maskz: case clang::X86::BI__builtin_ia32_vfmaddps512_mask3: - IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break; + IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; + break; case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3: Subtract = true; [[fallthrough]]; case clang::X86::BI__builtin_ia32_vfmaddpd512_mask: case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz: case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3: - IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break; + IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; + break; case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3: Subtract = true; [[fallthrough]]; @@ -12454,7 +12678,7 @@ (cast(Ops.back())->getZExtValue() != (uint64_t)4 || IsAddSub)) { Function *Intr = CGF.CGM.getIntrinsic(IID); - Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() }); + Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back()}); } else { llvm::Type *Ty = A->getType(); Function *FMA; @@ -12553,8 +12777,8 @@ } // If we have more than 3 arguments, we need to do masking. if (Ops.size() > 3) { - Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType()) - : Ops[PTIdx]; + Value *PassThru = + ZeroMask ? Constant::getNullValue(Res->getType()) : Ops[PTIdx]; // If we negated the accumulator and the its the PassThru value we need to // bypass the negate. Conveniently Upper should be the same thing in this @@ -12618,8 +12842,8 @@ else llvm_unreachable("Unexpected intrinsic"); - Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID), - Ops.drop_back()); + Value *Ternlog = + CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID), Ops.drop_back()); Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0]; return EmitX86Select(CGF, Ops[4], Ternlog, PassThru); } @@ -12676,8 +12900,7 @@ } // Convert a BF16 to a float. 
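The BF16 conversion emitted below relies on bfloat16 being the upper 16 bits of an IEEE-754 binary32 value; a minimal standalone sketch of that widening in plain C++ (illustrative helper name, not the IRBuilder path used here):

#include <cstdint>
#include <cstring>

// bf16 -> float: the 16 stored bits become the top half of a 32-bit word;
// the low 16 mantissa bits are zero-filled.
float bf16ToFloat(uint16_t Bits) {
  uint32_t Word = static_cast<uint32_t>(Bits) << 16;
  float F;
  std::memcpy(&F, &Word, sizeof(F));
  return F;
}
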
-static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF, - const CallExpr *E, +static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF, const CallExpr *E, ArrayRef Ops) { llvm::Type *Int32Ty = CGF.Builder.getInt32Ty(); Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty); @@ -12725,12 +12948,11 @@ llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0), ConstantInt::get(Int32Ty, Index)}; llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs); - CpuValue = Builder.CreateAlignedLoad(Int32Ty, CpuValue, - CharUnits::fromQuantity(4)); + CpuValue = + Builder.CreateAlignedLoad(Int32Ty, CpuValue, CharUnits::fromQuantity(4)); // Check the value of the field against the requested value. - return Builder.CreateICmpEQ(CpuValue, - llvm::ConstantInt::get(Int32Ty, Value)); + return Builder.CreateICmpEQ(CpuValue, llvm::ConstantInt::get(Int32Ty, Value)); } Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) { @@ -12779,8 +13001,8 @@ } if (Features2 != 0) { - llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty, - "__cpu_features2"); + llvm::Constant *CpuFeatures2 = + CGM.CreateRuntimeVariable(Int32Ty, "__cpu_features2"); cast(CpuFeatures2)->setDSOLocal(true); Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures2, @@ -12821,7 +13043,7 @@ if (Optional MsvcIntId = translateX86ToMsvcIntrin(BuiltinID)) return EmitMSVCBuiltinExpr(*MsvcIntId, E); - SmallVector Ops; + SmallVector Ops; bool IsMaskFCmp = false; bool IsConjFMA = false; @@ -12876,7 +13098,8 @@ }; switch (BuiltinID) { - default: return nullptr; + default: + return nullptr; case X86::BI_mm_prefetch: { Value *Address = Ops[0]; ConstantInt *C = cast(Ops[1]); @@ -12975,8 +13198,9 @@ case X86::BI__builtin_ia32_ldmxcsr: { Address Tmp = CreateMemTemp(E->getArg(0)->getType()); Builder.CreateStore(Ops[0], Tmp); - return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr), - Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy)); + return Builder.CreateCall( + CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr), + Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy)); } case X86::BI_mm_getcsr: case X86::BI__builtin_ia32_stmxcsr: { @@ -13000,32 +13224,33 @@ case X86::BI__builtin_ia32_xsetbv: case X86::BI_xsetbv: { Intrinsic::ID ID; -#define INTRINSIC_X86_XSAVE_ID(NAME) \ - case X86::BI__builtin_ia32_##NAME: \ - ID = Intrinsic::x86_##NAME; \ - break +#define INTRINSIC_X86_XSAVE_ID(NAME) \ + case X86::BI__builtin_ia32_##NAME: \ + ID = Intrinsic::x86_##NAME; \ + break switch (BuiltinID) { - default: llvm_unreachable("Unsupported intrinsic!"); - INTRINSIC_X86_XSAVE_ID(xsave); - INTRINSIC_X86_XSAVE_ID(xsave64); - INTRINSIC_X86_XSAVE_ID(xrstor); - INTRINSIC_X86_XSAVE_ID(xrstor64); - INTRINSIC_X86_XSAVE_ID(xsaveopt); - INTRINSIC_X86_XSAVE_ID(xsaveopt64); - INTRINSIC_X86_XSAVE_ID(xrstors); - INTRINSIC_X86_XSAVE_ID(xrstors64); - INTRINSIC_X86_XSAVE_ID(xsavec); - INTRINSIC_X86_XSAVE_ID(xsavec64); - INTRINSIC_X86_XSAVE_ID(xsaves); - INTRINSIC_X86_XSAVE_ID(xsaves64); - INTRINSIC_X86_XSAVE_ID(xsetbv); + default: + llvm_unreachable("Unsupported intrinsic!"); + INTRINSIC_X86_XSAVE_ID(xsave); + INTRINSIC_X86_XSAVE_ID(xsave64); + INTRINSIC_X86_XSAVE_ID(xrstor); + INTRINSIC_X86_XSAVE_ID(xrstor64); + INTRINSIC_X86_XSAVE_ID(xsaveopt); + INTRINSIC_X86_XSAVE_ID(xsaveopt64); + INTRINSIC_X86_XSAVE_ID(xrstors); + INTRINSIC_X86_XSAVE_ID(xrstors64); + INTRINSIC_X86_XSAVE_ID(xsavec); + INTRINSIC_X86_XSAVE_ID(xsavec64); + INTRINSIC_X86_XSAVE_ID(xsaves); + INTRINSIC_X86_XSAVE_ID(xsaves64); + INTRINSIC_X86_XSAVE_ID(xsetbv); case 
X86::BI_xsetbv: ID = Intrinsic::x86_xsetbv; break; } #undef INTRINSIC_X86_XSAVE_ID Value *Mhi = Builder.CreateTrunc( - Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty); + Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty); Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty); Ops[1] = Mhi; Ops.push_back(Mlo); @@ -13289,7 +13514,7 @@ case X86::BI__builtin_ia32_expandqi128_mask: case X86::BI__builtin_ia32_expandqi256_mask: case X86::BI__builtin_ia32_expandqi512_mask: - return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false); + return EmitX86CompressExpand(*this, Ops, /*IsCompress*/ false); case X86::BI__builtin_ia32_compressdf128_mask: case X86::BI__builtin_ia32_compressdf256_mask: @@ -13309,7 +13534,7 @@ case X86::BI__builtin_ia32_compressqi128_mask: case X86::BI__builtin_ia32_compressqi256_mask: case X86::BI__builtin_ia32_compressqi512_mask: - return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true); + return EmitX86CompressExpand(*this, Ops, /*IsCompress*/ true); case X86::BI__builtin_ia32_gather3div2df: case X86::BI__builtin_ia32_gather3div2di: @@ -13337,7 +13562,8 @@ case X86::BI__builtin_ia32_gatherdiv16si: { Intrinsic::ID IID; switch (BuiltinID) { - default: llvm_unreachable("Unexpected builtin"); + default: + llvm_unreachable("Unexpected builtin"); case X86::BI__builtin_ia32_gather3div2df: IID = Intrinsic::x86_avx512_mask_gather3div2_df; break; @@ -13446,7 +13672,8 @@ case X86::BI__builtin_ia32_scattersiv8si: { Intrinsic::ID IID; switch (BuiltinID) { - default: llvm_unreachable("Unexpected builtin"); + default: + llvm_unreachable("Unexpected builtin"); case X86::BI__builtin_ia32_scattersiv8df: IID = Intrinsic::x86_avx512_mask_scatter_dpd_512; break; @@ -13559,9 +13786,8 @@ for (unsigned i = 0; i != NumElts; ++i) Indices[i] = i + Index; - Value *Res = Builder.CreateShuffleVector(Ops[0], - makeArrayRef(Indices, NumElts), - "extract"); + Value *Res = Builder.CreateShuffleVector( + Ops[0], makeArrayRef(Indices, NumElts), "extract"); if (Ops.size() == 4) Res = EmitX86Select(*this, Ops[3], Res, Ops[2]); @@ -13598,9 +13824,8 @@ for (unsigned i = 0; i != DstNumElts; ++i) Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i; - Value *Op1 = Builder.CreateShuffleVector(Ops[1], - makeArrayRef(Indices, DstNumElts), - "widen"); + Value *Op1 = Builder.CreateShuffleVector( + Ops[1], makeArrayRef(Indices, DstNumElts), "widen"); for (unsigned i = 0; i != DstNumElts; ++i) { if (i >= Index && i < (Index + SrcNumElts)) @@ -13609,9 +13834,8 @@ Indices[i] = i; } - return Builder.CreateShuffleVector(Ops[0], Op1, - makeArrayRef(Indices, DstNumElts), - "insert"); + return Builder.CreateShuffleVector( + Ops[0], Op1, makeArrayRef(Indices, DstNumElts), "insert"); } case X86::BI__builtin_ia32_pmovqd512_mask: case X86::BI__builtin_ia32_pmovwb512_mask: { @@ -13627,7 +13851,8 @@ Intrinsic::ID IID; switch (BuiltinID) { - default: llvm_unreachable("Unsupported intrinsic!"); + default: + llvm_unreachable("Unsupported intrinsic!"); case X86::BI__builtin_ia32_pmovdb512_mask: IID = Intrinsic::x86_avx512_mask_pmov_db_512; break; @@ -13661,8 +13886,7 @@ Indices[i] = ((Imm >> (i % 8)) & 0x1) ? 
NumElts + i : i; return Builder.CreateShuffleVector(Ops[0], Ops[1], - makeArrayRef(Indices, NumElts), - "blend"); + makeArrayRef(Indices, NumElts), "blend"); } case X86::BI__builtin_ia32_pshuflw: case X86::BI__builtin_ia32_pshuflw256: @@ -13766,8 +13990,7 @@ } return Builder.CreateShuffleVector(Ops[0], Ops[1], - makeArrayRef(Indices, NumElts), - "shufp"); + makeArrayRef(Indices, NumElts), "shufp"); } case X86::BI__builtin_ia32_permdi256: case X86::BI__builtin_ia32_permdf256: @@ -13819,9 +14042,8 @@ } } - return Builder.CreateShuffleVector(Ops[1], Ops[0], - makeArrayRef(Indices, NumElts), - "palignr"); + return Builder.CreateShuffleVector( + Ops[1], Ops[0], makeArrayRef(Indices, NumElts), "palignr"); } case X86::BI__builtin_ia32_alignd128: case X86::BI__builtin_ia32_alignd256: @@ -13840,9 +14062,8 @@ for (unsigned i = 0; i != NumElts; ++i) Indices[i] = i + ShiftVal; - return Builder.CreateShuffleVector(Ops[1], Ops[0], - makeArrayRef(Indices, NumElts), - "valign"); + return Builder.CreateShuffleVector( + Ops[1], Ops[0], makeArrayRef(Indices, NumElts), "valign"); } case X86::BI__builtin_ia32_shuf_f32x4_256: case X86::BI__builtin_ia32_shuf_f64x2_256: @@ -13870,8 +14091,7 @@ } return Builder.CreateShuffleVector(Ops[0], Ops[1], - makeArrayRef(Indices, NumElts), - "shuf"); + makeArrayRef(Indices, NumElts), "shuf"); } case X86::BI__builtin_ia32_vperm2f128_pd256: @@ -13898,20 +14118,19 @@ else OutOps[l] = Ops[0]; - for (unsigned i = 0; i != NumElts/2; ++i) { + for (unsigned i = 0; i != NumElts / 2; ++i) { // Start with ith element of the source for this lane. unsigned Idx = (l * NumElts) + i; // If bit 0 of the immediate half is set, switch to the high half of // the source. if (Imm & (1 << (l * 4))) - Idx += NumElts/2; - Indices[(l * (NumElts/2)) + i] = Idx; + Idx += NumElts / 2; + Indices[(l * (NumElts / 2)) + i] = Idx; } } return Builder.CreateShuffleVector(OutOps[0], OutOps[1], - makeArrayRef(Indices, NumElts), - "vperm"); + makeArrayRef(Indices, NumElts), "vperm"); } case X86::BI__builtin_ia32_pslldqi128_byteshift: @@ -13931,7 +14150,8 @@ for (unsigned l = 0; l != NumElts; l += 16) { for (unsigned i = 0; i != 16; ++i) { unsigned Idx = NumElts + i - ShiftVal; - if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand. + if (Idx < NumElts) + Idx -= NumElts - 16; // end of lane, switch operand. Indices[l + i] = Idx + l; } } @@ -13939,9 +14159,8 @@ auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts); Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast"); Value *Zero = llvm::Constant::getNullValue(VecTy); - Value *SV = Builder.CreateShuffleVector(Zero, Cast, - makeArrayRef(Indices, NumElts), - "pslldq"); + Value *SV = Builder.CreateShuffleVector( + Zero, Cast, makeArrayRef(Indices, NumElts), "pslldq"); return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast"); } case X86::BI__builtin_ia32_psrldqi128_byteshift: @@ -13961,7 +14180,8 @@ for (unsigned l = 0; l != NumElts; l += 16) { for (unsigned i = 0; i != 16; ++i) { unsigned Idx = i + ShiftVal; - if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand. + if (Idx >= 16) + Idx += NumElts - 16; // end of lane, switch operand. 
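This byte-shift lowering builds shuffle indices that, within each 16-byte lane, implement a right shift with zero fill; a scalar model of a single lane, as a semantics sketch only (illustrative helper name):

#include <array>
#include <cstdint>

// One 16-byte lane of psrldq: bytes move toward index 0 by Shift positions,
// and the vacated high bytes are filled with zero.
std::array<uint8_t, 16> srlBytes(const std::array<uint8_t, 16> &V,
                                 unsigned Shift) {
  std::array<uint8_t, 16> Res{}; // zero-initialized
  for (unsigned I = 0; I + Shift < 16; ++I)
    Res[I] = V[I + Shift];
  return Res;
}
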
Indices[l + i] = Idx + l; } } @@ -13969,9 +14189,8 @@ auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts); Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast"); Value *Zero = llvm::Constant::getNullValue(VecTy); - Value *SV = Builder.CreateShuffleVector(Cast, Zero, - makeArrayRef(Indices, NumElts), - "psrldq"); + Value *SV = Builder.CreateShuffleVector( + Cast, Zero, makeArrayRef(Indices, NumElts), "psrldq"); return Builder.CreateBitCast(SV, ResultType, "cast"); } case X86::BI__builtin_ia32_kshiftliqi: @@ -13991,9 +14210,8 @@ Indices[i] = NumElts + i - ShiftVal; Value *Zero = llvm::Constant::getNullValue(In->getType()); - Value *SV = Builder.CreateShuffleVector(Zero, In, - makeArrayRef(Indices, NumElts), - "kshiftl"); + Value *SV = Builder.CreateShuffleVector( + Zero, In, makeArrayRef(Indices, NumElts), "kshiftl"); return Builder.CreateBitCast(SV, Ops[0]->getType()); } case X86::BI__builtin_ia32_kshiftriqi: @@ -14013,9 +14231,8 @@ Indices[i] = i + ShiftVal; Value *Zero = llvm::Constant::getNullValue(In->getType()); - Value *SV = Builder.CreateShuffleVector(In, Zero, - makeArrayRef(Indices, NumElts), - "kshiftr"); + Value *SV = Builder.CreateShuffleVector( + In, Zero, makeArrayRef(Indices, NumElts), "kshiftr"); return Builder.CreateBitCast(SV, Ops[0]->getType()); } case X86::BI__builtin_ia32_movnti: @@ -14178,7 +14395,8 @@ case X86::BI__builtin_ia32_ktestzdi: { Intrinsic::ID IID; switch (BuiltinID) { - default: llvm_unreachable("Unsupported intrinsic!"); + default: + llvm_unreachable("Unsupported intrinsic!"); case X86::BI__builtin_ia32_ktestcqi: IID = Intrinsic::x86_avx512_ktestc_b; break; @@ -14218,7 +14436,8 @@ case X86::BI__builtin_ia32_kadddi: { Intrinsic::ID IID; switch (BuiltinID) { - default: llvm_unreachable("Unsupported intrinsic!"); + default: + llvm_unreachable("Unsupported intrinsic!"); case X86::BI__builtin_ia32_kaddqi: IID = Intrinsic::x86_avx512_kadd_b; break; @@ -14264,15 +14483,14 @@ case X86::BI__builtin_ia32_kxorhi: case X86::BI__builtin_ia32_kxorsi: case X86::BI__builtin_ia32_kxordi: - return EmitX86MaskLogic(*this, Instruction::Xor, Ops); + return EmitX86MaskLogic(*this, Instruction::Xor, Ops); case X86::BI__builtin_ia32_knotqi: case X86::BI__builtin_ia32_knothi: case X86::BI__builtin_ia32_knotsi: case X86::BI__builtin_ia32_knotdi: { unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth(); Value *Res = getMaskVecValue(*this, Ops[0], NumElts); - return Builder.CreateBitCast(Builder.CreateNot(Res), - Ops[0]->getType()); + return Builder.CreateBitCast(Builder.CreateNot(Res), Ops[0]->getType()); } case X86::BI__builtin_ia32_kmovb: case X86::BI__builtin_ia32_kmovw: @@ -14304,8 +14522,8 @@ makeArrayRef(Indices, NumElts / 2)); // Concat the vectors. // NOTE: Operands are swapped to match the intrinsic definition. 
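The operand swap noted here matches the kunpck definition: the second mask supplies the low half of the result and the first mask the high half. A scalar sketch for the byte-sized variant, assuming the Intel kunpckbw semantics (illustrative helper name):

#include <cstdint>

// Scalar model of kunpckbw: low 8 bits come from B, high 8 bits from A.
uint16_t kunpackBW(uint8_t A, uint8_t B) {
  return static_cast<uint16_t>(A) << 8 | B;
}
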
- Value *Res = Builder.CreateShuffleVector(RHS, LHS, - makeArrayRef(Indices, NumElts)); + Value *Res = + Builder.CreateShuffleVector(RHS, LHS, makeArrayRef(Indices, NumElts)); return Builder.CreateBitCast(Res, Ops[0]->getType()); } @@ -14316,7 +14534,7 @@ case X86::BI__builtin_ia32_vplzcntq_256: case X86::BI__builtin_ia32_vplzcntq_512: { Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType()); - return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)}); + return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)}); } case X86::BI__builtin_ia32_sqrtss: case X86::BI__builtin_ia32_sqrtsd: { @@ -14418,12 +14636,12 @@ case X86::BI__builtin_ia32_pmuludq128: case X86::BI__builtin_ia32_pmuludq256: case X86::BI__builtin_ia32_pmuludq512: - return EmitX86Muldq(*this, /*IsSigned*/false, Ops); + return EmitX86Muldq(*this, /*IsSigned*/ false, Ops); case X86::BI__builtin_ia32_pmuldq128: case X86::BI__builtin_ia32_pmuldq256: case X86::BI__builtin_ia32_pmuldq512: - return EmitX86Muldq(*this, /*IsSigned*/true, Ops); + return EmitX86Muldq(*this, /*IsSigned*/ true, Ops); case X86::BI__builtin_ia32_pternlogd512_mask: case X86::BI__builtin_ia32_pternlogq512_mask: @@ -14431,7 +14649,7 @@ case X86::BI__builtin_ia32_pternlogd256_mask: case X86::BI__builtin_ia32_pternlogq128_mask: case X86::BI__builtin_ia32_pternlogq256_mask: - return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops); + return EmitX86Ternlog(*this, /*ZeroMask*/ false, Ops); case X86::BI__builtin_ia32_pternlogd512_maskz: case X86::BI__builtin_ia32_pternlogq512_maskz: @@ -14439,7 +14657,7 @@ case X86::BI__builtin_ia32_pternlogd256_maskz: case X86::BI__builtin_ia32_pternlogq128_maskz: case X86::BI__builtin_ia32_pternlogq256_maskz: - return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops); + return EmitX86Ternlog(*this, /*ZeroMask*/ true, Ops); case X86::BI__builtin_ia32_vpshldd128: case X86::BI__builtin_ia32_vpshldd256: @@ -14545,7 +14763,8 @@ case X86::BI__builtin_ia32_rdseed64_step: { Intrinsic::ID ID; switch (BuiltinID) { - default: llvm_unreachable("Unsupported intrinsic!"); + default: + llvm_unreachable("Unsupported intrinsic!"); case X86::BI__builtin_ia32_rdrand16_step: ID = Intrinsic::x86_rdrand_16; break; @@ -14577,7 +14796,8 @@ case X86::BI__builtin_ia32_subborrow_u64: { Intrinsic::ID IID; switch (BuiltinID) { - default: llvm_unreachable("Unsupported intrinsic!"); + default: + llvm_unreachable("Unsupported intrinsic!"); case X86::BI__builtin_ia32_addcarryx_u32: IID = Intrinsic::x86_addcarry_32; break; @@ -14592,8 +14812,8 @@ break; } - Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), - { Ops[0], Ops[1], Ops[2] }); + Value *Call = + Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1], Ops[2]}); Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1), Ops[3]); return Builder.CreateExtractValue(Call, 0); @@ -14615,7 +14835,8 @@ Intrinsic::ID ID; switch (BuiltinID) { - default: llvm_unreachable("Unsupported intrinsic!"); + default: + llvm_unreachable("Unsupported intrinsic!"); case X86::BI__builtin_ia32_fpclassph128_mask: ID = Intrinsic::x86_avx512fp16_fpclass_ph_128; break; @@ -14660,7 +14881,8 @@ Intrinsic::ID ID; switch (BuiltinID) { - default: llvm_unreachable("Unsupported intrinsic!"); + default: + llvm_unreachable("Unsupported intrinsic!"); case X86::BI__builtin_ia32_vp2intersect_q_512: ID = Intrinsic::x86_avx512_vp2intersect_q_512; break; @@ -14696,7 +14918,8 @@ case X86::BI__builtin_ia32_vpmultishiftqb512: { Intrinsic::ID ID; switch (BuiltinID) { - default: llvm_unreachable("Unsupported intrinsic!"); + 
default: + llvm_unreachable("Unsupported intrinsic!"); case X86::BI__builtin_ia32_vpmultishiftqb128: ID = Intrinsic::x86_avx512_pmultishift_qb_128; break; @@ -14721,7 +14944,8 @@ Intrinsic::ID ID; switch (BuiltinID) { - default: llvm_unreachable("Unsupported intrinsic!"); + default: + llvm_unreachable("Unsupported intrinsic!"); case X86::BI__builtin_ia32_vpshufbitqmb128_mask: ID = Intrinsic::x86_avx512_vpshufbitqmb_128; break; @@ -14740,28 +14964,28 @@ // packed comparison intrinsics case X86::BI__builtin_ia32_cmpeqps: case X86::BI__builtin_ia32_cmpeqpd: - return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/false); + return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/ false); case X86::BI__builtin_ia32_cmpltps: case X86::BI__builtin_ia32_cmpltpd: - return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/true); + return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/ true); case X86::BI__builtin_ia32_cmpleps: case X86::BI__builtin_ia32_cmplepd: - return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/true); + return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/ true); case X86::BI__builtin_ia32_cmpunordps: case X86::BI__builtin_ia32_cmpunordpd: - return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/false); + return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/ false); case X86::BI__builtin_ia32_cmpneqps: case X86::BI__builtin_ia32_cmpneqpd: - return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/false); + return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/ false); case X86::BI__builtin_ia32_cmpnltps: case X86::BI__builtin_ia32_cmpnltpd: - return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/true); + return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/ true); case X86::BI__builtin_ia32_cmpnleps: case X86::BI__builtin_ia32_cmpnlepd: - return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/true); + return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/ true); case X86::BI__builtin_ia32_cmpordps: case X86::BI__builtin_ia32_cmpordpd: - return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false); + return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/ false); case X86::BI__builtin_ia32_cmpph128_mask: case X86::BI__builtin_ia32_cmpph256_mask: case X86::BI__builtin_ia32_cmpph512_mask: @@ -14794,23 +15018,72 @@ // Predicates for 16-31 repeat the 0-15 predicates. Only the signalling // behavior is inverted. We'll handle that after the switch. 
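In the predicate table that follows, "ordered" means neither operand is NaN, and the signaling variants differ from the quiet ones only in raising an invalid-operation exception on quiet NaN inputs. A scalar sketch of the quiet ordered less-than predicate (FCMP_OLT; illustrative helper name):

#include <cmath>

// Quiet ordered less-than: false whenever either input is NaN; the isnan
// guards short-circuit so no FP exception is raised for quiet NaNs.
bool cmpOLT(double A, double B) {
  return !std::isnan(A) && !std::isnan(B) && A < B;
}
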
switch (CC & 0xf) { - case 0x00: Pred = FCmpInst::FCMP_OEQ; IsSignaling = false; break; - case 0x01: Pred = FCmpInst::FCMP_OLT; IsSignaling = true; break; - case 0x02: Pred = FCmpInst::FCMP_OLE; IsSignaling = true; break; - case 0x03: Pred = FCmpInst::FCMP_UNO; IsSignaling = false; break; - case 0x04: Pred = FCmpInst::FCMP_UNE; IsSignaling = false; break; - case 0x05: Pred = FCmpInst::FCMP_UGE; IsSignaling = true; break; - case 0x06: Pred = FCmpInst::FCMP_UGT; IsSignaling = true; break; - case 0x07: Pred = FCmpInst::FCMP_ORD; IsSignaling = false; break; - case 0x08: Pred = FCmpInst::FCMP_UEQ; IsSignaling = false; break; - case 0x09: Pred = FCmpInst::FCMP_ULT; IsSignaling = true; break; - case 0x0a: Pred = FCmpInst::FCMP_ULE; IsSignaling = true; break; - case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break; - case 0x0c: Pred = FCmpInst::FCMP_ONE; IsSignaling = false; break; - case 0x0d: Pred = FCmpInst::FCMP_OGE; IsSignaling = true; break; - case 0x0e: Pred = FCmpInst::FCMP_OGT; IsSignaling = true; break; - case 0x0f: Pred = FCmpInst::FCMP_TRUE; IsSignaling = false; break; - default: llvm_unreachable("Unhandled CC"); + case 0x00: + Pred = FCmpInst::FCMP_OEQ; + IsSignaling = false; + break; + case 0x01: + Pred = FCmpInst::FCMP_OLT; + IsSignaling = true; + break; + case 0x02: + Pred = FCmpInst::FCMP_OLE; + IsSignaling = true; + break; + case 0x03: + Pred = FCmpInst::FCMP_UNO; + IsSignaling = false; + break; + case 0x04: + Pred = FCmpInst::FCMP_UNE; + IsSignaling = false; + break; + case 0x05: + Pred = FCmpInst::FCMP_UGE; + IsSignaling = true; + break; + case 0x06: + Pred = FCmpInst::FCMP_UGT; + IsSignaling = true; + break; + case 0x07: + Pred = FCmpInst::FCMP_ORD; + IsSignaling = false; + break; + case 0x08: + Pred = FCmpInst::FCMP_UEQ; + IsSignaling = false; + break; + case 0x09: + Pred = FCmpInst::FCMP_ULT; + IsSignaling = true; + break; + case 0x0a: + Pred = FCmpInst::FCMP_ULE; + IsSignaling = true; + break; + case 0x0b: + Pred = FCmpInst::FCMP_FALSE; + IsSignaling = false; + break; + case 0x0c: + Pred = FCmpInst::FCMP_ONE; + IsSignaling = false; + break; + case 0x0d: + Pred = FCmpInst::FCMP_OGE; + IsSignaling = true; + break; + case 0x0e: + Pred = FCmpInst::FCMP_OGT; + IsSignaling = true; + break; + case 0x0f: + Pred = FCmpInst::FCMP_TRUE; + IsSignaling = false; + break; + default: + llvm_unreachable("Unhandled CC"); } // Invert the signalling behavior for 16-31. @@ -14828,7 +15101,8 @@ Intrinsic::ID IID; switch (BuiltinID) { - default: llvm_unreachable("Unexpected builtin"); + default: + llvm_unreachable("Unexpected builtin"); case X86::BI__builtin_ia32_cmpps: IID = Intrinsic::x86_sse_cmp_ps; break; @@ -14937,7 +15211,7 @@ return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType())); } -// AVX512 bf16 intrinsics + // AVX512 bf16 intrinsics case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: { Ops[2] = getMaskVecValue( *this, Ops[2], @@ -14952,7 +15226,8 @@ case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: { Intrinsic::ID IID; switch (BuiltinID) { - default: llvm_unreachable("Unsupported intrinsic!"); + default: + llvm_unreachable("Unsupported intrinsic!"); case X86::BI__builtin_ia32_cvtneps2bf16_256_mask: IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256; break; @@ -15309,7 +15584,8 @@ Intrinsic::ID ID = Intrinsic::not_intrinsic; switch (BuiltinID) { - default: return nullptr; + default: + return nullptr; // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we // call __builtin_readcyclecounter. 
@@ -15329,22 +15605,22 @@ case PPC::BI__builtin_vsx_lxvd2x_be: case PPC::BI__builtin_vsx_lxvw4x_be: case PPC::BI__builtin_vsx_lxvl: - case PPC::BI__builtin_vsx_lxvll: - { + case PPC::BI__builtin_vsx_lxvll: { SmallVector Ops; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops.push_back(EmitScalarExpr(E->getArg(1))); - if(BuiltinID == PPC::BI__builtin_vsx_lxvl || - BuiltinID == PPC::BI__builtin_vsx_lxvll){ + if (BuiltinID == PPC::BI__builtin_vsx_lxvl || + BuiltinID == PPC::BI__builtin_vsx_lxvll) { Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy); - }else { + } else { Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy); Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]); Ops.pop_back(); } switch (BuiltinID) { - default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!"); + default: + llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!"); case PPC::BI__builtin_altivec_lvx: ID = Intrinsic::ppc_altivec_lvx; break; @@ -15400,23 +15676,23 @@ case PPC::BI__builtin_vsx_stxvd2x_be: case PPC::BI__builtin_vsx_stxvw4x_be: case PPC::BI__builtin_vsx_stxvl: - case PPC::BI__builtin_vsx_stxvll: - { + case PPC::BI__builtin_vsx_stxvll: { SmallVector Ops; Ops.push_back(EmitScalarExpr(E->getArg(0))); Ops.push_back(EmitScalarExpr(E->getArg(1))); Ops.push_back(EmitScalarExpr(E->getArg(2))); - if(BuiltinID == PPC::BI__builtin_vsx_stxvl || - BuiltinID == PPC::BI__builtin_vsx_stxvll ){ + if (BuiltinID == PPC::BI__builtin_vsx_stxvl || + BuiltinID == PPC::BI__builtin_vsx_stxvll) { Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy); - }else { + } else { Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy); Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]); Ops.pop_back(); } switch (BuiltinID) { - default: llvm_unreachable("Unsupported st intrinsic!"); + default: + llvm_unreachable("Unsupported st intrinsic!"); case PPC::BI__builtin_altivec_stvx: ID = Intrinsic::ppc_altivec_stvx; break; @@ -15492,7 +15768,8 @@ Op0 = IsLE ? HiLd : LoLd; Op1 = IsLE ? LoLd : HiLd; Value *AllElts = Builder.CreateCall(Vperm, {Op0, Op1, Mask1}, "shuffle1"); - Constant *Zero = llvm::Constant::getNullValue(IsLE ? ResTy : AllElts->getType()); + Constant *Zero = + llvm::Constant::getNullValue(IsLE ? 
ResTy : AllElts->getType()); if (IsLE) { SmallVector Consts; @@ -15797,8 +16074,8 @@ llvm::Type *ResultType = ConvertType(E->getType()); Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1)); if (Result->getType() != ResultType) - Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true, - "cast"); + Result = + Builder.CreateIntCast(Result, ResultType, /*isSigned*/ true, "cast"); return Result; } case PPC::BI__builtin_ppc_cmpb: { @@ -15974,39 +16251,38 @@ else F = CGM.getIntrinsic(Intrinsic::fma, ResultType); switch (BuiltinID) { - case PPC::BI__builtin_vsx_xvmaddadp: - case PPC::BI__builtin_vsx_xvmaddasp: - if (Builder.getIsFPConstrained()) - return Builder.CreateConstrainedFPCall(F, {X, Y, Z}); - else - return Builder.CreateCall(F, {X, Y, Z}); - case PPC::BI__builtin_vsx_xvnmaddadp: - case PPC::BI__builtin_vsx_xvnmaddasp: - if (Builder.getIsFPConstrained()) - return Builder.CreateFNeg( - Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg"); - else - return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg"); - case PPC::BI__builtin_vsx_xvmsubadp: - case PPC::BI__builtin_vsx_xvmsubasp: - if (Builder.getIsFPConstrained()) - return Builder.CreateConstrainedFPCall( - F, {X, Y, Builder.CreateFNeg(Z, "neg")}); - else - return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}); - case PPC::BI__builtin_ppc_fnmsub: - case PPC::BI__builtin_ppc_fnmsubs: - case PPC::BI__builtin_vsx_xvnmsubadp: - case PPC::BI__builtin_vsx_xvnmsubasp: - if (Builder.getIsFPConstrained()) - return Builder.CreateFNeg( - Builder.CreateConstrainedFPCall( - F, {X, Y, Builder.CreateFNeg(Z, "neg")}), - "neg"); - else - return Builder.CreateCall( - CGM.getIntrinsic(Intrinsic::ppc_fnmsub, ResultType), {X, Y, Z}); - } + case PPC::BI__builtin_vsx_xvmaddadp: + case PPC::BI__builtin_vsx_xvmaddasp: + if (Builder.getIsFPConstrained()) + return Builder.CreateConstrainedFPCall(F, {X, Y, Z}); + else + return Builder.CreateCall(F, {X, Y, Z}); + case PPC::BI__builtin_vsx_xvnmaddadp: + case PPC::BI__builtin_vsx_xvnmaddasp: + if (Builder.getIsFPConstrained()) + return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), + "neg"); + else + return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg"); + case PPC::BI__builtin_vsx_xvmsubadp: + case PPC::BI__builtin_vsx_xvmsubasp: + if (Builder.getIsFPConstrained()) + return Builder.CreateConstrainedFPCall( + F, {X, Y, Builder.CreateFNeg(Z, "neg")}); + else + return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}); + case PPC::BI__builtin_ppc_fnmsub: + case PPC::BI__builtin_ppc_fnmsubs: + case PPC::BI__builtin_vsx_xvnmsubadp: + case PPC::BI__builtin_vsx_xvnmsubasp: + if (Builder.getIsFPConstrained()) + return Builder.CreateFNeg(Builder.CreateConstrainedFPCall( + F, {X, Y, Builder.CreateFNeg(Z, "neg")}), + "neg"); + else + return Builder.CreateCall( + CGM.getIntrinsic(Intrinsic::ppc_fnmsub, ResultType), {X, Y, Z}); + } llvm_unreachable("Unknown FMA operation"); return nullptr; // Suppress no-return warning } @@ -16185,91 +16461,91 @@ // use custom code generation to expand a builtin call with a pointer to a // load (if the corresponding instruction accumulates its result) followed by // the call to the intrinsic and a store of the result. 
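A hedged usage sketch of the disassemble path handled by this custom expansion, assuming a Power10-class target with MMA enabled; the builtin and type names come from the PowerPC MMA extension, and the compile flags are an assumption, not something taken from this patch:

// Compile with something like: clang --target=powerpc64le-linux-gnu -mcpu=pwr10
#include <altivec.h>

void spillAccumulator(vector unsigned char Out[4]) {
  __vector_quad Acc;
  __builtin_mma_xxsetaccz(&Acc);            // zero the 512-bit accumulator
  __builtin_mma_disassemble_acc(Out, &Acc); // store its four constituent vectors
}
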
-#define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate) \ +#define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate) \ case PPC::BI__builtin_##Name: #include "clang/Basic/BuiltinsPPC.def" - { - SmallVector Ops; - for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) - if (E->getArg(i)->getType()->isArrayType()) - Ops.push_back(EmitArrayToPointerDecay(E->getArg(i)).getPointer()); - else - Ops.push_back(EmitScalarExpr(E->getArg(i))); - // The first argument of these two builtins is a pointer used to store their - // result. However, the llvm intrinsics return their result in multiple - // return values. So, here we emit code extracting these values from the - // intrinsic results and storing them using that pointer. - if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc || - BuiltinID == PPC::BI__builtin_vsx_disassemble_pair || - BuiltinID == PPC::BI__builtin_mma_disassemble_pair) { - unsigned NumVecs = 2; - auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair; - if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) { - NumVecs = 4; - Intrinsic = Intrinsic::ppc_mma_disassemble_acc; + { + SmallVector Ops; + for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) + if (E->getArg(i)->getType()->isArrayType()) + Ops.push_back(EmitArrayToPointerDecay(E->getArg(i)).getPointer()); + else + Ops.push_back(EmitScalarExpr(E->getArg(i))); + // The first argument of these two builtins is a pointer used to store + // their result. However, the llvm intrinsics return their result in + // multiple return values. So, here we emit code extracting these values + // from the intrinsic results and storing them using that pointer. + if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc || + BuiltinID == PPC::BI__builtin_vsx_disassemble_pair || + BuiltinID == PPC::BI__builtin_mma_disassemble_pair) { + unsigned NumVecs = 2; + auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair; + if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) { + NumVecs = 4; + Intrinsic = Intrinsic::ppc_mma_disassemble_acc; + } + llvm::Function *F = CGM.getIntrinsic(Intrinsic); + Address Addr = EmitPointerWithAlignment(E->getArg(1)); + Value *Vec = Builder.CreateLoad(Addr); + Value *Call = Builder.CreateCall(F, {Vec}); + llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16); + Value *Ptr = Builder.CreateBitCast(Ops[0], VTy->getPointerTo()); + for (unsigned i = 0; i < NumVecs; i++) { + Value *Vec = Builder.CreateExtractValue(Call, i); + llvm::ConstantInt *Index = llvm::ConstantInt::get(IntTy, i); + Value *GEP = Builder.CreateInBoundsGEP(VTy, Ptr, Index); + Builder.CreateAlignedStore(Vec, GEP, MaybeAlign(16)); + } + return Call; } - llvm::Function *F = CGM.getIntrinsic(Intrinsic); - Address Addr = EmitPointerWithAlignment(E->getArg(1)); - Value *Vec = Builder.CreateLoad(Addr); - Value *Call = Builder.CreateCall(F, {Vec}); - llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16); - Value *Ptr = Builder.CreateBitCast(Ops[0], VTy->getPointerTo()); - for (unsigned i=0; i CallOps; + if (Accumulate) { + Address Addr = EmitPointerWithAlignment(E->getArg(0)); + Value *Acc = Builder.CreateLoad(Addr); + CallOps.push_back(Acc); + } + for (unsigned i = 1; i < Ops.size(); i++) + CallOps.push_back(Ops[i]); llvm::Function *F = CGM.getIntrinsic(ID); - return Builder.CreateCall(F, Ops, ""); + Value *Call = Builder.CreateCall(F, CallOps); + return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64)); } - SmallVector CallOps; - if (Accumulate) { - Address Addr = EmitPointerWithAlignment(E->getArg(0)); - Value *Acc = Builder.CreateLoad(Addr); - 
CallOps.push_back(Acc); - } - for (unsigned i=1; isetMetadata(llvm::LLVMContext::MD_range, RNode); LD->setMetadata(llvm::LLVMContext::MD_invariant_load, - llvm::MDNode::get(CGF.getLLVMContext(), None)); + llvm::MDNode::get(CGF.getLLVMContext(), None)); return LD; } @@ -16598,8 +16874,8 @@ llvm::Value *Y = EmitScalarExpr(E->getArg(1)); llvm::Value *Z = EmitScalarExpr(E->getArg(2)); - llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale, - X->getType()); + llvm::Function *Callee = + CGM.getIntrinsic(Intrinsic::amdgcn_div_scale, X->getType()); llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z}); @@ -16619,8 +16895,8 @@ llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); llvm::Value *Src3 = EmitScalarExpr(E->getArg(3)); - llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas, - Src0->getType()); + llvm::Function *F = + CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas, Src0->getType()); llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3); return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool}); } @@ -16685,13 +16961,13 @@ case AMDGPU::BI__builtin_amdgcn_frexp_expf: { Value *Src0 = EmitScalarExpr(E->getArg(0)); Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp, - { Builder.getInt32Ty(), Src0->getType() }); + {Builder.getInt32Ty(), Src0->getType()}); return Builder.CreateCall(F, Src0); } case AMDGPU::BI__builtin_amdgcn_frexp_exph: { Value *Src0 = EmitScalarExpr(E->getArg(0)); Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp, - { Builder.getInt16Ty(), Src0->getType() }); + {Builder.getInt16Ty(), Src0->getType()}); return Builder.CreateCall(F, Src0); } case AMDGPU::BI__builtin_amdgcn_fract: @@ -16714,8 +16990,8 @@ // FIXME-GFX10: How should 32 bit mask be handled? Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp, - { Builder.getInt64Ty(), Src0->getType() }); - return Builder.CreateCall(F, { Src0, Src1, Src2 }); + {Builder.getInt64Ty(), Src0->getType()}); + return Builder.CreateCall(F, {Src0, Src1, Src2}); } case AMDGPU::BI__builtin_amdgcn_fcmp: case AMDGPU::BI__builtin_amdgcn_fcmpf: { @@ -16725,8 +17001,8 @@ // FIXME-GFX10: How should 32 bit mask be handled? Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp, - { Builder.getInt64Ty(), Src0->getType() }); - return Builder.CreateCall(F, { Src0, Src1, Src2 }); + {Builder.getInt64Ty(), Src0->getType()}); + return Builder.CreateCall(F, {Src0, Src1, Src2}); } case AMDGPU::BI__builtin_amdgcn_class: case AMDGPU::BI__builtin_amdgcn_classf: @@ -16737,11 +17013,12 @@ return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3); case AMDGPU::BI__builtin_amdgcn_ds_append: case AMDGPU::BI__builtin_amdgcn_ds_consume: { - Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ? - Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume; + Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append + ? 
Intrinsic::amdgcn_ds_append + : Intrinsic::amdgcn_ds_consume; Value *Src0 = EmitScalarExpr(E->getArg(0)); - Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() }); - return Builder.CreateCall(F, { Src0, Builder.getFalse() }); + Function *F = CGM.getIntrinsic(Intrin, {Src0->getType()}); + return Builder.CreateCall(F, {Src0, Builder.getFalse()}); } case AMDGPU::BI__builtin_amdgcn_ds_faddf: case AMDGPU::BI__builtin_amdgcn_ds_fminf: @@ -16763,11 +17040,11 @@ llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); llvm::Value *Src3 = EmitScalarExpr(E->getArg(3)); llvm::Value *Src4 = EmitScalarExpr(E->getArg(4)); - llvm::Function *F = CGM.getIntrinsic(Intrin, { Src1->getType() }); + llvm::Function *F = CGM.getIntrinsic(Intrin, {Src1->getType()}); llvm::FunctionType *FTy = F->getFunctionType(); llvm::Type *PTy = FTy->getParamType(0); Src0 = Builder.CreatePointerBitCastOrAddrSpaceCast(Src0, PTy); - return Builder.CreateCall(F, { Src0, Src1, Src2, Src3, Src4 }); + return Builder.CreateCall(F, {Src0, Src1, Src2, Src3, Src4}); } case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64: case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32: @@ -16865,17 +17142,18 @@ return Builder.CreateCall(F, {Addr, Val, ZeroI32, ZeroI32, ZeroI1}); } case AMDGPU::BI__builtin_amdgcn_read_exec: { - CallInst *CI = cast( - EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, NormalRead, "exec")); + CallInst *CI = cast(EmitSpecialRegisterBuiltin( + *this, E, Int64Ty, Int64Ty, NormalRead, "exec")); CI->setConvergent(); return CI; } case AMDGPU::BI__builtin_amdgcn_read_exec_lo: case AMDGPU::BI__builtin_amdgcn_read_exec_hi: { - StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ? - "exec_lo" : "exec_hi"; - CallInst *CI = cast( - EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, NormalRead, RegName)); + StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo + ? "exec_lo" + : "exec_hi"; + CallInst *CI = cast(EmitSpecialRegisterBuiltin( + *this, E, Int32Ty, Int32Ty, NormalRead, RegName)); CI->setConvergent(); return CI; } @@ -16915,8 +17193,8 @@ Value *Rtn = Builder.CreateExtractValue(Call, 0); Value *A = Builder.CreateExtractValue(Call, 1); llvm::Type *RetTy = ConvertType(E->getType()); - Value *I0 = Builder.CreateInsertElement(PoisonValue::get(RetTy), Rtn, - (uint64_t)0); + Value *I0 = + Builder.CreateInsertElement(PoisonValue::get(RetTy), Rtn, (uint64_t)0); return Builder.CreateInsertElement(I0, A, 1); } @@ -17022,7 +17300,7 @@ llvm::Value *Src1 = EmitScalarExpr(E->getArg(1)); llvm::Value *Src2 = EmitScalarExpr(E->getArg(2)); Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType()); - return Builder.CreateCall(F, { Src0, Src1, Src2 }); + return Builder.CreateCall(F, {Src0, Src1, Src2}); } case AMDGPU::BI__builtin_amdgcn_fence: { ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)), @@ -17063,7 +17341,7 @@ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType(); bool Volatile = - PtrTy->castAs()->getPointeeType().isVolatileQualified(); + PtrTy->castAs()->getPointeeType().isVolatileQualified(); Value *IsVolatile = Builder.getInt1(static_cast(Volatile)); return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile}); @@ -17133,10 +17411,10 @@ return Builder.CreateCall(F, {Data, Address}); } - // Vector builtins. Note that most vector builtins are mapped automatically - // to target-specific LLVM intrinsics. The ones handled specially here can - // be represented via standard LLVM IR, which is preferable to enable common - // LLVM optimizations. 
+ // Vector builtins. Note that most vector builtins are mapped automatically + // to target-specific LLVM intrinsics. The ones handled specially here can + // be represented via standard LLVM IR, which is preferable to enable common + // LLVM optimizations. case SystemZ::BI__builtin_s390_vpopctb: case SystemZ::BI__builtin_s390_vpopcth: @@ -17175,8 +17453,9 @@ llvm::Type *ResultType = ConvertType(E->getType()); Value *X = EmitScalarExpr(E->getArg(0)); if (Builder.getIsFPConstrained()) { - Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, ResultType); - return Builder.CreateConstrainedFPCall(F, { X }); + Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt, + ResultType); + return Builder.CreateConstrainedFPCall(F, {X}); } else { Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType); return Builder.CreateCall(F, X); @@ -17189,7 +17468,8 @@ Value *Y = EmitScalarExpr(E->getArg(1)); Value *Z = EmitScalarExpr(E->getArg(2)); if (Builder.getIsFPConstrained()) { - Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); + Function *F = + CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); return Builder.CreateConstrainedFPCall(F, {X, Y, Z}); } else { Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); @@ -17203,8 +17483,10 @@ Value *Y = EmitScalarExpr(E->getArg(1)); Value *Z = EmitScalarExpr(E->getArg(2)); if (Builder.getIsFPConstrained()) { - Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); - return Builder.CreateConstrainedFPCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}); + Function *F = + CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); + return Builder.CreateConstrainedFPCall( + F, {X, Y, Builder.CreateFNeg(Z, "neg")}); } else { Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}); @@ -17217,8 +17499,10 @@ Value *Y = EmitScalarExpr(E->getArg(1)); Value *Z = EmitScalarExpr(E->getArg(2)); if (Builder.getIsFPConstrained()) { - Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); - return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg"); + Function *F = + CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); + return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, Z}), + "neg"); } else { Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg"); @@ -17231,9 +17515,11 @@ Value *Y = EmitScalarExpr(E->getArg(1)); Value *Z = EmitScalarExpr(E->getArg(2)); if (Builder.getIsFPConstrained()) { - Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); + Function *F = + CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType); Value *NegZ = Builder.CreateFNeg(Z, "sub"); - return Builder.CreateFNeg(Builder.CreateConstrainedFPCall(F, {X, Y, NegZ})); + return Builder.CreateFNeg( + Builder.CreateConstrainedFPCall(F, {X, Y, NegZ})); } else { Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType); Value *NegZ = Builder.CreateFNeg(Z, "neg"); @@ -17266,27 +17552,42 @@ Intrinsic::ID ID = Intrinsic::not_intrinsic; Intrinsic::ID CI; switch (M4.getZExtValue()) { - default: break; - case 0: // IEEE-inexact exception allowed + default: + break; + case 0: // IEEE-inexact exception allowed switch (M5.getZExtValue()) { - default: break; - case 0: ID = Intrinsic::rint; - CI = 
Intrinsic::experimental_constrained_rint; break; + default: + break; + case 0: + ID = Intrinsic::rint; + CI = Intrinsic::experimental_constrained_rint; + break; } break; - case 4: // IEEE-inexact exception suppressed + case 4: // IEEE-inexact exception suppressed switch (M5.getZExtValue()) { - default: break; - case 0: ID = Intrinsic::nearbyint; - CI = Intrinsic::experimental_constrained_nearbyint; break; - case 1: ID = Intrinsic::round; - CI = Intrinsic::experimental_constrained_round; break; - case 5: ID = Intrinsic::trunc; - CI = Intrinsic::experimental_constrained_trunc; break; - case 6: ID = Intrinsic::ceil; - CI = Intrinsic::experimental_constrained_ceil; break; - case 7: ID = Intrinsic::floor; - CI = Intrinsic::experimental_constrained_floor; break; + default: + break; + case 0: + ID = Intrinsic::nearbyint; + CI = Intrinsic::experimental_constrained_nearbyint; + break; + case 1: + ID = Intrinsic::round; + CI = Intrinsic::experimental_constrained_round; + break; + case 5: + ID = Intrinsic::trunc; + CI = Intrinsic::experimental_constrained_trunc; + break; + case 6: + ID = Intrinsic::ceil; + CI = Intrinsic::experimental_constrained_ceil; + break; + case 7: + ID = Intrinsic::floor; + CI = Intrinsic::experimental_constrained_floor; + break; } break; } @@ -17300,9 +17601,14 @@ } } switch (BuiltinID) { // FIXME: constrained version? - case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break; - case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break; - default: llvm_unreachable("Unknown BuiltinID"); + case SystemZ::BI__builtin_s390_vfisb: + ID = Intrinsic::s390_vfisb; + break; + case SystemZ::BI__builtin_s390_vfidb: + ID = Intrinsic::s390_vfidb; + break; + default: + llvm_unreachable("Unknown BuiltinID"); } Function *F = CGM.getIntrinsic(ID); Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); @@ -17321,9 +17627,12 @@ Intrinsic::ID ID = Intrinsic::not_intrinsic; Intrinsic::ID CI; switch (M4.getZExtValue()) { - default: break; - case 4: ID = Intrinsic::maxnum; - CI = Intrinsic::experimental_constrained_maxnum; break; + default: + break; + case 4: + ID = Intrinsic::maxnum; + CI = Intrinsic::experimental_constrained_maxnum; + break; } if (ID != Intrinsic::not_intrinsic) { if (Builder.getIsFPConstrained()) { @@ -17335,9 +17644,14 @@ } } switch (BuiltinID) { - case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break; - case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break; - default: llvm_unreachable("Unknown BuiltinID"); + case SystemZ::BI__builtin_s390_vfmaxsb: + ID = Intrinsic::s390_vfmaxsb; + break; + case SystemZ::BI__builtin_s390_vfmaxdb: + ID = Intrinsic::s390_vfmaxdb; + break; + default: + llvm_unreachable("Unknown BuiltinID"); } Function *F = CGM.getIntrinsic(ID); Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); @@ -17355,9 +17669,12 @@ Intrinsic::ID ID = Intrinsic::not_intrinsic; Intrinsic::ID CI; switch (M4.getZExtValue()) { - default: break; - case 4: ID = Intrinsic::minnum; - CI = Intrinsic::experimental_constrained_minnum; break; + default: + break; + case 4: + ID = Intrinsic::minnum; + CI = Intrinsic::experimental_constrained_minnum; + break; } if (ID != Intrinsic::not_intrinsic) { if (Builder.getIsFPConstrained()) { @@ -17369,9 +17686,14 @@ } } switch (BuiltinID) { - case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break; - case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break; - default: llvm_unreachable("Unknown BuiltinID"); + case 
SystemZ::BI__builtin_s390_vfminsb: + ID = Intrinsic::s390_vfminsb; + break; + case SystemZ::BI__builtin_s390_vfmindb: + ID = Intrinsic::s390_vfmindb; + break; + default: + llvm_unreachable("Unknown BuiltinID"); } Function *F = CGM.getIntrinsic(ID); Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4); @@ -17389,86 +17711,86 @@ // Vector intrinsics that output the post-instruction CC value. -#define INTRINSIC_WITH_CC(NAME) \ - case SystemZ::BI__builtin_##NAME: \ - return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E) +#define INTRINSIC_WITH_CC(NAME) \ + case SystemZ::BI__builtin_##NAME: \ + return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E) - INTRINSIC_WITH_CC(s390_vpkshs); - INTRINSIC_WITH_CC(s390_vpksfs); - INTRINSIC_WITH_CC(s390_vpksgs); + INTRINSIC_WITH_CC(s390_vpkshs); + INTRINSIC_WITH_CC(s390_vpksfs); + INTRINSIC_WITH_CC(s390_vpksgs); - INTRINSIC_WITH_CC(s390_vpklshs); - INTRINSIC_WITH_CC(s390_vpklsfs); - INTRINSIC_WITH_CC(s390_vpklsgs); + INTRINSIC_WITH_CC(s390_vpklshs); + INTRINSIC_WITH_CC(s390_vpklsfs); + INTRINSIC_WITH_CC(s390_vpklsgs); - INTRINSIC_WITH_CC(s390_vceqbs); - INTRINSIC_WITH_CC(s390_vceqhs); - INTRINSIC_WITH_CC(s390_vceqfs); - INTRINSIC_WITH_CC(s390_vceqgs); + INTRINSIC_WITH_CC(s390_vceqbs); + INTRINSIC_WITH_CC(s390_vceqhs); + INTRINSIC_WITH_CC(s390_vceqfs); + INTRINSIC_WITH_CC(s390_vceqgs); - INTRINSIC_WITH_CC(s390_vchbs); - INTRINSIC_WITH_CC(s390_vchhs); - INTRINSIC_WITH_CC(s390_vchfs); - INTRINSIC_WITH_CC(s390_vchgs); + INTRINSIC_WITH_CC(s390_vchbs); + INTRINSIC_WITH_CC(s390_vchhs); + INTRINSIC_WITH_CC(s390_vchfs); + INTRINSIC_WITH_CC(s390_vchgs); - INTRINSIC_WITH_CC(s390_vchlbs); - INTRINSIC_WITH_CC(s390_vchlhs); - INTRINSIC_WITH_CC(s390_vchlfs); - INTRINSIC_WITH_CC(s390_vchlgs); + INTRINSIC_WITH_CC(s390_vchlbs); + INTRINSIC_WITH_CC(s390_vchlhs); + INTRINSIC_WITH_CC(s390_vchlfs); + INTRINSIC_WITH_CC(s390_vchlgs); - INTRINSIC_WITH_CC(s390_vfaebs); - INTRINSIC_WITH_CC(s390_vfaehs); - INTRINSIC_WITH_CC(s390_vfaefs); + INTRINSIC_WITH_CC(s390_vfaebs); + INTRINSIC_WITH_CC(s390_vfaehs); + INTRINSIC_WITH_CC(s390_vfaefs); - INTRINSIC_WITH_CC(s390_vfaezbs); - INTRINSIC_WITH_CC(s390_vfaezhs); - INTRINSIC_WITH_CC(s390_vfaezfs); + INTRINSIC_WITH_CC(s390_vfaezbs); + INTRINSIC_WITH_CC(s390_vfaezhs); + INTRINSIC_WITH_CC(s390_vfaezfs); - INTRINSIC_WITH_CC(s390_vfeebs); - INTRINSIC_WITH_CC(s390_vfeehs); - INTRINSIC_WITH_CC(s390_vfeefs); + INTRINSIC_WITH_CC(s390_vfeebs); + INTRINSIC_WITH_CC(s390_vfeehs); + INTRINSIC_WITH_CC(s390_vfeefs); - INTRINSIC_WITH_CC(s390_vfeezbs); - INTRINSIC_WITH_CC(s390_vfeezhs); - INTRINSIC_WITH_CC(s390_vfeezfs); + INTRINSIC_WITH_CC(s390_vfeezbs); + INTRINSIC_WITH_CC(s390_vfeezhs); + INTRINSIC_WITH_CC(s390_vfeezfs); - INTRINSIC_WITH_CC(s390_vfenebs); - INTRINSIC_WITH_CC(s390_vfenehs); - INTRINSIC_WITH_CC(s390_vfenefs); + INTRINSIC_WITH_CC(s390_vfenebs); + INTRINSIC_WITH_CC(s390_vfenehs); + INTRINSIC_WITH_CC(s390_vfenefs); - INTRINSIC_WITH_CC(s390_vfenezbs); - INTRINSIC_WITH_CC(s390_vfenezhs); - INTRINSIC_WITH_CC(s390_vfenezfs); + INTRINSIC_WITH_CC(s390_vfenezbs); + INTRINSIC_WITH_CC(s390_vfenezhs); + INTRINSIC_WITH_CC(s390_vfenezfs); - INTRINSIC_WITH_CC(s390_vistrbs); - INTRINSIC_WITH_CC(s390_vistrhs); - INTRINSIC_WITH_CC(s390_vistrfs); + INTRINSIC_WITH_CC(s390_vistrbs); + INTRINSIC_WITH_CC(s390_vistrhs); + INTRINSIC_WITH_CC(s390_vistrfs); - INTRINSIC_WITH_CC(s390_vstrcbs); - INTRINSIC_WITH_CC(s390_vstrchs); - INTRINSIC_WITH_CC(s390_vstrcfs); + INTRINSIC_WITH_CC(s390_vstrcbs); + INTRINSIC_WITH_CC(s390_vstrchs); + 
INTRINSIC_WITH_CC(s390_vstrcfs); - INTRINSIC_WITH_CC(s390_vstrczbs); - INTRINSIC_WITH_CC(s390_vstrczhs); - INTRINSIC_WITH_CC(s390_vstrczfs); + INTRINSIC_WITH_CC(s390_vstrczbs); + INTRINSIC_WITH_CC(s390_vstrczhs); + INTRINSIC_WITH_CC(s390_vstrczfs); - INTRINSIC_WITH_CC(s390_vfcesbs); - INTRINSIC_WITH_CC(s390_vfcedbs); - INTRINSIC_WITH_CC(s390_vfchsbs); - INTRINSIC_WITH_CC(s390_vfchdbs); - INTRINSIC_WITH_CC(s390_vfchesbs); - INTRINSIC_WITH_CC(s390_vfchedbs); + INTRINSIC_WITH_CC(s390_vfcesbs); + INTRINSIC_WITH_CC(s390_vfcedbs); + INTRINSIC_WITH_CC(s390_vfchsbs); + INTRINSIC_WITH_CC(s390_vfchdbs); + INTRINSIC_WITH_CC(s390_vfchesbs); + INTRINSIC_WITH_CC(s390_vfchedbs); - INTRINSIC_WITH_CC(s390_vftcisb); - INTRINSIC_WITH_CC(s390_vftcidb); + INTRINSIC_WITH_CC(s390_vftcisb); + INTRINSIC_WITH_CC(s390_vftcidb); - INTRINSIC_WITH_CC(s390_vstrsb); - INTRINSIC_WITH_CC(s390_vstrsh); - INTRINSIC_WITH_CC(s390_vstrsf); + INTRINSIC_WITH_CC(s390_vstrsb); + INTRINSIC_WITH_CC(s390_vstrsh); + INTRINSIC_WITH_CC(s390_vstrsf); - INTRINSIC_WITH_CC(s390_vstrszb); - INTRINSIC_WITH_CC(s390_vstrszh); - INTRINSIC_WITH_CC(s390_vstrszf); + INTRINSIC_WITH_CC(s390_vstrszb); + INTRINSIC_WITH_CC(s390_vstrszh); + INTRINSIC_WITH_CC(s390_vstrszf); #undef INTRINSIC_WITH_CC @@ -17480,13 +17802,13 @@ namespace { // Helper classes for mapping MMA builtins to particular LLVM intrinsic variant. struct NVPTXMmaLdstInfo { - unsigned NumResults; // Number of elements to load/store + unsigned NumResults; // Number of elements to load/store // Intrinsic IDs for row/col variants. 0 if particular layout is unsupported. unsigned IID_col; unsigned IID_row; }; -#define MMA_INTR(geom_op_type, layout) \ +#define MMA_INTR(geom_op_type, layout) \ Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride #define MMA_LDST(n, geom_op_type) \ { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) } @@ -17644,7 +17966,6 @@ #undef MMA_LDST #undef MMA_INTR - struct NVPTXMmaInfo { unsigned NumEltsA; unsigned NumEltsB; @@ -17664,8 +17985,8 @@ } }; - // Returns an intrinsic that matches Layout and Satf for valid combinations of - // Layout and Satf, 0 otherwise. +// Returns an intrinsic that matches Layout and Satf for valid combinations of +// Layout and Satf, 0 otherwise. 
static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) { // clang-format off #define MMA_VARIANTS(geom, type) \ @@ -17787,8 +18108,8 @@ } // namespace -Value * -CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) { +Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, + const CallExpr *E) { auto MakeLdg = [&](unsigned IntrinsicID) { Value *Ptr = EmitScalarExpr(E->getArg(0)); QualType ArgType = E->getArg(0)->getType(); @@ -18012,8 +18333,8 @@ llvm::Type *ElemTy = ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType()); return Builder.CreateCall( - CGM.getIntrinsic( - Intrinsic::nvvm_atomic_cas_gen_i_cta, {ElemTy, Ptr->getType()}), + CGM.getIntrinsic(Intrinsic::nvvm_atomic_cas_gen_i_cta, + {ElemTy, Ptr->getType()}), {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))}); } case NVPTX::BI__nvvm_atom_sys_cas_gen_i: @@ -18023,8 +18344,8 @@ llvm::Type *ElemTy = ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType()); return Builder.CreateCall( - CGM.getIntrinsic( - Intrinsic::nvvm_atomic_cas_gen_i_sys, {ElemTy, Ptr->getType()}), + CGM.getIntrinsic(Intrinsic::nvvm_atomic_cas_gen_i_sys, + {ElemTy, Ptr->getType()}), {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))}); } case NVPTX::BI__nvvm_match_all_sync_i32p: @@ -18154,8 +18475,7 @@ unsigned IID = isColMajor ? II.IID_col : II.IID_row; if (IID == 0) return nullptr; - Function *Intrinsic = - CGM.getIntrinsic(IID, Dst->getType()); + Function *Intrinsic = CGM.getIntrinsic(IID, Dst->getType()); llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1); SmallVector Values = {Dst}; for (unsigned i = 0; i < II.NumResults; ++i) { @@ -18214,7 +18534,7 @@ llvm::APSInt SatfArg; if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1 || BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1) - SatfArg = 0; // .b1 does not have satf argument. + SatfArg = 0; // .b1 does not have satf argument. else if (Optional OptSatfArg = E->getArg(5)->getIntegerConstantExpr(getContext())) SatfArg = *OptSatfArg; @@ -18223,7 +18543,7 @@ bool Satf = SatfArg.getSExtValue(); NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID); unsigned IID = MI.getMMAIntrinsic(Layout, Satf); - if (IID == 0) // Unsupported combination of Layout/Satf. + if (IID == 0) // Unsupported combination of Layout/Satf. 
return nullptr; SmallVector Values; @@ -18914,44 +19234,46 @@ unsigned VecLen; }; Info Infos[] = { -#define CUSTOM_BUILTIN_MAPPING(x,s) \ - { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s }, - CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0) - CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0) - CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0) - CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0) - CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0) - CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0) - CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0) - CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0) - CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0) - CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0) - CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0) - CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0) - CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0) - CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0) - CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0) - CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0) - CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0) - CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0) - CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0) - CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0) - CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0) - CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0) - // Legacy builtins that take a vector in place of a vector predicate. - CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64) - CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64) - CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64) - CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64) - CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128) - CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128) - CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128) - CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128) +#define CUSTOM_BUILTIN_MAPPING(x, s) \ + {Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s}, + CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0) CUSTOM_BUILTIN_MAPPING( + L2_loadrb_pci, + 0) CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, + 0) CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0) + CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0) CUSTOM_BUILTIN_MAPPING( + L2_loadrd_pci, 0) CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0) + CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0) CUSTOM_BUILTIN_MAPPING( + L2_loadruh_pcr, 0) CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0) + CUSTOM_BUILTIN_MAPPING( + L2_loadri_pcr, 0) CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0) + CUSTOM_BUILTIN_MAPPING( + S2_storerb_pci, + 0) CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0) + CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0) + CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0) + CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0) + CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0) + CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, + 0) + CUSTOM_BUILTIN_MAPPING( + S2_storerf_pcr, 0) + CUSTOM_BUILTIN_MAPPING( + S2_storeri_pcr, 0) + CUSTOM_BUILTIN_MAPPING( + S2_storerd_pcr, 0) + // Legacy builtins that take a vector in place of a vector predicate. 
+ CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64) CUSTOM_BUILTIN_MAPPING( + V6_vmaskedstorenq, 64) CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64) + CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64) + CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128) + CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128) + CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128) + CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128) #include "clang/Basic/BuiltinsHexagonMapCustomDep.def" #undef CUSTOM_BUILTIN_MAPPING }; - auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; }; + auto CmpInfo = [](Info A, Info B) { return A.BuiltinID < B.BuiltinID; }; static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true); (void)SortOnce; @@ -18971,8 +19293,8 @@ auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) { // The base pointer is passed by address, so it needs to be loaded. Address A = EmitPointerWithAlignment(E->getArg(0)); - Address BP = Address(Builder.CreateBitCast( - A.getPointer(), Int8PtrPtrTy), Int8PtrTy, A.getAlignment()); + Address BP = Address(Builder.CreateBitCast(A.getPointer(), Int8PtrPtrTy), + Int8PtrTy, A.getAlignment()); llvm::Value *Base = Builder.CreateLoad(BP); // The treatment of both loads and stores is the same: the arguments for // the builtin are the same as the arguments for the intrinsic. @@ -18982,17 +19304,17 @@ // Store: // builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start) // builtin(Base, Mod, Val, Start) -> intr(Base, Mod, Val, Start) - SmallVector Ops = { Base }; + SmallVector Ops = {Base}; for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i) Ops.push_back(EmitScalarExpr(E->getArg(i))); llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops); // The load intrinsics generate two results (Value, NewBase), stores // generate one (NewBase). The new base address needs to be stored. - llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1) - : Result; - llvm::Value *LV = Builder.CreateBitCast( - EmitScalarExpr(E->getArg(0)), NewBase->getType()->getPointerTo()); + llvm::Value *NewBase = + IsLoad ? Builder.CreateExtractValue(Result, 1) : Result; + llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), + NewBase->getType()->getPointerTo()); Address Dest = EmitPointerWithAlignment(E->getArg(0)); llvm::Value *RetVal = Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment()); @@ -19040,13 +19362,13 @@ return Builder.CreateExtractValue(Result, 1); }; - auto V2Q = [this, VecLen] (llvm::Value *Vec) { + auto V2Q = [this, VecLen](llvm::Value *Vec) { Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B : Intrinsic::hexagon_V6_vandvrt; return Builder.CreateCall(CGM.getIntrinsic(ID), {Vec, Builder.getInt32(-1)}); }; - auto Q2V = [this, VecLen] (llvm::Value *Pred) { + auto Q2V = [this, VecLen](llvm::Value *Pred) { Intrinsic::ID ID = VecLen == 128 ? 
          Intrinsic::hexagon_V6_vandqrt_128B : Intrinsic::hexagon_V6_vandqrt;
     return Builder.CreateCall(CGM.getIntrinsic(ID),
@@ -19066,12 +19388,13 @@
     Address PredAddr = Builder.CreateElementBitCast(
         EmitPointerWithAlignment(E->getArg(2)), VecType);
     llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
-    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
+    llvm::Value *Result = Builder.CreateCall(
+        CGM.getIntrinsic(ID),
         {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});
     llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
     Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
-        PredAddr.getAlignment());
+                               PredAddr.getAlignment());
     return Builder.CreateExtractValue(Result, 0);
   }
@@ -19083,7 +19406,7 @@
   case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorenq_128B:
   case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentq_128B:
   case Hexagon::BI__builtin_HEXAGON_V6_vmaskedstorentnq_128B: {
-    SmallVector Ops;
+    SmallVector Ops;
     const Expr *PredOp = E->getArg(0);
     // There will be an implicit cast to a boolean vector. Strip it.
     if (auto *Cast = dyn_cast(PredOp)) {
@@ -19182,7 +19505,8 @@
   // Required for overloaded intrinsics.
   llvm::SmallVector IntrinsicTypes;
   switch (BuiltinID) {
-  default: llvm_unreachable("unexpected builtin ID");
+  default:
+    llvm_unreachable("unexpected builtin ID");
   case RISCV::BI__builtin_riscv_orc_b_32:
   case RISCV::BI__builtin_riscv_orc_b_64:
   case RISCV::BI__builtin_riscv_clz_32:
@@ -19198,7 +19522,8 @@
   case RISCV::BI__builtin_riscv_zip_32:
   case RISCV::BI__builtin_riscv_unzip_32: {
     switch (BuiltinID) {
-    default: llvm_unreachable("unexpected builtin ID");
+    default:
+      llvm_unreachable("unexpected builtin ID");
     // Zbb
     case RISCV::BI__builtin_riscv_orc_b_32:
     case RISCV::BI__builtin_riscv_orc_b_64:
@@ -19359,7 +19684,7 @@
     IntrinsicTypes = {ResultType};
     break;

-  // Vector builtins are handled from here.
+    // Vector builtins are handled from here.
#include "clang/Basic/riscv_vector_builtin_cg.inc"
   }

diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -372,7 +372,8 @@
   // First two arguments should be integers.
   for (unsigned I = 0; I < 2; ++I) {
     ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(I));
-    if (Arg.isInvalid()) return true;
+    if (Arg.isInvalid())
+      return true;
     TheCall->setArg(I, Arg.get());

     QualType Ty = Arg.get()->getType();
@@ -388,17 +389,17 @@
   // the other qualifiers aren't possible.
   {
     ExprResult Arg = S.DefaultFunctionArrayLvalueConversion(TheCall->getArg(2));
-    if (Arg.isInvalid()) return true;
+    if (Arg.isInvalid())
+      return true;
     TheCall->setArg(2, Arg.get());

     QualType Ty = Arg.get()->getType();
     const auto *PtrTy = Ty->getAs();
-    if (!PtrTy ||
-        !PtrTy->getPointeeType()->isIntegerType() ||
+    if (!PtrTy || !PtrTy->getPointeeType()->isIntegerType() ||
         PtrTy->getPointeeType().isConstQualified()) {
       S.Diag(Arg.get()->getBeginLoc(),
              diag::err_overflow_builtin_must_be_ptr_int)
-        << Ty << Arg.get()->getSourceRange();
+          << Ty << Arg.get()->getSourceRange();
       return true;
     }
   }
@@ -516,7 +517,8 @@
   }

   analyze_printf::PrintfSpecifier Specifier;
-  if (Specifier.fixType(T, S.getLangOpts(), S.Context, /*IsObjCLiteral=*/false)) {
+  if (Specifier.fixType(T, S.getLangOpts(), S.Context,
+                        /*IsObjCLiteral=*/false)) {
     // We were able to guess how to format this.
if (Specifier.getConversionSpecifier().getKind() == analyze_printf::PrintfConversionSpecifier::sArg) { @@ -772,7 +774,7 @@ } QualType ReturnTy = CE->getCallReturnType(S.Context); - QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() }; + QualType ArgTys[2] = {ReturnTy, ChainResult.get()->getType()}; QualType BuiltinTy = S.Context.getFunctionType( ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo()); QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy); @@ -1467,7 +1469,7 @@ bool IllegalParams = false; for (unsigned I = Start; I <= End; ++I) IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I), - S.Context.getSizeType()); + S.Context.getSizeType()); return IllegalParams; } @@ -1572,7 +1574,8 @@ // we have a block type, check the prototype const BlockPointerType *BPT = cast(Arg3->getType().getCanonicalType()); - if (BPT->getPointeeType()->castAs()->getNumParams() > 0) { + if (BPT->getPointeeType()->castAs()->getNumParams() > + 0) { S.Diag(Arg3->getBeginLoc(), diag::err_opencl_enqueue_kernel_blocks_no_args); return true; @@ -1643,7 +1646,7 @@ /// Returns OpenCL access qual. static OpenCLAccessAttr *getOpenCLArgAccess(const Decl *D) { - return D->getAttr(); + return D->getAttr(); } /// Returns true if pipe element type is different from the pointer. @@ -1852,8 +1855,8 @@ return true; auto RT = Call->getArg(0)->getType(); - if (!RT->isPointerType() || RT->getPointeeType() - .getAddressSpace() == LangAS::opencl_constant) { + if (!RT->isPointerType() || + RT->getPointeeType().getAddressSpace() == LangAS::opencl_constant) { S.Diag(Call->getBeginLoc(), diag::err_opencl_builtin_to_addr_invalid_arg) << Call->getArg(0) << Call->getDirectCallee() << Call->getSourceRange(); return true; @@ -1881,8 +1884,8 @@ default: llvm_unreachable("Invalid builtin function"); } - Call->setType(S.Context.getPointerType(S.Context.getQualifiedType( - RT.getUnqualifiedType(), Qual))); + Call->setType(S.Context.getPointerType( + S.Context.getQualifiedType(RT.getUnqualifiedType(), Qual))); return false; } @@ -2023,9 +2026,9 @@ } } -ExprResult -Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, - CallExpr *TheCall) { +ExprResult Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, + unsigned BuiltinID, + CallExpr *TheCall) { ExprResult TheCallResult(TheCall); // Find out if any arguments are required to be integer constant expressions. @@ -2033,12 +2036,13 @@ ASTContext::GetBuiltinTypeError Error; Context.GetBuiltinType(BuiltinID, Error, &ICEArguments); if (Error != ASTContext::GE_None) - ICEArguments = 0; // Don't diagnose previously diagnosed errors. + ICEArguments = 0; // Don't diagnose previously diagnosed errors. // If any arguments are required to be ICE's, check and diagnose. for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) { // Skip arguments not required to be ICE's. 
- if ((ICEArguments & (1 << ArgNo)) == 0) continue; + if ((ICEArguments & (1 << ArgNo)) == 0) + continue; llvm::APSInt Result; // If we don't have enough arguments, continue so we can issue better @@ -2180,7 +2184,8 @@ return ExprError(); break; case Builtin::BI__builtin_classify_type: - if (checkArgCount(*this, TheCall, 1)) return true; + if (checkArgCount(*this, TheCall, 1)) + return true; TheCall->setType(Context.IntTy); break; case Builtin::BI__builtin_complex: @@ -2188,9 +2193,11 @@ return ExprError(); break; case Builtin::BI__builtin_constant_p: { - if (checkArgCount(*this, TheCall, 1)) return true; + if (checkArgCount(*this, TheCall, 1)) + return true; ExprResult Arg = DefaultFunctionArrayLvalueConversion(TheCall->getArg(0)); - if (Arg.isInvalid()) return true; + if (Arg.isInvalid()) + return true; TheCall->setArg(0, Arg.get()); TheCall->setType(Context.IntTy); break; @@ -2332,8 +2339,8 @@ break; } #define BUILTIN(ID, TYPE, ATTRS) -#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ - case Builtin::BI##ID: \ +#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ + case Builtin::BI##ID: \ return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); #include "clang/Basic/Builtins.def" case Builtin::BI__annotation: @@ -2652,7 +2659,7 @@ const auto *TyA = Arg->getType()->getAs(); if (!TyA || !TyA->getElementType()->isIntegerType()) { Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type) - << 1 << /* vector of integers */ 6 << Arg->getType(); + << 1 << /* vector of integers */ 6 << Arg->getType(); return ExprError(); } TheCall->setType(TyA->getElementType()); @@ -2787,7 +2794,7 @@ bool Sema::CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { // Range check SVE intrinsics that take immediate values. - SmallVector, 3> ImmChecks; + SmallVector, 3> ImmChecks; switch (BuiltinID) { default: @@ -2803,7 +2810,7 @@ int ArgNum, CheckTy, ElementSizeInBits; std::tie(ArgNum, CheckTy, ElementSizeInBits) = I; - typedef bool(*OptionSetCheckFnTy)(int64_t Value); + typedef bool (*OptionSetCheckFnTy)(int64_t Value); // Function that checks whether the operand (ArgNum) is an immediate // that is one of the predefined values. @@ -2915,14 +2922,14 @@ bool HasConstPtr = false; switch (BuiltinID) { #define GET_NEON_OVERLOAD_CHECK -#include "clang/Basic/arm_neon.inc" #include "clang/Basic/arm_fp16.inc" +#include "clang/Basic/arm_neon.inc" #undef GET_NEON_OVERLOAD_CHECK } // For NEON intrinsics which are overloaded on vector element type, validate // the immediate which specifies which variant to emit. 
- unsigned ImmArg = TheCall->getNumArgs()-1; + unsigned ImmArg = TheCall->getNumArgs() - 1; if (mask) { if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) return true; @@ -2966,10 +2973,10 @@ switch (BuiltinID) { default: return false; - #define GET_NEON_IMMEDIATE_CHECK - #include "clang/Basic/arm_neon.inc" - #include "clang/Basic/arm_fp16.inc" - #undef GET_NEON_IMMEDIATE_CHECK +#define GET_NEON_IMMEDIATE_CHECK +#include "clang/Basic/arm_fp16.inc" +#include "clang/Basic/arm_neon.inc" +#undef GET_NEON_IMMEDIATE_CHECK } return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); @@ -2979,7 +2986,7 @@ switch (BuiltinID) { default: return false; - #include "clang/Basic/arm_mve_builtin_sema.inc" +#include "clang/Basic/arm_mve_builtin_sema.inc" } } @@ -3037,7 +3044,8 @@ BuiltinID == AArch64::BI__builtin_arm_ldrex || BuiltinID == AArch64::BI__builtin_arm_ldaex; - DeclRefExpr *DRE =cast(TheCall->getCallee()->IgnoreParenCasts()); + DeclRefExpr *DRE = + cast(TheCall->getCallee()->IgnoreParenCasts()); // Ensure that we have the proper number of arguments. if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) @@ -3147,7 +3155,7 @@ if (BuiltinID == ARM::BI__builtin_arm_prefetch) { return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || - SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); + SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); } if (BuiltinID == ARM::BI__builtin_arm_rsr64 || @@ -3171,7 +3179,8 @@ // range check them here. // FIXME: VFP Intrinsics should error if VFP not present. switch (BuiltinID) { - default: return false; + default: + return false; case ARM::BI__builtin_arm_ssat: return SemaBuiltinConstantArgRange(TheCall, 1, 1, 32); case ARM::BI__builtin_arm_usat: @@ -3224,9 +3233,9 @@ if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || - SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || - SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || - SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); + SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || + SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || + SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); } if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || @@ -3272,11 +3281,18 @@ // range check them here. 
unsigned i = 0, l = 0, u = 0; switch (BuiltinID) { - default: return false; + default: + return false; case AArch64::BI__builtin_arm_dmb: case AArch64::BI__builtin_arm_dsb: - case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; - case AArch64::BI__builtin_arm_tcancel: l = 0; u = 65535; break; + case AArch64::BI__builtin_arm_isb: + l = 0; + u = 15; + break; + case AArch64::BI__builtin_arm_tcancel: + l = 0; + u = 65535; + break; } return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); @@ -3366,8 +3382,7 @@ return llvm::is_contained(ET->getDecl()->enumerators(), Enumerator); } -bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, - CallExpr *TheCall) { +bool Sema::CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { assert((BuiltinID == BPF::BI__builtin_preserve_field_info || BuiltinID == BPF::BI__builtin_btf_type_id || BuiltinID == BPF::BI__builtin_preserve_type_info || @@ -3443,198 +3458,190 @@ }; static BuiltinInfo Infos[] = { - { Hexagon::BI__builtin_circ_ldd, {{ 3, true, 4, 3 }} }, - { Hexagon::BI__builtin_circ_ldw, {{ 3, true, 4, 2 }} }, - { Hexagon::BI__builtin_circ_ldh, {{ 3, true, 4, 1 }} }, - { Hexagon::BI__builtin_circ_lduh, {{ 3, true, 4, 1 }} }, - { Hexagon::BI__builtin_circ_ldb, {{ 3, true, 4, 0 }} }, - { Hexagon::BI__builtin_circ_ldub, {{ 3, true, 4, 0 }} }, - { Hexagon::BI__builtin_circ_std, {{ 3, true, 4, 3 }} }, - { Hexagon::BI__builtin_circ_stw, {{ 3, true, 4, 2 }} }, - { Hexagon::BI__builtin_circ_sth, {{ 3, true, 4, 1 }} }, - { Hexagon::BI__builtin_circ_sthhi, {{ 3, true, 4, 1 }} }, - { Hexagon::BI__builtin_circ_stb, {{ 3, true, 4, 0 }} }, - - { Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{ 1, true, 4, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{ 1, true, 4, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{ 1, true, 4, 1 }} }, - { Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{ 1, true, 4, 1 }} }, - { Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{ 1, true, 4, 2 }} }, - { Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{ 1, true, 4, 3 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{ 1, true, 4, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{ 1, true, 4, 1 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{ 1, true, 4, 1 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{ 1, true, 4, 2 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{ 1, true, 4, 3 }} }, - - { Hexagon::BI__builtin_HEXAGON_A2_combineii, {{ 1, true, 8, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{ 1, false, 16, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_A2_tfril, {{ 1, false, 16, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{ 0, true, 8, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{ 1, false, 8, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{ 1, true, 8, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{ 1, false, 8, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{ 1, true, 8, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{ 1, false, 7, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{ 1, true, 8, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{ 1, true, 8, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{ 1, false, 7, 0 }} }, - { 
Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{ 1, true, 8, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{ 1, true, 8, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{ 1, false, 7, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{ 1, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_C2_muxii, {{ 2, true, 8, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{ 1, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{ 0, false, 10, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{ 0, false, 10, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{ 0, false, 10, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{ 0, false, 10, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{ 1, false, 6, 2 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{ 2, false, 3, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{ 1, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{ 1, false, 4, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{ 1, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, - {{ 1, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{ 1, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, - {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{ 1, false, 4, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_extractu, {{ 1, false, 5, 0 }, - { 2, false, 5, 0 }} }, - { 
Hexagon::BI__builtin_HEXAGON_S2_extractup, {{ 1, false, 6, 0 }, - { 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_insert, {{ 2, false, 5, 0 }, - { 3, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_insertp, {{ 2, false, 6, 0 }, - { 3, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{ 1, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{ 1, false, 4, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, - {{ 2, false, 4, 0 }, - { 3, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, - {{ 2, false, 4, 0 }, - { 3, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, - {{ 2, false, 4, 0 }, - { 3, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, - {{ 2, false, 4, 0 }, - { 3, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_valignib, {{ 2, false, 3, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{ 2, false, 3, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{ 1, true , 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{ 1, true, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S4_extract, {{ 1, false, 5, 0 }, - { 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S4_extractp, {{ 1, false, 6, 0 }, - { 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S4_lsli, {{ 0, true, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{ 3, false, 2, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{ 2, false, 2, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, - {{ 1, false, 4, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{ 1, false, 4, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, - {{ 1, false, 4, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{ 1, false, 6, 0 }} }, 
- { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{ 2, false, 6, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{ 1, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{ 2, false, 5, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{ 2, false, 3, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{ 2, false, 3, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{ 2, false, 3, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{ 2, false, 3, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{ 2, false, 1, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{ 2, false, 1, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{ 3, false, 1, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, - {{ 3, false, 1, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{ 2, false, 1, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{ 2, false, 1, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{ 3, false, 1, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, - {{ 3, false, 1, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{ 2, false, 1, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{ 2, false, 1, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{ 3, false, 1, 0 }} }, - { Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, - {{ 3, false, 1, 0 }} }, + {Hexagon::BI__builtin_circ_ldd, {{3, true, 4, 3}}}, + {Hexagon::BI__builtin_circ_ldw, {{3, true, 4, 2}}}, + {Hexagon::BI__builtin_circ_ldh, {{3, true, 4, 1}}}, + {Hexagon::BI__builtin_circ_lduh, {{3, true, 4, 1}}}, + {Hexagon::BI__builtin_circ_ldb, {{3, true, 4, 0}}}, + {Hexagon::BI__builtin_circ_ldub, {{3, true, 4, 0}}}, + {Hexagon::BI__builtin_circ_std, {{3, true, 4, 3}}}, + {Hexagon::BI__builtin_circ_stw, {{3, true, 4, 2}}}, + {Hexagon::BI__builtin_circ_sth, {{3, true, 4, 1}}}, + {Hexagon::BI__builtin_circ_sthhi, {{3, true, 4, 1}}}, + {Hexagon::BI__builtin_circ_stb, {{3, true, 4, 0}}}, + + {Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci, {{1, true, 4, 0}}}, + {Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci, {{1, true, 4, 0}}}, + {Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci, {{1, true, 4, 1}}}, + {Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci, {{1, true, 4, 1}}}, + {Hexagon::BI__builtin_HEXAGON_L2_loadri_pci, {{1, true, 4, 2}}}, + {Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci, {{1, true, 4, 3}}}, + {Hexagon::BI__builtin_HEXAGON_S2_storerb_pci, {{1, true, 4, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_storerh_pci, {{1, true, 4, 1}}}, + {Hexagon::BI__builtin_HEXAGON_S2_storerf_pci, {{1, true, 4, 1}}}, + {Hexagon::BI__builtin_HEXAGON_S2_storeri_pci, {{1, true, 4, 2}}}, + {Hexagon::BI__builtin_HEXAGON_S2_storerd_pci, {{1, true, 4, 3}}}, + + {Hexagon::BI__builtin_HEXAGON_A2_combineii, {{1, true, 8, 0}}}, + {Hexagon::BI__builtin_HEXAGON_A2_tfrih, {{1, false, 16, 0}}}, + {Hexagon::BI__builtin_HEXAGON_A2_tfril, {{1, false, 16, 0}}}, + 
{Hexagon::BI__builtin_HEXAGON_A2_tfrpi, {{0, true, 8, 0}}}, + {Hexagon::BI__builtin_HEXAGON_A4_bitspliti, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_A4_cmpbeqi, {{1, false, 8, 0}}}, + {Hexagon::BI__builtin_HEXAGON_A4_cmpbgti, {{1, true, 8, 0}}}, + {Hexagon::BI__builtin_HEXAGON_A4_cround_ri, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_A4_round_ri, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_A4_vcmpbeqi, {{1, false, 8, 0}}}, + {Hexagon::BI__builtin_HEXAGON_A4_vcmpbgti, {{1, true, 8, 0}}}, + {Hexagon::BI__builtin_HEXAGON_A4_vcmpbgtui, {{1, false, 7, 0}}}, + {Hexagon::BI__builtin_HEXAGON_A4_vcmpheqi, {{1, true, 8, 0}}}, + {Hexagon::BI__builtin_HEXAGON_A4_vcmphgti, {{1, true, 8, 0}}}, + {Hexagon::BI__builtin_HEXAGON_A4_vcmphgtui, {{1, false, 7, 0}}}, + {Hexagon::BI__builtin_HEXAGON_A4_vcmpweqi, {{1, true, 8, 0}}}, + {Hexagon::BI__builtin_HEXAGON_A4_vcmpwgti, {{1, true, 8, 0}}}, + {Hexagon::BI__builtin_HEXAGON_A4_vcmpwgtui, {{1, false, 7, 0}}}, + {Hexagon::BI__builtin_HEXAGON_C2_bitsclri, {{1, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_C2_muxii, {{2, true, 8, 0}}}, + {Hexagon::BI__builtin_HEXAGON_C4_nbitsclri, {{1, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_F2_dfclass, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_F2_dfimm_n, {{0, false, 10, 0}}}, + {Hexagon::BI__builtin_HEXAGON_F2_dfimm_p, {{0, false, 10, 0}}}, + {Hexagon::BI__builtin_HEXAGON_F2_sfclass, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_F2_sfimm_n, {{0, false, 10, 0}}}, + {Hexagon::BI__builtin_HEXAGON_F2_sfimm_p, {{0, false, 10, 0}}}, + {Hexagon::BI__builtin_HEXAGON_M4_mpyri_addi, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_M4_mpyri_addr_u2, {{1, false, 6, 2}}}, + {Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri, {{2, false, 3, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asl_i_p, {{1, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asl_i_r, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh, {{1, false, 4, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asr_i_p, {{1, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, + {{1, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_rnd, {{1, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asr_i_r, {{1, false, 
5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, + {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh, {{1, false, 4, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_clrbit_i, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_extractu, + {{1, false, 5, 0}, {2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_extractup, + {{1, false, 6, 0}, {2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_insert, + {{2, false, 5, 0}, {3, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_insertp, + {{2, false, 6, 0}, {3, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p, {{1, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh, {{1, false, 4, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_setbit_i, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax, + {{2, false, 4, 0}, {3, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax, + {{2, false, 4, 0}, {3, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax, + {{2, false, 4, 0}, {3, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax, + {{2, false, 4, 0}, {3, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_togglebit_i, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_tstbit_i, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_valignib, {{2, false, 3, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S2_vspliceib, {{2, false, 3, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S4_addi_asl_ri, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S4_addi_lsr_ri, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S4_andi_asl_ri, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S4_andi_lsr_ri, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S4_clbaddi, {{1, true, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S4_clbpaddi, {{1, true, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S4_extract, + {{1, false, 5, 0}, {2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S4_extractp, + {{1, false, 6, 0}, {2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S4_lsli, {{0, true, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S4_ntstbit_i, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S4_ori_asl_ri, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S4_ori_lsr_ri, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S4_subi_asl_ri, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S4_subi_lsr_ri, {{2, 
false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S4_vrcrotate_acc, {{3, false, 2, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S4_vrcrotate, {{2, false, 2, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, + {{1, false, 4, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S5_asrhub_sat, {{1, false, 4, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, {{1, false, 4, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S6_rol_i_p, {{1, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_acc, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_and, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_nac, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_or, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S6_rol_i_p_xacc, {{2, false, 6, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S6_rol_i_r, {{1, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_acc, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_and, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_nac, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_or, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_S6_rol_i_r_xacc, {{2, false, 5, 0}}}, + {Hexagon::BI__builtin_HEXAGON_V6_valignbi, {{2, false, 3, 0}}}, + {Hexagon::BI__builtin_HEXAGON_V6_valignbi_128B, {{2, false, 3, 0}}}, + {Hexagon::BI__builtin_HEXAGON_V6_vlalignbi, {{2, false, 3, 0}}}, + {Hexagon::BI__builtin_HEXAGON_V6_vlalignbi_128B, {{2, false, 3, 0}}}, + {Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi, {{2, false, 1, 0}}}, + {Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_128B, {{2, false, 1, 0}}}, + {Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc, {{3, false, 1, 0}}}, + {Hexagon::BI__builtin_HEXAGON_V6_vrmpybusi_acc_128B, {{3, false, 1, 0}}}, + {Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi, {{2, false, 1, 0}}}, + {Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_128B, {{2, false, 1, 0}}}, + {Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc, {{3, false, 1, 0}}}, + {Hexagon::BI__builtin_HEXAGON_V6_vrmpyubi_acc_128B, {{3, false, 1, 0}}}, + {Hexagon::BI__builtin_HEXAGON_V6_vrsadubi, {{2, false, 1, 0}}}, + {Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_128B, {{2, false, 1, 0}}}, + {Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc, {{3, false, 1, 0}}}, + {Hexagon::BI__builtin_HEXAGON_V6_vrsadubi_acc_128B, {{3, false, 1, 0}}}, }; // Use a dynamically initialized static to sort the table exactly once on // first run. 
static const bool SortOnce = (llvm::sort(Infos, - [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { - return LHS.BuiltinID < RHS.BuiltinID; - }), + [](const BuiltinInfo &LHS, const BuiltinInfo &RHS) { + return LHS.BuiltinID < RHS.BuiltinID; + }), true); (void)SortOnce; @@ -3713,14 +3720,43 @@ bool Sema::CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall) { unsigned i = 0, l = 0, u = 0, m = 0; switch (BuiltinID) { - default: return false; - case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; - case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; - case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; - case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; - case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; - case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; - case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; + default: + return false; + case Mips::BI__builtin_mips_wrdsp: + i = 1; + l = 0; + u = 63; + break; + case Mips::BI__builtin_mips_rddsp: + i = 0; + l = 0; + u = 63; + break; + case Mips::BI__builtin_mips_append: + i = 2; + l = 0; + u = 31; + break; + case Mips::BI__builtin_mips_balign: + i = 2; + l = 0; + u = 3; + break; + case Mips::BI__builtin_mips_precr_sra_ph_w: + i = 2; + l = 0; + u = 31; + break; + case Mips::BI__builtin_mips_precr_sra_r_ph_w: + i = 2; + l = 0; + u = 31; + break; + case Mips::BI__builtin_mips_prepend: + i = 2; + l = 0; + u = 31; + break; // MSA intrinsics. Instructions (which the intrinsics maps to) which use the // df/m field. // These intrinsics take an unsigned 3 bit immediate. @@ -3733,9 +3769,17 @@ case Mips::BI__builtin_msa_srai_b: case Mips::BI__builtin_msa_srari_b: case Mips::BI__builtin_msa_srli_b: - case Mips::BI__builtin_msa_srlri_b: i = 1; l = 0; u = 7; break; + case Mips::BI__builtin_msa_srlri_b: + i = 1; + l = 0; + u = 7; + break; case Mips::BI__builtin_msa_binsli_b: - case Mips::BI__builtin_msa_binsri_b: i = 2; l = 0; u = 7; break; + case Mips::BI__builtin_msa_binsri_b: + i = 2; + l = 0; + u = 7; + break; // These intrinsics take an unsigned 4 bit immediate. case Mips::BI__builtin_msa_bclri_h: case Mips::BI__builtin_msa_bnegi_h: @@ -3746,14 +3790,26 @@ case Mips::BI__builtin_msa_srai_h: case Mips::BI__builtin_msa_srari_h: case Mips::BI__builtin_msa_srli_h: - case Mips::BI__builtin_msa_srlri_h: i = 1; l = 0; u = 15; break; + case Mips::BI__builtin_msa_srlri_h: + i = 1; + l = 0; + u = 15; + break; case Mips::BI__builtin_msa_binsli_h: - case Mips::BI__builtin_msa_binsri_h: i = 2; l = 0; u = 15; break; + case Mips::BI__builtin_msa_binsri_h: + i = 2; + l = 0; + u = 15; + break; // These intrinsics take an unsigned 5 bit immediate. // The first block of intrinsics actually have an unsigned 5 bit field, // not a df/n field. 
case Mips::BI__builtin_msa_cfcmsa: - case Mips::BI__builtin_msa_ctcmsa: i = 0; l = 0; u = 31; break; + case Mips::BI__builtin_msa_ctcmsa: + i = 0; + l = 0; + u = 31; + break; case Mips::BI__builtin_msa_clei_u_b: case Mips::BI__builtin_msa_clei_u_h: case Mips::BI__builtin_msa_clei_u_w: @@ -3787,9 +3843,17 @@ case Mips::BI__builtin_msa_subvi_b: case Mips::BI__builtin_msa_subvi_h: case Mips::BI__builtin_msa_subvi_w: - case Mips::BI__builtin_msa_subvi_d: i = 1; l = 0; u = 31; break; + case Mips::BI__builtin_msa_subvi_d: + i = 1; + l = 0; + u = 31; + break; case Mips::BI__builtin_msa_binsli_w: - case Mips::BI__builtin_msa_binsri_w: i = 2; l = 0; u = 31; break; + case Mips::BI__builtin_msa_binsri_w: + i = 2; + l = 0; + u = 31; + break; // These intrinsics take an unsigned 6 bit immediate. case Mips::BI__builtin_msa_bclri_d: case Mips::BI__builtin_msa_bnegi_d: @@ -3800,9 +3864,17 @@ case Mips::BI__builtin_msa_srai_d: case Mips::BI__builtin_msa_srari_d: case Mips::BI__builtin_msa_srli_d: - case Mips::BI__builtin_msa_srlri_d: i = 1; l = 0; u = 63; break; + case Mips::BI__builtin_msa_srlri_d: + i = 1; + l = 0; + u = 63; + break; case Mips::BI__builtin_msa_binsli_d: - case Mips::BI__builtin_msa_binsri_d: i = 2; l = 0; u = 63; break; + case Mips::BI__builtin_msa_binsri_d: + i = 2; + l = 0; + u = 63; + break; // These intrinsics take a signed 5 bit immediate. case Mips::BI__builtin_msa_ceqi_b: case Mips::BI__builtin_msa_ceqi_h: @@ -3823,7 +3895,11 @@ case Mips::BI__builtin_msa_mini_s_b: case Mips::BI__builtin_msa_mini_s_h: case Mips::BI__builtin_msa_mini_s_w: - case Mips::BI__builtin_msa_mini_s_d: i = 1; l = -16; u = 15; break; + case Mips::BI__builtin_msa_mini_s_d: + i = 1; + l = -16; + u = 15; + break; // These intrinsics take an unsigned 8 bit immediate. case Mips::BI__builtin_msa_andi_b: case Mips::BI__builtin_msa_nori_b: @@ -3831,53 +3907,161 @@ case Mips::BI__builtin_msa_shf_b: case Mips::BI__builtin_msa_shf_h: case Mips::BI__builtin_msa_shf_w: - case Mips::BI__builtin_msa_xori_b: i = 1; l = 0; u = 255; break; + case Mips::BI__builtin_msa_xori_b: + i = 1; + l = 0; + u = 255; + break; case Mips::BI__builtin_msa_bseli_b: case Mips::BI__builtin_msa_bmnzi_b: - case Mips::BI__builtin_msa_bmzi_b: i = 2; l = 0; u = 255; break; + case Mips::BI__builtin_msa_bmzi_b: + i = 2; + l = 0; + u = 255; + break; // df/n format // These intrinsics take an unsigned 4 bit immediate. case Mips::BI__builtin_msa_copy_s_b: case Mips::BI__builtin_msa_copy_u_b: case Mips::BI__builtin_msa_insve_b: - case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break; - case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break; + case Mips::BI__builtin_msa_splati_b: + i = 1; + l = 0; + u = 15; + break; + case Mips::BI__builtin_msa_sldi_b: + i = 2; + l = 0; + u = 15; + break; // These intrinsics take an unsigned 3 bit immediate. case Mips::BI__builtin_msa_copy_s_h: case Mips::BI__builtin_msa_copy_u_h: case Mips::BI__builtin_msa_insve_h: - case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break; - case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break; + case Mips::BI__builtin_msa_splati_h: + i = 1; + l = 0; + u = 7; + break; + case Mips::BI__builtin_msa_sldi_h: + i = 2; + l = 0; + u = 7; + break; // These intrinsics take an unsigned 2 bit immediate. 
case Mips::BI__builtin_msa_copy_s_w: case Mips::BI__builtin_msa_copy_u_w: case Mips::BI__builtin_msa_insve_w: - case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break; - case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break; + case Mips::BI__builtin_msa_splati_w: + i = 1; + l = 0; + u = 3; + break; + case Mips::BI__builtin_msa_sldi_w: + i = 2; + l = 0; + u = 3; + break; // These intrinsics take an unsigned 1 bit immediate. case Mips::BI__builtin_msa_copy_s_d: case Mips::BI__builtin_msa_copy_u_d: case Mips::BI__builtin_msa_insve_d: - case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break; - case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break; + case Mips::BI__builtin_msa_splati_d: + i = 1; + l = 0; + u = 1; + break; + case Mips::BI__builtin_msa_sldi_d: + i = 2; + l = 0; + u = 1; + break; // Memory offsets and immediate loads. // These intrinsics take a signed 10 bit immediate. - case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break; + case Mips::BI__builtin_msa_ldi_b: + i = 0; + l = -128; + u = 255; + break; case Mips::BI__builtin_msa_ldi_h: case Mips::BI__builtin_msa_ldi_w: - case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break; - case Mips::BI__builtin_msa_ld_b: i = 1; l = -512; u = 511; m = 1; break; - case Mips::BI__builtin_msa_ld_h: i = 1; l = -1024; u = 1022; m = 2; break; - case Mips::BI__builtin_msa_ld_w: i = 1; l = -2048; u = 2044; m = 4; break; - case Mips::BI__builtin_msa_ld_d: i = 1; l = -4096; u = 4088; m = 8; break; - case Mips::BI__builtin_msa_ldr_d: i = 1; l = -4096; u = 4088; m = 8; break; - case Mips::BI__builtin_msa_ldr_w: i = 1; l = -2048; u = 2044; m = 4; break; - case Mips::BI__builtin_msa_st_b: i = 2; l = -512; u = 511; m = 1; break; - case Mips::BI__builtin_msa_st_h: i = 2; l = -1024; u = 1022; m = 2; break; - case Mips::BI__builtin_msa_st_w: i = 2; l = -2048; u = 2044; m = 4; break; - case Mips::BI__builtin_msa_st_d: i = 2; l = -4096; u = 4088; m = 8; break; - case Mips::BI__builtin_msa_str_d: i = 2; l = -4096; u = 4088; m = 8; break; - case Mips::BI__builtin_msa_str_w: i = 2; l = -2048; u = 2044; m = 4; break; + case Mips::BI__builtin_msa_ldi_d: + i = 0; + l = -512; + u = 511; + break; + case Mips::BI__builtin_msa_ld_b: + i = 1; + l = -512; + u = 511; + m = 1; + break; + case Mips::BI__builtin_msa_ld_h: + i = 1; + l = -1024; + u = 1022; + m = 2; + break; + case Mips::BI__builtin_msa_ld_w: + i = 1; + l = -2048; + u = 2044; + m = 4; + break; + case Mips::BI__builtin_msa_ld_d: + i = 1; + l = -4096; + u = 4088; + m = 8; + break; + case Mips::BI__builtin_msa_ldr_d: + i = 1; + l = -4096; + u = 4088; + m = 8; + break; + case Mips::BI__builtin_msa_ldr_w: + i = 1; + l = -2048; + u = 2044; + m = 4; + break; + case Mips::BI__builtin_msa_st_b: + i = 2; + l = -512; + u = 511; + m = 1; + break; + case Mips::BI__builtin_msa_st_h: + i = 2; + l = -1024; + u = 1022; + m = 2; + break; + case Mips::BI__builtin_msa_st_w: + i = 2; + l = -2048; + u = 2044; + m = 4; + break; + case Mips::BI__builtin_msa_st_d: + i = 2; + l = -4096; + u = 4088; + m = 8; + break; + case Mips::BI__builtin_msa_str_d: + i = 2; + l = -4096; + u = 4088; + m = 8; + break; + case Mips::BI__builtin_msa_str_w: + i = 2; + l = -2048; + u = 2044; + m = 4; + break; } if (!m) @@ -3916,10 +4100,13 @@ Str = End; QualType Type; switch (size) { - #define PPC_VECTOR_TYPE(typeName, Id, size) \ - case size: Type = Context.Id##Ty; break; - #include "clang/Basic/PPCTypes.def" - default: llvm_unreachable("Invalid PowerPC MMA vector type"); +#define 
PPC_VECTOR_TYPE(typeName, Id, size) \ + case size: \ + Type = Context.Id##Ty; \ + break; +#include "clang/Basic/PPCTypes.def" + default: + llvm_unreachable("Invalid PowerPC MMA vector type"); } bool CheckVectorArgs = false; while (!CheckVectorArgs) { @@ -4029,7 +4216,8 @@ << TheCall->getSourceRange(); switch (BuiltinID) { - default: return false; + default: + return false; case PPC::BI__builtin_altivec_crypto_vshasigmaw: case PPC::BI__builtin_altivec_crypto_vshasigmad: return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || @@ -4115,15 +4303,15 @@ return SemaFeatureCheck(*this, TheCall, "isa-v31-instructions", diag::err_ppc_builtin_only_on_arch, "10"); case PPC::BI__builtin_altivec_vgnb: - return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7); + return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7); case PPC::BI__builtin_vsx_xxeval: - return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255); + return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255); case PPC::BI__builtin_altivec_vsldbi: - return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); + return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); case PPC::BI__builtin_altivec_vsrdbi: - return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); + return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7); case PPC::BI__builtin_vsx_xxpermx: - return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7); + return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7); case PPC::BI__builtin_ppc_tw: case PPC::BI__builtin_ppc_tdw: return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31); @@ -4274,7 +4462,7 @@ #define PPC_VECTOR_TYPE(Name, Id, Size) || CoreType == Context.Id##Ty if (false #include "clang/Basic/PPCTypes.def" - ) { + ) { Diag(TypeLoc, diag::err_ppc_invalid_use_mma_type); return true; } @@ -4408,8 +4596,7 @@ // Error message FeatureMissing = true; Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension) - << IsExtension - << TheCall->getSourceRange() << StringRef(FeatureStrs); + << IsExtension << TheCall->getSourceRange() << StringRef(FeatureStrs); } } @@ -4477,12 +4664,21 @@ // range check them here. 
unsigned i = 0, l = 0, u = 0; switch (BuiltinID) { - default: return false; - case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; + default: + return false; + case SystemZ::BI__builtin_s390_lcbb: + i = 1; + l = 0; + u = 15; + break; case SystemZ::BI__builtin_s390_verimb: case SystemZ::BI__builtin_s390_verimh: case SystemZ::BI__builtin_s390_verimf: - case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; + case SystemZ::BI__builtin_s390_verimg: + i = 3; + l = 0; + u = 255; + break; case SystemZ::BI__builtin_s390_vfaeb: case SystemZ::BI__builtin_s390_vfaeh: case SystemZ::BI__builtin_s390_vfaef: @@ -4494,16 +4690,36 @@ case SystemZ::BI__builtin_s390_vfaezf: case SystemZ::BI__builtin_s390_vfaezbs: case SystemZ::BI__builtin_s390_vfaezhs: - case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; + case SystemZ::BI__builtin_s390_vfaezfs: + i = 2; + l = 0; + u = 15; + break; case SystemZ::BI__builtin_s390_vfisb: case SystemZ::BI__builtin_s390_vfidb: return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); case SystemZ::BI__builtin_s390_vftcisb: - case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; - case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; - case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; - case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; + case SystemZ::BI__builtin_s390_vftcidb: + i = 1; + l = 0; + u = 4095; + break; + case SystemZ::BI__builtin_s390_vlbb: + i = 1; + l = 0; + u = 15; + break; + case SystemZ::BI__builtin_s390_vpdi: + i = 2; + l = 0; + u = 15; + break; + case SystemZ::BI__builtin_s390_vsldb: + i = 2; + l = 0; + u = 15; + break; case SystemZ::BI__builtin_s390_vstrcb: case SystemZ::BI__builtin_s390_vstrch: case SystemZ::BI__builtin_s390_vstrcf: @@ -4515,19 +4731,47 @@ case SystemZ::BI__builtin_s390_vstrcfs: case SystemZ::BI__builtin_s390_vstrczbs: case SystemZ::BI__builtin_s390_vstrczhs: - case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; - case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break; + case SystemZ::BI__builtin_s390_vstrczfs: + i = 3; + l = 0; + u = 15; + break; + case SystemZ::BI__builtin_s390_vmslg: + i = 3; + l = 0; + u = 15; + break; case SystemZ::BI__builtin_s390_vfminsb: case SystemZ::BI__builtin_s390_vfmaxsb: case SystemZ::BI__builtin_s390_vfmindb: - case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break; - case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break; - case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break; + case SystemZ::BI__builtin_s390_vfmaxdb: + i = 2; + l = 0; + u = 15; + break; + case SystemZ::BI__builtin_s390_vsld: + i = 2; + l = 0; + u = 7; + break; + case SystemZ::BI__builtin_s390_vsrd: + i = 2; + l = 0; + u = 7; + break; case SystemZ::BI__builtin_s390_vclfnhs: case SystemZ::BI__builtin_s390_vclfnls: case SystemZ::BI__builtin_s390_vcfn: - case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break; - case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break; + case SystemZ::BI__builtin_s390_vcnf: + i = 1; + l = 0; + u = 15; + break; + case SystemZ::BI__builtin_s390_vcrnfs: + i = 2; + l = 0; + u = 15; + break; } return SemaBuiltinConstantArgRange(TheCall, i, l, u); } @@ -4859,9 +5103,8 @@ // is set. If the intrinsic has rounding control(bits 1:0), make sure its only // combined with ROUND_NO_EXC. If the intrinsic does not have rounding // control, allow ROUND_NO_EXC and ROUND_CUR_DIRECTION together. 
- if (Result == 4/*ROUND_CUR_DIRECTION*/ || - Result == 8/*ROUND_NO_EXC*/ || - (!HasRC && Result == 12/*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) || + if (Result == 4 /*ROUND_CUR_DIRECTION*/ || Result == 8 /*ROUND_NO_EXC*/ || + (!HasRC && Result == 12 /*ROUND_CUR_DIRECTION|ROUND_NO_EXC*/) || (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11)) return false; @@ -5088,7 +5331,9 @@ case X86::BI__builtin_ia32_extracti64x2_256_mask: case X86::BI__builtin_ia32_extractf32x4_256_mask: case X86::BI__builtin_ia32_extracti32x4_256_mask: - i = 1; l = 0; u = 1; + i = 1; + l = 0; + u = 1; break; case X86::BI__builtin_ia32_vec_set_v2di: case X86::BI__builtin_ia32_vinsertf128_pd256: @@ -5103,7 +5348,9 @@ case X86::BI__builtin_ia32_inserti64x2_256: case X86::BI__builtin_ia32_insertf32x4_256: case X86::BI__builtin_ia32_inserti32x4_256: - i = 2; l = 0; u = 1; + i = 2; + l = 0; + u = 1; break; case X86::BI__builtin_ia32_vpermilpd: case X86::BI__builtin_ia32_vec_ext_v4hi: @@ -5114,12 +5361,16 @@ case X86::BI__builtin_ia32_extracti32x4_mask: case X86::BI__builtin_ia32_extractf64x2_512_mask: case X86::BI__builtin_ia32_extracti64x2_512_mask: - i = 1; l = 0; u = 3; + i = 1; + l = 0; + u = 3; break; case X86::BI_mm_prefetch: case X86::BI__builtin_ia32_vec_ext_v8hi: case X86::BI__builtin_ia32_vec_ext_v8si: - i = 1; l = 0; u = 7; + i = 1; + l = 0; + u = 7; break; case X86::BI__builtin_ia32_sha1rnds4: case X86::BI__builtin_ia32_blendpd: @@ -5135,13 +5386,17 @@ case X86::BI__builtin_ia32_inserti64x2_512: case X86::BI__builtin_ia32_insertf32x4: case X86::BI__builtin_ia32_inserti32x4: - i = 2; l = 0; u = 3; + i = 2; + l = 0; + u = 3; break; case X86::BI__builtin_ia32_vpermil2pd: case X86::BI__builtin_ia32_vpermil2pd256: case X86::BI__builtin_ia32_vpermil2ps: case X86::BI__builtin_ia32_vpermil2ps256: - i = 3; l = 0; u = 3; + i = 3; + l = 0; + u = 3; break; case X86::BI__builtin_ia32_cmpb128_mask: case X86::BI__builtin_ia32_cmpw128_mask: @@ -5177,7 +5432,9 @@ case X86::BI__builtin_ia32_vpcomq: case X86::BI__builtin_ia32_vec_set_v8hi: case X86::BI__builtin_ia32_vec_set_v8si: - i = 2; l = 0; u = 7; + i = 2; + l = 0; + u = 7; break; case X86::BI__builtin_ia32_vpermilpd256: case X86::BI__builtin_ia32_roundps: @@ -5195,7 +5452,9 @@ case X86::BI__builtin_ia32_getmantph512_mask: case X86::BI__builtin_ia32_vec_ext_v16qi: case X86::BI__builtin_ia32_vec_ext_v16hi: - i = 1; l = 0; u = 15; + i = 1; + l = 0; + u = 15; break; case X86::BI__builtin_ia32_pblendd128: case X86::BI__builtin_ia32_blendps: @@ -5214,10 +5473,14 @@ case X86::BI__builtin_ia32_getmantsh_round_mask: case X86::BI__builtin_ia32_vec_set_v16qi: case X86::BI__builtin_ia32_vec_set_v16hi: - i = 2; l = 0; u = 15; + i = 2; + l = 0; + u = 15; break; case X86::BI__builtin_ia32_vec_ext_v32qi: - i = 1; l = 0; u = 31; + i = 1; + l = 0; + u = 31; break; case X86::BI__builtin_ia32_cmpps: case X86::BI__builtin_ia32_cmpss: @@ -5234,7 +5497,9 @@ case X86::BI__builtin_ia32_cmpsd_mask: case X86::BI__builtin_ia32_cmpss_mask: case X86::BI__builtin_ia32_vec_set_v32qi: - i = 2; l = 0; u = 31; + i = 2; + l = 0; + u = 31; break; case X86::BI__builtin_ia32_permdf256: case X86::BI__builtin_ia32_permdi256: @@ -5312,7 +5577,9 @@ case X86::BI__builtin_ia32_kshiftrihi: case X86::BI__builtin_ia32_kshiftrisi: case X86::BI__builtin_ia32_kshiftridi: - i = 1; l = 0; u = 255; + i = 1; + l = 0; + u = 255; break; case X86::BI__builtin_ia32_vperm2f128_pd256: case X86::BI__builtin_ia32_vperm2f128_ps256: @@ -5362,7 +5629,9 @@ case X86::BI__builtin_ia32_vpshrdw128: case 
X86::BI__builtin_ia32_vpshrdw256: case X86::BI__builtin_ia32_vpshrdw512: - i = 2; l = 0; u = 255; + i = 2; + l = 0; + u = 255; break; case X86::BI__builtin_ia32_fixupimmpd512_mask: case X86::BI__builtin_ia32_fixupimmpd512_maskz: @@ -5392,7 +5661,9 @@ case X86::BI__builtin_ia32_pternlogq128_maskz: case X86::BI__builtin_ia32_pternlogq256_mask: case X86::BI__builtin_ia32_pternlogq256_maskz: - i = 3; l = 0; u = 255; + i = 3; + l = 0; + u = 255; break; case X86::BI__builtin_ia32_gatherpfdpd: case X86::BI__builtin_ia32_gatherpfdps: @@ -5402,7 +5673,9 @@ case X86::BI__builtin_ia32_scatterpfdps: case X86::BI__builtin_ia32_scatterpfqpd: case X86::BI__builtin_ia32_scatterpfqps: - i = 4; l = 2; u = 3; + i = 4; + l = 2; + u = 3; break; case X86::BI__builtin_ia32_reducesd_mask: case X86::BI__builtin_ia32_reducess_mask: @@ -5410,7 +5683,9 @@ case X86::BI__builtin_ia32_rndscaless_round_mask: case X86::BI__builtin_ia32_rndscalesh_round_mask: case X86::BI__builtin_ia32_reducesh_mask: - i = 4; l = 0; u = 255; + i = 4; + l = 0; + u = 255; break; } @@ -5441,7 +5716,7 @@ // of member functions is counted. However, it doesn't appear in our own // lists, so decrement format_idx in that case. if (IsCXXMember) { - if(FSI->FormatIdx == 0) + if (FSI->FormatIdx == 0) return false; --FSI->FormatIdx; if (FSI->FirstDataArg != 0) @@ -5455,8 +5730,8 @@ /// Returns true if the value evaluates to null. static bool CheckNonNullExpr(Sema &S, const Expr *Expr) { // If the expression has non-null type, it doesn't evaluate to null. - if (auto nullability - = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) { + if (auto nullability = + Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) { if (*nullability == NullabilityKind::NonNull) return false; } @@ -5465,21 +5740,18 @@ // considered null for the purposes of the nonnull attribute. if (const RecordType *UT = Expr->getType()->getAsUnionType()) { if (UT->getDecl()->hasAttr()) - if (const CompoundLiteralExpr *CLE = - dyn_cast(Expr)) + if (const CompoundLiteralExpr *CLE = dyn_cast(Expr)) if (const InitListExpr *ILE = - dyn_cast(CLE->getInitializer())) + dyn_cast(CLE->getInitializer())) Expr = ILE->getInit(0); } bool Result; return (!Expr->isValueDependent() && - Expr->EvaluateAsBooleanCondition(Result, S.Context) && - !Result); + Expr->EvaluateAsBooleanCondition(Result, S.Context) && !Result); } -static void CheckNonNullArgument(Sema &S, - const Expr *ArgExpr, +static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr, SourceLocation CallSiteLoc) { if (CheckNonNullExpr(S, ArgExpr)) S.DiagRuntimeBehavior(CallSiteLoc, ArgExpr, @@ -5499,19 +5771,17 @@ /// Diagnose use of %s directive in an NSString which is being passed /// as formatting string to formatting method. 
-static void -DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, - const NamedDecl *FDecl, - Expr **Args, - unsigned NumArgs) { +static void DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, + const NamedDecl *FDecl, + Expr **Args, + unsigned NumArgs) { unsigned Idx = 0; bool Format = false; ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily(); if (SFFamily == ObjCStringFormatFamily::SFF_CFString) { Idx = 2; Format = true; - } - else + } else for (const auto *I : FDecl->specific_attrs()) { if (S.GetFormatNSStringIdx(I, Idx)) { Format = true; @@ -5525,7 +5795,7 @@ FormatExpr = CSCE->getSubExpr(); const StringLiteral *FormatString; if (const ObjCStringLiteral *OSL = - dyn_cast(FormatExpr->IgnoreParenImpCasts())) + dyn_cast(FormatExpr->IgnoreParenImpCasts())) FormatString = OSL->getString(); else FormatString = dyn_cast(FormatExpr->IgnoreParenImpCasts()); @@ -5533,9 +5803,9 @@ return; if (S.FormatStringHasSArg(FormatString)) { S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string) - << "%s" << 1 << 1; + << "%s" << 1 << 1; S.Diag(FDecl->getLocation(), diag::note_entity_declared_at) - << FDecl->getDeclName(); + << FDecl->getDeclName(); } } @@ -5547,8 +5817,7 @@ return false; } -static void CheckNonNullArguments(Sema &S, - const NamedDecl *FDecl, +static void CheckNonNullArguments(Sema &S, const NamedDecl *FDecl, const FunctionProtoType *Proto, ArrayRef Args, SourceLocation CallSiteLoc) { @@ -5584,14 +5853,14 @@ if (FDecl && (isa(FDecl) || isa(FDecl))) { // Handle the nonnull attribute on the parameters of the // function/method. - ArrayRef parms; + ArrayRef parms; if (const FunctionDecl *FD = dyn_cast(FDecl)) parms = FD->parameters(); else parms = cast(FDecl)->parameters(); unsigned ParamIndex = 0; - for (ArrayRef::iterator I = parms.begin(), E = parms.end(); + for (ArrayRef::iterator I = parms.begin(), E = parms.end(); I != E; ++I, ++ParamIndex) { const ParmVarDecl *PVD = *I; if (PVD->hasAttr() || @@ -5746,11 +6015,11 @@ if (CallType != VariadicDoesNotApply && (!FD || FD->getBuiltinID() != Builtin::BI__noop)) { unsigned NumParams = Proto ? Proto->getNumParams() - : FDecl && isa(FDecl) - ? cast(FDecl)->getNumParams() - : FDecl && isa(FDecl) - ? cast(FDecl)->param_size() - : 0; + : FDecl && isa(FDecl) + ? cast(FDecl)->getNumParams() + : FDecl && isa(FDecl) + ? cast(FDecl)->param_size() + : 0; for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { // Args[ArgIdx] can be null in malformed code. @@ -5845,13 +6114,13 @@ /// and safety properties not strictly enforced by the C type system. bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto) { - bool IsMemberOperatorCall = isa(TheCall) && - isa(FDecl); - bool IsMemberFunction = isa(TheCall) || - IsMemberOperatorCall; - VariadicCallType CallType = getVariadicCallType(FDecl, Proto, - TheCall->getCallee()); - Expr** Args = TheCall->getArgs(); + bool IsMemberOperatorCall = + isa(TheCall) && isa(FDecl); + bool IsMemberFunction = + isa(TheCall) || IsMemberOperatorCall; + VariadicCallType CallType = + getVariadicCallType(FDecl, Proto, TheCall->getCallee()); + Expr **Args = TheCall->getArgs(); unsigned NumArgs = TheCall->getNumArgs(); Expr *ImplicitThis = nullptr; @@ -5973,8 +6242,8 @@ /// Checks function calls when a FunctionDecl or a NamedDecl is not available, /// such as function pointers returned from functions. 
bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) { - VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto, - TheCall->getCallee()); + VariadicCallType CallType = + getVariadicCallType(/*FDecl=*/nullptr, Proto, TheCall->getCallee()); checkCall(/*FDecl=*/nullptr, Proto, /*ThisArg=*/nullptr, llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), /*IsMemberFunction=*/false, TheCall->getRParenLoc(), @@ -6018,7 +6287,8 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op) { CallExpr *TheCall = cast(TheCallResult.get()); - DeclRefExpr *DRE =cast(TheCall->getCallee()->IgnoreParenCasts()); + DeclRefExpr *DRE = + cast(TheCall->getCallee()->IgnoreParenCasts()); MultiExprArg Args{TheCall->getArgs(), TheCall->getNumArgs()}; return BuildAtomicExpr({TheCall->getBeginLoc(), TheCall->getEndLoc()}, DRE->getSourceRange(), TheCall->getRParenLoc(), Args, @@ -6062,8 +6332,8 @@ } Form = Init; const unsigned NumForm = GNUCmpXchg + 1; - const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 }; - const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 }; + const unsigned NumArgs[] = {2, 2, 3, 3, 3, 3, 4, 5, 6}; + const unsigned NumVals[] = {1, 0, 1, 1, 1, 1, 2, 2, 3}; // where: // C is an appropriate type, // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins, @@ -6071,9 +6341,9 @@ // M is C if C is an integer, and ptrdiff_t if C is a pointer, and // the int parameters are for orderings. - static_assert(sizeof(NumArgs)/sizeof(NumArgs[0]) == NumForm - && sizeof(NumVals)/sizeof(NumVals[0]) == NumForm, - "need to update code for modified forms"); + static_assert(sizeof(NumArgs) / sizeof(NumArgs[0]) == NumForm && + sizeof(NumVals) / sizeof(NumVals[0]) == NumForm, + "need to update code for modified forms"); static_assert(AtomicExpr::AO__c11_atomic_init == 0 && AtomicExpr::AO__c11_atomic_fetch_min + 1 == AtomicExpr::AO__atomic_load, @@ -6083,7 +6353,7 @@ bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load && Op <= AtomicExpr::AO__hip_atomic_fetch_max; bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init && - Op <= AtomicExpr::AO__c11_atomic_fetch_min) || + Op <= AtomicExpr::AO__c11_atomic_fetch_min) || IsOpenCL; bool IsN = Op == AtomicExpr::AO__atomic_load_n || Op == AtomicExpr::AO__atomic_store_n || @@ -6219,7 +6489,7 @@ // For a __c11 builtin, this should be a pointer to an _Atomic type. QualType AtomTy = pointerType->getPointeeType(); // 'A' - QualType ValType = AtomTy; // 'C' + QualType ValType = AtomTy; // 'C' if (IsC11) { if (!AtomTy->isAtomicType()) { Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic) @@ -6317,8 +6587,7 @@ ValType.removeLocalVolatile(); ValType.removeLocalConst(); QualType ResultType = ValType; - if (Form == Copy || Form == LoadCopy || Form == GNUXchg || - Form == Init) + if (Form == Copy || Form == LoadCopy || Form == GNUXchg || Form == Init) ResultType = Context.VoidTy; else if (Form == C11CmpXchg || Form == GNUCmpXchg) ResultType = Context.BoolTy; @@ -6442,7 +6711,7 @@ } // Permute the arguments into a 'consistent' order. 
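The argument-count and ordering tables above correspond to the user-visible shapes of the atomic builtins; for reference, a minimal standalone example of those call forms (using the GNU-style __atomic builtins, which Clang provides; nothing here is part of the patch):

#include <cstdio>

int main() {
  int counter = 0;
  int expected = 0;

  // Read-modify-write form: pointer, value, memory order.
  int old = __atomic_fetch_add(&counter, 1, __ATOMIC_SEQ_CST);

  // The "_n" load/store overloads take or return the value directly rather
  // than through a result pointer, which is why they are tracked separately
  // (the IsN distinction above).
  int seen = __atomic_load_n(&counter, __ATOMIC_ACQUIRE);
  __atomic_store_n(&counter, 5, __ATOMIC_RELEASE);

  // Compare-exchange takes six arguments: pointer, expected (by address),
  // desired, weak flag, success order, failure order -- the 6-argument entry
  // at the end of the NumArgs table above.
  bool ok = __atomic_compare_exchange_n(&counter, &expected, 7,
                                        /*weak=*/false, __ATOMIC_ACQ_REL,
                                        __ATOMIC_ACQUIRE);

  std::printf("%d %d %d %d\n", old, seen, counter, (int)ok);
  return 0;
}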
- SmallVector SubExprs; + SmallVector SubExprs; SubExprs.push_back(Ptr); switch (Form) { case Init: @@ -6538,7 +6807,7 @@ ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); InitializedEntity Entity = - InitializedEntity::InitializeParameter(S.Context, Param); + InitializedEntity::InitializeParameter(S.Context, Param); ExprResult Arg = E->getArg(ArgIndex); Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); @@ -6556,8 +6825,7 @@ /// /// This function goes through and does final semantic checking for these /// builtins, as well as generating any warnings. -ExprResult -Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { +ExprResult Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { CallExpr *TheCall = static_cast(TheCallResult.get()); Expr *Callee = TheCall->getCallee(); DeclRefExpr *DRE = cast(Callee->IgnoreParenCasts()); @@ -6627,41 +6895,52 @@ // We need to figure out which concrete builtin this maps onto. For example, // __sync_fetch_and_add with a 2 byte object turns into // __sync_fetch_and_add_2. -#define BUILTIN_ROW(x) \ - { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ - Builtin::BI##x##_8, Builtin::BI##x##_16 } +#define BUILTIN_ROW(x) \ + { \ + Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \ + Builtin::BI##x##_8, Builtin::BI##x##_16 \ + } static const unsigned BuiltinIndices[][5] = { - BUILTIN_ROW(__sync_fetch_and_add), - BUILTIN_ROW(__sync_fetch_and_sub), - BUILTIN_ROW(__sync_fetch_and_or), - BUILTIN_ROW(__sync_fetch_and_and), - BUILTIN_ROW(__sync_fetch_and_xor), - BUILTIN_ROW(__sync_fetch_and_nand), - - BUILTIN_ROW(__sync_add_and_fetch), - BUILTIN_ROW(__sync_sub_and_fetch), - BUILTIN_ROW(__sync_and_and_fetch), - BUILTIN_ROW(__sync_or_and_fetch), - BUILTIN_ROW(__sync_xor_and_fetch), - BUILTIN_ROW(__sync_nand_and_fetch), - - BUILTIN_ROW(__sync_val_compare_and_swap), - BUILTIN_ROW(__sync_bool_compare_and_swap), - BUILTIN_ROW(__sync_lock_test_and_set), - BUILTIN_ROW(__sync_lock_release), - BUILTIN_ROW(__sync_swap) - }; + BUILTIN_ROW(__sync_fetch_and_add), + BUILTIN_ROW(__sync_fetch_and_sub), + BUILTIN_ROW(__sync_fetch_and_or), + BUILTIN_ROW(__sync_fetch_and_and), + BUILTIN_ROW(__sync_fetch_and_xor), + BUILTIN_ROW(__sync_fetch_and_nand), + + BUILTIN_ROW(__sync_add_and_fetch), + BUILTIN_ROW(__sync_sub_and_fetch), + BUILTIN_ROW(__sync_and_and_fetch), + BUILTIN_ROW(__sync_or_and_fetch), + BUILTIN_ROW(__sync_xor_and_fetch), + BUILTIN_ROW(__sync_nand_and_fetch), + + BUILTIN_ROW(__sync_val_compare_and_swap), + BUILTIN_ROW(__sync_bool_compare_and_swap), + BUILTIN_ROW(__sync_lock_test_and_set), + BUILTIN_ROW(__sync_lock_release), + BUILTIN_ROW(__sync_swap)}; #undef BUILTIN_ROW // Determine the index of the size. 
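As a usage-level illustration (not clang internals), the generic __sync builtins are overloaded on operand size; the table above together with the size switch that follows rewrites each call to the matching fixed-width variant:

#include <cstdint>
#include <cstdio>

int main() {
  std::uint8_t  a = 0;
  std::uint32_t b = 0;
  std::uint64_t c = 0;

  // Each generic call is resolved against the pointee width, e.g. the 4-byte
  // operand below ends up calling __sync_fetch_and_add_4.
  __sync_fetch_and_add(&a, 1);
  __sync_fetch_and_add(&b, 1);
  __sync_fetch_and_add(&c, 1);

  std::printf("%u %u %llu\n", (unsigned)a, b, (unsigned long long)c);
  return 0;
}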
unsigned SizeIndex; switch (Context.getTypeSizeInChars(ValType).getQuantity()) { - case 1: SizeIndex = 0; break; - case 2: SizeIndex = 1; break; - case 4: SizeIndex = 2; break; - case 8: SizeIndex = 3; break; - case 16: SizeIndex = 4; break; + case 1: + SizeIndex = 0; + break; + case 2: + SizeIndex = 1; + break; + case 4: + SizeIndex = 2; + break; + case 8: + SizeIndex = 3; + break; + case 16: + SizeIndex = 4; + break; default: Diag(DRE->getBeginLoc(), diag::err_atomic_builtin_pointer_size) << FirstArg->getType() << FirstArg->getSourceRange(); @@ -6676,7 +6955,8 @@ unsigned BuiltinIndex, NumFixed = 1; bool WarnAboutSemanticsChange = false; switch (BuiltinID) { - default: llvm_unreachable("Unknown overloaded atomic builtin!"); + default: + llvm_unreachable("Unknown overloaded atomic builtin!"); case Builtin::BI__sync_fetch_and_add: case Builtin::BI__sync_fetch_and_add_1: case Builtin::BI__sync_fetch_and_add_2: @@ -6840,7 +7120,7 @@ // Now that we know how many fixed arguments we expect, first check that we // have at least that many. - if (TheCall->getNumArgs() < 1+NumFixed) { + if (TheCall->getNumArgs() < 1 + NumFixed) { Diag(TheCall->getEndLoc(), diag::err_typecheck_call_too_few_args_at_least) << 0 << 1 + NumFixed << TheCall->getNumArgs() << Callee->getSourceRange(); @@ -6877,13 +7157,13 @@ // deduce the types of the rest of the arguments accordingly. Walk // the remaining arguments, converting them to the deduced value type. for (unsigned i = 0; i != NumFixed; ++i) { - ExprResult Arg = TheCall->getArg(i+1); + ExprResult Arg = TheCall->getArg(i + 1); // GCC does an implicit conversion to the pointer or integer ValType. This // can fail in some cases (1i -> int**), check for this error case now. // Initialize the argument. - InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, - ValType, /*consume*/ false); + InitializedEntity Entity = InitializedEntity::InitializeParameter( + Context, ValType, /*consume*/ false); Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); if (Arg.isInvalid()) return ExprError(); @@ -6894,7 +7174,7 @@ // pass in 42. The 42 gets converted to char. This is even more strange // for things like 45.123 -> char, etc. // FIXME: Do this check. - TheCall->setArg(i+1, Arg.get()); + TheCall->setArg(i + 1, Arg.get()); } // Create a new DeclRefExpr to refer to the new decl. @@ -6906,8 +7186,8 @@ // Set the callee in the CallExpr. // FIXME: This loses syntactic information. QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType()); - ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy, - CK_BuiltinFnToFnPtr); + ExprResult PromotedCall = + ImpCastExprToType(NewDRE, CalleePtrTy, CK_BuiltinFnToFnPtr); TheCall->setCallee(PromotedCall.get()); // Change the result type of the call to match the original value type. This @@ -7079,8 +7359,7 @@ // On x64 Windows, don't allow this in System V ABI functions. // (Yes, that means there's no corresponding way to support variadic // System V ABI functions on Windows.) 
- if ((IsWindows && CC == CC_X86_64SysV) || - (!IsWindows && CC == CC_Win64)) + if ((IsWindows && CC == CC_X86_64SysV) || (!IsWindows && CC == CC_Win64)) return S.Diag(Fn->getBeginLoc(), diag::err_va_start_used_in_wrong_abi_function) << !IsWindows; @@ -7093,8 +7372,9 @@ return false; } -static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, - ParmVarDecl **LastParam = nullptr) { +static bool +checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn, + ParmVarDecl **LastParam = nullptr) { // Determine whether the current function, block, or obj-c method is variadic // and get its parameter list. bool IsVariadic = false; @@ -7190,8 +7470,10 @@ Context.typesAreCompatible(ED->getPromotionType(), Type)); }()) { unsigned Reason = 0; - if (Type->isReferenceType()) Reason = 1; - else if (IsCRegister) Reason = 2; + if (Type->isReferenceType()) + Reason = 1; + else if (IsCRegister) + Reason = 2; Diag(Arg->getBeginLoc(), diag::warn_va_start_type_is_undefined) << Reason; Diag(ParamLoc, diag::note_parameter_type) << Type; } @@ -7322,7 +7604,7 @@ TheCall->setArg(i, Res.get()); } - Expr *OrigArg = TheCall->getArg(NumArgs-1); + Expr *OrigArg = TheCall->getArg(NumArgs - 1); if (OrigArg->isTypeDependent()) return false; @@ -7382,8 +7664,8 @@ if (!Context.hasSameType(Real->getType(), Imag->getType())) { return Diag(Real->getBeginLoc(), diag::err_typecheck_call_different_arg_types) - << Real->getType() << Imag->getType() - << Real->getSourceRange() << Imag->getSourceRange(); + << Real->getType() << Imag->getType() << Real->getSourceRange() + << Imag->getSourceRange(); } // We don't allow _Complex _Float16 nor _Complex __fp16 as type specifiers; @@ -7525,7 +7807,7 @@ << TheCall->getArg(i)->getSourceRange()); } - SmallVector exprs; + SmallVector exprs; for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { exprs.push_back(TheCall->getArg(i)); @@ -7547,20 +7829,18 @@ QualType SrcTy = E->getType(); if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) - return ExprError(Diag(BuiltinLoc, - diag::err_convertvector_non_vector) + return ExprError(Diag(BuiltinLoc, diag::err_convertvector_non_vector) << E->getSourceRange()); if (!DstTy->isVectorType() && !DstTy->isDependentType()) - return ExprError(Diag(BuiltinLoc, - diag::err_convertvector_non_vector_type)); + return ExprError(Diag(BuiltinLoc, diag::err_convertvector_non_vector_type)); if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { unsigned SrcElts = SrcTy->castAs()->getNumElements(); unsigned DstElts = DstTy->castAs()->getNumElements(); if (SrcElts != DstElts) - return ExprError(Diag(BuiltinLoc, - diag::err_convertvector_incompatible_vector) - << E->getSourceRange()); + return ExprError( + Diag(BuiltinLoc, diag::err_convertvector_incompatible_vector) + << E->getSourceRange()); } return new (Context) @@ -7615,7 +7895,8 @@ // has side effects. 
bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { Expr *Arg = TheCall->getArg(0); - if (Arg->isInstantiationDependent()) return false; + if (Arg->isInstantiationDependent()) + return false; if (Arg->HasSideEffects(Context)) Diag(Arg->getBeginLoc(), diag::warn_assume_side_effects) @@ -7669,8 +7950,7 @@ Expr *FirstArg = TheCall->getArg(0); { - ExprResult FirstArgResult = - DefaultFunctionArrayLvalueConversion(FirstArg); + ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg); if (FirstArgResult.isInvalid()) return true; TheCall->setArg(0, FirstArgResult.get()); @@ -7789,10 +8069,12 @@ bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result) { Expr *Arg = TheCall->getArg(ArgNum); - DeclRefExpr *DRE =cast(TheCall->getCallee()->IgnoreParenCasts()); + DeclRefExpr *DRE = + cast(TheCall->getCallee()->IgnoreParenCasts()); FunctionDecl *FDecl = cast(DRE->getDecl()); - if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; + if (Arg->isTypeDependent() || Arg->isValueDependent()) + return false; Optional R; if (!(R = Arg->getIntegerConstantExpr(Context))) @@ -7804,8 +8086,8 @@ /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr /// TheCall is a constant expression in the range [Low, High]. -bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, - int Low, int High, bool RangeIsError) { +bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, + int High, bool RangeIsError) { if (isConstantEvaluated()) return false; llvm::APSInt Result; @@ -7835,8 +8117,8 @@ return false; } -/// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of CallExpr -/// TheCall is a constant expression is a multiple of Num.. +/// SemaBuiltinConstantArgMultiple - Handle a check if argument ArgNum of +/// CallExpr TheCall is a constant expression is a multiple of Num.. bool Sema::SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Num) { llvm::APSInt Result; @@ -7963,7 +8245,8 @@ } /// SemaBuiltinARMMemoryTaggingCall - Handle calls of memory tagging extensions -bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall) { +bool Sema::SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, + CallExpr *TheCall) { if (BuiltinID == AArch64::BI__builtin_arm_irg) { if (checkArgCount(*this, TheCall, 2)) return true; @@ -7976,7 +8259,7 @@ QualType FirstArgType = FirstArg.get()->getType(); if (!FirstArgType->isAnyPointerType()) return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) - << "first" << FirstArgType << Arg0->getSourceRange(); + << "first" << FirstArgType << Arg0->getSourceRange(); TheCall->setArg(0, FirstArg.get()); ExprResult SecArg = DefaultLvalueConversion(Arg1); @@ -7985,7 +8268,7 @@ QualType SecArgType = SecArg.get()->getType(); if (!SecArgType->isIntegerType()) return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) - << "second" << SecArgType << Arg1->getSourceRange(); + << "second" << SecArgType << Arg1->getSourceRange(); // Derive the return type from the pointer argument. TheCall->setType(FirstArgType); @@ -8003,7 +8286,7 @@ QualType FirstArgType = FirstArg.get()->getType(); if (!FirstArgType->isAnyPointerType()) return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) - << "first" << FirstArgType << Arg0->getSourceRange(); + << "first" << FirstArgType << Arg0->getSourceRange(); TheCall->setArg(0, FirstArg.get()); // Derive the return type from the pointer argument. 
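Many immediate-operand checks funnel through the constant-range helper above; as a user-level example (standalone, not clang API), __builtin_prefetch is one builtin whose trailing arguments must be integer constant expressions in fixed ranges:

#include <cstdio>

int main() {
  int data[64] = {};

  // rw must be a constant in [0, 1] and locality a constant in [0, 3];
  // out-of-range or non-constant values are rejected at compile time.
  __builtin_prefetch(&data[32], /*rw=*/0, /*locality=*/3);
  // __builtin_prefetch(&data[32], 0, 7);   // rejected: outside [0, 3]

  data[32] = 1;
  std::printf("%d\n", data[32]);
  return 0;
}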
@@ -8025,12 +8308,12 @@ QualType FirstArgType = FirstArg.get()->getType(); if (!FirstArgType->isAnyPointerType()) return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) - << "first" << FirstArgType << Arg0->getSourceRange(); + << "first" << FirstArgType << Arg0->getSourceRange(); QualType SecArgType = Arg1->getType(); if (!SecArgType->isIntegerType()) return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_integer) - << "second" << SecArgType << Arg1->getSourceRange(); + << "second" << SecArgType << Arg1->getSourceRange(); TheCall->setType(Context.IntTy); return false; } @@ -8047,7 +8330,7 @@ QualType FirstArgType = FirstArg.get()->getType(); if (!FirstArgType->isAnyPointerType()) return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_must_be_pointer) - << "first" << FirstArgType << Arg0->getSourceRange(); + << "first" << FirstArgType << Arg0->getSourceRange(); TheCall->setArg(0, FirstArg.get()); // Derive the return type from the pointer argument. @@ -8069,18 +8352,19 @@ QualType ArgTypeA = ArgExprA.get()->getType(); QualType ArgTypeB = ArgExprB.get()->getType(); - auto isNull = [&] (Expr *E) -> bool { - return E->isNullPointerConstant( - Context, Expr::NPC_ValueDependentIsNotNull); }; + auto isNull = [&](Expr *E) -> bool { + return E->isNullPointerConstant(Context, + Expr::NPC_ValueDependentIsNotNull); + }; // argument should be either a pointer or null if (!ArgTypeA->isAnyPointerType() && !isNull(ArgA)) return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) - << "first" << ArgTypeA << ArgA->getSourceRange(); + << "first" << ArgTypeA << ArgA->getSourceRange(); if (!ArgTypeB->isAnyPointerType() && !isNull(ArgB)) return Diag(TheCall->getBeginLoc(), diag::err_memtag_arg_null_or_pointer) - << "second" << ArgTypeB << ArgB->getSourceRange(); + << "second" << ArgTypeB << ArgB->getSourceRange(); // Ensure Pointee types are compatible if (ArgTypeA->isAnyPointerType() && !isNull(ArgA) && @@ -8088,18 +8372,19 @@ QualType pointeeA = ArgTypeA->getPointeeType(); QualType pointeeB = ArgTypeB->getPointeeType(); if (!Context.typesAreCompatible( - Context.getCanonicalType(pointeeA).getUnqualifiedType(), - Context.getCanonicalType(pointeeB).getUnqualifiedType())) { - return Diag(TheCall->getBeginLoc(), diag::err_typecheck_sub_ptr_compatible) - << ArgTypeA << ArgTypeB << ArgA->getSourceRange() - << ArgB->getSourceRange(); + Context.getCanonicalType(pointeeA).getUnqualifiedType(), + Context.getCanonicalType(pointeeB).getUnqualifiedType())) { + return Diag(TheCall->getBeginLoc(), + diag::err_typecheck_sub_ptr_compatible) + << ArgTypeA << ArgTypeB << ArgA->getSourceRange() + << ArgB->getSourceRange(); } } // at least one argument should be pointer type if (!ArgTypeA->isAnyPointerType() && !ArgTypeB->isAnyPointerType()) return Diag(TheCall->getBeginLoc(), diag::err_memtag_any2arg_pointer) - << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); + << ArgTypeA << ArgTypeB << ArgA->getSourceRange(); if (isNull(ArgA)) // adopt type of the other pointer ArgExprA = ImpCastExprToType(ArgExprA.get(), ArgTypeB, CK_NullToPointer); @@ -8186,7 +8471,7 @@ else Ranges.append({15, 7, 15}); - for (unsigned i=0; i= 0 && IntField <= Ranges[i]); @@ -8299,7 +8584,7 @@ // number of arguments in TheCall and if it is not the case, to display a // better error message. 
while (*TypeStr != '\0') { - (void) DecodePPCMMATypeFromStr(Context, TypeStr, Mask); + (void)DecodePPCMMATypeFromStr(Context, TypeStr, Mask); ArgNum++; } if (checkArgCount(*this, TheCall, ArgNum)) @@ -8350,9 +8635,7 @@ public: UncoveredArgHandler() = default; - bool hasUncoveredArg() const { - return (FirstUncoveredArg >= 0); - } + bool hasUncoveredArg() const { return (FirstUncoveredArg >= 0); } unsigned getUncoveredArg() const { assert(hasUncoveredArg() && "no uncovered argument"); @@ -8396,8 +8679,7 @@ } // namespace static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend, - BinaryOperatorKind BinOpKind, - bool AddendIsRight) { + BinaryOperatorKind BinOpKind, bool AddendIsRight) { unsigned BitWidth = Offset.getBitWidth(); unsigned AddendBitWidth = Addend.getBitWidth(); // There might be negative interim results. @@ -8445,13 +8727,11 @@ const StringLiteral *FExpr; int64_t Offset; - public: +public: FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0) : FExpr(fexpr), Offset(Offset) {} - StringRef getString() const { - return FExpr->getString().drop_front(Offset); - } + StringRef getString() const { return FExpr->getString().drop_front(Offset); } unsigned getByteLength() const { return FExpr->getByteLength() - getCharByteWidth() * Offset; @@ -8471,10 +8751,11 @@ bool isUTF32() const { return FExpr->isUTF32(); } bool isPascal() const { return FExpr->isPascal(); } - SourceLocation getLocationOfByte( - unsigned ByteNo, const SourceManager &SM, const LangOptions &Features, - const TargetInfo &Target, unsigned *StartToken = nullptr, - unsigned *StartTokenByteOffset = nullptr) const { + SourceLocation + getLocationOfByte(unsigned ByteNo, const SourceManager &SM, + const LangOptions &Features, const TargetInfo &Target, + unsigned *StartToken = nullptr, + unsigned *StartTokenByteOffset = nullptr) const { return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target, StartToken, StartTokenByteOffset); } @@ -8533,8 +8814,7 @@ case Stmt::ConditionalOperatorClass: { // The expression is a literal if both sub-expressions were, and it was // completely checked only if both sub-expressions were checked. - const AbstractConditionalOperator *C = - cast(E); + const AbstractConditionalOperator *C = cast(E); // Determine whether it is necessary to check both sub-expressions, for // example, because the condition expression is a constant that can be @@ -8710,7 +8990,8 @@ case Stmt::CallExprClass: case Stmt::CXXMemberCallExprClass: { const CallExpr *CE = cast(E); - if (const NamedDecl *ND = dyn_cast_or_null(CE->getCalleeDecl())) { + if (const NamedDecl *ND = + dyn_cast_or_null(CE->getCalleeDecl())) { bool IsFirst = true; StringLiteralCheckType CommonResult; for (const auto *FA : ND->specific_attrs()) { @@ -8936,7 +9217,7 @@ if (UncoveredArg.hasUncoveredArg()) { unsigned ArgIdx = UncoveredArg.getUncoveredArg() + firstDataArg; assert(ArgIdx < Args.size() && "ArgIdx outside bounds"); - UncoveredArg.Diagnose(*this, /*IsFunctionCall*/true, Args[ArgIdx]); + UncoveredArg.Diagnose(*this, /*IsFunctionCall*/ true, Args[ArgIdx]); } if (CT != SLCT_NotALiteral) @@ -8960,7 +9241,7 @@ // warn only with -Wformat-nonliteral. 
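At the user level, these walks distinguish literal from non-literal format strings and track constant offsets into literals; a small illustrative example of both cases (plain standard printf, nothing patch-specific):

#include <cstdio>

void log_message(const char *msg) {
  // A non-literal format triggers -Wformat-nonliteral / -Wformat-security;
  // the suggested rewrite is the "%s" form.
  // std::printf(msg);            // warning: format string is not a literal
  std::printf("%s", msg);         // OK

  // A constant offset into a literal is still checked: the remaining "%d\n"
  // is matched against the int argument.
  std::printf("x: %d\n" + 3, 42);
}

int main() {
  log_message("hello");
  return 0;
}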
if (Args.size() == firstDataArg) { Diag(FormatLoc, diag::warn_format_nonliteral_noargs) - << OrigFormatExpr->getSourceRange(); + << OrigFormatExpr->getSourceRange(); switch (Type) { default: break; @@ -8968,16 +9249,16 @@ case FST_FreeBSDKPrintf: case FST_Printf: Diag(FormatLoc, diag::note_format_security_fixit) - << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); + << FixItHint::CreateInsertion(FormatLoc, "\"%s\", "); break; case FST_NSString: Diag(FormatLoc, diag::note_format_security_fixit) - << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); + << FixItHint::CreateInsertion(FormatLoc, "@\"%@\", "); break; } } else { Diag(FormatLoc, diag::warn_format_nonliteral) - << OrigFormatExpr->getSourceRange(); + << OrigFormatExpr->getSourceRange(); } return false; } @@ -9029,23 +9310,21 @@ unsigned specifierLen) override; void HandleInvalidLengthModifier( - const analyze_format_string::FormatSpecifier &FS, - const analyze_format_string::ConversionSpecifier &CS, - const char *startSpecifier, unsigned specifierLen, - unsigned DiagID); + const analyze_format_string::FormatSpecifier &FS, + const analyze_format_string::ConversionSpecifier &CS, + const char *startSpecifier, unsigned specifierLen, unsigned DiagID); void HandleNonStandardLengthModifier( - const analyze_format_string::FormatSpecifier &FS, - const char *startSpecifier, unsigned specifierLen); + const analyze_format_string::FormatSpecifier &FS, + const char *startSpecifier, unsigned specifierLen); void HandleNonStandardConversionSpecifier( - const analyze_format_string::ConversionSpecifier &CS, - const char *startSpecifier, unsigned specifierLen); + const analyze_format_string::ConversionSpecifier &CS, + const char *startSpecifier, unsigned specifierLen); void HandlePosition(const char *startPos, unsigned posLen) override; - void HandleInvalidPosition(const char *startSpecifier, - unsigned specifierLen, + void HandleInvalidPosition(const char *startSpecifier, unsigned specifierLen, analyze_format_string::PositionContext p) override; void HandleZeroPosition(const char *startPos, unsigned posLen) override; @@ -9093,10 +9372,11 @@ return OrigFormatExpr->getSourceRange(); } -CharSourceRange CheckFormatHandler:: -getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { +CharSourceRange +CheckFormatHandler::getSpecifierRange(const char *startSpecifier, + unsigned specifierLen) { SourceLocation Start = getLocationOfByte(startSpecifier); - SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); + SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); // Advance the end SourceLocation by one due to half-open ranges. 
End = End.getLocWithOffset(1); @@ -9110,10 +9390,10 @@ } void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, - unsigned specifierLen){ + unsigned specifierLen) { EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), getLocationOfByte(startSpecifier), - /*IsStringLocation*/true, + /*IsStringLocation*/ true, getSpecifierRange(startSpecifier, specifierLen)); } @@ -9131,12 +9411,12 @@ if (FixedLM) { EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), getLocationOfByte(LM.getStart()), - /*IsStringLocation*/true, + /*IsStringLocation*/ true, getSpecifierRange(startSpecifier, specifierLen)); S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) - << FixedLM->toString() - << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); + << FixedLM->toString() + << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); } else { FixItHint Hint; @@ -9145,9 +9425,8 @@ EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), getLocationOfByte(LM.getStart()), - /*IsStringLocation*/true, - getSpecifierRange(startSpecifier, specifierLen), - Hint); + /*IsStringLocation*/ true, + getSpecifierRange(startSpecifier, specifierLen), Hint); } } @@ -9163,20 +9442,20 @@ Optional FixedLM = FS.getCorrectedLengthModifier(); if (FixedLM) { EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) - << LM.toString() << 0, + << LM.toString() << 0, getLocationOfByte(LM.getStart()), - /*IsStringLocation*/true, + /*IsStringLocation*/ true, getSpecifierRange(startSpecifier, specifierLen)); S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) - << FixedLM->toString() - << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); + << FixedLM->toString() + << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); } else { EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) - << LM.toString() << 0, + << LM.toString() << 0, getLocationOfByte(LM.getStart()), - /*IsStringLocation*/true, + /*IsStringLocation*/ true, getSpecifierRange(startSpecifier, specifierLen)); } } @@ -9190,56 +9469,55 @@ Optional FixedCS = CS.getStandardSpecifier(); if (FixedCS) { EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) - << CS.toString() << /*conversion specifier*/1, + << CS.toString() << /*conversion specifier*/ 1, getLocationOfByte(CS.getStart()), - /*IsStringLocation*/true, + /*IsStringLocation*/ true, getSpecifierRange(startSpecifier, specifierLen)); CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) - << FixedCS->toString() - << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); + << FixedCS->toString() + << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); } else { EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) - << CS.toString() << /*conversion specifier*/1, + << CS.toString() << /*conversion specifier*/ 1, getLocationOfByte(CS.getStart()), - /*IsStringLocation*/true, + /*IsStringLocation*/ true, getSpecifierRange(startSpecifier, specifierLen)); } } -void CheckFormatHandler::HandlePosition(const char *startPos, - unsigned posLen) { +void CheckFormatHandler::HandlePosition(const char *startPos, unsigned posLen) { EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg), - getLocationOfByte(startPos), - /*IsStringLocation*/true, - getSpecifierRange(startPos, posLen)); + getLocationOfByte(startPos), + /*IsStringLocation*/ true, + 
getSpecifierRange(startPos, posLen)); } -void -CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen, - analyze_format_string::PositionContext p) { +void CheckFormatHandler::HandleInvalidPosition( + const char *startPos, unsigned posLen, + analyze_format_string::PositionContext p) { EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier) - << (unsigned) p, - getLocationOfByte(startPos), /*IsStringLocation*/true, + << (unsigned)p, + getLocationOfByte(startPos), /*IsStringLocation*/ true, getSpecifierRange(startPos, posLen)); } void CheckFormatHandler::HandleZeroPosition(const char *startPos, unsigned posLen) { EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier), - getLocationOfByte(startPos), - /*IsStringLocation*/true, - getSpecifierRange(startPos, posLen)); + getLocationOfByte(startPos), + /*IsStringLocation*/ true, + getSpecifierRange(startPos, posLen)); } void CheckFormatHandler::HandleNullChar(const char *nullCharacter) { if (!isa(OrigFormatExpr)) { // The presence of a null character is likely an error. EmitFormatDiagnostic( - S.PDiag(diag::warn_printf_format_string_contains_null_char), - getLocationOfByte(nullCharacter), /*IsStringLocation*/true, - getFormatStringRange()); + S.PDiag(diag::warn_printf_format_string_contains_null_char), + getLocationOfByte(nullCharacter), /*IsStringLocation*/ true, + getFormatStringRange()); } } @@ -9267,8 +9545,7 @@ void UncoveredArgHandler::Diagnose(Sema &S, bool IsFunctionCall, const Expr *ArgExpr) { - assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 && - "Invalid state"); + assert(hasUncoveredArg() && DiagnosticExprs.size() > 0 && "Invalid state"); if (!ArgExpr) return; @@ -9283,25 +9560,19 @@ PDiag << E->getSourceRange(); CheckFormatHandler::EmitFormatDiagnostic( - S, IsFunctionCall, DiagnosticExprs[0], - PDiag, Loc, /*IsStringLocation*/false, - DiagnosticExprs[0]->getSourceRange()); -} - -bool -CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex, - SourceLocation Loc, - const char *startSpec, - unsigned specifierLen, - const char *csStart, - unsigned csLen) { + S, IsFunctionCall, DiagnosticExprs[0], PDiag, Loc, + /*IsStringLocation*/ false, DiagnosticExprs[0]->getSourceRange()); +} + +bool CheckFormatHandler::HandleInvalidConversionSpecifier( + unsigned argIndex, SourceLocation Loc, const char *startSpec, + unsigned specifierLen, const char *csStart, unsigned csLen) { bool keepGoing = true; if (argIndex < NumDataArgs) { // Consider the argument coverered, even though the specifier doesn't // make sense. CoveredArgs.set(argIndex); - } - else { + } else { // If argIndex exceeds the number of data arguments we // don't issue a warning because that is just a cascade of warnings (and // they may have intended '%%' anyway). 
We don't want to continue processing @@ -9319,8 +9590,7 @@ if (!llvm::sys::locale::isPrint(*csStart)) { llvm::UTF32 CodePoint; const llvm::UTF8 **B = reinterpret_cast(&csStart); - const llvm::UTF8 *E = - reinterpret_cast(csStart + csLen); + const llvm::UTF8 *E = reinterpret_cast(csStart + csLen); llvm::ConversionResult Result = llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion); @@ -9347,29 +9617,27 @@ return keepGoing; } -void -CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc, - const char *startSpec, - unsigned specifierLen) { +void CheckFormatHandler::HandlePositionalNonpositionalArgs( + SourceLocation Loc, const char *startSpec, unsigned specifierLen) { EmitFormatDiagnostic( - S.PDiag(diag::warn_format_mix_positional_nonpositional_args), - Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen)); + S.PDiag(diag::warn_format_mix_positional_nonpositional_args), Loc, + /*isStringLoc*/ true, getSpecifierRange(startSpec, specifierLen)); } -bool -CheckFormatHandler::CheckNumArgs( - const analyze_format_string::FormatSpecifier &FS, - const analyze_format_string::ConversionSpecifier &CS, - const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { +bool CheckFormatHandler::CheckNumArgs( + const analyze_format_string::FormatSpecifier &FS, + const analyze_format_string::ConversionSpecifier &CS, + const char *startSpecifier, unsigned specifierLen, unsigned argIndex) { if (argIndex >= NumDataArgs) { - PartialDiagnostic PDiag = FS.usesPositionalArg() - ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) - << (argIndex+1) << NumDataArgs) - : S.PDiag(diag::warn_printf_insufficient_data_args); - EmitFormatDiagnostic( - PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true, - getSpecifierRange(startSpecifier, specifierLen)); + PartialDiagnostic PDiag = + FS.usesPositionalArg() + ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args) + << (argIndex + 1) << NumDataArgs) + : S.PDiag(diag::warn_printf_insufficient_data_args); + EmitFormatDiagnostic(PDiag, getLocationOfByte(CS.getStart()), + /*IsStringLocation*/ true, + getSpecifierRange(startSpecifier, specifierLen)); // Since more arguments than conversion tokens are given, by extension // all arguments are covered, so mark this as so. @@ -9379,14 +9647,14 @@ return true; } -template +template void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation Loc, bool IsStringLocation, Range StringRange, ArrayRef FixIt) { - EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, - Loc, IsStringLocation, StringRange, FixIt); + EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, Loc, + IsStringLocation, StringRange, FixIt); } /// If the format string is not within the function call, emit a note @@ -9427,18 +9695,19 @@ D << FixIt; } else { S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag) - << ArgumentExpr->getSourceRange(); + << ArgumentExpr->getSourceRange(); const Sema::SemaDiagnosticBuilder &Note = - S.Diag(IsStringLocation ? Loc : StringRange.getBegin(), - diag::note_format_string_defined); + S.Diag(IsStringLocation ? 
Loc : StringRange.getBegin(), + diag::note_format_string_defined); Note << StringRange; Note << FixIt; } } -//===--- CHECK: Printf format string checking ------------------------------===// +//===--- CHECK: Printf format string checking +//------------------------------===// namespace { @@ -9467,9 +9736,8 @@ } bool HandleInvalidPrintfConversionSpecifier( - const analyze_printf::PrintfSpecifier &FS, - const char *startSpecifier, - unsigned specifierLen) override; + const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier, + unsigned specifierLen) override; void handleInvalidMaskType(StringRef MaskType) override; @@ -9477,16 +9745,16 @@ const char *startSpecifier, unsigned specifierLen, const TargetInfo &Target) override; bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, - const char *StartSpecifier, - unsigned SpecifierLen, + const char *StartSpecifier, unsigned SpecifierLen, const Expr *E); - bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, - const char *startSpecifier, unsigned specifierLen); + bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, + unsigned k, const char *startSpecifier, + unsigned specifierLen); void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, const analyze_printf::OptionalAmount &Amt, - unsigned type, - const char *startSpecifier, unsigned specifierLen); + unsigned type, const char *startSpecifier, + unsigned specifierLen); void HandleFlag(const analyze_printf::PrintfSpecifier &FS, const analyze_printf::OptionalFlag &flag, const char *startSpecifier, unsigned specifierLen); @@ -9494,34 +9762,31 @@ const analyze_printf::OptionalFlag &ignoredFlag, const analyze_printf::OptionalFlag &flag, const char *startSpecifier, unsigned specifierLen); - bool checkForCStrMembers(const analyze_printf::ArgType &AT, - const Expr *E); + bool checkForCStrMembers(const analyze_printf::ArgType &AT, const Expr *E); void HandleEmptyObjCModifierFlag(const char *startFlag, unsigned flagLen) override; void HandleInvalidObjCModifierFlag(const char *startFlag, - unsigned flagLen) override; + unsigned flagLen) override; - void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, - const char *flagsEnd, - const char *conversionPosition) - override; + void + HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, + const char *flagsEnd, + const char *conversionPosition) override; }; } // namespace bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( - const analyze_printf::PrintfSpecifier &FS, - const char *startSpecifier, - unsigned specifierLen) { + const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier, + unsigned specifierLen) { const analyze_printf::PrintfConversionSpecifier &CS = - FS.getConversionSpecifier(); + FS.getConversionSpecifier(); - return HandleInvalidConversionSpecifier(FS.getArgIndex(), - getLocationOfByte(CS.getStart()), - startSpecifier, specifierLen, - CS.getStart(), CS.getLength()); + return HandleInvalidConversionSpecifier( + FS.getArgIndex(), getLocationOfByte(CS.getStart()), startSpecifier, + specifierLen, CS.getStart(), CS.getLength()); } void CheckPrintfHandler::handleInvalidMaskType(StringRef MaskType) { @@ -9561,10 +9826,10 @@ if (!AT.matchesType(S.Context, T)) { EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) - << k << AT.getRepresentativeTypeName(S.Context) - << T << Arg->getSourceRange(), + << k << AT.getRepresentativeTypeName(S.Context) + << T << Arg->getSourceRange(), getLocationOfByte(Amt.getStart()), - 
/*IsStringLocation*/true, + /*IsStringLocation*/ true, getSpecifierRange(startSpecifier, specifierLen)); // Don't do any more checking. We will just emit // spurious errors. @@ -9576,26 +9841,23 @@ } void CheckPrintfHandler::HandleInvalidAmount( - const analyze_printf::PrintfSpecifier &FS, - const analyze_printf::OptionalAmount &Amt, - unsigned type, - const char *startSpecifier, - unsigned specifierLen) { + const analyze_printf::PrintfSpecifier &FS, + const analyze_printf::OptionalAmount &Amt, unsigned type, + const char *startSpecifier, unsigned specifierLen) { const analyze_printf::PrintfConversionSpecifier &CS = - FS.getConversionSpecifier(); + FS.getConversionSpecifier(); FixItHint fixit = - Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant - ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), - Amt.getConstantLength())) - : FixItHint(); + Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant + ? FixItHint::CreateRemoval( + getSpecifierRange(Amt.getStart(), Amt.getConstantLength())) + : FixItHint(); EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) - << type << CS.toString(), + << type << CS.toString(), getLocationOfByte(Amt.getStart()), - /*IsStringLocation*/true, - getSpecifierRange(startSpecifier, specifierLen), - fixit); + /*IsStringLocation*/ true, + getSpecifierRange(startSpecifier, specifierLen), fixit); } void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, @@ -9604,39 +9866,37 @@ unsigned specifierLen) { // Warn about pointless flag with a fixit removal. const analyze_printf::PrintfConversionSpecifier &CS = - FS.getConversionSpecifier(); - EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) - << flag.toString() << CS.toString(), - getLocationOfByte(flag.getPosition()), - /*IsStringLocation*/true, - getSpecifierRange(startSpecifier, specifierLen), - FixItHint::CreateRemoval( - getSpecifierRange(flag.getPosition(), 1))); + FS.getConversionSpecifier(); + EmitFormatDiagnostic( + S.PDiag(diag::warn_printf_nonsensical_flag) + << flag.toString() << CS.toString(), + getLocationOfByte(flag.getPosition()), + /*IsStringLocation*/ true, + getSpecifierRange(startSpecifier, specifierLen), + FixItHint::CreateRemoval(getSpecifierRange(flag.getPosition(), 1))); } void CheckPrintfHandler::HandleIgnoredFlag( - const analyze_printf::PrintfSpecifier &FS, - const analyze_printf::OptionalFlag &ignoredFlag, - const analyze_printf::OptionalFlag &flag, - const char *startSpecifier, - unsigned specifierLen) { + const analyze_printf::PrintfSpecifier &FS, + const analyze_printf::OptionalFlag &ignoredFlag, + const analyze_printf::OptionalFlag &flag, const char *startSpecifier, + unsigned specifierLen) { // Warn about ignored flag with a fixit removal. EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) - << ignoredFlag.toString() << flag.toString(), + << ignoredFlag.toString() << flag.toString(), getLocationOfByte(ignoredFlag.getPosition()), - /*IsStringLocation*/true, + /*IsStringLocation*/ true, getSpecifierRange(startSpecifier, specifierLen), FixItHint::CreateRemoval( - getSpecifierRange(ignoredFlag.getPosition(), 1))); + getSpecifierRange(ignoredFlag.getPosition(), 1))); } void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, unsigned flagLen) { // Warn about an empty flag. 
- EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), - getLocationOfByte(startFlag), - /*IsStringLocation*/true, - getSpecifierRange(startFlag, flagLen)); + EmitFormatDiagnostic( + S.PDiag(diag::warn_printf_empty_objc_flag), getLocationOfByte(startFlag), + /*IsStringLocation*/ true, getSpecifierRange(startFlag, flagLen)); } void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, @@ -9645,30 +9905,31 @@ auto Range = getSpecifierRange(startFlag, flagLen); StringRef flag(startFlag, flagLen); EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, - getLocationOfByte(startFlag), - /*IsStringLocation*/true, - Range, FixItHint::CreateRemoval(Range)); + getLocationOfByte(startFlag), + /*IsStringLocation*/ true, Range, + FixItHint::CreateRemoval(Range)); } void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( - const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { - // Warn about using '[...]' without a '@' conversion. - auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); - auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; - EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), - getLocationOfByte(conversionPosition), - /*IsStringLocation*/true, - Range, FixItHint::CreateRemoval(Range)); + const char *flagsStart, const char *flagsEnd, + const char *conversionPosition) { + // Warn about using '[...]' without a '@' conversion. + auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); + auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; + EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), + getLocationOfByte(conversionPosition), + /*IsStringLocation*/ true, Range, + FixItHint::CreateRemoval(Range)); } // Determines if the specified is a C++ class or struct containing // a member with the specified name and kind (e.g. a CXXMethodDecl named // "c_str()"). -template -static llvm::SmallPtrSet +template +static llvm::SmallPtrSet CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { const RecordType *RT = Ty->getAs(); - llvm::SmallPtrSet Results; + llvm::SmallPtrSet Results; if (!RT) return Results; @@ -9700,8 +9961,8 @@ MethodSet Results = CXXRecordMembersNamed("c_str", *this, E->getType()); - for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); - MI != ME; ++MI) + for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); MI != ME; + ++MI) if ((*MI)->getMinRequiredArguments() == 0) return true; return false; @@ -9710,15 +9971,15 @@ // Check if a (w)string was passed when a (w)char* was needed, and offer a // better diagnostic if so. AT is assumed to be valid. // Returns true when a c_str() conversion method is found. 
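From the caller's side this is the familiar std::string-to-%s mistake; a minimal example of the diagnosed call and the c_str() form the note suggests:

#include <cstdio>
#include <string>

int main() {
  std::string name = "world";

  // Passing a non-trivial object to a variadic function is diagnosed, and
  // because std::string has a zero-argument c_str() returning const char *,
  // the note offers it as the fix.
  // std::printf("hello %s\n", name);        // diagnosed; fixit: name.c_str()
  std::printf("hello %s\n", name.c_str());   // OK
  return 0;
}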
-bool CheckPrintfHandler::checkForCStrMembers( - const analyze_printf::ArgType &AT, const Expr *E) { +bool CheckPrintfHandler::checkForCStrMembers(const analyze_printf::ArgType &AT, + const Expr *E) { using MethodSet = llvm::SmallPtrSet; MethodSet Results = CXXRecordMembersNamed("c_str", S, E->getType()); - for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); - MI != ME; ++MI) { + for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); MI != ME; + ++MI) { const CXXMethodDecl *Method = *MI; if (Method->getMinRequiredArguments() == 0 && AT.matchesType(S.Context, Method->getReturnType())) { @@ -9743,10 +10004,9 @@ if (FS.consumesDataArgument()) { if (atFirstArg) { - atFirstArg = false; - usesPositionalArgs = FS.usesPositionalArg(); - } - else if (usesPositionalArgs != FS.usesPositionalArg()) { + atFirstArg = false; + usesPositionalArgs = FS.usesPositionalArg(); + } else if (usesPositionalArgs != FS.usesPositionalArg()) { HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), startSpecifier, specifierLen); return false; @@ -9755,13 +10015,13 @@ // First check if the field width, precision, and conversion specifier // have matching data arguments. - if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, - startSpecifier, specifierLen)) { + if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, startSpecifier, + specifierLen)) { return false; } - if (!HandleAmount(FS.getPrecision(), /* precision */ 1, - startSpecifier, specifierLen)) { + if (!HandleAmount(FS.getPrecision(), /* precision */ 1, startSpecifier, + specifierLen)) { return false; } @@ -9793,8 +10053,9 @@ // Type check the first argument (int for %b, pointer for %D) const Expr *Ex = getDataArg(argIndex); const analyze_printf::ArgType &AT = - (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? - ArgType(S.Context.IntTy) : ArgType::CPointerTy; + (CS.getKind() == ConversionSpecifier::FreeBSDbArg) + ? ArgType(S.Context.IntTy) + : ArgType::CPointerTy; if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) EmitFormatDiagnostic( S.PDiag(diag::warn_format_conversion_argument_type_mismatch) @@ -9814,7 +10075,7 @@ Ex->getBeginLoc(), /*IsStringLocation*/ false, getSpecifierRange(startSpecifier, specifierLen)); - return true; + return true; } // Check for using an Objective-C specific conversion specifier @@ -9879,13 +10140,13 @@ // Check for invalid use of field width if (!FS.hasValidFieldWidth()) { HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, - startSpecifier, specifierLen); + startSpecifier, specifierLen); } // Check for invalid use of precision if (!FS.hasValidPrecision()) { HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, - startSpecifier, specifierLen); + startSpecifier, specifierLen); } // Precision is mandatory for %P specifier. @@ -9914,10 +10175,10 @@ // Check that flags are not ignored by another flag if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), - startSpecifier, specifierLen); + startSpecifier, specifierLen); if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), - startSpecifier, specifierLen); + startSpecifier, specifierLen); // Check the length modifier is valid with the given conversion specifier. 
if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo(), @@ -9984,20 +10245,19 @@ } static std::pair -shouldNotPrintDirectly(const ASTContext &Context, - QualType IntendedTy, +shouldNotPrintDirectly(const ASTContext &Context, QualType IntendedTy, const Expr *E) { // Use a 'while' to peel off layers of typedefs. QualType TyTy = IntendedTy; while (const TypedefType *UserTy = TyTy->getAs()) { StringRef Name = UserTy->getDecl()->getName(); QualType CastTy = llvm::StringSwitch(Name) - .Case("CFIndex", Context.getNSIntegerType()) - .Case("NSInteger", Context.getNSIntegerType()) - .Case("NSUInteger", Context.getNSUIntegerType()) - .Case("SInt32", Context.IntTy) - .Case("UInt32", Context.UnsignedIntTy) - .Default(QualType()); + .Case("CFIndex", Context.getNSIntegerType()) + .Case("NSInteger", Context.getNSIntegerType()) + .Case("NSUInteger", Context.getNSUIntegerType()) + .Case("SInt32", Context.IntTy) + .Case("UInt32", Context.UnsignedIntTy) + .Default(QualType()); if (!CastTy.isNull()) return std::make_pair(CastTy, Name); @@ -10007,8 +10267,7 @@ // Strip parens if necessary. if (const ParenExpr *PE = dyn_cast(E)) - return shouldNotPrintDirectly(Context, - PE->getSubExpr()->getType(), + return shouldNotPrintDirectly(Context, PE->getSubExpr()->getType(), PE->getSubExpr()); // If this is a conditional expression, then its result type is constructed @@ -10019,14 +10278,10 @@ QualType TrueTy, FalseTy; StringRef TrueName, FalseName; - std::tie(TrueTy, TrueName) = - shouldNotPrintDirectly(Context, - CO->getTrueExpr()->getType(), - CO->getTrueExpr()); - std::tie(FalseTy, FalseName) = - shouldNotPrintDirectly(Context, - CO->getFalseExpr()->getType(), - CO->getFalseExpr()); + std::tie(TrueTy, TrueName) = shouldNotPrintDirectly( + Context, CO->getTrueExpr()->getType(), CO->getTrueExpr()); + std::tie(FalseTy, FalseName) = shouldNotPrintDirectly( + Context, CO->getFalseExpr()->getType(), CO->getFalseExpr()); if (TrueTy == FalseTy) return std::make_pair(TrueTy, TrueName); @@ -10042,8 +10297,8 @@ /// Return true if \p ICE is an implicit argument promotion of an arithmetic /// type. Bit-field 'promotions' from a higher ranked type to a lower ranked /// type do not count. -static bool -isArithmeticArgumentPromotion(Sema &S, const ImplicitCastExpr *ICE) { +static bool isArithmeticArgumentPromotion(Sema &S, + const ImplicitCastExpr *ICE) { QualType From = ICE->getSubExpr()->getType(); QualType To = ICE->getType(); // It's an integer promotion if the destination type is the promoted @@ -10063,11 +10318,9 @@ S.Context.getFloatingTypeOrder(From, To) < 0; } -bool -CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, - const char *StartSpecifier, - unsigned SpecifierLen, - const Expr *E) { +bool CheckPrintfHandler::checkFormatExpr( + const analyze_printf::PrintfSpecifier &FS, const char *StartSpecifier, + unsigned SpecifierLen, const Expr *E) { using namespace analyze_format_string; using namespace analyze_printf; @@ -10204,10 +10457,12 @@ // Special-case some of Darwin's platform-independence types by suggesting // casts to primitive types that are known to be large enough. 
- bool ShouldNotPrintDirectly = false; StringRef CastTyName; + bool ShouldNotPrintDirectly = false; + StringRef CastTyName; if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { QualType CastTy; - std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); + std::tie(CastTy, CastTyName) = + shouldNotPrintDirectly(S.Context, IntendedTy, E); if (!CastTy.isNull()) { // %zi/%zu and %td/%tu are OK to use for NSInteger/NSUInteger of type int // (long in ASTContext). Only complain to pedants. @@ -10273,7 +10528,7 @@ IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); CastFix << ")"; - SmallVector Hints; + SmallVector Hints; if (!AT.matchesType(S.Context, IntendedTy) || ShouldNotPrintDirectly) Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); @@ -10325,8 +10580,8 @@ } } } else { - const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, - SpecifierLen); + const CharSourceRange &CSR = + getSpecifierRange(StartSpecifier, SpecifierLen); // Since the warning for passing non-POD types to variadic functions // was deferred until now, we emit a warning for non-POD // arguments here. @@ -10435,10 +10690,10 @@ const char *startSpecifier, unsigned specifierLen) override; - bool HandleInvalidScanfConversionSpecifier( - const analyze_scanf::ScanfSpecifier &FS, - const char *startSpecifier, - unsigned specifierLen) override; + bool + HandleInvalidScanfConversionSpecifier(const analyze_scanf::ScanfSpecifier &FS, + const char *startSpecifier, + unsigned specifierLen) override; void HandleIncompleteScanList(const char *start, const char *end) override; }; @@ -10448,27 +10703,24 @@ void CheckScanfHandler::HandleIncompleteScanList(const char *start, const char *end) { EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete), - getLocationOfByte(end), /*IsStringLocation*/true, + getLocationOfByte(end), /*IsStringLocation*/ true, getSpecifierRange(start, end - start)); } bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier( - const analyze_scanf::ScanfSpecifier &FS, - const char *startSpecifier, - unsigned specifierLen) { + const analyze_scanf::ScanfSpecifier &FS, const char *startSpecifier, + unsigned specifierLen) { const analyze_scanf::ScanfConversionSpecifier &CS = - FS.getConversionSpecifier(); + FS.getConversionSpecifier(); - return HandleInvalidConversionSpecifier(FS.getArgIndex(), - getLocationOfByte(CS.getStart()), - startSpecifier, specifierLen, - CS.getStart(), CS.getLength()); + return HandleInvalidConversionSpecifier( + FS.getArgIndex(), getLocationOfByte(CS.getStart()), startSpecifier, + specifierLen, CS.getStart(), CS.getLength()); } bool CheckScanfHandler::HandleScanfSpecifier( - const analyze_scanf::ScanfSpecifier &FS, - const char *startSpecifier, - unsigned specifierLen) { + const analyze_scanf::ScanfSpecifier &FS, const char *startSpecifier, + unsigned specifierLen) { using namespace analyze_scanf; using namespace analyze_format_string; @@ -10480,8 +10732,7 @@ if (atFirstArg) { atFirstArg = false; usesPositionalArgs = FS.usesPositionalArg(); - } - else if (usesPositionalArgs != FS.usesPositionalArg()) { + } else if (usesPositionalArgs != FS.usesPositionalArg()) { HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), startSpecifier, specifierLen); return false; @@ -10492,11 +10743,11 @@ const OptionalAmount &Amt = FS.getFieldWidth(); if (Amt.getHowSpecified() == OptionalAmount::Constant) { if (Amt.getConstantAmount() == 0) { - const CharSourceRange &R = getSpecifierRange(Amt.getStart(), - 
Amt.getConstantLength()); + const CharSourceRange &R = + getSpecifierRange(Amt.getStart(), Amt.getConstantLength()); EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), getLocationOfByte(Amt.getStart()), - /*IsStringLocation*/true, R, + /*IsStringLocation*/ true, R, FixItHint::CreateRemoval(R)); } } @@ -10510,9 +10761,9 @@ // Consume the argument. unsigned argIndex = FS.getArgIndex(); if (argIndex < NumDataArgs) { - // The check to see if the argIndex is valid will come later. - // We set the bit here because we may exit early from this - // function if we encounter some other error. + // The check to see if the argIndex is valid will come later. + // We set the bit here because we may exit early from this + // function if we encounter some other error. CoveredArgs.set(argIndex); } @@ -10609,7 +10860,7 @@ const char *Str = StrRef.data(); // Account for cases where the string literal is truncated in a declaration. const ConstantArrayType *T = - S.Context.getAsConstantArrayType(FExpr->getType()); + S.Context.getAsConstantArrayType(FExpr->getType()); assert(T && "String literal not of constant array type!"); size_t TypeSize = T->getSize().getZExtValue(); size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); @@ -10673,9 +10924,8 @@ assert(T && "String literal not of constant array type!"); size_t TypeSize = T->getSize().getZExtValue(); size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); - return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen, - getLangOpts(), - Context.getTargetInfo()); + return analyze_format_string::ParseFormatStringHasSArg( + Str, Str + StrLen, getLangOpts(), Context.getTargetInfo()); } //===--- CHECK: Warn on use of wrong absolute value function. -------------===// @@ -10723,7 +10973,7 @@ return 0; case Builtin::BIcabsf: - return Builtin::BIcabs; + return Builtin::BIcabs; case Builtin::BIcabs: return Builtin::BIcabsl; case Builtin::BIcabsl: @@ -10773,11 +11023,7 @@ return BestKind; } -enum AbsoluteValueKind { - AVK_Integer, - AVK_Floating, - AVK_Complex -}; +enum AbsoluteValueKind { AVK_Integer, AVK_Floating, AVK_Complex }; static AbsoluteValueKind getAbsoluteValueKind(QualType T) { if (T->isIntegralOrEnumerationType()) @@ -10967,8 +11213,8 @@ if (!EmitHeaderHint) return; - S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName - << FunctionName; + S.Diag(Loc, diag::note_include_header_or_declare) + << HeaderName << FunctionName; } template @@ -11067,32 +11313,44 @@ //===--- CHECK: Warn on use of std::max and unsigned zero. r---------------===// void Sema::CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl) { - if (!Call || !FDecl) return; + if (!Call || !FDecl) + return; // Ignore template specializations and macros. - if (inTemplateInstantiation()) return; - if (Call->getExprLoc().isMacroID()) return; + if (inTemplateInstantiation()) + return; + if (Call->getExprLoc().isMacroID()) + return; // Only care about the one template argument, two function parameter std::max - if (Call->getNumArgs() != 2) return; - if (!IsStdFunction(FDecl, "max")) return; - const auto * ArgList = FDecl->getTemplateSpecializationArgs(); - if (!ArgList) return; - if (ArgList->size() != 1) return; + if (Call->getNumArgs() != 2) + return; + if (!IsStdFunction(FDecl, "max")) + return; + const auto *ArgList = FDecl->getTemplateSpecializationArgs(); + if (!ArgList) + return; + if (ArgList->size() != 1) + return; // Check that template type argument is unsigned integer. 
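CheckMaxUnsignedZero, whose early returns are reflowed above, flags std::max calls where the deduced type is unsigned and exactly one argument is a literal zero, since the call then always yields the other argument. A sketch of the pattern:

    #include <algorithm>

    unsigned clamp_low(unsigned n) {
      // -Wmax-unsigned-zero: the max of an unsigned value and zero is always
      // the other value; the note's fix-it removes the std::max call.
      return std::max(0u, n);
    }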
- const auto& TA = ArgList->get(0); - if (TA.getKind() != TemplateArgument::Type) return; + const auto &TA = ArgList->get(0); + if (TA.getKind() != TemplateArgument::Type) + return; QualType ArgType = TA.getAsType(); - if (!ArgType->isUnsignedIntegerType()) return; + if (!ArgType->isUnsignedIntegerType()) + return; // See if either argument is a literal zero. - auto IsLiteralZeroArg = [](const Expr* E) -> bool { + auto IsLiteralZeroArg = [](const Expr *E) -> bool { const auto *MTE = dyn_cast(E); - if (!MTE) return false; + if (!MTE) + return false; const auto *Num = dyn_cast(MTE->getSubExpr()); - if (!Num) return false; - if (Num->getValue() != 0) return false; + if (!Num) + return false; + if (Num->getValue() != 0) + return false; return true; }; @@ -11102,7 +11360,8 @@ const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg); // Only warn when exactly one argument is zero. - if (IsFirstArgZero == IsSecondArgZero) return; + if (IsFirstArgZero == IsSecondArgZero) + return; SourceRange FirstRange = FirstArg->getSourceRange(); SourceRange SecondRange = SecondArg->getSourceRange(); @@ -11123,8 +11382,8 @@ } Diag(Call->getExprLoc(), diag::note_remove_max_call) - << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) - << FixItHint::CreateRemoval(RemovalRange); + << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()) + << FixItHint::CreateRemoval(RemovalRange); } //===--- CHECK: Standard memory functions ---------------------------------===// @@ -11305,7 +11564,7 @@ Sema &S; }; -} +} // namespace /// Detect if \c SizeofExpr is likely to calculate the sizeof an object. static bool doesExprLikelyComputeSize(const Expr *SizeofExpr) { @@ -11349,7 +11608,7 @@ return; const Expr *SizeArg = - Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); + Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts(); auto isLiteralZero = [](const Expr *E) { return (isa(E) && @@ -11383,8 +11642,7 @@ // If the second argument to a memset is a sizeof expression and the third // isn't, this is also likely an error. This should catch // 'memset(buf, sizeof(buf), 0xff)'. - if (BId == Builtin::BImemset && - doesExprLikelyComputeSize(Call->getArg(1)) && + if (BId == Builtin::BImemset && doesExprLikelyComputeSize(Call->getArg(1)) && !doesExprLikelyComputeSize(Call->getArg(2))) { SourceLocation DiagLoc = Call->getArg(1)->getExprLoc(); S.Diag(DiagLoc, diag::warn_suspicious_sizeof_memset) << 1; @@ -11400,8 +11658,7 @@ /// function calls. /// /// \param Call The call expression to diagnose. -void Sema::CheckMemaccessArguments(const CallExpr *Call, - unsigned BId, +void Sema::CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName) { assert(BId != 0); @@ -11413,7 +11670,9 @@ return; unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero || - BId == Builtin::BIstrndup ? 1 : 2); + BId == Builtin::BIstrndup + ? 1 + : 2); unsigned LenArg = (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 
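The memset check reformatted above looks for a sizeof expression in the value position with a non-sizeof length, the swapped-argument pattern called out in the comment in this hunk. For example:

    #include <cstring>

    void fill(char (&buf)[64]) {
      // warn_suspicious_sizeof_memset: the fill value and the length appear
      // to be transposed; the intended call is memset(buf, 0xff, sizeof(buf)).
      memset(buf, sizeof(buf), 0xff);
    }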
1 : 2); const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); @@ -11489,22 +11748,19 @@ ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); SL = SM.getSpellingLoc(SL); DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), - SM.getSpellingLoc(DSR.getEnd())); + SM.getSpellingLoc(DSR.getEnd())); SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), - SM.getSpellingLoc(SSR.getEnd())); + SM.getSpellingLoc(SSR.getEnd())); } DiagRuntimeBehavior(SL, SizeOfArg, PDiag(diag::warn_sizeof_pointer_expr_memaccess) - << ReadableName - << PointeeTy - << DestTy - << DSR - << SSR); - DiagRuntimeBehavior(SL, SizeOfArg, - PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) - << ActionIdx - << SSR); + << ReadableName << PointeeTy << DestTy << DSR + << SSR); + DiagRuntimeBehavior( + SL, SizeOfArg, + PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) + << ActionIdx << SSR); break; } @@ -11518,9 +11774,9 @@ Context.typesAreCompatible(SizeOfArgTy, DestTy)) { DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, PDiag(diag::warn_sizeof_pointer_type_memaccess) - << FnName << SizeOfArgTy << ArgIdx - << PointeeTy << Dest->getSourceRange() - << LenExpr->getSourceRange()); + << FnName << SizeOfArgTy << ArgIdx + << PointeeTy << Dest->getSourceRange() + << LenExpr->getSourceRange()); break; } } @@ -11543,7 +11799,7 @@ if (ArgIdx != 0 || IsCmp) { if (BId == Builtin::BImemcpy) OperationType = 1; - else if(BId == Builtin::BImemmove) + else if (BId == Builtin::BImemmove) OperationType = 2; else if (IsCmp) OperationType = 3; @@ -11555,12 +11811,11 @@ << IsContained << ContainedRD << OperationType << Call->getCallee()->getSourceRange()); } else if (PointeeTy.hasNonTrivialObjCLifetime() && - BId != Builtin::BImemset) - DiagRuntimeBehavior( - Dest->getExprLoc(), Dest, - PDiag(diag::warn_arc_object_memaccess) - << ArgIdx << FnName << PointeeTy - << Call->getCallee()->getSourceRange()); + BId != Builtin::BImemset) + DiagRuntimeBehavior(Dest->getExprLoc(), Dest, + PDiag(diag::warn_arc_object_memaccess) + << ArgIdx << FnName << PointeeTy + << Call->getCallee()->getSourceRange()); else if (const auto *RT = PointeeTy->getAs()) { if ((BId == Builtin::BImemset || BId == Builtin::BIbzero) && RT->getDecl()->isNonTrivialToPrimitiveDefaultInitialize()) { @@ -11581,9 +11836,9 @@ continue; DiagRuntimeBehavior( - Dest->getExprLoc(), Dest, - PDiag(diag::note_bad_memaccess_silence) - << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); + Dest->getExprLoc(), Dest, + PDiag(diag::note_bad_memaccess_silence) + << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); break; } } @@ -11595,7 +11850,7 @@ Ex = Ex->IgnoreParenCasts(); while (true) { - const BinaryOperator * BO = dyn_cast(Ex); + const BinaryOperator *BO = dyn_cast(Ex); if (!BO || !BO->isAdditiveOp()) break; @@ -11716,8 +11971,7 @@ // Warn on anti-patterns as the 'size' argument to strncat. // The correct size argument should look like following: // strncat(dst, src, sizeof(dst) - strlen(dest) - 1); -void Sema::CheckStrncatArguments(const CallExpr *CE, - IdentifierInfo *FnName) { +void Sema::CheckStrncatArguments(const CallExpr *CE, IdentifierInfo *FnName) { // Don't crash if the user has the wrong number of arguments. if (CE->getNumArgs() < 3) return; @@ -11770,8 +12024,8 @@ // Check if the destination is an array (rather than a pointer to an array). 
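warn_sizeof_pointer_expr_memaccess, emitted by the code above, covers length arguments that take sizeof of a pointer rather than of the pointed-to data. A typical instance, illustrative only:

    #include <cstring>

    void zero_out(int *dest, unsigned count) {
      // -Wsizeof-pointer-memaccess: sizeof(dest) is the size of the pointer
      // itself, not of the 'count' ints it points to.
      memset(dest, 0, sizeof(dest));
      memset(dest, 0, count * sizeof(*dest)); // the usual intended form
    }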
QualType DstTy = DstArg->getType(); - bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, - Context); + bool isKnownSizeArray = + isConstantSizeArrayWithMoreThanOneElement(DstTy, Context); if (!isKnownSizeArray) { if (PatternType == 1) Diag(SL, diag::warn_strncat_wrong_size) << SR; @@ -11795,7 +12049,7 @@ OS << ") - 1"; Diag(SL, diag::note_strncat_wrong_size) - << FixItHint::CreateReplacement(SR, OS.str()); + << FixItHint::CreateReplacement(SR, OS.str()); } namespace { @@ -11912,18 +12166,15 @@ return CheckFreeArgumentsCast(*this, CalleeName, Cast); } -void -Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, - SourceLocation ReturnLoc, - bool isObjCMethod, - const AttrVec *Attrs, - const FunctionDecl *FD) { +void Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType, + SourceLocation ReturnLoc, bool isObjCMethod, + const AttrVec *Attrs, const FunctionDecl *FD) { // Check if the return value is null but should not be. if (((Attrs && hasSpecificAttr(*Attrs)) || (!isObjCMethod && isNonNullType(Context, lhsType))) && CheckNonNullExpr(*this, RetValExp)) Diag(ReturnLoc, diag::warn_null_ret) - << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange(); + << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange(); // C++11 [basic.stc.dynamic.allocation]p4: // If an allocation function declared with a non-throwing @@ -11933,12 +12184,12 @@ if (FD) { OverloadedOperatorKind Op = FD->getOverloadedOperator(); if (Op == OO_New || Op == OO_Array_New) { - const FunctionProtoType *Proto - = FD->getType()->castAs(); - if (!Proto->isNothrow(/*ResultIfDependent*/true) && + const FunctionProtoType *Proto = + FD->getType()->castAs(); + if (!Proto->isNothrow(/*ResultIfDependent*/ true) && CheckNonNullExpr(*this, RetValExp)) Diag(ReturnLoc, diag::warn_operator_new_returns_null) - << FD << getLangOpts().CPlusPlus11; + << FD << getLangOpts().CPlusPlus11; } } @@ -11985,8 +12236,8 @@ } // Match a more general floating-point equality comparison (-Wfloat-equal). - Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts(); - Expr* RightExprSansParen = RHS->IgnoreParenImpCasts(); + Expr *LeftExprSansParen = LHS->IgnoreParenImpCasts(); + Expr *RightExprSansParen = RHS->IgnoreParenImpCasts(); // Special case: check for x == x (which is OK). // Do not emit warnings for such cases. @@ -12000,26 +12251,26 @@ // is a heuristic: often comparison against such literals are used to // detect if a value in a variable has not changed. This clearly can // lead to false negatives. - if (FloatingLiteral* FLL = dyn_cast(LeftExprSansParen)) { + if (FloatingLiteral *FLL = dyn_cast(LeftExprSansParen)) { if (FLL->isExact()) return; - } else - if (FloatingLiteral* FLR = dyn_cast(RightExprSansParen)) - if (FLR->isExact()) - return; + } else if (FloatingLiteral *FLR = + dyn_cast(RightExprSansParen)) + if (FLR->isExact()) + return; // Check for comparisons with builtin types. - if (CallExpr* CL = dyn_cast(LeftExprSansParen)) + if (CallExpr *CL = dyn_cast(LeftExprSansParen)) if (CL->getBuiltinCallee()) return; - if (CallExpr* CR = dyn_cast(RightExprSansParen)) + if (CallExpr *CR = dyn_cast(RightExprSansParen)) if (CR->getBuiltinCallee()) return; // Emit the diagnostic. Diag(Loc, diag::warn_floatingpoint_eq) - << LHS->getSourceRange() << RHS->getSourceRange(); + << LHS->getSourceRange() << RHS->getSourceRange(); } //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===// @@ -12043,19 +12294,15 @@ : Width(Width), NonNegative(NonNegative) {} /// Number of bits excluding the sign bit. 
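CheckStrncatArguments, whose fix-it is rewrapped above, targets size arguments that ignore the existing contents of the destination and the terminator; the comment in the hunk spells out the corrected form. A before/after sketch:

    #include <cstring>

    void append(char (&dst)[128], const char *src) {
      // -Wstrncat-size: sizeof(dst) leaves no room for what is already in
      // dst or for the null terminator.
      strncat(dst, src, sizeof(dst));

      // The shape the fix-it suggests, per the comment above:
      strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
    }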
- unsigned valueBits() const { - return NonNegative ? Width : Width - 1; - } + unsigned valueBits() const { return NonNegative ? Width : Width - 1; } /// Returns the range of the bool type. - static IntRange forBoolType() { - return IntRange(1, true); - } + static IntRange forBoolType() { return IntRange(1, true); } /// Returns the range of an opaque value of the given integral type. static IntRange forValueOfType(ASTContext &C, QualType T) { return forValueOfCanonicalType(C, - T->getCanonicalTypeInternal().getTypePtr()); + T->getCanonicalTypeInternal().getTypePtr()); } /// Returns the range of an opaque value of a canonical integral type. @@ -12087,10 +12334,10 @@ unsigned NumNegative = Enum->getNumNegativeBits(); if (NumNegative == 0) - return IntRange(NumPositive, true/*NonNegative*/); + return IntRange(NumPositive, true /*NonNegative*/); else return IntRange(std::max(NumPositive + 1, NumNegative), - false/*NonNegative*/); + false /*NonNegative*/); } if (const auto *EIT = dyn_cast(T)) @@ -12301,13 +12548,15 @@ // GetExprRange requires an integer expression, but a throw expression // results in a void type. Expr *E = CO->getTrueExpr(); - IntRange L = E->getType()->isVoidType() - ? IntRange{0, true} - : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); + IntRange L = + E->getType()->isVoidType() + ? IntRange{0, true} + : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); E = CO->getFalseExpr(); - IntRange R = E->getType()->isVoidType() - ? IntRange{0, true} - : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); + IntRange R = + E->getType()->isVoidType() + ? IntRange{0, true} + : GetExprRange(C, E, MaxWidth, InConstantContext, Approximate); return IntRange::join(L, R); } @@ -12363,8 +12612,8 @@ case BO_Shl: // ...except that we want to treat '1 << (blah)' as logically // positive. It's an important idiom. - if (IntegerLiteral *I - = dyn_cast(BO->getLHS()->IgnoreParenCasts())) { + if (IntegerLiteral *I = + dyn_cast(BO->getLHS()->IgnoreParenCasts())) { if (I->getValue() == 1) { IntRange R = IntRange::forValueOfType(C, GetExprType(E)); return IntRange(R.Width, /*NonNegative*/ true); @@ -12547,8 +12796,7 @@ static bool IsEnumConstOrFromMacro(Sema &S, Expr *E) { // Suppress cases where we are comparing against an enum constant. - if (const DeclRefExpr *DR = - dyn_cast(E->IgnoreParenImpCasts())) + if (const DeclRefExpr *DR = dyn_cast(E->IgnoreParenImpCasts())) if (isa(DR->getDecl())) return true; @@ -12559,8 +12807,8 @@ if (BeginLoc.isMacroID()) { StringRef MacroName = Lexer::getImmediateMacroName( BeginLoc, S.getSourceManager(), S.getLangOpts()); - return MacroName != "YES" && MacroName != "NO" && - MacroName != "true" && MacroName != "false"; + return MacroName != "YES" && MacroName != "NO" && MacroName != "true" && + MacroName != "false"; } return false; @@ -12637,21 +12885,30 @@ Value.isUnsigned() == PromotedMin.isUnsigned()); if (!isContiguous()) { assert(Value.isUnsigned() && "discontiguous range for signed compare"); - if (Value.isMinValue()) return Min; - if (Value.isMaxValue()) return Max; - if (Value >= PromotedMin) return InRange; - if (Value <= PromotedMax) return InRange; + if (Value.isMinValue()) + return Min; + if (Value.isMaxValue()) + return Max; + if (Value >= PromotedMin) + return InRange; + if (Value <= PromotedMax) + return InRange; return InHole; } switch (llvm::APSInt::compareValues(Value, PromotedMin)) { - case -1: return Less; - case 0: return PromotedMin == PromotedMax ? 
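The IntRange machinery being reflowed here backs the mixed-sign and tautological comparison warnings. The classic case -Wsign-compare reports, as a minimal sketch:

    bool below_limit(int requested, unsigned limit) {
      // -Wsign-compare: 'requested' is converted to unsigned before the
      // comparison, so a negative request compares as a very large value.
      return requested < limit;
    }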
OnlyValue : Min; + case -1: + return Less; + case 0: + return PromotedMin == PromotedMax ? OnlyValue : Min; case 1: switch (llvm::APSInt::compareValues(Value, PromotedMax)) { - case -1: return InRange; - case 0: return Max; - case 1: return Greater; + case -1: + return InRange; + case 0: + return Max; + case 1: + return Greater; } } @@ -12662,11 +12919,15 @@ constantValue(BinaryOperatorKind Op, ComparisonResult R, bool ConstantOnRHS) { if (Op == BO_Cmp) { ComparisonResult LTFlag = LT, GTFlag = GT; - if (ConstantOnRHS) std::swap(LTFlag, GTFlag); - - if (R & EQ) return StringRef("'std::strong_ordering::equal'"); - if (R & LTFlag) return StringRef("'std::strong_ordering::less'"); - if (R & GTFlag) return StringRef("'std::strong_ordering::greater'"); + if (ConstantOnRHS) + std::swap(LTFlag, GTFlag); + + if (R & EQ) + return StringRef("'std::strong_ordering::equal'"); + if (R & LTFlag) + return StringRef("'std::strong_ordering::less'"); + if (R & GTFlag) + return StringRef("'std::strong_ordering::greater'"); return llvm::None; } @@ -12695,13 +12956,12 @@ return llvm::None; } }; -} +} // namespace static bool HasEnumType(Expr *E) { // Strip off implicit integral promotions. while (ImplicitCastExpr *ICE = dyn_cast(E)) { - if (ICE->getCastKind() != CK_IntegralCast && - ICE->getCastKind() != CK_NoOp) + if (ICE->getCastKind() != CK_IntegralCast && ICE->getCastKind() != CK_NoOp) break; E = ICE->getSubExpr(); } @@ -12712,11 +12972,7 @@ static int classifyConstantValue(Expr *Constant) { // The values of this enumeration are used in the diagnostics // diag::warn_out_of_range_compare and diag::warn_tautological_bool_compare. - enum ConstantValueKind { - Miscellaneous = 0, - LiteralTrue, - LiteralFalse - }; + enum ConstantValueKind { Miscellaneous = 0, LiteralTrue, LiteralFalse }; if (auto *BL = dyn_cast(Constant)) return BL->getValue() ? ConstantValueKind::LiteralTrue : ConstantValueKind::LiteralFalse; @@ -12824,7 +13080,7 @@ if (ED) { OS << '\'' << *ED << "' (" << Value << ")"; } else if (auto *BL = dyn_cast( - Constant->IgnoreParenImpCasts())) { + Constant->IgnoreParenImpCasts())) { OS << (BL->getValue() ? "YES" : "NO"); } else { OS << Value; @@ -12863,8 +13119,8 @@ (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0) ? (HasEnumType(OriginalOther) ? diag::warn_unsigned_enum_always_true_comparison - : IsCharTy ? diag::warn_unsigned_char_always_true_comparison - : diag::warn_unsigned_always_true_comparison) + : IsCharTy ? diag::warn_unsigned_char_always_true_comparison + : diag::warn_unsigned_always_true_comparison) : diag::warn_tautological_constant_compare; S.Diag(E->getOperatorLoc(), Diag) @@ -13008,7 +13264,7 @@ // White-list bool bitfields. QualType BitfieldType = Bitfield->getType(); if (BitfieldType->isBooleanType()) - return false; + return false; if (BitfieldType->isEnumeralType()) { EnumDecl *BitfieldEnumDecl = BitfieldType->castAs()->getDecl(); @@ -13026,8 +13282,7 @@ // Ignore value- or type-dependent expressions. if (Bitfield->getBitWidth()->isValueDependent() || - Bitfield->getBitWidth()->isTypeDependent() || - Init->isValueDependent() || + Bitfield->getBitWidth()->isTypeDependent() || Init->isValueDependent() || Init->isTypeDependent()) return false; @@ -13172,19 +13427,19 @@ return; } S.Diag(E->getExprLoc(), diag) - << SourceType << T << E->getSourceRange() << SourceRange(CContext); + << SourceType << T << E->getSourceRange() << SourceRange(CContext); } /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. 
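warn_unsigned_always_true_comparison and its enum/char variants, selected in the ternary chain above, cover comparisons that the value range makes tautological, for instance:

    bool is_valid(unsigned idx) {
      // -Wtautological-unsigned-zero-compare: an unsigned value can never be
      // negative, so this comparison is always true.
      return idx >= 0;
    }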
static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, - SourceLocation CContext, - unsigned diag, bool pruneControlFlow = false) { + SourceLocation CContext, unsigned diag, + bool pruneControlFlow = false) { DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); } static bool isObjCSignedCharBool(Sema &S, QualType Ty) { return Ty->isSpecificBuiltinType(BuiltinType::SChar) && - S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); + S.getLangOpts().ObjC && S.NSAPIObj->isObjCBOOLType(Ty); } static void adornObjCBoolConversionDiagWithTernaryFixit( @@ -13219,7 +13474,7 @@ llvm::APFloat Value(0.0); bool IsConstant = - E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); + E->EvaluateAsFloat(Value, S.Context, Expr::SE_AllowSideEffects); if (!IsConstant) { if (isObjCSignedCharBool(S, T)) { return adornObjCBoolConversionDiagWithTernaryFixit( @@ -13228,8 +13483,8 @@ << E->getType()); } - return DiagnoseImpCast(S, E, T, CContext, - diag::warn_impcast_float_integer, PruneWarnings); + return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, + PruneWarnings); } bool isExact = false; @@ -13257,7 +13512,8 @@ } if (Result == llvm::APFloat::opOK && isExact) { - if (IsLiteral) return; + if (IsLiteral) + return; return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, PruneWarnings); } @@ -13276,7 +13532,7 @@ // Warn on floating point literal to integer. DiagID = diag::warn_impcast_literal_float_to_integer; } else if (IntegerValue == 0) { - if (Value.isZero()) { // Skip -0.0 to 0 conversion. + if (Value.isZero()) { // Skip -0.0 to 0 conversion. return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, PruneWarnings); } @@ -13288,7 +13544,7 @@ return DiagnoseImpCast(S, E, T, CContext, diag::warn_impcast_float_integer, PruneWarnings); } - } else { // IntegerValue.isSigned() + } else { // IntegerValue.isSigned() if (!IntegerValue.isMaxSignedValue() && !IntegerValue.isMinSignedValue()) { return DiagnoseImpCast(S, E, T, CContext, @@ -13337,7 +13593,8 @@ ->getAs(); // The below checks assume source is floating point. - if (!ResultBT || !RBT || !RBT->isFloatingPoint()) return; + if (!ResultBT || !RBT || !RBT->isFloatingPoint()) + return; // If source is floating point but target is an integer. if (ResultBT->isInteger()) @@ -13358,7 +13615,8 @@ static std::string PrettyPrintInRange(const llvm::APSInt &Value, IntRange Range) { - if (!Range.Width) return "0"; + if (!Range.Width) + return "0"; llvm::APSInt ValueInRange = Value; ValueInRange.setIsSigned(!Range.NonNegative); @@ -13373,12 +13631,12 @@ Expr *InnerE = Ex->IgnoreParenImpCasts(); const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); const Type *Source = - S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); + S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); if (Target->isDependentType()) return false; const BuiltinType *FloatCandidateBT = - dyn_cast(ToBool ? Source : Target); + dyn_cast(ToBool ? Source : Target); const Type *BoolCandidateType = ToBool ? 
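The float-to-integer diagnostics reindented above distinguish literals from other expressions and skip conversions that are exact. Two representative cases, illustrative only:

    int truncate(double ratio) {
      int scaled = ratio;   // -Wfloat-conversion: the value may change
      int approx_pi = 3.14; // -Wliteral-conversion: 3.14 becomes 3
      return scaled + approx_pi;
    }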
Target : Source; return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && @@ -13393,14 +13651,13 @@ if (!IsImplicitBoolFloatConversion(S, CurrA, true)) continue; - bool IsSwapped = ((i > 0) && - IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); - IsSwapped |= ((i < (NumArgs - 1)) && - IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); + bool IsSwapped = ((i > 0) && IsImplicitBoolFloatConversion( + S, TheCall->getArg(i - 1), false)); + IsSwapped |= ((i < (NumArgs - 1)) && IsImplicitBoolFloatConversion( + S, TheCall->getArg(i + 1), false)); if (IsSwapped) { // Warn on this floating-point to bool conversion. - DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), - CurrA->getType(), CC, + DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), CurrA->getType(), CC, diag::warn_impcast_floating_point_to_bool); } } @@ -13477,10 +13734,8 @@ QualType ElementType = Element->getType(); ExprResult ElementResult(Element); if (ElementType->getAs() && - S.CheckSingleAssignmentConstraints(TargetElementType, - ElementResult, - false, false) - != Sema::Compatible) { + S.CheckSingleAssignmentConstraints(TargetElementType, ElementResult, + false, false) != Sema::Compatible) { S.Diag(Element->getBeginLoc(), diag::warn_objc_collection_literal_element) << ElementType << ElementKind << TargetElementType << Element->getSourceRange(); @@ -13504,8 +13759,8 @@ return; if (TargetObjCPtr->isUnspecialized() || - TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() - != S.NSArrayDecl->getCanonicalDecl()) + TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() != + S.NSArrayDecl->getCanonicalDecl()) return; auto TypeArgs = TargetObjCPtr->getTypeArgs(); @@ -13515,8 +13770,7 @@ QualType TargetElementType = TypeArgs[0]; for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { checkObjCCollectionLiteralElement(S, TargetElementType, - ArrayLiteral->getElement(I), - 0); + ArrayLiteral->getElement(I), 0); } } @@ -13533,8 +13787,8 @@ return; if (TargetObjCPtr->isUnspecialized() || - TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() - != S.NSDictionaryDecl->getCanonicalDecl()) + TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() != + S.NSDictionaryDecl->getCanonicalDecl()) return; auto TypeArgs = TargetObjCPtr->getTypeArgs(); @@ -13629,12 +13883,15 @@ SourceLocation CC, bool *ICContext = nullptr, bool IsListInit = false) { - if (E->isTypeDependent() || E->isValueDependent()) return; + if (E->isTypeDependent() || E->isValueDependent()) + return; const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr(); const Type *Target = S.Context.getCanonicalType(T).getTypePtr(); - if (Source == Target) return; - if (Target->isDependentType()) return; + if (Source == Target) + return; + if (Target->isDependentType()) + return; // If the conversion context location is invalid don't complain. We also // don't want to emit a warning if the issue occurs from the expansion of @@ -13775,9 +14032,10 @@ Expr::EvalResult result; if (E->EvaluateAsRValue(result, S.Context)) { // Value might be a float, a float vector, or a float complex. 
- if (IsSameFloatAfterCast(result.Val, - S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), - S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) + if (IsSameFloatAfterCast( + result.Val, + S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), + S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) return; } @@ -14126,7 +14384,8 @@ // If -Wconversion would have warned about either of the candidates // for a signedness conversion to the context type... - if (!Suspicious) return; + if (!Suspicious) + return; // ...but it's currently ignored... if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) @@ -14134,11 +14393,12 @@ // ...then check whether it would have warned about either of the // candidates for a signedness conversion to the condition type. - if (E->getType() == T) return; + if (E->getType() == T) + return; Suspicious = false; - CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), - E->getType(), CC, &Suspicious); + CheckImplicitConversion(S, TrueExpr->IgnoreParenImpCasts(), E->getType(), CC, + &Suspicious); if (!Suspicious) CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), E->getType(), CC, &Suspicious); @@ -14160,7 +14420,7 @@ SourceLocation CC; bool IsListInit; }; -} +} // namespace /// Data recursive variant of AnalyzeImplicitConversions. Subexpressions /// that should be visited are added to WorkList. @@ -14266,10 +14526,12 @@ // we don't really need to recurse into them, because any internal // expressions should have been analyzed already when they were // built into statements. - if (isa(E)) return; + if (isa(E)) + return; // Don't descend into unevaluated contexts. - if (isa(E)) return; + if (isa(E)) + return; // Now just recurse over the expression's children. CC = E->getExprLoc(); @@ -14320,7 +14582,7 @@ /// implicit conversions in the given expression. There are a couple /// of competing diagnostics here, -Wconversion and -Wsign-compare. static void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC, - bool IsListInit/*= false*/) { + bool IsListInit /*= false*/) { llvm::SmallVector WorkList; WorkList.push_back({OrigE, CC, IsListInit}); while (!WorkList.empty()) @@ -14433,8 +14695,8 @@ unsigned DiagID = IsCompare ? diag::warn_address_of_reference_null_compare : diag::warn_address_of_reference_bool_conversion; - PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range - << IsEqual; + PartialDiagnostic PD = PDiag(DiagID) + << E->getSourceRange() << Range << IsEqual; if (CheckForReference(*this, E, PD)) { return; } @@ -14447,8 +14709,8 @@ E->printPretty(S, nullptr, getPrintingPolicy()); unsigned DiagID = IsCompare ? 
diag::warn_nonnull_expr_compare : diag::warn_cast_nonnull_to_bool; - Diag(E->getExprLoc(), DiagID) << IsParam << S.str() - << E->getSourceRange() << Range << IsEqual; + Diag(E->getExprLoc(), DiagID) + << IsParam << S.str() << E->getSourceRange() << Range << IsEqual; Diag(NonnullAttr->getLocation(), diag::note_declared_nonnull) << IsParam; }; @@ -14475,7 +14737,7 @@ return; // Check for parameter decl with nonnull attribute - if (const auto* PV = dyn_cast(D)) { + if (const auto *PV = dyn_cast(D)) { if (getCurFunction() && !getCurFunction()->ModifiedNonNullParams.count(PV)) { if (const Attr *A = PV->getAttr()) { @@ -14493,8 +14755,8 @@ for (const auto *NonNull : FD->specific_attrs()) { if (!NonNull->args_size()) { - ComplainAboutNonnullParamOrCall(NonNull); - return; + ComplainAboutNonnullParamOrCall(NonNull); + return; } for (const ParamIdx &ArgNo : NonNull->args()) { @@ -14528,11 +14790,7 @@ unsigned DiagID = IsCompare ? diag::warn_null_pointer_compare : diag::warn_impcast_pointer_to_bool; - enum { - AddressOf, - FunctionPointer, - ArrayPointer - } DiagType; + enum { AddressOf, FunctionPointer, ArrayPointer } DiagType; if (IsAddressOf) DiagType = AddressOf; else if (IsFunction) @@ -14541,8 +14799,8 @@ DiagType = ArrayPointer; else llvm_unreachable("Could not determine diagnostic."); - Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange() - << Range << IsEqual; + Diag(E->getExprLoc(), DiagID) + << DiagType << S.str() << E->getSourceRange() << Range << IsEqual; if (!IsFunction) return; @@ -14612,9 +14870,9 @@ ::CheckBoolLikeConversion(*this, E, CC); } -/// Diagnose when expression is an integer constant expression and its evaluation -/// results in integer overflow -void Sema::CheckForIntOverflow (Expr *E) { +/// Diagnose when expression is an integer constant expression and its +/// evaluation results in integer overflow +void Sema::CheckForIntOverflow(Expr *E) { // Use a work list to deal with nested struct initializers. SmallVector Exprs(1, E); @@ -14684,9 +14942,7 @@ } /// Merge a sequence of operations into its parent. - void merge(Seq S) { - Values[S.Index].Merged = true; - } + void merge(Seq S) { Values[S.Index].Merged = true; } /// Determine whether two operations are unsequenced. This operation /// is asymmetric: \p Cur should be the more recent sequence, and \p Old @@ -14777,7 +15033,7 @@ /// UK_ModAsValue. struct SequencedSubexpression { SequencedSubexpression(SequenceChecker &Self) - : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { + : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { Self.ModAsSideEffect = &ModAsSideEffect; } @@ -15512,9 +15768,8 @@ } void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, - FieldDecl *BitField, - Expr *Init) { - (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); + FieldDecl *BitField, Expr *Init) { + (void)AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); } static void diagnoseArrayStarInParamType(Sema &S, QualType PType, @@ -15627,8 +15882,11 @@ return HasInvalidParm; } -Optional> -static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx); +Optional> static getBaseAlignmentAndOffsetFromPtr(const Expr + *E, + ASTContext + &Ctx); /// Compute the alignment and offset of the base class object given the /// derived-to-base cast expression and the alignment and offset of the derived @@ -15693,8 +15951,10 @@ /// This helper function takes an lvalue expression and returns the alignment of /// a VarDecl and a constant offset from the VarDecl. 
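The nonnull handling above (parameter attributes plus function-level nonnull lists) is what drives the comparison-of-nonnull-parameter diagnostics. A hypothetical signature that trips it, names invented for illustration:

    void consume(int *data) __attribute__((nonnull));

    void consume(int *data) {
      // warn_nonnull_expr_compare: 'data' is declared nonnull, so the
      // comparison is treated as always false (unless the parameter was
      // modified first, which ModifiedNonNullParams tracks above).
      if (data == nullptr)
        return;
    }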
-Optional> -static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) { +Optional> static getBaseAlignmentAndOffsetFromLValue(const Expr *E, + ASTContext &Ctx) { E = E->IgnoreParens(); switch (E->getStmtClass()) { default: @@ -15781,8 +16041,11 @@ /// This helper function takes a pointer expression and returns the alignment of /// a VarDecl and a constant offset from the VarDecl. -Optional> -static getBaseAlignmentAndOffsetFromPtr(const Expr *E, ASTContext &Ctx) { +Optional> static getBaseAlignmentAndOffsetFromPtr(const Expr + *E, + ASTContext + &Ctx) { E = E->IgnoreParens(); switch (E->getStmtClass()) { default: @@ -15870,34 +16133,39 @@ // Require that the destination be a pointer type. const PointerType *DestPtr = T->getAs(); - if (!DestPtr) return; + if (!DestPtr) + return; // If the destination has alignment 1, we're done. QualType DestPointee = DestPtr->getPointeeType(); - if (DestPointee->isIncompleteType()) return; + if (DestPointee->isIncompleteType()) + return; CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); - if (DestAlign.isOne()) return; + if (DestAlign.isOne()) + return; // Require that the source be a pointer type. const PointerType *SrcPtr = Op->getType()->getAs(); - if (!SrcPtr) return; + if (!SrcPtr) + return; QualType SrcPointee = SrcPtr->getPointeeType(); // Explicitly allow casts from cv void*. We already implicitly // allowed casts to cv void*, since they have alignment 1. // Also allow casts involving incomplete types, which implicitly // includes 'void'. - if (SrcPointee->isIncompleteType()) return; + if (SrcPointee->isIncompleteType()) + return; CharUnits SrcAlign = getPresumedAlignmentOfPointer(Op, *this); - if (SrcAlign >= DestAlign) return; + if (SrcAlign >= DestAlign) + return; Diag(TRange.getBegin(), diag::warn_cast_align) - << Op->getType() << T - << static_cast(SrcAlign.getQuantity()) - << static_cast(DestAlign.getQuantity()) - << TRange << Op->getSourceRange(); + << Op->getType() << T << static_cast(SrcAlign.getQuantity()) + << static_cast(DestAlign.getQuantity()) << TRange + << Op->getSourceRange(); } /// Check whether this array fits the idiom of a flexible array member, @@ -15967,8 +16235,8 @@ } void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, - const ArraySubscriptExpr *ASE, - bool AllowOnePastEnd, bool IndexNegated) { + const ArraySubscriptExpr *ASE, bool AllowOnePastEnd, + bool IndexNegated) { // Already diagnosed by the constant evaluator. if (isConstantEvaluated()) return; @@ -16097,7 +16365,8 @@ uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); uint64_t array_typesize = Context.getTypeSize(BaseType); // Handle ptrarith_typesize being zero, such as when casting to void* - if (!ptrarith_typesize) ptrarith_typesize = 1; + if (!ptrarith_typesize) + ptrarith_typesize = 1; if (ptrarith_typesize != array_typesize) { // There's a cast to a different size type involved uint64_t ratio = array_typesize / ptrarith_typesize; @@ -16124,8 +16393,8 @@ // ']' location) and the index expression are both from macro expansions // within a system header. 
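These alignment helpers feed the CheckCastAlign logic in this hunk, which compares the presumed alignment of the source pointer against the alignment the destination pointee requires. For instance:

    int read_word(char *buffer) {
      // -Wcast-align: cast from 'char *' (1-byte aligned) to 'int *'
      // increases the required alignment.
      int *p = (int *)buffer;
      return *p;
    }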
if (ASE) { - SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( - ASE->getRBracketLoc()); + SourceLocation RBracketLoc = + SourceMgr.getSpellingLoc(ASE->getRBracketLoc()); if (SourceMgr.isInSystemHeader(RBracketLoc)) { SourceLocation IndexLoc = SourceMgr.getSpellingLoc(IndexExpr->getBeginLoc()); @@ -16146,7 +16415,8 @@ unsigned DiagID = diag::warn_array_index_precedes_bounds; if (!ASE) { DiagID = diag::warn_ptr_arith_precedes_bounds; - if (index.isNegative()) index = -index; + if (index.isNegative()) + index = -index; } DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr, @@ -16174,56 +16444,55 @@ while (expr) { expr = expr->IgnoreParenImpCasts(); switch (expr->getStmtClass()) { - case Stmt::ArraySubscriptExprClass: { - const ArraySubscriptExpr *ASE = cast(expr); - CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, - AllowOnePastEnd > 0); - expr = ASE->getBase(); - break; - } - case Stmt::MemberExprClass: { - expr = cast(expr)->getBase(); + case Stmt::ArraySubscriptExprClass: { + const ArraySubscriptExpr *ASE = cast(expr); + CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, AllowOnePastEnd > 0); + expr = ASE->getBase(); + break; + } + case Stmt::MemberExprClass: { + expr = cast(expr)->getBase(); + break; + } + case Stmt::OMPArraySectionExprClass: { + const OMPArraySectionExpr *ASE = cast(expr); + if (ASE->getLowerBound()) + CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), + /*ASE=*/nullptr, AllowOnePastEnd > 0); + return; + } + case Stmt::UnaryOperatorClass: { + // Only unwrap the * and & unary operators + const UnaryOperator *UO = cast(expr); + expr = UO->getSubExpr(); + switch (UO->getOpcode()) { + case UO_AddrOf: + AllowOnePastEnd++; break; - } - case Stmt::OMPArraySectionExprClass: { - const OMPArraySectionExpr *ASE = cast(expr); - if (ASE->getLowerBound()) - CheckArrayAccess(ASE->getBase(), ASE->getLowerBound(), - /*ASE=*/nullptr, AllowOnePastEnd > 0); - return; - } - case Stmt::UnaryOperatorClass: { - // Only unwrap the * and & unary operators - const UnaryOperator *UO = cast(expr); - expr = UO->getSubExpr(); - switch (UO->getOpcode()) { - case UO_AddrOf: - AllowOnePastEnd++; - break; - case UO_Deref: - AllowOnePastEnd--; - break; - default: - return; - } + case UO_Deref: + AllowOnePastEnd--; break; - } - case Stmt::ConditionalOperatorClass: { - const ConditionalOperator *cond = cast(expr); - if (const Expr *lhs = cond->getLHS()) - CheckArrayAccess(lhs); - if (const Expr *rhs = cond->getRHS()) - CheckArrayAccess(rhs); - return; - } - case Stmt::CXXOperatorCallExprClass: { - const auto *OCE = cast(expr); - for (const auto *Arg : OCE->arguments()) - CheckArrayAccess(Arg); - return; - } default: return; + } + break; + } + case Stmt::ConditionalOperatorClass: { + const ConditionalOperator *cond = cast(expr); + if (const Expr *lhs = cond->getLHS()) + CheckArrayAccess(lhs); + if (const Expr *rhs = cond->getRHS()) + CheckArrayAccess(rhs); + return; + } + case Stmt::CXXOperatorCallExprClass: { + const auto *OCE = cast(expr); + for (const auto *Arg : OCE->arguments()) + CheckArrayAccess(Arg); + return; + } + default: + return; } } } @@ -16289,19 +16558,22 @@ if (!findRetainCycleOwner(S, ref->getBase(), owner)) return false; - if (ref->isFreeIvar()) owner.setLocsFrom(ref); + if (ref->isFreeIvar()) + owner.setLocsFrom(ref); owner.Indirect = true; return true; } if (DeclRefExpr *ref = dyn_cast(e)) { VarDecl *var = dyn_cast(ref->getDecl()); - if (!var) return false; + if (!var) + return false; return considerVariable(var, ref, owner); } if (MemberExpr *member = dyn_cast(e)) { - 
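CheckArrayAccess, whose switch is reindented here, diagnoses constant indices outside the array and only tolerates one-past-the-end when the address is taken (the AllowOnePastEnd counter). A small sketch:

    int last_element() {
      int values[4] = {1, 2, 3, 4};
      int *end = &values[4];   // allowed: one-past-the-end, address only
      (void)end;
      return values[4];        // -Warray-bounds: index 4 is past the end
    }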
if (member->isArrow()) return false; + if (member->isArrow()) + return false; // Don't count this as an indirect ownership. e = member->getBase(); @@ -16310,17 +16582,18 @@ if (PseudoObjectExpr *pseudo = dyn_cast(e)) { // Only pay attention to pseudo-objects on property references. - ObjCPropertyRefExpr *pre - = dyn_cast(pseudo->getSyntacticForm() - ->IgnoreParens()); - if (!pre) return false; - if (pre->isImplicitProperty()) return false; + ObjCPropertyRefExpr *pre = dyn_cast( + pseudo->getSyntacticForm()->IgnoreParens()); + if (!pre) + return false; + if (pre->isImplicitProperty()) + return false; ObjCPropertyDecl *property = pre->getExplicitProperty(); if (!property->isRetaining() && !(property->getPropertyIvarDecl() && - property->getPropertyIvarDecl()->getType() - .getObjCLifetime() == Qualifiers::OCL_Strong)) - return false; + property->getPropertyIvarDecl()->getType().getObjCLifetime() == + Qualifiers::OCL_Strong)) + return false; owner.Indirect = true; if (pre->isSuperReceiver()) { @@ -16331,8 +16604,8 @@ owner.Range = pre->getSourceRange(); return true; } - e = const_cast(cast(pre->getBase()) - ->getSourceExpr()); + e = const_cast( + cast(pre->getBase())->getSourceExpr()); continue; } @@ -16344,57 +16617,59 @@ namespace { - struct FindCaptureVisitor : EvaluatedExprVisitor { - ASTContext &Context; - VarDecl *Variable; - Expr *Capturer = nullptr; - bool VarWillBeReased = false; +struct FindCaptureVisitor : EvaluatedExprVisitor { + ASTContext &Context; + VarDecl *Variable; + Expr *Capturer = nullptr; + bool VarWillBeReased = false; - FindCaptureVisitor(ASTContext &Context, VarDecl *variable) - : EvaluatedExprVisitor(Context), - Context(Context), Variable(variable) {} + FindCaptureVisitor(ASTContext &Context, VarDecl *variable) + : EvaluatedExprVisitor(Context), Context(Context), + Variable(variable) {} - void VisitDeclRefExpr(DeclRefExpr *ref) { - if (ref->getDecl() == Variable && !Capturer) - Capturer = ref; - } + void VisitDeclRefExpr(DeclRefExpr *ref) { + if (ref->getDecl() == Variable && !Capturer) + Capturer = ref; + } - void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { - if (Capturer) return; - Visit(ref->getBase()); - if (Capturer && ref->isFreeIvar()) - Capturer = ref; - } + void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { + if (Capturer) + return; + Visit(ref->getBase()); + if (Capturer && ref->isFreeIvar()) + Capturer = ref; + } - void VisitBlockExpr(BlockExpr *block) { - // Look inside nested blocks - if (block->getBlockDecl()->capturesVariable(Variable)) - Visit(block->getBlockDecl()->getBody()); - } + void VisitBlockExpr(BlockExpr *block) { + // Look inside nested blocks + if (block->getBlockDecl()->capturesVariable(Variable)) + Visit(block->getBlockDecl()->getBody()); + } - void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { - if (Capturer) return; - if (OVE->getSourceExpr()) - Visit(OVE->getSourceExpr()); - } + void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { + if (Capturer) + return; + if (OVE->getSourceExpr()) + Visit(OVE->getSourceExpr()); + } - void VisitBinaryOperator(BinaryOperator *BinOp) { - if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) + void VisitBinaryOperator(BinaryOperator *BinOp) { + if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) + return; + Expr *LHS = BinOp->getLHS(); + if (const DeclRefExpr *DRE = dyn_cast_or_null(LHS)) { + if (DRE->getDecl() != Variable) return; - Expr *LHS = BinOp->getLHS(); - if (const DeclRefExpr *DRE = dyn_cast_or_null(LHS)) { - if (DRE->getDecl() != Variable) - return; - if (Expr *RHS 
= BinOp->getRHS()) { - RHS = RHS->IgnoreParenCasts(); - Optional Value; - VarWillBeReased = - (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && - *Value == 0); - } + if (Expr *RHS = BinOp->getRHS()) { + RHS = RHS->IgnoreParenCasts(); + Optional Value; + VarWillBeReased = + (RHS && (Value = RHS->getIntegerConstantExpr(Context)) && + *Value == 0); } } - }; + } +}; } // namespace @@ -16441,18 +16716,20 @@ assert(owner.Variable && owner.Loc.isValid()); S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) - << owner.Variable << capturer->getSourceRange(); + << owner.Variable << capturer->getSourceRange(); S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) - << owner.Indirect << owner.Range; + << owner.Indirect << owner.Range; } /// Check for a keyword selector that starts with the word 'add' or /// 'set'. static bool isSetterLikeSelector(Selector sel) { - if (sel.isUnarySelector()) return false; + if (sel.isUnarySelector()) + return false; StringRef str = sel.getNameForSlot(0); - while (!str.empty() && str.front() == '_') str = str.substr(1); + while (!str.empty() && str.front() == '_') + str = str.substr(1); if (str.startswith("set")) str = str.substr(3); else if (str.startswith("add")) { @@ -16460,19 +16737,18 @@ if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) return false; str = str.substr(3); - } - else + } else return false; - if (str.empty()) return true; + if (str.empty()) + return true; return !isLowercase(str.front()); } static Optional GetNSMutableArrayArgumentIndex(Sema &S, ObjCMessageExpr *Message) { bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( - Message->getReceiverInterface(), - NSAPI::ClassId_NSMutableArray); + Message->getReceiverInterface(), NSAPI::ClassId_NSMutableArray); if (!IsMutableArray) { return None; } @@ -16480,7 +16756,7 @@ Selector Sel = Message->getSelector(); Optional MKOpt = - S.NSAPIObj->getNSArrayMethodKind(Sel); + S.NSAPIObj->getNSArrayMethodKind(Sel); if (!MKOpt) { return None; } @@ -16488,26 +16764,24 @@ NSAPI::NSArrayMethodKind MK = *MKOpt; switch (MK) { - case NSAPI::NSMutableArr_addObject: - case NSAPI::NSMutableArr_insertObjectAtIndex: - case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: - return 0; - case NSAPI::NSMutableArr_replaceObjectAtIndex: - return 1; + case NSAPI::NSMutableArr_addObject: + case NSAPI::NSMutableArr_insertObjectAtIndex: + case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: + return 0; + case NSAPI::NSMutableArr_replaceObjectAtIndex: + return 1; - default: - return None; + default: + return None; } return None; } -static -Optional GetNSMutableDictionaryArgumentIndex(Sema &S, - ObjCMessageExpr *Message) { +static Optional +GetNSMutableDictionaryArgumentIndex(Sema &S, ObjCMessageExpr *Message) { bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( - Message->getReceiverInterface(), - NSAPI::ClassId_NSMutableDictionary); + Message->getReceiverInterface(), NSAPI::ClassId_NSMutableDictionary); if (!IsMutableDictionary) { return None; } @@ -16515,7 +16789,7 @@ Selector Sel = Message->getSelector(); Optional MKOpt = - S.NSAPIObj->getNSDictionaryMethodKind(Sel); + S.NSAPIObj->getNSDictionaryMethodKind(Sel); if (!MKOpt) { return None; } @@ -16523,13 +16797,13 @@ NSAPI::NSDictionaryMethodKind MK = *MKOpt; switch (MK) { - case NSAPI::NSMutableDict_setObjectForKey: - case NSAPI::NSMutableDict_setValueForKey: - case NSAPI::NSMutableDict_setObjectForKeyedSubscript: - return 0; + case NSAPI::NSMutableDict_setObjectForKey: + case NSAPI::NSMutableDict_setValueForKey: + case 
NSAPI::NSMutableDict_setObjectForKeyedSubscript: + return 0; - default: - return None; + default: + return None; } return None; @@ -16537,12 +16811,10 @@ static Optional GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( - Message->getReceiverInterface(), - NSAPI::ClassId_NSMutableSet); + Message->getReceiverInterface(), NSAPI::ClassId_NSMutableSet); bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( - Message->getReceiverInterface(), - NSAPI::ClassId_NSMutableOrderedSet); + Message->getReceiverInterface(), NSAPI::ClassId_NSMutableOrderedSet); if (!IsMutableSet && !IsMutableOrderedSet) { return None; } @@ -16557,13 +16829,13 @@ NSAPI::NSSetMethodKind MK = *MKOpt; switch (MK) { - case NSAPI::NSMutableSet_addObject: - case NSAPI::NSOrderedSet_setObjectAtIndex: - case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: - case NSAPI::NSOrderedSet_insertObjectAtIndex: - return 0; - case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: - return 1; + case NSAPI::NSMutableSet_addObject: + case NSAPI::NSOrderedSet_setObjectAtIndex: + case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: + case NSAPI::NSOrderedSet_insertObjectAtIndex: + return 0; + case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: + return 1; } return None; @@ -16594,7 +16866,7 @@ if (ArgRE->isObjCSelfExpr()) { Diag(Message->getSourceRange().getBegin(), diag::warn_objc_circular_container) - << ArgRE->getDecl() << StringRef("'super'"); + << ArgRE->getDecl() << StringRef("'super'"); } } } else { @@ -16610,11 +16882,11 @@ ValueDecl *Decl = ReceiverRE->getDecl(); Diag(Message->getSourceRange().getBegin(), diag::warn_objc_circular_container) - << Decl << Decl; + << Decl << Decl; if (!ArgRE->isObjCSelfExpr()) { Diag(Decl->getLocation(), diag::note_objc_circular_container_declared_here) - << Decl; + << Decl; } } } @@ -16624,10 +16896,10 @@ ObjCIvarDecl *Decl = IvarRE->getDecl(); Diag(Message->getSourceRange().getBegin(), diag::warn_objc_circular_container) - << Decl << Decl; + << Decl << Decl; Diag(Decl->getLocation(), diag::note_objc_circular_container_declared_here) - << Decl; + << Decl; } } } @@ -16688,8 +16960,8 @@ diagnoseRetainCycle(*this, Capturer, Owner); } -static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, - Expr *RHS, bool isProperty) { +static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, Expr *RHS, + bool isProperty) { // Check if RHS is an Objective-C object literal, which also can get // immediately zapped in a weak reference. Note that we explicitly // allow ObjCStringLiterals, since those are designed to never really die. @@ -16702,23 +16974,20 @@ return false; S.Diag(Loc, diag::warn_arc_literal_assign) - << (unsigned) Kind - << (isProperty ? 0 : 1) - << RHS->getSourceRange(); + << (unsigned)Kind << (isProperty ? 0 : 1) << RHS->getSourceRange(); return true; } static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, - Qualifiers::ObjCLifetime LT, - Expr *RHS, bool isProperty) { + Qualifiers::ObjCLifetime LT, Expr *RHS, + bool isProperty) { // Strip off any implicit cast added to get to the one ARC-specific. while (ImplicitCastExpr *cast = dyn_cast(RHS)) { if (cast->getCastKind() == CK_ARCConsumeObject) { S.Diag(Loc, diag::warn_arc_retained_assign) - << (LT == Qualifiers::OCL_ExplicitNone) - << (isProperty ? 0 : 1) - << RHS->getSourceRange(); + << (LT == Qualifiers::OCL_ExplicitNone) << (isProperty ? 
0 : 1) + << RHS->getSourceRange(); return true; } RHS = cast->getSubExpr(); @@ -16731,8 +17000,7 @@ return false; } -bool Sema::checkUnsafeAssigns(SourceLocation Loc, - QualType LHS, Expr *RHS) { +bool Sema::checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS) { Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) @@ -16744,13 +17012,11 @@ return false; } -void Sema::checkUnsafeExprAssigns(SourceLocation Loc, - Expr *LHS, Expr *RHS) { +void Sema::checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS) { QualType LHSType; // PropertyRef on LHS type need be directly obtained from // its declaration as it has a PseudoType. - ObjCPropertyRefExpr *PRE - = dyn_cast(LHS->IgnoreParens()); + ObjCPropertyRefExpr *PRE = dyn_cast(LHS->IgnoreParens()); if (PRE && !PRE->isImplicitProperty()) { const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); if (PD) @@ -16794,7 +17060,7 @@ while (ImplicitCastExpr *cast = dyn_cast(RHS)) { if (cast->getCastKind() == CK_ARCConsumeObject) { Diag(Loc, diag::warn_arc_retained_property_assign) - << RHS->getSourceRange(); + << RHS->getSourceRange(); return; } RHS = cast->getSubExpr(); @@ -16821,14 +17087,14 @@ // Get line numbers of statement and body. bool StmtLineInvalid; - unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, - &StmtLineInvalid); + unsigned StmtLine = + SourceMgr.getPresumedLineNumber(StmtLoc, &StmtLineInvalid); if (StmtLineInvalid) return false; bool BodyLineInvalid; - unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), - &BodyLineInvalid); + unsigned BodyLine = + SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), &BodyLineInvalid); if (BodyLineInvalid) return false; @@ -16839,8 +17105,7 @@ return true; } -void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, - const Stmt *Body, +void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID) { // Since this is a syntactic check, don't emit diagnostic for template // instantiations, this just adds noise. @@ -16860,8 +17125,7 @@ Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); } -void Sema::DiagnoseEmptyLoopBody(const Stmt *S, - const Stmt *PossibleBody) { +void Sema::DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody) { assert(!CurrentInstantiationScope); // Ensured by caller SourceLocation StmtLoc; @@ -16930,7 +17194,7 @@ /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, - SourceLocation OpLoc) { + SourceLocation OpLoc) { if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc)) return; @@ -17067,12 +17331,10 @@ return false; // Check the base classes. 
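DiagnoseEmptyStmtBody and DiagnoseEmptyLoopBody, reflowed above, only fire when the stray semicolon sits on the same line as the statement, and the note suggests moving it to its own line to silence the warning. Illustration:

    int drain(int n) {
      // -Wempty-body: the ';' is the entire loop body; putting it on a
      // separate line (note_empty_body_on_separate_line) silences this.
      while (n-- > 0);
      return n;
    }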
- for (CXXRecordDecl::base_class_const_iterator - Base1 = D1CXX->bases_begin(), - BaseEnd1 = D1CXX->bases_end(), - Base2 = D2CXX->bases_begin(); - Base1 != BaseEnd1; - ++Base1, ++Base2) { + for (CXXRecordDecl::base_class_const_iterator Base1 = D1CXX->bases_begin(), + BaseEnd1 = D1CXX->bases_end(), + Base2 = D2CXX->bases_begin(); + Base1 != BaseEnd1; ++Base1, ++Base2) { if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) return false; } @@ -17087,7 +17349,7 @@ Field2End = RD2->field_end(), Field1 = RD1->field_begin(), Field1End = RD1->field_end(); - for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { + for (; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { if (!isLayoutCompatible(C, *Field1, *Field2)) return false; } @@ -17106,14 +17368,13 @@ UnmatchedFields.insert(Field2); for (auto *Field1 : RD1->fields()) { - llvm::SmallPtrSet::iterator - I = UnmatchedFields.begin(), - E = UnmatchedFields.end(); + llvm::SmallPtrSet::iterator I = UnmatchedFields.begin(), + E = UnmatchedFields.end(); - for ( ; I != E; ++I) { + for (; I != E; ++I) { if (isLayoutCompatible(C, Field1, *I)) { bool Result = UnmatchedFields.erase(*I); - (void) Result; + (void)Result; assert(Result); break; } @@ -17157,15 +17418,13 @@ return false; if (TC1 == Type::Enum) { - return isLayoutCompatible(C, - cast(T1)->getDecl(), + return isLayoutCompatible(C, cast(T1)->getDecl(), cast(T2)->getDecl()); } else if (TC1 == Type::Record) { if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType()) return false; - return isLayoutCompatible(C, - cast(T1)->getDecl(), + return isLayoutCompatible(C, cast(T1)->getDecl(), cast(T2)->getDecl()); } @@ -17188,7 +17447,7 @@ static bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, const ValueDecl **VD, uint64_t *MagicValue, bool isConstantEvaluated) { - while(true) { + while (true) { if (!TypeExpr) return false; @@ -17300,8 +17559,7 @@ if (!MagicValues) return false; - llvm::DenseMap::const_iterator I = + llvm::DenseMap::const_iterator I = MagicValues->find(std::make_pair(ArgumentKind, MagicValue)); if (I == MagicValues->end()) return false; @@ -17312,8 +17570,7 @@ void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, - bool LayoutCompatible, - bool MustBeNull) { + bool LayoutCompatible, bool MustBeNull) { if (!TypeTagForDatatypeMagicValues) TypeTagForDatatypeMagicValues.reset( new llvm::DenseMap); @@ -17335,8 +17592,8 @@ BuiltinType::Kind T1Kind = BT1->getKind(); BuiltinType::Kind T2Kind = BT2->getKind(); - return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || - (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || + return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || + (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) || (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar); } @@ -17363,7 +17620,7 @@ if (FoundWrongKind) Diag(TypeTagExpr->getExprLoc(), diag::warn_type_tag_for_datatype_wrong_kind) - << TypeTagExpr->getSourceRange(); + << TypeTagExpr->getSourceRange(); return; } @@ -17390,12 +17647,11 @@ if (TypeInfo.MustBeNull) { // Type tag with matching void type requires a null pointer. 
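The type-tag plumbing above backs the type_tag_for_datatype attribute family checked under -Wtype-safety. A hypothetical tagged API in that style; the kind name, function, and tag value here are invented, and the attribute spellings follow Clang's documentation as I understand it:

    static const int fmt_int_tag
        __attribute__((type_tag_for_datatype(fmt, int))) = 1;

    void send_value(void *buf, int tag)
        __attribute__((pointer_with_type_tag(fmt, 1, 2)));

    void demo() {
      double d = 1.0;
      // -Wtype-safety: the tag registers 'int' as the required pointee type,
      // but 'buf' is given a double* (warn_type_safety_type_mismatch).
      send_value(&d, fmt_int_tag);
    }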
- if (!ArgumentExpr->isNullPointerConstant(Context, - Expr::NPC_ValueDependentIsNotNull)) { + if (!ArgumentExpr->isNullPointerConstant( + Context, Expr::NPC_ValueDependentIsNotNull)) { Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_null_pointer_required) - << ArgumentKind->getName() - << ArgumentExpr->getSourceRange() + << ArgumentKind->getName() << ArgumentExpr->getSourceRange() << TypeTagExpr->getSourceRange(); } return; @@ -17419,19 +17675,16 @@ RequiredType->getPointeeType())) || (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) mismatch = false; - } else - if (IsPointerAttr) - mismatch = !isLayoutCompatible(Context, - ArgumentType->getPointeeType(), - RequiredType->getPointeeType()); - else - mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); + } else if (IsPointerAttr) + mismatch = !isLayoutCompatible(Context, ArgumentType->getPointeeType(), + RequiredType->getPointeeType()); + else + mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); if (mismatch) Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) - << ArgumentType << ArgumentKind - << TypeInfo.LayoutCompatible << RequiredType - << ArgumentExpr->getSourceRange() + << ArgumentType << ArgumentKind << TypeInfo.LayoutCompatible + << RequiredType << ArgumentExpr->getSourceRange() << TypeTagExpr->getSourceRange(); } @@ -17524,7 +17777,7 @@ // For now, just disregard these cases. This is left for future // improvement. if (!DRE && !isa(TopBase)) - return; + return; // Alignment expected by the whole expression. CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());
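The final hunk's alignment bookkeeping (the alignment expected by the whole expression versus what the base VarDecl guarantees) is the kind of analysis behind warnings on under-aligned member accesses such as -Waddress-of-packed-member; the caller is outside this excerpt, so this is only a plausible illustration:

    struct __attribute__((packed)) Header {
      char kind;
      int length;
    };

    int *length_ptr(Header &h) {
      // -Waddress-of-packed-member: 'length' may be under-aligned for 'int'
      // inside the packed struct, so forming an int* to it is unsafe.
      return &h.length;
    }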