diff --git a/clang/include/clang/AST/ASTContext.h b/clang/include/clang/AST/ASTContext.h
--- a/clang/include/clang/AST/ASTContext.h
+++ b/clang/include/clang/AST/ASTContext.h
@@ -1472,9 +1472,12 @@
   /// Return the unique reference to a scalable vector type of the specified
   /// element type and scalable number of elements.
+  /// For RISC-V, the number of fields is also provided when fetching a
+  /// tuple type.
   ///
   /// \pre \p EltTy must be a built-in type.
-  QualType getScalableVectorType(QualType EltTy, unsigned NumElts) const;
+  QualType getScalableVectorType(QualType EltTy, unsigned NumElts,
+                                 unsigned NumFields = 1) const;
 
   /// Return a WebAssembly externref type.
   QualType getWebAssemblyExternrefType() const;
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -1503,6 +1503,58 @@
 defm : RVVIndexedSegLoad<"vluxseg">;
 defm : RVVIndexedSegLoad<"vloxseg">;
 }
+
+multiclass RVVUnitStridedSegLoadTuple<string op> {
+  foreach type = ["i"] in {
+    defvar eew = !cond(!eq(type, "i") : "32");
+    foreach nf = [2] in {
+      let Name = op # nf # "e" # eew # "_v_tuple",
+          OverloadedName = op # nf # "e" # eew # "_tuple",
+          IRName = op # nf,
+          MaskedIRName = op # nf # "_mask",
+          NF = nf,
+          ManualCodegen = [{
+    {
+      assert(((IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
+              (!IsMasked && (PolicyAttrs & RVV_VTA))) &&
+             "FIXME: Only handling default policy (TAMA) for now");
+
+      llvm::Type *ElementVectorType =
+          cast<StructType>(ResultType)->elements()[0];
+      IntrinsicTypes = {ElementVectorType, Ops.back()->getType()};
+      SmallVector<llvm::Value *, 12> Operands;
+
+      Operands.append(NF, llvm::PoisonValue::get(ElementVectorType));
+
+      unsigned Offset = IsMasked ? 1 : 0;
+      Operands.push_back(Ops[Offset]); // Ptr
+      if (IsMasked)
+        Operands.push_back(Ops[0]);
+      Operands.push_back(Ops[Offset + 1]); // VL
+      if (IsMasked)
+        Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+
+      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+
+      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+      if (ReturnValue.isNull())
+        return LoadValue;
+      else
+        return Builder.CreateStore(LoadValue, ReturnValue.getValue());
+    }
+  }] in {
+        defvar T = "(Tuple:" # nf # ")";
+        def : RVVBuiltin<"v", T # "vPCe", type>;
+      }
+    }
+  }
+}
+
+// TODO: Extend for policy
+let UnMaskedPolicyScheme = NonePolicy,
+    MaskedPolicyScheme = NonePolicy,
+    IsTuple = true in {
+defm : RVVUnitStridedSegLoadTuple<"vlseg">;
+}
+
 let UnMaskedPolicyScheme = NonePolicy,
     MaskedPolicyScheme = NonePolicy in {
 defm : RVVUnitStridedSegStore<"vsseg">;
diff --git a/clang/include/clang/Basic/riscv_vector_common.td b/clang/include/clang/Basic/riscv_vector_common.td
--- a/clang/include/clang/Basic/riscv_vector_common.td
+++ b/clang/include/clang/Basic/riscv_vector_common.td
@@ -231,6 +231,9 @@
 
   // Number of fields for Load/Store Segment instructions.
   int NF = 1;
+
+  // Set to true if the builtin is associated with tuple types.
+  bit IsTuple = false;
 }
 
 // This is the code emitted in the header.
diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -58,6 +58,7 @@
   SFixedLog2LMUL1,
   SFixedLog2LMUL2,
   SFixedLog2LMUL3,
+  Tuple2,
 };
 
 // Similar to basic type but used to describe what's kind of type related to
@@ -243,6 +244,8 @@
   unsigned ElementBitwidth = 0;
   VScaleVal Scale = 0;
   bool Valid;
+  bool IsTuple = false;
+  unsigned NF = 0;
 
   std::string BuiltinStr;
   std::string ClangBuiltinStr;
@@ -293,10 +296,15 @@
   }
   bool isConstant() const { return IsConstant; }
   bool isPointer() const { return IsPointer; }
+  bool isTuple() const { return IsTuple; }
   unsigned getElementBitwidth() const { return ElementBitwidth; }
 
   ScalarTypeKind getScalarType() const { return ScalarType; }
   VScaleVal getScale() const { return Scale; }
+  unsigned getNF() const {
+    assert(NF > 1 && NF < 8 && "Only legal NF should be fetched");
+    return NF;
+  }
 
 private:
   // Verify RVV vector type and set Valid.
@@ -373,6 +381,7 @@
   std::vector<int64_t> IntrinsicTypes;
   unsigned NF = 1;
   Policy PolicyAttrs;
+  bool IsTuple = false;
 
 public:
   RVVIntrinsic(llvm::StringRef Name, llvm::StringRef Suffix,
@@ -383,7 +392,7 @@
                const RVVTypes &Types,
                const std::vector<int64_t> &IntrinsicTypes,
                const std::vector<llvm::StringRef> &RequiredFeatures,
-               unsigned NF, Policy PolicyAttrs);
+               unsigned NF, Policy PolicyAttrs, bool IsTuple);
   ~RVVIntrinsic() = default;
 
   RVVTypePtr getOutputType() const { return OutputType; }
@@ -444,7 +453,7 @@
   computeBuiltinTypes(llvm::ArrayRef<PrototypeDescriptor> Prototype,
                       bool IsMasked, bool HasMaskedOffOperand, bool HasVL,
                       unsigned NF, PolicyScheme DefaultScheme,
-                      Policy PolicyAttrs);
+                      Policy PolicyAttrs, bool IsTuple);
 
   static llvm::SmallVector<Policy> getSupportedUnMaskedPolicies();
   static llvm::SmallVector<Policy>
@@ -512,6 +521,7 @@
   bool HasMaskedOffOperand : 1;
   bool HasTailPolicy : 1;
   bool HasMaskPolicy : 1;
+  bool IsTuple : 1;
   uint8_t UnMaskedPolicyScheme : 2;
   uint8_t MaskedPolicyScheme : 2;
 };
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -4042,8 +4042,8 @@
 /// getScalableVectorType - Return the unique reference to a scalable vector
 /// type of the specified element type and size. VectorType must be a built-in
 /// type.
-QualType ASTContext::getScalableVectorType(QualType EltTy,
-                                           unsigned NumElts) const {
+QualType ASTContext::getScalableVectorType(QualType EltTy, unsigned NumElts,
+                                           unsigned NumFields) const {
   if (Target->hasAArch64SVETypes()) {
     uint64_t EltTySize = getTypeSize(EltTy);
 #define SVE_VECTOR_TYPE(Name, MangledName, Id, SingletonId, NumEls, ElBits,   \
@@ -4067,15 +4067,15 @@
     uint64_t EltTySize = getTypeSize(EltTy);
 #define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned,  \
                         IsFP)                                                 \
-  if (!EltTy->isBooleanType() &&                                              \
-      ((EltTy->hasIntegerRepresentation() &&                                  \
-        EltTy->hasSignedIntegerRepresentation() == IsSigned) ||               \
-       (EltTy->hasFloatingRepresentation() && IsFP)) &&                       \
-      EltTySize == ElBits && NumElts == NumEls)                               \
-    return SingletonId;
+  if (!EltTy->isBooleanType() &&                                              \
+      ((EltTy->hasIntegerRepresentation() &&                                  \
+        EltTy->hasSignedIntegerRepresentation() == IsSigned) ||               \
+       (EltTy->hasFloatingRepresentation() && IsFP)) &&                       \
+      EltTySize == ElBits && NumElts == NumEls && NumFields == NF)            \
+    return SingletonId;
 #define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)                     \
-  if (EltTy->isBooleanType() && NumElts == NumEls)                            \
-    return SingletonId;
+  if (EltTy->isBooleanType() && NumElts == NumEls)                            \
+    return SingletonId;
 #include "clang/Basic/RISCVVTypes.def"
   }
   return QualType();
diff --git a/clang/lib/Sema/SemaRISCVVectorLookup.cpp b/clang/lib/Sema/SemaRISCVVectorLookup.cpp
--- a/clang/lib/Sema/SemaRISCVVectorLookup.cpp
+++ b/clang/lib/Sema/SemaRISCVVectorLookup.cpp
@@ -135,8 +135,12 @@
   case Invalid:
     llvm_unreachable("Unhandled type.");
   }
-  if (Type->isVector())
-    QT = Context.getScalableVectorType(QT, *Type->getScale());
+  if (Type->isVector()) {
+    if (Type->isTuple())
+      QT = Context.getScalableVectorType(QT, *Type->getScale(), Type->getNF());
+    else
+      QT = Context.getScalableVectorType(QT, *Type->getScale());
+  }
 
   if (Type->isConstant())
     QT = Context.getConstType(QT);
@@ -214,15 +218,16 @@
   const Policy DefaultPolicy;
 
   llvm::SmallVector<PrototypeDescriptor> ProtoSeq =
-      RVVIntrinsic::computeBuiltinTypes(BasicProtoSeq, /*IsMasked=*/false,
-                                        /*HasMaskedOffOperand=*/false,
-                                        Record.HasVL, Record.NF,
-                                        UnMaskedPolicyScheme, DefaultPolicy);
+      RVVIntrinsic::computeBuiltinTypes(
+          BasicProtoSeq, /*IsMasked=*/false,
+          /*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
+          UnMaskedPolicyScheme, DefaultPolicy, Record.IsTuple);
 
   llvm::SmallVector<PrototypeDescriptor> ProtoMaskSeq =
       RVVIntrinsic::computeBuiltinTypes(
          BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
-         Record.HasVL, Record.NF, MaskedPolicyScheme, DefaultPolicy);
+         Record.HasVL, Record.NF, MaskedPolicyScheme, DefaultPolicy,
+         Record.IsTuple);
 
   bool UnMaskedHasPolicy = UnMaskedPolicyScheme != PolicyScheme::SchemeNone;
   bool MaskedHasPolicy = MaskedPolicyScheme != PolicyScheme::SchemeNone;
@@ -280,7 +285,7 @@
             RVVIntrinsic::computeBuiltinTypes(
                 BasicProtoSeq, /*IsMasked=*/false,
                 /*HasMaskedOffOperand=*/false, Record.HasVL, Record.NF,
-                UnMaskedPolicyScheme, P);
+                UnMaskedPolicyScheme, P, Record.IsTuple);
         std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
             BaseType, Log2LMUL, Record.NF, PolicyPrototype);
         InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
@@ -301,8 +306,9 @@
       for (auto P : SupportedMaskedPolicies) {
         llvm::SmallVector<PrototypeDescriptor> PolicyPrototype =
             RVVIntrinsic::computeBuiltinTypes(
-                BasicProtoSeq, /*IsMasked=*/true, Record.HasMaskedOffOperand,
-                Record.HasVL, Record.NF, MaskedPolicyScheme, P);
+                BasicProtoSeq, /*IsMasked=*/true,
+                Record.HasMaskedOffOperand, Record.HasVL, Record.NF,
+                MaskedPolicyScheme, P, Record.IsTuple);
         std::optional<RVVTypes> PolicyTypes = TypeCache.computeTypes(
             BaseType, Log2LMUL, Record.NF, PolicyPrototype);
         InitRVVIntrinsic(Record, SuffixStr, OverloadedSuffixStr,
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -113,6 +113,8 @@
     return false;
   if (isFloat() && ElementBitwidth == 8)
     return false;
+  if (IsTuple && (NF == 1 || NF > 8))
+    return false;
   unsigned V = *Scale;
   switch (ElementBitwidth) {
   case 1:
@@ -214,6 +216,9 @@
   // vector values.
   if (IsPointer)
     BuiltinStr += "*";
+
+  if (IsTuple)
+    BuiltinStr = "T" + utostr(NF) + BuiltinStr;
 }
 
 void RVVType::initClangBuiltinStr() {
@@ -237,7 +242,8 @@
   default:
     llvm_unreachable("ScalarTypeKind is invalid");
   }
-  ClangBuiltinStr += utostr(ElementBitwidth) + LMUL.str() + "_t";
+  ClangBuiltinStr += utostr(ElementBitwidth) + LMUL.str() +
+                     (IsTuple ? "x" + utostr(NF) : "") + "_t";
 }
 
 void RVVType::initTypeStr() {
@@ -249,7 +255,8 @@
   auto getTypeString = [&](StringRef TypeStr) {
     if (isScalar())
       return Twine(TypeStr + Twine(ElementBitwidth) + "_t").str();
-    return Twine("v" + TypeStr + Twine(ElementBitwidth) + LMUL.str() + "_t")
+    return Twine("v" + TypeStr + Twine(ElementBitwidth) + LMUL.str() +
+                 (IsTuple ? "x" + utostr(NF) : "") + "_t")
         .str();
   };
 
@@ -542,6 +549,19 @@
       return std::nullopt;
     }
+  } else if (ComplexTT.first == "Tuple") {
+    unsigned NF = 0;
+    if (ComplexTT.second.getAsInteger(10, NF)) {
+      llvm_unreachable("Invalid NF value!");
+      return std::nullopt;
+    }
+    switch (NF) {
+    case 2:
+      VTM = VectorTypeModifier::Tuple2;
+      break;
+    default:
+      llvm_unreachable("Unhandled NF");
+    }
   } else {
     llvm_unreachable("Illegal complex type transformers!");
   }
@@ -702,6 +722,11 @@
   case VectorTypeModifier::SFixedLog2LMUL3:
     applyFixedLog2LMUL(3, FixedLMULType::SmallerThan);
     break;
+  case VectorTypeModifier::Tuple2: {
+    IsTuple = true;
+    NF = 2;
+    break;
+  }
   case VectorTypeModifier::NoModifier:
     break;
   }
@@ -852,11 +877,12 @@
                            const RVVTypes &OutInTypes,
                            const std::vector<int64_t> &NewIntrinsicTypes,
                            const std::vector<StringRef> &RequiredFeatures,
-                           unsigned NF, Policy NewPolicyAttrs)
+                           unsigned NF, Policy NewPolicyAttrs, bool IsTuple)
     : IRName(IRName), IsMasked(IsMasked),
      HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL), Scheme(Scheme),
      SupportOverloading(SupportOverloading), HasBuiltinAlias(HasBuiltinAlias),
-     ManualCodegen(ManualCodegen.str()), NF(NF), PolicyAttrs(NewPolicyAttrs) {
+     ManualCodegen(ManualCodegen.str()), NF(NF), PolicyAttrs(NewPolicyAttrs),
+     IsTuple(IsTuple) {
 
   // Init BuiltinName, Name and OverloadedName
   BuiltinName = NewName.str();
@@ -912,7 +938,7 @@
 llvm::SmallVector<PrototypeDescriptor> RVVIntrinsic::computeBuiltinTypes(
     llvm::ArrayRef<PrototypeDescriptor> Prototype, bool IsMasked,
     bool HasMaskedOffOperand, bool HasVL, unsigned NF,
-    PolicyScheme DefaultScheme, Policy PolicyAttrs) {
+    PolicyScheme DefaultScheme, Policy PolicyAttrs, bool IsTuple) {
   SmallVector<PrototypeDescriptor> NewPrototype(Prototype.begin(),
                                                 Prototype.end());
   bool HasPassthruOp = DefaultScheme == PolicyScheme::HasPassthruOperand;
@@ -938,8 +964,12 @@
       // to
       // (void, op0 address, op1 address, ..., mask, maskedoff0, maskedoff1,
      // ...)
-      NewPrototype.insert(NewPrototype.begin() + NF + 1,
-                          PrototypeDescriptor::Mask);
+      if (IsTuple)
+        NewPrototype.insert(NewPrototype.begin() + 1,
+                            PrototypeDescriptor::Mask);
+      else
+        NewPrototype.insert(NewPrototype.begin() + NF + 1,
+                            PrototypeDescriptor::Mask);
     } else {
       // If IsMasked, insert PrototypeDescriptor:Mask as first input operand.
       NewPrototype.insert(NewPrototype.begin() + 1, PrototypeDescriptor::Mask);
@@ -963,6 +993,8 @@
   // If HasVL, append PrototypeDescriptor:VL to last operand
   if (HasVL)
     NewPrototype.push_back(PrototypeDescriptor::VL);
+  if (IsTuple)
+    NewPrototype[0].VTM = static_cast<uint8_t>(VectorTypeModifier::Tuple2);
 
   return NewPrototype;
 }
@@ -1077,6 +1109,7 @@
   OS << (int)Record.HasMaskedOffOperand << ",";
   OS << (int)Record.HasTailPolicy << ",";
   OS << (int)Record.HasMaskPolicy << ",";
+  OS << (int)Record.IsTuple << ",";
   OS << (int)Record.UnMaskedPolicyScheme << ",";
   OS << (int)Record.MaskedPolicyScheme << ",";
   OS << "},\n";
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32_tuple.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32_tuple.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vlseg2e32_tuple.c
@@ -0,0 +1,27 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +experimental-zvfh -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlseg2e32_v_tuple_i32m1
+// CHECK-RV64-SAME: (ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlseg2.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]]
+//
+vint32m1x2_t test_vlseg2e32_v_tuple_i32m1(const int32_t *base, size_t vl) {
+  return __riscv_vlseg2e32_v_tuple_i32m1(base, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local { <vscale x 2 x i32>, <vscale x 2 x i32> } @test_vlseg2e32_v_tuple_i32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], ptr noundef [[BASE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.riscv.vlseg2.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> poison, ptr [[BASE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret { <vscale x 2 x i32>, <vscale x 2 x i32> } [[TMP0]]
+//
+vint32m1x2_t test_vlseg2e32_v_tuple_i32m1_m(vbool32_t mask, const int32_t *base, size_t vl) {
+  return __riscv_vlseg2e32_v_tuple_i32m1_m(mask, base, vl);
+}
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -65,6 +65,7 @@
   bool HasMaskedOffOperand :1;
   bool HasTailPolicy : 1;
   bool HasMaskPolicy : 1;
+  bool IsTuple : 1;
   uint8_t UnMaskedPolicyScheme : 2;
   uint8_t MaskedPolicyScheme : 2;
 };
@@ -363,6 +364,16 @@
                                      TypeModifier::UnsignedInteger));
       printType(*UT);
     }
+    // FIXME: Expand more type declarations
+    if (I == 'i' && Log2LMUL == 0) { // vint32m1x2_t
+      auto TupleT = TypeCache.computeType(
+          BT, Log2LMUL,
+          PrototypeDescriptor(BaseTypeModifier::Vector,
+                              VectorTypeModifier::Tuple2,
+                              TypeModifier::SignedInteger));
+      if (TupleT)
+        printType(*TupleT);
+    }
   }
 }
@@ -512,6 +523,7 @@
     StringRef IRName = R->getValueAsString("IRName");
     StringRef MaskedIRName = R->getValueAsString("MaskedIRName");
     unsigned NF = R->getValueAsInt("NF");
+    bool IsTuple = R->getValueAsBit("IsTuple");
 
     const Policy DefaultPolicy;
     SmallVector<Policy> SupportedUnMaskedPolicies =
@@ -532,10 +544,10 @@
     auto Prototype = RVVIntrinsic::computeBuiltinTypes(
         BasicPrototype, /*IsMasked=*/false,
         /*HasMaskedOffOperand=*/false, HasVL, NF, UnMaskedPolicyScheme,
-        DefaultPolicy);
+        DefaultPolicy, IsTuple);
     auto MaskedPrototype = RVVIntrinsic::computeBuiltinTypes(
         BasicPrototype, /*IsMasked=*/true, HasMaskedOffOperand, HasVL, NF,
-        MaskedPolicyScheme, DefaultPolicy);
+        MaskedPolicyScheme, DefaultPolicy, IsTuple);
 
     // Create Intrinsics for each type and LMUL.
     for (char I : TypeRange) {
@@ -557,14 +569,14 @@
           /*IsMasked=*/false, /*HasMaskedOffOperand=*/false, HasVL,
           UnMaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
           ManualCodegen, *Types, IntrinsicTypes, RequiredFeatures, NF,
-          DefaultPolicy));
+          DefaultPolicy, IsTuple));
       if (UnMaskedPolicyScheme != PolicyScheme::SchemeNone)
        for (auto P : SupportedUnMaskedPolicies) {
          SmallVector<PrototypeDescriptor> PolicyPrototype =
              RVVIntrinsic::computeBuiltinTypes(
                  BasicPrototype, /*IsMasked=*/false,
                  /*HasMaskedOffOperand=*/false, HasVL, NF,
-                  UnMaskedPolicyScheme, P);
+                  UnMaskedPolicyScheme, P, IsTuple);
          std::optional<RVVTypes> PolicyTypes =
              TypeCache.computeTypes(BT, Log2LMUL, NF, PolicyPrototype);
          Out.push_back(std::make_unique<RVVIntrinsic>(
@@ -572,7 +584,7 @@
              /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL,
              UnMaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
              ManualCodegen, *PolicyTypes, IntrinsicTypes, RequiredFeatures,
-              NF, P));
+              NF, P, IsTuple));
        }
       if (!HasMasked)
         continue;
@@ -583,14 +595,14 @@
           Name, SuffixStr, OverloadedName, OverloadedSuffixStr, MaskedIRName,
           /*IsMasked=*/true, HasMaskedOffOperand, HasVL, MaskedPolicyScheme,
           SupportOverloading, HasBuiltinAlias, ManualCodegen, *MaskTypes,
-          IntrinsicTypes, RequiredFeatures, NF, DefaultPolicy));
+          IntrinsicTypes, RequiredFeatures, NF, DefaultPolicy, IsTuple));
       if (MaskedPolicyScheme == PolicyScheme::SchemeNone)
         continue;
       for (auto P : SupportedMaskedPolicies) {
        SmallVector<PrototypeDescriptor> PolicyPrototype =
            RVVIntrinsic::computeBuiltinTypes(
                BasicPrototype, /*IsMasked=*/true, HasMaskedOffOperand, HasVL,
-                NF, MaskedPolicyScheme, P);
+                NF, MaskedPolicyScheme, P, IsTuple);
        std::optional<RVVTypes> PolicyTypes =
            TypeCache.computeTypes(BT, Log2LMUL, NF, PolicyPrototype);
        Out.push_back(std::make_unique<RVVIntrinsic>(
@@ -598,7 +610,7 @@
            MaskedIRName, /*IsMasked=*/true, HasMaskedOffOperand, HasVL,
            MaskedPolicyScheme, SupportOverloading, HasBuiltinAlias,
            ManualCodegen, *PolicyTypes, IntrinsicTypes, RequiredFeatures, NF,
-            P));
+            P, IsTuple));
       }
     } // End for Log2LMULList
   } // End for TypeRange
@@ -650,6 +662,7 @@
     SR.Prototype = std::move(BasicPrototype);
     SR.Suffix = parsePrototypes(SuffixProto);
     SR.OverloadedSuffix = parsePrototypes(OverloadedSuffixProto);
+    SR.IsTuple = IsTuple;
 
     SemaRecords->push_back(SR);
   }
@@ -691,6 +704,7 @@
     R.HasMaskPolicy = SR.HasMaskPolicy;
     R.UnMaskedPolicyScheme = SR.UnMaskedPolicyScheme;
     R.MaskedPolicyScheme = SR.MaskedPolicyScheme;
+    R.IsTuple = SR.IsTuple;
 
     assert(R.PrototypeIndex !=
            static_cast<uint16_t>(SemaSignatureTable::INVALID_INDEX));
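Not part of the patch: a small, self-contained C++ sketch of the mask-insertion rule that the IsTuple branch in computeBuiltinTypes above implements. The names and slot layout here are hypothetical, simplified stand-ins, not the real PrototypeDescriptor machinery. A non-tuple unit-stride segment load returns its NF results through output-pointer operands, so the mask is inserted after those NF slots (begin() + NF + 1); the tuple form returns a single aggregate value, so the mask lands right after the return slot (begin() + 1).

// Simplified illustration only; names and layout are a sketch under the
// assumptions stated above, not the actual Clang data structures.
#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> buildMaskedPrototype(bool IsTuple, unsigned NF) {
  // Slot 0 is the return type; the remaining slots are operands.
  std::vector<std::string> Prototype;
  if (IsTuple) {
    Prototype = {"tuple_ret", "ptr", "vl"}; // single aggregate result
  } else {
    Prototype = {"void_ret"};
    for (unsigned I = 0; I < NF; ++I)
      Prototype.push_back("out_ptr" + std::to_string(I)); // NF output pointers
    Prototype.push_back("ptr");
    Prototype.push_back("vl");
  }
  // Tuple form: mask right after the return slot (begin() + 1).
  // Non-tuple segment form: mask after the NF output pointers (begin() + NF + 1).
  unsigned MaskPos = IsTuple ? 1 : NF + 1;
  Prototype.insert(Prototype.begin() + MaskPos, "mask");
  return Prototype;
}

int main() {
  for (bool IsTuple : {false, true}) {
    for (const std::string &S : buildMaskedPrototype(IsTuple, 2))
      std::cout << S << ' ';
    std::cout << '\n';
  }
}

Running the sketch prints "void_ret out_ptr0 out_ptr1 mask ptr vl" for the non-tuple layout and "tuple_ret mask ptr vl" for the tuple layout, matching the comment in the computeBuiltinTypes hunk above.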