diff --git a/clang/include/clang/AST/Type.h b/clang/include/clang/AST/Type.h --- a/clang/include/clang/AST/Type.h +++ b/clang/include/clang/AST/Type.h @@ -2136,6 +2136,10 @@ bool isExtIntType() const; // Extended Int Type bool isOpenCLSpecificType() const; // Any OpenCL specific type + /// Check if this type has only two possible values, and so may be lowered to + /// a bool. + bool hasBooleanRepresentation() const; + /// Determines if this type, which must satisfy /// isObjCLifetimeType(), is implicitly __unsafe_unretained rather /// than implicitly __strong. diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp --- a/clang/lib/AST/Type.cpp +++ b/clang/lib/AST/Type.cpp @@ -2745,6 +2745,19 @@ } } +bool Type::hasBooleanRepresentation() const { + if (isBooleanType()) + return true; + + if (const EnumType *ET = getAs<EnumType>()) + return ET->getDecl()->getIntegerType()->isBooleanType(); + + if (const AtomicType *AT = getAs<AtomicType>()) + return AT->getValueType()->hasBooleanRepresentation(); + + return false; +} + ElaboratedTypeKeyword TypeWithKeyword::getKeywordForTypeSpec(unsigned TypeSpec) { switch (TypeSpec) { diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp --- a/clang/lib/CodeGen/CGCall.cpp +++ b/clang/lib/CodeGen/CGCall.cpp @@ -2076,6 +2076,27 @@ QualType RetTy = FI.getReturnType(); const ABIArgInfo &RetAI = FI.getReturnInfo(); + + // Determine if the return type could be partially initialized + bool RetPartialInit = false; + const Type *RetTyPtr = RetTy.getTypePtr(); + if (RetTyPtr->isRecordType()) { + RecordDecl *RetRecord = RetTyPtr->getAsRecordDecl(); + auto &RetLayout = getTypes().getCGRecordLayout(RetRecord); + RetPartialInit = + RetLayout.isPartialInit() || (RetAI.getPaddingType() != nullptr); + } + + // If we're coercing to a type of different size, we're introducing more + // padding bits + if (RetAI.canHaveCoerceToType()) { + const size_t RetRealSize = + getDataLayout().getTypeSizeInBits(getTypes().ConvertType(RetTy)); + const size_t 
RetLowSize = + getDataLayout().getTypeSizeInBits(RetAI.getCoerceToType()); + RetPartialInit |= RetRealSize != RetLowSize; + } + switch (RetAI.getKind()) { case ABIArgInfo::Extend: if (RetAI.isSignExt()) @@ -2086,6 +2107,8 @@ case ABIArgInfo::Direct: if (RetAI.getInReg()) RetAttrs.addAttribute(llvm::Attribute::InReg); + if (RetPartialInit) + RetAttrs.addAttribute(llvm::Attribute::PartialInit); break; case ABIArgInfo::Ignore: break; @@ -2161,6 +2184,26 @@ } } + // Decide whether the argument we're handling may have valid + // uninitialized bits. + bool ArgPartialInit = false; + const Type *ArgTyPtr = ParamType.getTypePtr(); + if (ArgTyPtr->isRecordType()) { + RecordDecl *ArgRecord = ArgTyPtr->getAsRecordDecl(); + auto &ArgLayout = getTypes().getCGRecordLayout(ArgRecord); + ArgPartialInit = ArgLayout.isPartialInit(); + } + + // If we're coercing to a type of different size, we're introducing more + // padding bits + if (AI.canHaveCoerceToType()) { + const size_t ArgRealSize = + getDataLayout().getTypeSizeInBits(getTypes().ConvertType(ParamType)); + const size_t ArgLowSize = + getDataLayout().getTypeSizeInBits(AI.getCoerceToType()); + ArgPartialInit |= ArgRealSize != ArgLowSize; + } + // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we // have the corresponding parameter variable. It doesn't make // sense to do it here because parameters are so messed up. @@ -2176,6 +2219,8 @@ Attrs.addAttribute(llvm::Attribute::Nest); else if (AI.getInReg()) Attrs.addAttribute(llvm::Attribute::InReg); + if (ArgPartialInit) + Attrs.addAttribute(llvm::Attribute::PartialInit); break; case ABIArgInfo::Indirect: { @@ -2210,10 +2255,14 @@ break; } case ABIArgInfo::Ignore: - case ABIArgInfo::Expand: case ABIArgInfo::CoerceAndExpand: break; + case ABIArgInfo::Expand: + if (ArgPartialInit) + Attrs.addAttribute(llvm::Attribute::PartialInit); + break; + case ABIArgInfo::InAlloca: // inalloca disables readnone and readonly. 
FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -1560,19 +1560,6 @@ lvalue.getTBAAInfo(), lvalue.isNontemporal()); } -static bool hasBooleanRepresentation(QualType Ty) { - if (Ty->isBooleanType()) - return true; - - if (const EnumType *ET = Ty->getAs()) - return ET->getDecl()->getIntegerType()->isBooleanType(); - - if (const AtomicType *AT = Ty->getAs()) - return hasBooleanRepresentation(AT->getValueType()); - - return false; -} - static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, llvm::APInt &Min, llvm::APInt &End, bool StrictEnums, bool IsBool) { @@ -1609,7 +1596,7 @@ llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) { llvm::APInt Min, End; if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums, - hasBooleanRepresentation(Ty))) + Ty->hasBooleanRepresentation())) return nullptr; llvm::MDBuilder MDHelper(getLLVMContext()); @@ -1623,7 +1610,7 @@ if (!HasBoolCheck && !HasEnumCheck) return false; - bool IsBool = hasBooleanRepresentation(Ty) || + bool IsBool = Ty->hasBooleanRepresentation() || NSAPI(CGM.getContext()).isObjCBOOLType(Ty); bool NeedsBoolCheck = HasBoolCheck && IsBool; bool NeedsEnumCheck = HasEnumCheck && Ty->getAs(); @@ -1721,7 +1708,7 @@ llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) { // Bool has a different representation in memory than in registers. - if (hasBooleanRepresentation(Ty)) { + if (Ty->hasBooleanRepresentation()) { // This should really always be an i1, but sometimes it's already // an i8, and it's awkward to track those cases down. if (Value->getType()->isIntegerTy(1)) @@ -1735,7 +1722,7 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) { // Bool has a different representation in memory than in registers. 
- if (hasBooleanRepresentation(Ty)) { + if (Ty->hasBooleanRepresentation()) { assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) && "wrong value rep of bool"); return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool"); @@ -2140,7 +2127,7 @@ Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load"); // Mask the source value as needed. - if (!hasBooleanRepresentation(Dst.getType())) + if (!Dst.getType()->hasBooleanRepresentation()) SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(Info.StorageSize, Info.Size), diff --git a/clang/lib/CodeGen/CGRecordLayout.h b/clang/lib/CodeGen/CGRecordLayout.h --- a/clang/lib/CodeGen/CGRecordLayout.h +++ b/clang/lib/CodeGen/CGRecordLayout.h @@ -145,15 +145,20 @@ /// when zero-initialized. bool IsZeroInitializableAsBase : 1; + /// True when this struct or union layout contains bits which are + /// necessarily uninitialized, like some kind of padding data between + /// struct fields or following narrow union fields. + bool IsPartialInit : 1; + public: CGRecordLayout(llvm::StructType *CompleteObjectType, - llvm::StructType *BaseSubobjectType, - bool IsZeroInitializable, - bool IsZeroInitializableAsBase) - : CompleteObjectType(CompleteObjectType), - BaseSubobjectType(BaseSubobjectType), - IsZeroInitializable(IsZeroInitializable), - IsZeroInitializableAsBase(IsZeroInitializableAsBase) {} + llvm::StructType *BaseSubobjectType, bool IsZeroInitializable, + bool IsZeroInitializableAsBase, bool IsPartialInit) + : CompleteObjectType(CompleteObjectType), + BaseSubobjectType(BaseSubobjectType), + IsZeroInitializable(IsZeroInitializable), + IsZeroInitializableAsBase(IsZeroInitializableAsBase), + IsPartialInit(IsPartialInit) {} /// Return the "complete object" LLVM type associated with /// this record. @@ -179,6 +184,10 @@ return IsZeroInitializableAsBase; } + /// Check whether this struct/union may be partially initialized due + /// to presence of padding. 
+ bool isPartialInit() const { return IsPartialInit; } + /// Return llvm::StructType element number that corresponds to the /// field FD. unsigned getLLVMFieldNo(const FieldDecl *FD) const { diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp --- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp +++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp @@ -158,8 +158,10 @@ return Types.isZeroInitializable(RD); } void appendPaddingBytes(CharUnits Size) { - if (!Size.isZero()) + if (!Size.isZero()) { FieldTypes.push_back(getByteArrayType(Size)); + PartialInit = true; + } } uint64_t getFieldBitOffset(const FieldDecl *FD) { return Layout.getFieldOffset(FD->getFieldIndex()); @@ -185,6 +187,8 @@ void clipTailPadding(); /// Determines if we need a packed llvm struct. void determinePacked(bool NVBaseType); + /// Determines if this struct's members have any sort of padding. + void determineMemberPartialInit(); /// Inserts padding everywhere it's needed. void insertPadding(); /// Fills out the structures that are ultimately consumed. 
@@ -207,6 +211,10 @@ bool IsZeroInitializable : 1; bool IsZeroInitializableAsBase : 1; bool Packed : 1; + // Lowered type necessarily features uninitialized data (padding of some + // kind, as between struct fields or appended to narrower union fields) + bool PartialInit : 1; + private: CGRecordLowering(const CGRecordLowering &) = delete; void operator =(const CGRecordLowering &) = delete; @@ -219,7 +227,7 @@ RD(dyn_cast<CXXRecordDecl>(D)), Layout(Types.getContext().getASTRecordLayout(D)), DataLayout(Types.getDataLayout()), IsZeroInitializable(true), - IsZeroInitializableAsBase(true), Packed(Packed) {} + IsZeroInitializableAsBase(true), Packed(Packed), PartialInit(false) {} void CGRecordLowering::setBitFieldInfo( const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) { @@ -280,6 +288,7 @@ insertPadding(); Members.pop_back(); calculateZeroInit(); + determineMemberPartialInit(); fillOutputFields(); } @@ -303,6 +312,21 @@ } Fields[Field->getCanonicalDecl()] = 0; llvm::Type *FieldType = getStorageType(Field); + + bool SizeChanged = + StorageType && getSize(FieldType) != getSize(StorageType); + QualType FieldTypePtr = Field->getType(); + // If fields have different sizes, we note that this union may have + // uninitialized bits when used with the smaller variant. + if (SizeChanged) { + PartialInit = true; + } else if (FieldTypePtr->isRecordType()) { + // Recurse + RecordDecl *FieldRecord = FieldTypePtr->getAsRecordDecl(); + auto &FieldLayout = Types.getCGRecordLayout(FieldRecord); + PartialInit |= FieldLayout.isPartialInit(); + } + // Compute zero-initializable status. // This union might not be zero initialized: it may contain a pointer to // data member which might have some exotic initialization sequence. @@ -319,6 +343,7 @@ StorageType = FieldType; } } + // Because our union isn't zero initializable, we won't be getting a better // storage type. if (!IsZeroInitializable) @@ -466,7 +491,12 @@ } // We've hit a break-point in the run and need to emit a storage field. 
- llvm::Type *Type = getIntNType(Tail - StartBitOffset); + uint64_t TypeWidth = Tail - StartBitOffset; + llvm::Type *Type = getIntNType(TypeWidth); + + if (DataLayout.getTypeAllocSizeInBits(Type) != TypeWidth) + PartialInit = true; + // Add the storage member to the record and set the bitfield info for all of // the bitfields in the run. Bitfields get the offset of their storage but // come afterward and remain there after a stable sort. @@ -646,6 +676,50 @@ Members.back().Data = getIntNType(Context.toBits(Alignment)); } +void CGRecordLowering::determineMemberPartialInit() { + auto hasTailPadding = [&](QualType FieldTypePtr) { + llvm::Type *LLTy = Types.ConvertType(FieldTypePtr); + if (FieldTypePtr->hasBooleanRepresentation()) + return false; + return DataLayout.getTypeAllocSizeInBits(LLTy) != + DataLayout.getTypeSizeInBits(LLTy); + }; + + auto isPartialInit = [&](QualType FieldTypePtr) { + assert(FieldTypePtr.getTypePtrOrNull() != nullptr); + + if (hasTailPadding(FieldTypePtr)) + return true; + + // If there's any padding between array elements, flag that. 
+ // Also check that array base types aren't partialinit + while (FieldTypePtr->isArrayType()) { + FieldTypePtr = FieldTypePtr->getAsArrayTypeUnsafe()->getElementType(); + if (hasTailPadding(FieldTypePtr)) + return true; + } + if (!FieldTypePtr->isRecordType()) + return false; + if (CXXRecordDecl *CXXRecord = FieldTypePtr->getAsCXXRecordDecl()) { + if (CXXRecord->isEmpty()) + return false; + } + + RecordDecl *FieldRecord = FieldTypePtr->getAsRecordDecl(); + assert(FieldRecord); + auto &FieldLayout = Types.getCGRecordLayout(FieldRecord); + return FieldLayout.isPartialInit(); + }; + + // Recursively check for partialinit fields + for (const auto &Member : Members) { + if (!Member.Data || !Member.FD) + continue; + QualType FieldType = Member.FD->getType(); + PartialInit |= isPartialInit(FieldType); + } +} + void CGRecordLowering::insertPadding() { std::vector<std::pair<CharUnits, CharUnits> > Padding; CharUnits Size = CharUnits::Zero(); @@ -656,6 +730,7 @@ continue; CharUnits Offset = Member->Offset; assert(Offset >= Size); + // Insert padding if we need to. if (Offset != Size.alignTo(Packed ? CharUnits::One() : getAlignment(Member->Data))) @@ -664,6 +739,7 @@ } if (Padding.empty()) return; + PartialInit = true; // Add the padding to the Members list and sort it. for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator Pad = Padding.begin(), PadEnd = Padding.end(); 
Ty->setBody(Builder.FieldTypes, Builder.Packed); + bool TyPadded = Builder.DataLayout.getStructLayout(Ty)->hasPadding(); auto RL = std::make_unique<CGRecordLayout>( Ty, BaseTy, (bool)Builder.IsZeroInitializable, - (bool)Builder.IsZeroInitializableAsBase); + (bool)Builder.IsZeroInitializableAsBase, + (bool)Builder.PartialInit || TyPadded); RL->NonVirtualBases.swap(Builder.NonVirtualBases); RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases); diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp --- a/clang/lib/CodeGen/CodeGenModule.cpp +++ b/clang/lib/CodeGen/CodeGenModule.cpp @@ -601,6 +601,10 @@ llvm::DenormalMode::IEEE); } + // Emit `partialinit` attributes for padded function arguments/return values + getModule().addModuleFlag(llvm::Module::Error, + "DisallowPoisonedCallArguments", 1); + // Emit OpenCL specific module metadata: OpenCL/SPIR version. if (LangOpts.OpenCL) { EmitOpenCLMetadata(); diff --git a/clang/test/CodeGen/aapcs-align.cpp b/clang/test/CodeGen/aapcs-align.cpp --- a/clang/test/CodeGen/aapcs-align.cpp +++ b/clang/test/CodeGen/aapcs-align.cpp @@ -56,10 +56,10 @@ f2m(1, 2, 3, 4, 5, s); } // CHECK: define void @g2 -// CHECK: call void @f2(i32 1, [4 x i32] [i32 6, i32 7 -// CHECK: call void @f2m(i32 1, i32 2, i32 3, i32 4, i32 5, [4 x i32] [i32 6, i32 7 -// CHECK: declare void @f2(i32, [4 x i32]) -// CHECK: declare void @f2m(i32, i32, i32, i32, i32, [4 x i32]) +// CHECK: call void @f2(i32 1, [4 x i32] partialinit [i32 6, i32 7 +// CHECK: call void @f2m(i32 1, i32 2, i32 3, i32 4, i32 5, [4 x i32] partialinit [i32 6, i32 7 +// CHECK: declare void @f2(i32, [4 x i32] partialinit) +// CHECK: declare void @f2m(i32, i32, i32, i32, i32, [4 x i32] partialinit) // Increased natural alignment. 
struct SF8 { @@ -134,8 +134,8 @@ f6m(1, 2, 3, 4, 5, s); } // CHECK: define void @g6 -// CHECK: call void @f6(i32 1, [4 x i32] [i32 6, i32 7, i32 0, i32 0]) -// CHECK: call void @f6m(i32 1, i32 2, i32 3, i32 4, i32 5, [4 x i32] [i32 6, i32 7, i32 0, i32 0]) -// CHECK: declare void @f6(i32, [4 x i32]) -// CHECK: declare void @f6m(i32, i32, i32, i32, i32, [4 x i32]) +// CHECK: call void @f6(i32 1, [4 x i32] partialinit [i32 6, i32 7, i32 0, i32 0]) +// CHECK: call void @f6m(i32 1, i32 2, i32 3, i32 4, i32 5, [4 x i32] partialinit [i32 6, i32 7, i32 0, i32 0]) +// CHECK: declare void @f6(i32, [4 x i32] partialinit) +// CHECK: declare void @f6m(i32, i32, i32, i32, i32, [4 x i32] partialinit) } diff --git a/clang/test/CodeGen/aapcs64-align.cpp b/clang/test/CodeGen/aapcs64-align.cpp --- a/clang/test/CodeGen/aapcs64-align.cpp +++ b/clang/test/CodeGen/aapcs64-align.cpp @@ -75,10 +75,10 @@ f4m(1, 2, 3, 4, 5, s); } // CHECK: define void @g4() -// CHECK: call void @f4(i32 1, [2 x i64] [i64 30064771078, i64 0]) -// CHECK: void @f4m(i32 1, i32 2, i32 3, i32 4, i32 5, [2 x i64] [i64 30064771078, i64 0]) -// CHECK: declare void @f4(i32, [2 x i64]) -// CHECK: declare void @f4m(i32, i32, i32, i32, i32, [2 x i64]) +// CHECK: call void @f4(i32 1, [2 x i64] partialinit [i64 30064771078, i64 0]) +// CHECK: void @f4m(i32 1, i32 2, i32 3, i32 4, i32 5, [2 x i64] partialinit [i64 30064771078, i64 0]) +// CHECK: declare void @f4(i32, [2 x i64] partialinit) +// CHECK: declare void @f4m(i32, i32, i32, i32, i32, [2 x i64] partialinit) // Packed structure, overaligned, same as above. 
@@ -95,9 +95,9 @@ f5m(1, 2, 3, 4, 5, s); } // CHECK: define void @g5() -// CHECK: call void @f5(i32 1, [2 x i64] [i64 30064771078, i64 0]) -// CHECK: void @f5m(i32 1, i32 2, i32 3, i32 4, i32 5, [2 x i64] [i64 30064771078, i64 0]) -// CHECK: declare void @f5(i32, [2 x i64]) -// CHECK: declare void @f5m(i32, i32, i32, i32, i32, [2 x i64]) +// CHECK: call void @f5(i32 1, [2 x i64] partialinit [i64 30064771078, i64 0]) +// CHECK: void @f5m(i32 1, i32 2, i32 3, i32 4, i32 5, [2 x i64] partialinit [i64 30064771078, i64 0]) +// CHECK: declare void @f5(i32, [2 x i64] partialinit) +// CHECK: declare void @f5m(i32, i32, i32, i32, i32, [2 x i64] partialinit) } diff --git a/clang/test/CodeGen/aarch64-args.cpp b/clang/test/CodeGen/aarch64-args.cpp --- a/clang/test/CodeGen/aarch64-args.cpp +++ b/clang/test/CodeGen/aarch64-args.cpp @@ -17,7 +17,7 @@ // CHECK: define i32 @empty_arg(i32 %a) // CHECK-GNU-C: define i32 @empty_arg(i32 %a) -// CHECK-GNU-CXX: define i32 @empty_arg(i8 %e.coerce, i32 %a) +// CHECK-GNU-CXX: define i32 @empty_arg(i8 partialinit %e.coerce, i32 %a) EXTERNC int empty_arg(struct Empty e, int a) { return a; } @@ -53,7 +53,7 @@ // CHECK: define i32 @sort_of_empty_arg(i32 %a) // CHECK-GNU-C: define i32 @sort_of_empty_arg(i32 %a) -// CHECK-GNU-CXX: define i32 @sort_of_empty_arg(i8 %e.coerce, i32 %a) +// CHECK-GNU-CXX: define i32 @sort_of_empty_arg(i8 partialinit %e.coerce, i32 %a) EXTERNC int sort_of_empty_arg(struct Empty e, int a) { return a; } diff --git a/clang/test/CodeGen/aarch64-arguments-hfa-v3.c b/clang/test/CodeGen/aarch64-arguments-hfa-v3.c --- a/clang/test/CodeGen/aarch64-arguments-hfa-v3.c +++ b/clang/test/CodeGen/aarch64-arguments-hfa-v3.c @@ -9,12 +9,12 @@ // CHECK: %struct.MixedHFAv3 = type { [3 x <3 x float>], <16 x i8> } typedef struct { float32x3_t arr[3]; int8x16_t b; } MixedHFAv3; -// CHECK: define %struct.HFAv3 @test([4 x <4 x float>] %{{.*}}, [4 x <4 x float>] %{{.*}}, [4 x <4 x float>] %{{.*}}) +// CHECK: define partialinit %struct.HFAv3 
@test([4 x <4 x float>] partialinit %{{.*}}, [4 x <4 x float>] partialinit %{{.*}}, [4 x <4 x float>] partialinit %{{.*}}) HFAv3 test(HFAv3 a0, HFAv3 a1, HFAv3 a2) { return a2; } -// CHECK: define %struct.MixedHFAv3 @test_mixed([4 x <4 x float>] %{{.*}}, [4 x <4 x float>] %{{.*}}, [4 x <4 x float>] %{{.*}}) +// CHECK: define partialinit %struct.MixedHFAv3 @test_mixed([4 x <4 x float>] partialinit %{{.*}}, [4 x <4 x float>] partialinit %{{.*}}, [4 x <4 x float>] partialinit %{{.*}}) MixedHFAv3 test_mixed(MixedHFAv3 a0, MixedHFAv3 a1, MixedHFAv3 a2) { return a2; } diff --git a/clang/test/CodeGen/aarch64-varargs.c b/clang/test/CodeGen/aarch64-varargs.c --- a/clang/test/CodeGen/aarch64-varargs.c +++ b/clang/test/CodeGen/aarch64-varargs.c @@ -473,7 +473,7 @@ int val; } underaligned_int_struct; underaligned_int_struct underaligned_int_struct_test() { -// CHECK-LABEL: define i64 @underaligned_int_struct_test() +// CHECK-LABEL: define partialinit i64 @underaligned_int_struct_test() return va_arg(the_list, underaligned_int_struct); // CHECK: [[GR_OFFS:%[a-z_0-9]+]] = load i32, i32* getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 3) // CHECK: [[EARLY_ONSTACK:%[a-z_0-9]+]] = icmp sge i32 [[GR_OFFS]], 0 @@ -506,7 +506,7 @@ int val; } overaligned_int_struct; overaligned_int_struct overaligned_int_struct_test() { -// CHECK-LABEL: define i128 @overaligned_int_struct_test() +// CHECK-LABEL: define partialinit i128 @overaligned_int_struct_test() return va_arg(the_list, overaligned_int_struct); // CHECK: [[GR_OFFS:%[a-z_0-9]+]] = load i32, i32* getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 3) // CHECK: [[EARLY_ONSTACK:%[a-z_0-9]+]] = icmp sge i32 [[GR_OFFS]], 0 @@ -572,7 +572,7 @@ long long val; } overaligned_long_long_struct; overaligned_long_long_struct overaligned_long_long_struct_test() { -// CHECK-LABEL: define i128 @overaligned_long_long_struct_test() +// CHECK-LABEL: define partialinit i128 
@overaligned_long_long_struct_test() return va_arg(the_list, overaligned_long_long_struct); // CHECK: [[GR_OFFS:%[a-z_0-9]+]] = load i32, i32* getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 3) // CHECK: [[EARLY_ONSTACK:%[a-z_0-9]+]] = icmp sge i32 [[GR_OFFS]], 0 @@ -675,7 +675,7 @@ int val __attribute__((packed,aligned(2))); } underaligned_int_struct_member; underaligned_int_struct_member underaligned_int_struct_member_test() { -// CHECK-LABEL: define i64 @underaligned_int_struct_member_test() +// CHECK-LABEL: define partialinit i64 @underaligned_int_struct_member_test() return va_arg(the_list, underaligned_int_struct_member); // CHECK: [[GR_OFFS:%[a-z_0-9]+]] = load i32, i32* getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 3) // CHECK: [[EARLY_ONSTACK:%[a-z_0-9]+]] = icmp sge i32 [[GR_OFFS]], 0 @@ -708,7 +708,7 @@ int val __attribute__((aligned(16))); } overaligned_int_struct_member; overaligned_int_struct_member overaligned_int_struct_member_test() { -// CHECK-LABEL: define i128 @overaligned_int_struct_member_test() +// CHECK-LABEL: define partialinit i128 @overaligned_int_struct_member_test() return va_arg(the_list, overaligned_int_struct_member); // CHECK: [[GR_OFFS:%[a-z_0-9]+]] = load i32, i32* getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 3) // CHECK: [[EARLY_ONSTACK:%[a-z_0-9]+]] = icmp sge i32 [[GR_OFFS]], 0 @@ -780,7 +780,7 @@ long long val __attribute__((aligned(16))); } overaligned_long_long_struct_member; overaligned_long_long_struct_member overaligned_long_long_struct_member_test() { -// CHECK-LABEL: define i128 @overaligned_long_long_struct_member_test() +// CHECK-LABEL: define partialinit i128 @overaligned_long_long_struct_member_test() return va_arg(the_list, overaligned_long_long_struct_member); // CHECK: [[GR_OFFS:%[a-z_0-9]+]] = load i32, i32* getelementptr inbounds (%struct.__va_list, %struct.__va_list* @the_list, i32 0, i32 3) // 
CHECK: [[EARLY_ONSTACK:%[a-z_0-9]+]] = icmp sge i32 [[GR_OFFS]], 0 diff --git a/clang/test/CodeGen/arc/arguments.c b/clang/test/CodeGen/arc/arguments.c --- a/clang/test/CodeGen/arc/arguments.c +++ b/clang/test/CodeGen/arc/arguments.c @@ -68,7 +68,7 @@ char b; }; // Unions should be passed inreg. -// CHECK: define void @f9(i32 inreg %s.coerce) +// CHECK: define void @f9(i32 inreg partialinit %s.coerce) void f9(union simple_union s) {} typedef struct { @@ -77,7 +77,7 @@ int b8 : 8; } bitfield1; // Bitfields should be passed inreg. -// CHECK: define void @f10(i32 inreg %bf1.coerce) +// CHECK: define void @f10(i32 inreg partialinit %bf1.coerce) void f10(bitfield1 bf1) {} // CHECK: define inreg { float, float } @cplx1(float inreg %r) diff --git a/clang/test/CodeGen/arm-aapcs-vfp.c b/clang/test/CodeGen/arm-aapcs-vfp.c --- a/clang/test/CodeGen/arm-aapcs-vfp.c +++ b/clang/test/CodeGen/arm-aapcs-vfp.c @@ -122,19 +122,19 @@ // CHECK: define arm_aapcs_vfpcc void @test_vfp_stack_gpr_split_1(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i, i32 %j, i64 %k, i32 %l) void test_vfp_stack_gpr_split_1(double a, double b, double c, double d, double e, double f, double g, double h, double i, int j, long long k, int l) {} -// CHECK: define arm_aapcs_vfpcc void @test_vfp_stack_gpr_split_2(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i, i32 %j, [2 x i64] %k.coerce) +// CHECK: define arm_aapcs_vfpcc void @test_vfp_stack_gpr_split_2(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i, i32 %j, [2 x i64] partialinit %k.coerce) void test_vfp_stack_gpr_split_2(double a, double b, double c, double d, double e, double f, double g, double h, double i, int j, struct_long_long_int k) {} -// CHECK: define arm_aapcs_vfpcc void @test_vfp_stack_gpr_split_3(%struct.struct_long_long_int* noalias sret align 8 %agg.result, double %a, double %b, double %c, double 
%d, double %e, double %f, double %g, double %h, double %i, [2 x i64] %k.coerce) +// CHECK: define arm_aapcs_vfpcc void @test_vfp_stack_gpr_split_3(%struct.struct_long_long_int* noalias sret align 8 %agg.result, double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i, [2 x i64] partialinit %k.coerce) struct_long_long_int test_vfp_stack_gpr_split_3(double a, double b, double c, double d, double e, double f, double g, double h, double i, struct_long_long_int k) {} typedef struct { int a; int b:4; int c; } struct_int_bitfield_int; -// CHECK: define arm_aapcs_vfpcc void @test_test_vfp_stack_gpr_split_bitfield(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i, i32 %j, i32 %k, [3 x i32] %l.coerce) +// CHECK: define arm_aapcs_vfpcc void @test_test_vfp_stack_gpr_split_bitfield(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i, i32 %j, i32 %k, [3 x i32] partialinit %l.coerce) void test_test_vfp_stack_gpr_split_bitfield(double a, double b, double c, double d, double e, double f, double g, double h, double i, int j, int k, struct_int_bitfield_int l) {} // Note: this struct requires internal padding typedef struct { int x; long long y; } struct_int_long_long; -// CHECK: define arm_aapcs_vfpcc void @test_vfp_stack_gpr_split_4(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i, i32 %j, [2 x i64] %k.coerce) +// CHECK: define arm_aapcs_vfpcc void @test_vfp_stack_gpr_split_4(double %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, double %i, i32 %j, [2 x i64] partialinit %k.coerce) void test_vfp_stack_gpr_split_4(double a, double b, double c, double d, double e, double f, double g, double h, double i, int j, struct_int_long_long k) {} // This very large struct (passed byval) uses up the GPRs, so no padding is needed diff --git a/clang/test/CodeGen/arm-abi-vector.c 
b/clang/test/CodeGen/arm-abi-vector.c --- a/clang/test/CodeGen/arm-abi-vector.c +++ b/clang/test/CodeGen/arm-abi-vector.c @@ -80,9 +80,9 @@ double test_3c(__char3 *in) { // CHECK: test_3c -// CHECK: call arm_aapcscc double (i32, ...) @varargs_vec_3c(i32 3, i32 {{%.*}}) +// CHECK: call arm_aapcscc double (i32, ...) @varargs_vec_3c(i32 3, i32 partialinit {{%.*}}) // APCS-GNU: test_3c -// APCS-GNU: call double (i32, ...) @varargs_vec_3c(i32 3, i32 {{%.*}}) +// APCS-GNU: call double (i32, ...) @varargs_vec_3c(i32 3, i32 partialinit {{%.*}}) // ANDROID: test_3c // ANDROID: call double (i32, ...) @varargs_vec_3c(i32 3, <3 x i8> {{%.*}}) return varargs_vec_3c(3, *in); @@ -123,11 +123,11 @@ double test_5c(__char5 *in) { // CHECK: test_5c -// CHECK: call arm_aapcscc double (i32, ...) @varargs_vec_5c(i32 5, <2 x i32> {{%.*}}) +// CHECK: call arm_aapcscc double (i32, ...) @varargs_vec_5c(i32 5, <2 x i32> partialinit {{%.*}}) // APCS-GNU: test_5c -// APCS-GNU: call double (i32, ...) @varargs_vec_5c(i32 5, <2 x i32> {{%.*}}) +// APCS-GNU: call double (i32, ...) @varargs_vec_5c(i32 5, <2 x i32> partialinit {{%.*}}) // ANDROID: test_5c -// ANDROID: call double (i32, ...) @varargs_vec_5c(i32 5, <2 x i32> {{%.*}}) +// ANDROID: call double (i32, ...) @varargs_vec_5c(i32 5, <2 x i32> partialinit {{%.*}}) return varargs_vec_5c(5, *in); } @@ -166,11 +166,11 @@ double test_9c(__char9 *in) { // CHECK: test_9c -// CHECK: call arm_aapcscc double (i32, ...) @varargs_vec_9c(i32 9, <4 x i32> {{%.*}}) +// CHECK: call arm_aapcscc double (i32, ...) @varargs_vec_9c(i32 9, <4 x i32> partialinit {{%.*}}) // APCS-GNU: test_9c -// APCS-GNU: call double (i32, ...) @varargs_vec_9c(i32 9, <4 x i32> {{%.*}}) +// APCS-GNU: call double (i32, ...) @varargs_vec_9c(i32 9, <4 x i32> partialinit {{%.*}}) // ANDROID: test_9c -// ANDROID: call double (i32, ...) @varargs_vec_9c(i32 9, <4 x i32> {{%.*}}) +// ANDROID: call double (i32, ...) 
@varargs_vec_9c(i32 9, <4 x i32> partialinit {{%.*}}) return varargs_vec_9c(9, *in); } @@ -236,9 +236,9 @@ double test_3s(__short3 *in) { // CHECK: test_3s -// CHECK: call arm_aapcscc double (i32, ...) @varargs_vec_3s(i32 3, <2 x i32> {{%.*}}) +// CHECK: call arm_aapcscc double (i32, ...) @varargs_vec_3s(i32 3, <2 x i32> partialinit {{%.*}}) // APCS-GNU: test_3s -// APCS-GNU: call double (i32, ...) @varargs_vec_3s(i32 3, <2 x i32> {{%.*}}) +// APCS-GNU: call double (i32, ...) @varargs_vec_3s(i32 3, <2 x i32> partialinit {{%.*}}) // ANDROID: test_3s // ANDROID: call double (i32, ...) @varargs_vec_3s(i32 3, <3 x i16> {{%.*}}) return varargs_vec_3s(3, *in); @@ -278,11 +278,11 @@ double test_5s(__short5 *in) { // CHECK: test_5s -// CHECK: call arm_aapcscc double (i32, ...) @varargs_vec_5s(i32 5, <4 x i32> {{%.*}}) +// CHECK: call arm_aapcscc double (i32, ...) @varargs_vec_5s(i32 5, <4 x i32> partialinit {{%.*}}) // APCS-GNU: test_5s -// APCS-GNU: call double (i32, ...) @varargs_vec_5s(i32 5, <4 x i32> {{%.*}}) +// APCS-GNU: call double (i32, ...) @varargs_vec_5s(i32 5, <4 x i32> partialinit {{%.*}}) // ANDROID: test_5s -// ANDROID: call double (i32, ...) @varargs_vec_5s(i32 5, <4 x i32> {{%.*}}) +// ANDROID: call double (i32, ...) @varargs_vec_5s(i32 5, <4 x i32> partialinit {{%.*}}) return varargs_vec_5s(5, *in); } @@ -320,10 +320,10 @@ double test_struct(StructWithVec* d) { // CHECK: test_struct -// CHECK: call arm_aapcscc double (i32, ...) @varargs_struct(i32 3, [2 x i64] {{%.*}}) +// CHECK: call arm_aapcscc double (i32, ...) @varargs_struct(i32 3, [2 x i64] partialinit {{%.*}}) // APCS-GNU: test_struct -// APCS-GNU: call double (i32, ...) @varargs_struct(i32 3, [2 x i64] {{%.*}}) +// APCS-GNU: call double (i32, ...) @varargs_struct(i32 3, [2 x i64] partialinit {{%.*}}) // ANDROID: test_struct -// ANDROID: call double (i32, ...) @varargs_struct(i32 3, [2 x i64] {{%.*}}) +// ANDROID: call double (i32, ...) 
@varargs_struct(i32 3, [2 x i64] partialinit {{%.*}}) return varargs_struct(3, *d); } diff --git a/clang/test/CodeGen/arm-arguments.c b/clang/test/CodeGen/arm-arguments.c --- a/clang/test/CodeGen/arm-arguments.c +++ b/clang/test/CodeGen/arm-arguments.c @@ -67,8 +67,8 @@ struct s11 { int : 0; int f0; }; struct s11 f11(void) {} -// APCS-GNU-LABEL: define i32 @f12() -// AAPCS-LABEL: define arm_aapcscc i32 @f12() +// APCS-GNU-LABEL: define partialinit i32 @f12() +// AAPCS-LABEL: define arm_aapcscc partialinit i32 @f12() union u12 { char f0; short f1; int f2; }; union u12 f12(void) {} @@ -94,13 +94,13 @@ // AAPCS-LABEL: define arm_aapcscc void @f16() void f16(struct s8 a0) {} -// APCS-GNU-LABEL: define i32 @f17() -// AAPCS-LABEL: define arm_aapcscc i32 @f17() +// APCS-GNU-LABEL: define partialinit i32 @f17() +// AAPCS-LABEL: define arm_aapcscc partialinit i32 @f17() struct s17 { short f0 : 13; char f1 : 4; }; struct s17 f17(void) {} -// APCS-GNU-LABEL: define i32 @f18() -// AAPCS-LABEL: define arm_aapcscc i32 @f18() +// APCS-GNU-LABEL: define partialinit i32 @f18() +// AAPCS-LABEL: define arm_aapcscc partialinit i32 @f18() struct s18 { short f0; char f1 : 4; }; struct s18 f18(void) {} @@ -116,8 +116,8 @@ struct s20 { struct s8 f1; int f0; }; struct s20 f20(void) {} -// APCS-GNU-LABEL: define i8 @f21() -// AAPCS-LABEL: define arm_aapcscc i32 @f21() +// APCS-GNU-LABEL: define partialinit i8 @f21() +// AAPCS-LABEL: define arm_aapcscc partialinit i32 @f21() struct s21 { struct {} f1; int f0 : 4; }; struct s21 f21(void) {} @@ -158,11 +158,11 @@ // PR11905 struct s31 { char x; }; void f31(struct s31 s) { } -// AAPCS: @f31([1 x i32] %s.coerce) +// AAPCS: @f31([1 x i32] partialinit %s.coerce) // AAPCS: %s = alloca %struct.s31, align 1 // AAPCS: [[TEMP:%.*]] = alloca [1 x i32], align 4 // AAPCS: store [1 x i32] %s.coerce, [1 x i32]* [[TEMP]], align 4 -// APCS-GNU: @f31([1 x i32] %s.coerce) +// APCS-GNU: @f31([1 x i32] partialinit %s.coerce) // APCS-GNU: %s = alloca %struct.s31, 
align 1 // APCS-GNU: [[TEMP:%.*]] = alloca [1 x i32], align 4 // APCS-GNU: store [1 x i32] %s.coerce, [1 x i32]* [[TEMP]], align 4 diff --git a/clang/test/CodeGen/arm-be-result-return.c b/clang/test/CodeGen/arm-be-result-return.c --- a/clang/test/CodeGen/arm-be-result-return.c +++ b/clang/test/CodeGen/arm-be-result-return.c @@ -15,7 +15,7 @@ void caller_us() { us = callee_us(); // CHECK-LABEL: caller_us() -// CHECK: call i32 +// CHECK: call partialinit i32 // CHECK: lshr i32 // CHECK: trunc i32 } @@ -30,7 +30,7 @@ void caller_ss() { ss = callee_ss(); // CHECK-LABEL: caller_ss() -// CHECK: call i32 +// CHECK: call partialinit i32 // CHECK: lshr i32 // CHECK: trunc i32 } diff --git a/clang/test/CodeGen/arm-bf16-params-returns.c b/clang/test/CodeGen/arm-bf16-params-returns.c --- a/clang/test/CodeGen/arm-bf16-params-returns.c +++ b/clang/test/CodeGen/arm-bf16-params-returns.c @@ -13,7 +13,7 @@ // CHECK32-HARD: ret bfloat %v // CHECK64-HARD: define bfloat @test_ret_bf16(bfloat returned %v) {{.*}} { // CHECK64-HARD: ret bfloat %v -// CHECK32-SOFTFP: define i32 @test_ret_bf16(i32 [[V0:.*]]) {{.*}} { +// CHECK32-SOFTFP: define partialinit i32 @test_ret_bf16(i32 partialinit [[V0:.*]]) {{.*}} { // CHECK32-SOFTFP: %tmp2.0.insert.ext = and i32 [[V0]], 65535 // CHECK32-SOFTFP: ret i32 %tmp2.0.insert.ext // CHECK64-SOFTFP: define bfloat @test_ret_bf16(bfloat returned %v) {{.*}} { diff --git a/clang/test/CodeGen/arm-fp16-arguments.c b/clang/test/CodeGen/arm-fp16-arguments.c --- a/clang/test/CodeGen/arm-fp16-arguments.c +++ b/clang/test/CodeGen/arm-fp16-arguments.c @@ -5,9 +5,9 @@ __fp16 g; void t1(__fp16 a) { g = a; } -// SOFT: define void @t1(i32 [[PARAM:%.*]]) +// SOFT: define void @t1(i32 partialinit [[PARAM:%.*]]) // SOFT: [[TRUNC:%.*]] = trunc i32 [[PARAM]] to i16 -// HARD: define arm_aapcs_vfpcc void @t1(float [[PARAM:%.*]]) +// HARD: define arm_aapcs_vfpcc void @t1(float partialinit [[PARAM:%.*]]) // HARD: [[BITCAST:%.*]] = bitcast float [[PARAM]] to i32 // HARD: 
[[TRUNC:%.*]] = trunc i32 [[BITCAST]] to i16 // CHECK: store i16 [[TRUNC]], i16* bitcast (half* @g to i16*) @@ -15,8 +15,8 @@ // NATIVE: store half [[PARAM]], half* @g __fp16 t2() { return g; } -// SOFT: define i32 @t2() -// HARD: define arm_aapcs_vfpcc float @t2() +// SOFT: define partialinit i32 @t2() +// HARD: define arm_aapcs_vfpcc partialinit float @t2() // NATIVE: define half @t2() // CHECK: [[LOAD:%.*]] = load i16, i16* bitcast (half* @g to i16*) // CHECK: [[ZEXT:%.*]] = zext i16 [[LOAD]] to i32 @@ -29,9 +29,9 @@ _Float16 h; void t3(_Float16 a) { h = a; } -// SOFT: define void @t3(i32 [[PARAM:%.*]]) +// SOFT: define void @t3(i32 partialinit [[PARAM:%.*]]) // SOFT: [[TRUNC:%.*]] = trunc i32 [[PARAM]] to i16 -// HARD: define arm_aapcs_vfpcc void @t3(float [[PARAM:%.*]]) +// HARD: define arm_aapcs_vfpcc void @t3(float partialinit [[PARAM:%.*]]) // HARD: [[BITCAST:%.*]] = bitcast float [[PARAM]] to i32 // HARD: [[TRUNC:%.*]] = trunc i32 [[BITCAST]] to i16 // CHECK: store i16 [[TRUNC]], i16* bitcast (half* @h to i16*) @@ -39,8 +39,8 @@ // NATIVE: store half [[PARAM]], half* @h _Float16 t4() { return h; } -// SOFT: define i32 @t4() -// HARD: define arm_aapcs_vfpcc float @t4() +// SOFT: define partialinit i32 @t4() +// HARD: define arm_aapcs_vfpcc partialinit float @t4() // NATIVE: define half @t4() // CHECK: [[LOAD:%.*]] = load i16, i16* bitcast (half* @h to i16*) // CHECK: [[ZEXT:%.*]] = zext i16 [[LOAD]] to i32 diff --git a/clang/test/CodeGen/arm-homogenous.c b/clang/test/CodeGen/arm-homogenous.c --- a/clang/test/CodeGen/arm-homogenous.c +++ b/clang/test/CodeGen/arm-homogenous.c @@ -151,15 +151,15 @@ void test_union_with_struct_with_fundamental_elems(void) { takes_union_with_struct_with_fundamental_elems(g_u_s_fe); -// CHECK: call arm_aapcs_vfpcc void @takes_union_with_struct_with_fundamental_elems(%union.union_with_struct_with_fundamental_elems {{.*}}) +// CHECK: call arm_aapcs_vfpcc void 
@takes_union_with_struct_with_fundamental_elems(%union.union_with_struct_with_fundamental_elems partialinit {{.*}}) } -// CHECK: declare arm_aapcs_vfpcc void @takes_union_with_struct_with_fundamental_elems(%union.union_with_struct_with_fundamental_elems) +// CHECK: declare arm_aapcs_vfpcc void @takes_union_with_struct_with_fundamental_elems(%union.union_with_struct_with_fundamental_elems partialinit) void test_return_union_with_struct_with_fundamental_elems(void) { g_u_s_fe = returns_union_with_struct_with_fundamental_elems(); -// CHECK: call arm_aapcs_vfpcc %union.union_with_struct_with_fundamental_elems @returns_union_with_struct_with_fundamental_elems() +// CHECK: call arm_aapcs_vfpcc partialinit %union.union_with_struct_with_fundamental_elems @returns_union_with_struct_with_fundamental_elems() } -// CHECK: declare arm_aapcs_vfpcc %union.union_with_struct_with_fundamental_elems @returns_union_with_struct_with_fundamental_elems() +// CHECK: declare arm_aapcs_vfpcc partialinit %union.union_with_struct_with_fundamental_elems @returns_union_with_struct_with_fundamental_elems() // Make sure HAs that can be partially fit into VFP registers will be allocated // on stack and that later VFP candidates will go on stack as well. 
diff --git a/clang/test/CodeGen/arm-mangle-bf16.cpp b/clang/test/CodeGen/arm-mangle-bf16.cpp --- a/clang/test/CodeGen/arm-mangle-bf16.cpp +++ b/clang/test/CodeGen/arm-mangle-bf16.cpp @@ -4,5 +4,5 @@ // CHECK64: define {{.*}}void @_Z3foou6__bf16(bfloat %b) // CHECK32-HARD: define {{.*}}void @_Z3foou6__bf16(bfloat %b) -// CHECK32-SOFTFP: define {{.*}}void @_Z3foou6__bf16(i32 %b.coerce) +// CHECK32-SOFTFP: define {{.*}}void @_Z3foou6__bf16(i32 partialinit %b.coerce) void foo(__bf16 b) {} diff --git a/clang/test/CodeGen/arm64-abi-vector.c b/clang/test/CodeGen/arm64-abi-vector.c --- a/clang/test/CodeGen/arm64-abi-vector.c +++ b/clang/test/CodeGen/arm64-abi-vector.c @@ -51,7 +51,7 @@ double test_3c(__char3 *in) { // CHECK: test_3c -// CHECK: call double (i32, ...) @varargs_vec_3c(i32 3, i32 {{%.*}}) +// CHECK: call double (i32, ...) @varargs_vec_3c(i32 3, i32 partialinit {{%.*}}) return varargs_vec_3c(3, *in); } @@ -91,7 +91,7 @@ double test_5c(__char5 *in) { // CHECK: test_5c -// CHECK: call double (i32, ...) @varargs_vec_5c(i32 5, <2 x i32> {{%.*}}) +// CHECK: call double (i32, ...) @varargs_vec_5c(i32 5, <2 x i32> partialinit {{%.*}}) return varargs_vec_5c(5, *in); } @@ -113,7 +113,7 @@ double test_9c(__char9 *in) { // CHECK: test_9c -// CHECK: call double (i32, ...) @varargs_vec_9c(i32 9, <4 x i32> {{%.*}}) +// CHECK: call double (i32, ...) @varargs_vec_9c(i32 9, <4 x i32> partialinit {{%.*}}) return varargs_vec_9c(9, *in); } @@ -153,7 +153,7 @@ double test_3s(__short3 *in) { // CHECK: test_3s -// CHECK: call double (i32, ...) @varargs_vec_3s(i32 3, <2 x i32> {{%.*}}) +// CHECK: call double (i32, ...) @varargs_vec_3s(i32 3, <2 x i32> partialinit {{%.*}}) return varargs_vec_3s(3, *in); } @@ -175,7 +175,7 @@ double test_5s(__short5 *in) { // CHECK: test_5s -// CHECK: call double (i32, ...) @varargs_vec_5s(i32 5, <4 x i32> {{%.*}}) +// CHECK: call double (i32, ...) 
@varargs_vec_5s(i32 5, <4 x i32> partialinit {{%.*}}) return varargs_vec_5s(5, *in); } @@ -197,7 +197,7 @@ double test_3i(__int3 *in) { // CHECK: test_3i -// CHECK: call double (i32, ...) @varargs_vec_3i(i32 3, <4 x i32> {{%.*}}) +// CHECK: call double (i32, ...) @varargs_vec_3i(i32 3, <4 x i32> partialinit {{%.*}}) return varargs_vec_3i(3, *in); } @@ -301,7 +301,7 @@ __short3 *s3, __short5 *s5, __int3 *i3, __int5 *i5, __double3 *d3) { double ret = varargs_vec(3, *c3, *c5, *c9, *c19, *s3, *s5, *i3, *i5, *d3); -// CHECK: call double (i32, ...) @varargs_vec(i32 3, i32 {{%.*}}, <2 x i32> {{%.*}}, <4 x i32> {{%.*}}, <19 x i8>* {{%.*}}, <2 x i32> {{%.*}}, <4 x i32> {{%.*}}, <4 x i32> {{%.*}}, <5 x i32>* {{%.*}}, <3 x double>* {{%.*}}) +// CHECK: call double (i32, ...) @varargs_vec(i32 3, i32 partialinit {{%.*}}, <2 x i32> partialinit {{%.*}}, <4 x i32> partialinit {{%.*}}, <19 x i8>* {{%.*}}, <2 x i32> partialinit {{%.*}}, <4 x i32> partialinit {{%.*}}, <4 x i32> partialinit {{%.*}}, <5 x i32>* {{%.*}}, <3 x double>* {{%.*}}) return ret; } @@ -317,7 +317,7 @@ double fixed_3c(__char3 *in) { // CHECK: fixed_3c -// CHECK: call double @args_vec_3c(i32 3, i32 {{%.*}}) +// CHECK: call double @args_vec_3c(i32 3, i32 partialinit {{%.*}}) return args_vec_3c(3, *in); } @@ -333,7 +333,7 @@ double fixed_5c(__char5 *in) { // CHECK: fixed_5c -// CHECK: call double @args_vec_5c(i32 5, <2 x i32> {{%.*}}) +// CHECK: call double @args_vec_5c(i32 5, <2 x i32> partialinit {{%.*}}) return args_vec_5c(5, *in); } @@ -349,7 +349,7 @@ double fixed_9c(__char9 *in) { // CHECK: fixed_9c -// CHECK: call double @args_vec_9c(i32 9, <4 x i32> {{%.*}}) +// CHECK: call double @args_vec_9c(i32 9, <4 x i32> partialinit {{%.*}}) return args_vec_9c(9, *in); } @@ -379,7 +379,7 @@ double fixed_3s(__short3 *in) { // CHECK: fixed_3s -// CHECK: call double @args_vec_3s(i32 3, <2 x i32> {{%.*}}) +// CHECK: call double @args_vec_3s(i32 3, <2 x i32> partialinit {{%.*}}) return args_vec_3s(3, *in); } @@ -395,7 
+395,7 @@ double fixed_5s(__short5 *in) { // CHECK: fixed_5s -// CHECK: call double @args_vec_5s(i32 5, <4 x i32> {{%.*}}) +// CHECK: call double @args_vec_5s(i32 5, <4 x i32> partialinit {{%.*}}) return args_vec_5s(5, *in); } @@ -411,7 +411,7 @@ double fixed_3i(__int3 *in) { // CHECK: fixed_3i -// CHECK: call double @args_vec_3i(i32 3, <4 x i32> {{%.*}}) +// CHECK: call double @args_vec_3i(i32 3, <4 x i32> partialinit {{%.*}}) return args_vec_3i(3, *in); } diff --git a/clang/test/CodeGen/arm64-arguments.c b/clang/test/CodeGen/arm64-arguments.c --- a/clang/test/CodeGen/arm64-arguments.c +++ b/clang/test/CodeGen/arm64-arguments.c @@ -7,27 +7,27 @@ // Struct as return type. Aggregates <= 16 bytes are passed directly and round // up to multiple of 8 bytes. -// CHECK: define i64 @f1() +// CHECK: define partialinit i64 @f1() struct s1 { char f0; }; struct s1 f1(void) {} -// CHECK: define i64 @f2() +// CHECK: define partialinit i64 @f2() struct s2 { short f0; }; struct s2 f2(void) {} -// CHECK: define i64 @f3() +// CHECK: define partialinit i64 @f3() struct s3 { int f0; }; struct s3 f3(void) {} -// CHECK: define i64 @f4() +// CHECK: define partialinit i64 @f4() struct s4 { struct s4_0 { int f0; } f0; }; struct s4 f4(void) {} -// CHECK: define i64 @f5() +// CHECK: define partialinit i64 @f5() struct s5 { struct { } f0; int f1; }; struct s5 f5(void) {} -// CHECK: define i64 @f6() +// CHECK: define partialinit i64 @f6() struct s6 { int f0[1]; }; struct s6 f6(void) {} @@ -39,19 +39,19 @@ struct s8 { struct { int : 0; } f0[1]; }; struct s8 f8(void) {} -// CHECK: define i64 @f9() +// CHECK: define partialinit i64 @f9() struct s9 { int f0; int : 0; }; struct s9 f9(void) {} -// CHECK: define i64 @f10() +// CHECK: define partialinit i64 @f10() struct s10 { int f0; int : 0; int : 0; }; struct s10 f10(void) {} -// CHECK: define i64 @f11() +// CHECK: define partialinit i64 @f11() struct s11 { int : 0; int f0; }; struct s11 f11(void) {} -// CHECK: define i64 @f12() +// CHECK: define 
partialinit i64 @f12() union u12 { char f0; short f1; int f2; }; union u12 f12(void) {} @@ -69,28 +69,28 @@ // CHECK: define void @f16() void f16(struct s8 a0) {} -// CHECK: define i64 @f17() +// CHECK: define partialinit i64 @f17() struct s17 { short f0 : 13; char f1 : 4; }; struct s17 f17(void) {} -// CHECK: define i64 @f18() +// CHECK: define partialinit i64 @f18() struct s18 { short f0; char f1 : 4; }; struct s18 f18(void) {} -// CHECK: define i64 @f19() +// CHECK: define partialinit i64 @f19() struct s19 { int f0; struct s8 f1; }; struct s19 f19(void) {} -// CHECK: define i64 @f20() +// CHECK: define partialinit i64 @f20() struct s20 { struct s8 f1; int f0; }; struct s20 f20(void) {} -// CHECK: define i64 @f21() +// CHECK: define partialinit i64 @f21() struct s21 { struct {} f1; int f0 : 4; }; struct s21 f21(void) {} -// CHECK: define i64 @f22() -// CHECK: define i64 @f23() +// CHECK: define partialinit i64 @f22() +// CHECK: define partialinit i64 @f23() // CHECK: define i64 @f24() // CHECK: define [2 x i64] @f25() // CHECK: define { float, float } @f26() @@ -102,11 +102,11 @@ _Complex float f26(void) {} _Complex double f27(void) {} -// CHECK: define i64 @f28() +// CHECK: define partialinit i64 @f28() struct s28 { _Complex char f0; }; struct s28 f28() {} -// CHECK: define i64 @f29() +// CHECK: define partialinit i64 @f29() struct s29 { _Complex short f0; }; struct s29 f29() {} @@ -116,7 +116,7 @@ struct s31 { char x; }; void f31(struct s31 s) { } -// CHECK: define void @f31(i64 %s.coerce) +// CHECK: define void @f31(i64 partialinit %s.coerce) // CHECK: %s = alloca %struct.s31, align 1 // CHECK: trunc i64 %s.coerce to i8 // CHECK: store i8 %{{.*}}, @@ -136,7 +136,7 @@ // CHECK: @g34(%struct.s34* %s) // CHECK: %[[a:.*]] = load i8, i8* %{{.*}} // CHECK: zext i8 %[[a]] to i64 -// CHECK: call void @f34(i64 %{{.*}}) +// CHECK: call void @f34(i64 partialinit %{{.*}}) /* * Check that va_arg accesses stack according to ABI alignment @@ -272,7 +272,7 @@ // passing 
structs in registers __attribute__ ((noinline)) int f38(int i, s38_no_align s1, s38_no_align s2) { -// CHECK: define i32 @f38(i32 %i, i64 %s1.coerce, i64 %s2.coerce) +// CHECK: define i32 @f38(i32 %i, i64 partialinit %s1.coerce, i64 partialinit %s2.coerce) // CHECK: %s1 = alloca %struct.s38, align 4 // CHECK: %s2 = alloca %struct.s38, align 4 // CHECK: store i64 %s1.coerce, i64* %{{.*}}, align 4 @@ -289,14 +289,14 @@ // CHECK: define i32 @caller38() // CHECK: %[[a:.*]] = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 4 // CHECK: %[[b:.*]] = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 4 -// CHECK: call i32 @f38(i32 3, i64 %[[a]], i64 %[[b]]) +// CHECK: call i32 @f38(i32 3, i64 partialinit %[[a]], i64 partialinit %[[b]]) return f38(3, g38, g38_2); } // passing structs on stack __attribute__ ((noinline)) int f38_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8, int i9, s38_no_align s1, s38_no_align s2) { -// CHECK: define i32 @f38_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i64 %s1.coerce, i64 %s2.coerce) +// CHECK: define i32 @f38_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i64 partialinit %s1.coerce, i64 partialinit %s2.coerce) // CHECK: %s1 = alloca %struct.s38, align 4 // CHECK: %s2 = alloca %struct.s38, align 4 // CHECK: store i64 %s1.coerce, i64* %{{.*}}, align 4 @@ -311,7 +311,7 @@ // CHECK: define i32 @caller38_stack() // CHECK: %[[a:.*]] = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 4 // CHECK: %[[b:.*]] = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 4 -// CHECK: call i32 @f38_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i64 %[[a]], i64 %[[b]]) +// CHECK: call i32 @f38_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i64 partialinit %[[a]], i64 partialinit %[[b]]) return f38_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g38, g38_2); } @@ -325,7 +325,7 @@ // passing aligned 
structs in registers __attribute__ ((noinline)) int f39(int i, s39_with_align s1, s39_with_align s2) { -// CHECK: define i32 @f39(i32 %i, i128 %s1.coerce, i128 %s2.coerce) +// CHECK: define i32 @f39(i32 %i, i128 partialinit %s1.coerce, i128 partialinit %s2.coerce) // CHECK: %s1 = alloca %struct.s39, align 16 // CHECK: %s2 = alloca %struct.s39, align 16 // CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 16 @@ -342,14 +342,14 @@ // CHECK: define i32 @caller39() // CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 16 // CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 16 -// CHECK: call i32 @f39(i32 3, i128 %[[a]], i128 %[[b]]) +// CHECK: call i32 @f39(i32 3, i128 partialinit %[[a]], i128 partialinit %[[b]]) return f39(3, g39, g39_2); } // passing aligned structs on stack __attribute__ ((noinline)) int f39_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8, int i9, s39_with_align s1, s39_with_align s2) { -// CHECK: define i32 @f39_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i128 %s1.coerce, i128 %s2.coerce) +// CHECK: define i32 @f39_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i128 partialinit %s1.coerce, i128 partialinit %s2.coerce) // CHECK: %s1 = alloca %struct.s39, align 16 // CHECK: %s2 = alloca %struct.s39, align 16 // CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 16 @@ -364,7 +364,7 @@ // CHECK: define i32 @caller39_stack() // CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 16 // CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 16 -// CHECK: call i32 @f39_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i128 %[[a]], i128 %[[b]]) +// CHECK: call i32 @f39_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i128 partialinit %[[a]], i128 partialinit %[[b]]) return f39_stack(1, 2, 3, 4, 5, 6, 7, 
8, 9, g39, g39_2); } @@ -380,7 +380,7 @@ // passing structs in registers __attribute__ ((noinline)) int f40(int i, s40_no_align s1, s40_no_align s2) { -// CHECK: define i32 @f40(i32 %i, [2 x i64] %s1.coerce, [2 x i64] %s2.coerce) +// CHECK: define i32 @f40(i32 %i, [2 x i64] partialinit %s1.coerce, [2 x i64] partialinit %s2.coerce) // CHECK: %s1 = alloca %struct.s40, align 4 // CHECK: %s2 = alloca %struct.s40, align 4 // CHECK: store [2 x i64] %s1.coerce, [2 x i64]* %{{.*}}, align 4 @@ -397,14 +397,14 @@ // CHECK: define i32 @caller40() // CHECK: %[[a:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4 // CHECK: %[[b:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4 -// CHECK: call i32 @f40(i32 3, [2 x i64] %[[a]], [2 x i64] %[[b]]) +// CHECK: call i32 @f40(i32 3, [2 x i64] partialinit %[[a]], [2 x i64] partialinit %[[b]]) return f40(3, g40, g40_2); } // passing structs on stack __attribute__ ((noinline)) int f40_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8, int i9, s40_no_align s1, s40_no_align s2) { -// CHECK: define i32 @f40_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, [2 x i64] %s1.coerce, [2 x i64] %s2.coerce) +// CHECK: define i32 @f40_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, [2 x i64] partialinit %s1.coerce, [2 x i64] partialinit %s2.coerce) // CHECK: %s1 = alloca %struct.s40, align 4 // CHECK: %s2 = alloca %struct.s40, align 4 // CHECK: store [2 x i64] %s1.coerce, [2 x i64]* %{{.*}}, align 4 @@ -419,7 +419,7 @@ // CHECK: define i32 @caller40_stack() // CHECK: %[[a:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4 // CHECK: %[[b:.*]] = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4 -// CHECK: call i32 @f40_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, [2 x i64] %[[a]], [2 x i64] %[[b]]) +// 
CHECK: call i32 @f40_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, [2 x i64] partialinit %[[a]], [2 x i64] partialinit %[[b]]) return f40_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g40, g40_2); } @@ -435,7 +435,7 @@ // passing aligned structs in registers __attribute__ ((noinline)) int f41(int i, s41_with_align s1, s41_with_align s2) { -// CHECK: define i32 @f41(i32 %i, i128 %s1.coerce, i128 %s2.coerce) +// CHECK: define i32 @f41(i32 %i, i128 partialinit %s1.coerce, i128 partialinit %s2.coerce) // CHECK: %s1 = alloca %struct.s41, align 16 // CHECK: %s2 = alloca %struct.s41, align 16 // CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 16 @@ -452,14 +452,14 @@ // CHECK: define i32 @caller41() // CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16 // CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 16 -// CHECK: call i32 @f41(i32 3, i128 %[[a]], i128 %[[b]]) +// CHECK: call i32 @f41(i32 3, i128 partialinit %[[a]], i128 partialinit %[[b]]) return f41(3, g41, g41_2); } // passing aligned structs on stack __attribute__ ((noinline)) int f41_stack(int i, int i2, int i3, int i4, int i5, int i6, int i7, int i8, int i9, s41_with_align s1, s41_with_align s2) { -// CHECK: define i32 @f41_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i128 %s1.coerce, i128 %s2.coerce) +// CHECK: define i32 @f41_stack(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i128 partialinit %s1.coerce, i128 partialinit %s2.coerce) // CHECK: %s1 = alloca %struct.s41, align 16 // CHECK: %s2 = alloca %struct.s41, align 16 // CHECK: store i128 %s1.coerce, i128* %{{.*}}, align 16 @@ -474,7 +474,7 @@ // CHECK: define i32 @caller41_stack() // CHECK: %[[a:.*]] = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16 // CHECK: %[[b:.*]] = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 16 -// CHECK: call i32 @f41_stack(i32 1, i32 2, i32 3, i32 
4, i32 5, i32 6, i32 7, i32 8, i32 9, i128 %[[a]], i128 %[[b]]) +// CHECK: call i32 @f41_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i128 partialinit %[[a]], i128 partialinit %[[b]]) return f41_stack(1, 2, 3, 4, 5, 6, 7, 8, 9, g41, g41_2); } @@ -597,24 +597,24 @@ __attribute__ ((noinline)) int f40_split(int i, int i2, int i3, int i4, int i5, int i6, int i7, s40_no_align s1, s40_no_align s2) { -// CHECK: define i32 @f40_split(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, [2 x i64] %s1.coerce, [2 x i64] %s2.coerce) +// CHECK: define i32 @f40_split(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, [2 x i64] partialinit %s1.coerce, [2 x i64] partialinit %s2.coerce) return s1.i + s2.i + i + i2 + i3 + i4 + i5 + i6 + i7 + s1.s + s2.s; } int caller40_split() { // CHECK: define i32 @caller40_split() -// CHECK: call i32 @f40_split(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, [2 x i64] %{{.*}} [2 x i64] %{{.*}}) +// CHECK: call i32 @f40_split(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, [2 x i64] partialinit %{{.*}} [2 x i64] partialinit %{{.*}}) return f40_split(1, 2, 3, 4, 5, 6, 7, g40, g40_2); } __attribute__ ((noinline)) int f41_split(int i, int i2, int i3, int i4, int i5, int i6, int i7, s41_with_align s1, s41_with_align s2) { -// CHECK: define i32 @f41_split(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i128 %s1.coerce, i128 %s2.coerce) +// CHECK: define i32 @f41_split(i32 %i, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i128 partialinit %s1.coerce, i128 partialinit %s2.coerce) return s1.i + s2.i + i + i2 + i3 + i4 + i5 + i6 + i7 + s1.s + s2.s; } int caller41_split() { // CHECK: define i32 @caller41_split() -// CHECK: call i32 @f41_split(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i128 %{{.*}}, i128 %{{.*}}) +// CHECK: call i32 @f41_split(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i128 partialinit %{{.*}}, i128 partialinit %{{.*}}) return f41_split(1, 2, 3, 4, 5, 6, 7, g41, 
g41_2); } diff --git a/clang/test/CodeGen/arm64-be-bitfield.c b/clang/test/CodeGen/arm64-be-bitfield.c --- a/clang/test/CodeGen/arm64-be-bitfield.c +++ b/clang/test/CodeGen/arm64-be-bitfield.c @@ -4,7 +4,7 @@ // Get the high 32-bits and then shift appropriately for big-endian. signed callee_b0f(struct bt3 bp11) { -// IR: callee_b0f(i64 [[ARG:%.*]]) +// IR: callee_b0f(i64 partialinit [[ARG:%.*]]) // IR: store i64 [[ARG]], i64* [[PTR:%.*]], align 8 // IR: [[BITCAST:%.*]] = bitcast i64* [[PTR]] to i8* // IR: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* align 8 [[BITCAST]], i64 4 diff --git a/clang/test/CodeGen/arm64-microsoft-arguments.cpp b/clang/test/CodeGen/arm64-microsoft-arguments.cpp --- a/clang/test/CodeGen/arm64-microsoft-arguments.cpp +++ b/clang/test/CodeGen/arm64-microsoft-arguments.cpp @@ -43,7 +43,7 @@ // Pass and return aggregate (of size < 16 bytes) with non-trivial destructor. // Passed directly but returned indirectly. // CHECK: define {{.*}} void {{.*}}f4{{.*}}(%struct.S4* inreg noalias sret align 4 %agg.result) -// CHECK: call void {{.*}}func4{{.*}}(%struct.S4* inreg sret align 4 %agg.result, [2 x i64] %5) +// CHECK: call void {{.*}}func4{{.*}}(%struct.S4* inreg sret align 4 %agg.result, [2 x i64] partialinit %5) struct S4 { int a[3]; ~S4(); @@ -57,7 +57,7 @@ // Pass and return from instance method called from instance method. // CHECK: define {{.*}} void @{{.*}}bar@Q1{{.*}}(%class.Q1* %this, %class.P1* inreg noalias sret align 1 %agg.result) -// CHECK: call void {{.*}}foo@P1{{.*}}(%class.P1* %ref.tmp, %class.P1* inreg sret align 1 %agg.result, i8 %1) +// CHECK: call void {{.*}}foo@P1{{.*}}(%class.P1* %ref.tmp, %class.P1* inreg sret align 1 %agg.result, i8 partialinit %1) class P1 { public: @@ -76,7 +76,7 @@ // Pass and return from instance method called from free function. 
// CHECK: define {{.*}} void {{.*}}bar{{.*}}() -// CHECK: call void {{.*}}foo@P2{{.*}}(%class.P2* %ref.tmp, %class.P2* inreg sret align 1 %retval, i8 %0) +// CHECK: call void {{.*}}foo@P2{{.*}}(%class.P2* %ref.tmp, %class.P2* inreg sret align 1 %retval, i8 partialinit %0) class P2 { public: P2 foo(P2 x); @@ -104,8 +104,8 @@ // Pass and return an object with a non-trivial explicitly defaulted constructor // (passed directly, returned directly) -// CHECK: define {{.*}} i64 @"?f6@@YA?AUS6@@XZ"() -// CHECK: call i64 {{.*}}func6{{.*}}(i64 {{.*}}) +// CHECK: define {{.*}} partialinit i64 @"?f6@@YA?AUS6@@XZ"() +// CHECK: call partialinit i64 {{.*}}func6{{.*}}(i64 partialinit {{.*}}) struct S6a { S6a(); }; @@ -123,8 +123,8 @@ // Pass and return an object with a non-trivial implicitly defaulted constructor // (passed directly, returned directly) -// CHECK: define {{.*}} i64 @"?f7@@YA?AUS7@@XZ"() -// CHECK: call i64 {{.*}}func7{{.*}}(i64 {{.*}}) +// CHECK: define {{.*}} partialinit i64 @"?f7@@YA?AUS7@@XZ"() +// CHECK: call partialinit i64 {{.*}}func7{{.*}}(i64 partialinit {{.*}}) struct S7 { S6a x; }; diff --git a/clang/test/CodeGen/arm64_32-vaarg.c b/clang/test/CodeGen/arm64_32-vaarg.c --- a/clang/test/CodeGen/arm64_32-vaarg.c +++ b/clang/test/CodeGen/arm64_32-vaarg.c @@ -103,7 +103,7 @@ // alignment must be passed via "[N x i32]" to be correctly allocated in the // backend. 
short test_threeshorts(ThreeShorts input, va_list *mylist) { -// CHECK-LABEL: define signext i16 @test_threeshorts([2 x i32] %input +// CHECK-LABEL: define signext i16 @test_threeshorts([2 x i32] partialinit %input // CHECK: [[START:%.*]] = load i8*, i8** %mylist // CHECK: [[NEXT:%.*]] = getelementptr inbounds i8, i8* [[START]], i32 8 diff --git a/clang/test/CodeGen/arm_neon_intrinsics.c b/clang/test/CodeGen/arm_neon_intrinsics.c --- a/clang/test/CodeGen/arm_neon_intrinsics.c +++ b/clang/test/CodeGen/arm_neon_intrinsics.c @@ -20227,10 +20227,10 @@ // CHECK: [[TMP0:%.*]] = bitcast %struct.int8x8x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i8>* // CHECK: [[VTRN_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> -// CHECK: store <8 x i8> [[VTRN_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope !3 +// CHECK: store <8 x i8> [[VTRN_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope // CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x i8>, <8 x i8>* [[TMP1]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> -// CHECK: store <8 x i8> [[VTRN1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope !3 +// CHECK: store <8 x i8> [[VTRN1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope // CHECK: ret void int8x8x2_t test_vtrn_s8(int8x8_t a, int8x8_t b) { return vtrn_s8(a, b); @@ -20242,10 +20242,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>* // CHECK: [[VTRN_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> -// CHECK: store <4 x i16> [[VTRN_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope !6 +// CHECK: store <4 x i16> [[VTRN_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i16>, <4 x i16>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> -// CHECK: store <4 x i16> [[VTRN1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope !6 +// CHECK: 
store <4 x i16> [[VTRN1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope // CHECK: ret void int16x4x2_t test_vtrn_s16(int16x4_t a, int16x4_t b) { return vtrn_s16(a, b); @@ -20257,10 +20257,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <2 x i32>* // CHECK: [[VTRN_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> -// CHECK: store <2 x i32> [[VTRN_I]], <2 x i32>* [[TMP3]], align 4, !alias.scope !9 +// CHECK: store <2 x i32> [[VTRN_I]], <2 x i32>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <2 x i32>, <2 x i32>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> -// CHECK: store <2 x i32> [[VTRN1_I]], <2 x i32>* [[TMP4]], align 4, !alias.scope !9 +// CHECK: store <2 x i32> [[VTRN1_I]], <2 x i32>* [[TMP4]], align 4, !alias.scope // CHECK: ret void int32x2x2_t test_vtrn_s32(int32x2_t a, int32x2_t b) { return vtrn_s32(a, b); @@ -20270,10 +20270,10 @@ // CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x8x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i8>* // CHECK: [[VTRN_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> -// CHECK: store <8 x i8> [[VTRN_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope !12 +// CHECK: store <8 x i8> [[VTRN_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope // CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x i8>, <8 x i8>* [[TMP1]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> -// CHECK: store <8 x i8> [[VTRN1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope !12 +// CHECK: store <8 x i8> [[VTRN1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope // CHECK: ret void uint8x8x2_t test_vtrn_u8(uint8x8_t a, uint8x8_t b) { return vtrn_u8(a, b); @@ -20285,10 +20285,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>* // CHECK: [[VTRN_I:%.*]] = shufflevector 
<4 x i16> %a, <4 x i16> %b, <4 x i32> -// CHECK: store <4 x i16> [[VTRN_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope !15 +// CHECK: store <4 x i16> [[VTRN_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i16>, <4 x i16>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> -// CHECK: store <4 x i16> [[VTRN1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope !15 +// CHECK: store <4 x i16> [[VTRN1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope // CHECK: ret void uint16x4x2_t test_vtrn_u16(uint16x4_t a, uint16x4_t b) { return vtrn_u16(a, b); @@ -20300,10 +20300,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <2 x i32>* // CHECK: [[VTRN_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> -// CHECK: store <2 x i32> [[VTRN_I]], <2 x i32>* [[TMP3]], align 4, !alias.scope !18 +// CHECK: store <2 x i32> [[VTRN_I]], <2 x i32>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <2 x i32>, <2 x i32>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> -// CHECK: store <2 x i32> [[VTRN1_I]], <2 x i32>* [[TMP4]], align 4, !alias.scope !18 +// CHECK: store <2 x i32> [[VTRN1_I]], <2 x i32>* [[TMP4]], align 4, !alias.scope // CHECK: ret void uint32x2x2_t test_vtrn_u32(uint32x2_t a, uint32x2_t b) { return vtrn_u32(a, b); @@ -20315,10 +20315,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <2 x float> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <2 x float>* // CHECK: [[VTRN_I:%.*]] = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> -// CHECK: store <2 x float> [[VTRN_I]], <2 x float>* [[TMP3]], align 4, !alias.scope !21 +// CHECK: store <2 x float> [[VTRN_I]], <2 x float>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <2 x float>, <2 x float>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = 
shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> -// CHECK: store <2 x float> [[VTRN1_I]], <2 x float>* [[TMP4]], align 4, !alias.scope !21 +// CHECK: store <2 x float> [[VTRN1_I]], <2 x float>* [[TMP4]], align 4, !alias.scope // CHECK: ret void float32x2x2_t test_vtrn_f32(float32x2_t a, float32x2_t b) { return vtrn_f32(a, b); @@ -20328,10 +20328,10 @@ // CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x8x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i8>* // CHECK: [[VTRN_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> -// CHECK: store <8 x i8> [[VTRN_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope !24 +// CHECK: store <8 x i8> [[VTRN_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope // CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x i8>, <8 x i8>* [[TMP1]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> -// CHECK: store <8 x i8> [[VTRN1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope !24 +// CHECK: store <8 x i8> [[VTRN1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope // CHECK: ret void poly8x8x2_t test_vtrn_p8(poly8x8_t a, poly8x8_t b) { return vtrn_p8(a, b); @@ -20343,10 +20343,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>* // CHECK: [[VTRN_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> -// CHECK: store <4 x i16> [[VTRN_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope !27 +// CHECK: store <4 x i16> [[VTRN_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i16>, <4 x i16>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> -// CHECK: store <4 x i16> [[VTRN1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope !27 +// CHECK: store <4 x i16> [[VTRN1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope // CHECK: ret void poly16x4x2_t test_vtrn_p16(poly16x4_t a, poly16x4_t b) { return vtrn_p16(a, b); @@ -20356,10 
+20356,10 @@ // CHECK: [[TMP0:%.*]] = bitcast %struct.int8x16x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>* // CHECK: [[VTRN_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> -// CHECK: store <16 x i8> [[VTRN_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope !30 +// CHECK: store <16 x i8> [[VTRN_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope // CHECK: [[TMP2:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[TMP1]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> -// CHECK: store <16 x i8> [[VTRN1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope !30 +// CHECK: store <16 x i8> [[VTRN1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope // CHECK: ret void int8x16x2_t test_vtrnq_s8(int8x16_t a, int8x16_t b) { return vtrnq_s8(a, b); @@ -20371,10 +20371,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>* // CHECK: [[VTRN_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> -// CHECK: store <8 x i16> [[VTRN_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope !33 +// CHECK: store <8 x i16> [[VTRN_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> -// CHECK: store <8 x i16> [[VTRN1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope !33 +// CHECK: store <8 x i16> [[VTRN1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope // CHECK: ret void int16x8x2_t test_vtrnq_s16(int16x8_t a, int16x8_t b) { return vtrnq_s16(a, b); @@ -20386,10 +20386,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i32>* // CHECK: [[VTRN_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> -// CHECK: store <4 x i32> [[VTRN_I]], <4 x i32>* [[TMP3]], align 4, !alias.scope !36 +// CHECK: store <4 x i32> 
[[VTRN_I]], <4 x i32>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> -// CHECK: store <4 x i32> [[VTRN1_I]], <4 x i32>* [[TMP4]], align 4, !alias.scope !36 +// CHECK: store <4 x i32> [[VTRN1_I]], <4 x i32>* [[TMP4]], align 4, !alias.scope // CHECK: ret void int32x4x2_t test_vtrnq_s32(int32x4_t a, int32x4_t b) { return vtrnq_s32(a, b); @@ -20399,10 +20399,10 @@ // CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x16x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>* // CHECK: [[VTRN_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> -// CHECK: store <16 x i8> [[VTRN_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope !39 +// CHECK: store <16 x i8> [[VTRN_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope // CHECK: [[TMP2:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[TMP1]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> -// CHECK: store <16 x i8> [[VTRN1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope !39 +// CHECK: store <16 x i8> [[VTRN1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope // CHECK: ret void uint8x16x2_t test_vtrnq_u8(uint8x16_t a, uint8x16_t b) { return vtrnq_u8(a, b); @@ -20414,10 +20414,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>* // CHECK: [[VTRN_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> -// CHECK: store <8 x i16> [[VTRN_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope !42 +// CHECK: store <8 x i16> [[VTRN_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> -// CHECK: store <8 x i16> [[VTRN1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope !42 +// CHECK: store 
<8 x i16> [[VTRN1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope // CHECK: ret void uint16x8x2_t test_vtrnq_u16(uint16x8_t a, uint16x8_t b) { return vtrnq_u16(a, b); @@ -20429,10 +20429,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i32>* // CHECK: [[VTRN_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> -// CHECK: store <4 x i32> [[VTRN_I]], <4 x i32>* [[TMP3]], align 4, !alias.scope !45 +// CHECK: store <4 x i32> [[VTRN_I]], <4 x i32>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> -// CHECK: store <4 x i32> [[VTRN1_I]], <4 x i32>* [[TMP4]], align 4, !alias.scope !45 +// CHECK: store <4 x i32> [[VTRN1_I]], <4 x i32>* [[TMP4]], align 4, !alias.scope // CHECK: ret void uint32x4x2_t test_vtrnq_u32(uint32x4_t a, uint32x4_t b) { return vtrnq_u32(a, b); @@ -20444,10 +20444,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <4 x float> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x float>* // CHECK: [[VTRN_I:%.*]] = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> -// CHECK: store <4 x float> [[VTRN_I]], <4 x float>* [[TMP3]], align 4, !alias.scope !48 +// CHECK: store <4 x float> [[VTRN_I]], <4 x float>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> -// CHECK: store <4 x float> [[VTRN1_I]], <4 x float>* [[TMP4]], align 4, !alias.scope !48 +// CHECK: store <4 x float> [[VTRN1_I]], <4 x float>* [[TMP4]], align 4, !alias.scope // CHECK: ret void float32x4x2_t test_vtrnq_f32(float32x4_t a, float32x4_t b) { return vtrnq_f32(a, b); @@ -20457,10 +20457,10 @@ // CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x16x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* 
[[TMP0]] to <16 x i8>* // CHECK: [[VTRN_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> -// CHECK: store <16 x i8> [[VTRN_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope !51 +// CHECK: store <16 x i8> [[VTRN_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope // CHECK: [[TMP2:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[TMP1]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> -// CHECK: store <16 x i8> [[VTRN1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope !51 +// CHECK: store <16 x i8> [[VTRN1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope // CHECK: ret void poly8x16x2_t test_vtrnq_p8(poly8x16_t a, poly8x16_t b) { return vtrnq_p8(a, b); @@ -20472,10 +20472,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>* // CHECK: [[VTRN_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> -// CHECK: store <8 x i16> [[VTRN_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope !54 +// CHECK: store <8 x i16> [[VTRN_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[TMP3]], i32 1 // CHECK: [[VTRN1_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> -// CHECK: store <8 x i16> [[VTRN1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope !54 +// CHECK: store <8 x i16> [[VTRN1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope // CHECK: ret void poly16x8x2_t test_vtrnq_p16(poly16x8_t a, poly16x8_t b) { return vtrnq_p16(a, b); @@ -20649,10 +20649,10 @@ // CHECK: [[TMP0:%.*]] = bitcast %struct.int8x8x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i8>* // CHECK: [[VUZP_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> -// CHECK: store <8 x i8> [[VUZP_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope !57 +// CHECK: store <8 x i8> [[VUZP_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope // CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x i8>, <8 x 
i8>* [[TMP1]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> -// CHECK: store <8 x i8> [[VUZP1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope !57 +// CHECK: store <8 x i8> [[VUZP1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope // CHECK: ret void int8x8x2_t test_vuzp_s8(int8x8_t a, int8x8_t b) { return vuzp_s8(a, b); @@ -20664,10 +20664,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>* // CHECK: [[VUZP_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> -// CHECK: store <4 x i16> [[VUZP_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope !60 +// CHECK: store <4 x i16> [[VUZP_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i16>, <4 x i16>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> -// CHECK: store <4 x i16> [[VUZP1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope !60 +// CHECK: store <4 x i16> [[VUZP1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope // CHECK: ret void int16x4x2_t test_vuzp_s16(int16x4_t a, int16x4_t b) { return vuzp_s16(a, b); @@ -20679,10 +20679,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <2 x i32>* // CHECK: [[VUZP_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> -// CHECK: store <2 x i32> [[VUZP_I]], <2 x i32>* [[TMP3]], align 4, !alias.scope !63 +// CHECK: store <2 x i32> [[VUZP_I]], <2 x i32>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <2 x i32>, <2 x i32>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> -// CHECK: store <2 x i32> [[VUZP1_I]], <2 x i32>* [[TMP4]], align 4, !alias.scope !63 +// CHECK: store <2 x i32> [[VUZP1_I]], <2 x i32>* [[TMP4]], align 4, !alias.scope // CHECK: ret void int32x2x2_t test_vuzp_s32(int32x2_t a, int32x2_t b) { return vuzp_s32(a, 
b); @@ -20692,10 +20692,10 @@ // CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x8x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i8>* // CHECK: [[VUZP_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> -// CHECK: store <8 x i8> [[VUZP_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope !66 +// CHECK: store <8 x i8> [[VUZP_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope // CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x i8>, <8 x i8>* [[TMP1]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> -// CHECK: store <8 x i8> [[VUZP1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope !66 +// CHECK: store <8 x i8> [[VUZP1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope // CHECK: ret void uint8x8x2_t test_vuzp_u8(uint8x8_t a, uint8x8_t b) { return vuzp_u8(a, b); @@ -20707,10 +20707,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>* // CHECK: [[VUZP_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> -// CHECK: store <4 x i16> [[VUZP_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope !69 +// CHECK: store <4 x i16> [[VUZP_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i16>, <4 x i16>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> -// CHECK: store <4 x i16> [[VUZP1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope !69 +// CHECK: store <4 x i16> [[VUZP1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope // CHECK: ret void uint16x4x2_t test_vuzp_u16(uint16x4_t a, uint16x4_t b) { return vuzp_u16(a, b); @@ -20722,10 +20722,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <2 x i32>* // CHECK: [[VUZP_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> -// CHECK: store <2 x i32> [[VUZP_I]], <2 x i32>* [[TMP3]], align 4, !alias.scope !72 +// CHECK: store <2 x i32> 
[[VUZP_I]], <2 x i32>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <2 x i32>, <2 x i32>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> -// CHECK: store <2 x i32> [[VUZP1_I]], <2 x i32>* [[TMP4]], align 4, !alias.scope !72 +// CHECK: store <2 x i32> [[VUZP1_I]], <2 x i32>* [[TMP4]], align 4, !alias.scope // CHECK: ret void uint32x2x2_t test_vuzp_u32(uint32x2_t a, uint32x2_t b) { return vuzp_u32(a, b); @@ -20737,10 +20737,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <2 x float> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <2 x float>* // CHECK: [[VUZP_I:%.*]] = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> -// CHECK: store <2 x float> [[VUZP_I]], <2 x float>* [[TMP3]], align 4, !alias.scope !75 +// CHECK: store <2 x float> [[VUZP_I]], <2 x float>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <2 x float>, <2 x float>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> -// CHECK: store <2 x float> [[VUZP1_I]], <2 x float>* [[TMP4]], align 4, !alias.scope !75 +// CHECK: store <2 x float> [[VUZP1_I]], <2 x float>* [[TMP4]], align 4, !alias.scope // CHECK: ret void float32x2x2_t test_vuzp_f32(float32x2_t a, float32x2_t b) { return vuzp_f32(a, b); @@ -20750,10 +20750,10 @@ // CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x8x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i8>* // CHECK: [[VUZP_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> -// CHECK: store <8 x i8> [[VUZP_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope !78 +// CHECK: store <8 x i8> [[VUZP_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope // CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x i8>, <8 x i8>* [[TMP1]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> -// CHECK: store <8 x i8> [[VUZP1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope !78 
+// CHECK: store <8 x i8> [[VUZP1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope // CHECK: ret void poly8x8x2_t test_vuzp_p8(poly8x8_t a, poly8x8_t b) { return vuzp_p8(a, b); @@ -20765,10 +20765,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>* // CHECK: [[VUZP_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> -// CHECK: store <4 x i16> [[VUZP_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope !81 +// CHECK: store <4 x i16> [[VUZP_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i16>, <4 x i16>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> -// CHECK: store <4 x i16> [[VUZP1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope !81 +// CHECK: store <4 x i16> [[VUZP1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope // CHECK: ret void poly16x4x2_t test_vuzp_p16(poly16x4_t a, poly16x4_t b) { return vuzp_p16(a, b); @@ -20778,10 +20778,10 @@ // CHECK: [[TMP0:%.*]] = bitcast %struct.int8x16x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>* // CHECK: [[VUZP_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> -// CHECK: store <16 x i8> [[VUZP_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope !84 +// CHECK: store <16 x i8> [[VUZP_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope // CHECK: [[TMP2:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[TMP1]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> -// CHECK: store <16 x i8> [[VUZP1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope !84 +// CHECK: store <16 x i8> [[VUZP1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope // CHECK: ret void int8x16x2_t test_vuzpq_s8(int8x16_t a, int8x16_t b) { return vuzpq_s8(a, b); @@ -20793,10 +20793,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>* // CHECK: 
[[VUZP_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> -// CHECK: store <8 x i16> [[VUZP_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope !87 +// CHECK: store <8 x i16> [[VUZP_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> -// CHECK: store <8 x i16> [[VUZP1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope !87 +// CHECK: store <8 x i16> [[VUZP1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope // CHECK: ret void int16x8x2_t test_vuzpq_s16(int16x8_t a, int16x8_t b) { return vuzpq_s16(a, b); @@ -20808,10 +20808,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i32>* // CHECK: [[VUZP_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> -// CHECK: store <4 x i32> [[VUZP_I]], <4 x i32>* [[TMP3]], align 4, !alias.scope !90 +// CHECK: store <4 x i32> [[VUZP_I]], <4 x i32>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> -// CHECK: store <4 x i32> [[VUZP1_I]], <4 x i32>* [[TMP4]], align 4, !alias.scope !90 +// CHECK: store <4 x i32> [[VUZP1_I]], <4 x i32>* [[TMP4]], align 4, !alias.scope // CHECK: ret void int32x4x2_t test_vuzpq_s32(int32x4_t a, int32x4_t b) { return vuzpq_s32(a, b); @@ -20821,10 +20821,10 @@ // CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x16x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>* // CHECK: [[VUZP_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> -// CHECK: store <16 x i8> [[VUZP_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope !93 +// CHECK: store <16 x i8> [[VUZP_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope // CHECK: [[TMP2:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[TMP1]], i32 1 // 
CHECK: [[VUZP1_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> -// CHECK: store <16 x i8> [[VUZP1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope !93 +// CHECK: store <16 x i8> [[VUZP1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope // CHECK: ret void uint8x16x2_t test_vuzpq_u8(uint8x16_t a, uint8x16_t b) { return vuzpq_u8(a, b); @@ -20836,10 +20836,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>* // CHECK: [[VUZP_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> -// CHECK: store <8 x i16> [[VUZP_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope !96 +// CHECK: store <8 x i16> [[VUZP_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> -// CHECK: store <8 x i16> [[VUZP1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope !96 +// CHECK: store <8 x i16> [[VUZP1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope // CHECK: ret void uint16x8x2_t test_vuzpq_u16(uint16x8_t a, uint16x8_t b) { return vuzpq_u16(a, b); @@ -20851,10 +20851,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i32>* // CHECK: [[VUZP_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> -// CHECK: store <4 x i32> [[VUZP_I]], <4 x i32>* [[TMP3]], align 4, !alias.scope !99 +// CHECK: store <4 x i32> [[VUZP_I]], <4 x i32>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> -// CHECK: store <4 x i32> [[VUZP1_I]], <4 x i32>* [[TMP4]], align 4, !alias.scope !99 +// CHECK: store <4 x i32> [[VUZP1_I]], <4 x i32>* [[TMP4]], align 4, !alias.scope // CHECK: ret void uint32x4x2_t test_vuzpq_u32(uint32x4_t a, uint32x4_t b) { return 
vuzpq_u32(a, b); @@ -20866,10 +20866,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <4 x float> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x float>* // CHECK: [[VUZP_I:%.*]] = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> -// CHECK: store <4 x float> [[VUZP_I]], <4 x float>* [[TMP3]], align 4, !alias.scope !102 +// CHECK: store <4 x float> [[VUZP_I]], <4 x float>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> -// CHECK: store <4 x float> [[VUZP1_I]], <4 x float>* [[TMP4]], align 4, !alias.scope !102 +// CHECK: store <4 x float> [[VUZP1_I]], <4 x float>* [[TMP4]], align 4, !alias.scope // CHECK: ret void float32x4x2_t test_vuzpq_f32(float32x4_t a, float32x4_t b) { return vuzpq_f32(a, b); @@ -20879,10 +20879,10 @@ // CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x16x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>* // CHECK: [[VUZP_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> -// CHECK: store <16 x i8> [[VUZP_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope !105 +// CHECK: store <16 x i8> [[VUZP_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope // CHECK: [[TMP2:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[TMP1]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> -// CHECK: store <16 x i8> [[VUZP1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope !105 +// CHECK: store <16 x i8> [[VUZP1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope // CHECK: ret void poly8x16x2_t test_vuzpq_p8(poly8x16_t a, poly8x16_t b) { return vuzpq_p8(a, b); @@ -20894,10 +20894,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>* // CHECK: [[VUZP_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> -// CHECK: store <8 x i16> [[VUZP_I]], <8 x 
i16>* [[TMP3]], align 4, !alias.scope !108 +// CHECK: store <8 x i16> [[VUZP_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[TMP3]], i32 1 // CHECK: [[VUZP1_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> -// CHECK: store <8 x i16> [[VUZP1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope !108 +// CHECK: store <8 x i16> [[VUZP1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope // CHECK: ret void poly16x8x2_t test_vuzpq_p16(poly16x8_t a, poly16x8_t b) { return vuzpq_p16(a, b); @@ -20907,10 +20907,10 @@ // CHECK: [[TMP0:%.*]] = bitcast %struct.int8x8x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i8>* // CHECK: [[VZIP_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> -// CHECK: store <8 x i8> [[VZIP_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope !111 +// CHECK: store <8 x i8> [[VZIP_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope // CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x i8>, <8 x i8>* [[TMP1]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> -// CHECK: store <8 x i8> [[VZIP1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope !111 +// CHECK: store <8 x i8> [[VZIP1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope // CHECK: ret void int8x8x2_t test_vzip_s8(int8x8_t a, int8x8_t b) { return vzip_s8(a, b); @@ -20922,10 +20922,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>* // CHECK: [[VZIP_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> -// CHECK: store <4 x i16> [[VZIP_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope !114 +// CHECK: store <4 x i16> [[VZIP_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i16>, <4 x i16>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> -// CHECK: store <4 x i16> [[VZIP1_I]], <4 x i16>* 
[[TMP4]], align 4, !alias.scope !114 +// CHECK: store <4 x i16> [[VZIP1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope // CHECK: ret void int16x4x2_t test_vzip_s16(int16x4_t a, int16x4_t b) { return vzip_s16(a, b); @@ -20937,10 +20937,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <2 x i32>* // CHECK: [[VZIP_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> -// CHECK: store <2 x i32> [[VZIP_I]], <2 x i32>* [[TMP3]], align 4, !alias.scope !117 +// CHECK: store <2 x i32> [[VZIP_I]], <2 x i32>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <2 x i32>, <2 x i32>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> -// CHECK: store <2 x i32> [[VZIP1_I]], <2 x i32>* [[TMP4]], align 4, !alias.scope !117 +// CHECK: store <2 x i32> [[VZIP1_I]], <2 x i32>* [[TMP4]], align 4, !alias.scope // CHECK: ret void int32x2x2_t test_vzip_s32(int32x2_t a, int32x2_t b) { return vzip_s32(a, b); @@ -20950,10 +20950,10 @@ // CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x8x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i8>* // CHECK: [[VZIP_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> -// CHECK: store <8 x i8> [[VZIP_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope !120 +// CHECK: store <8 x i8> [[VZIP_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope // CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x i8>, <8 x i8>* [[TMP1]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> -// CHECK: store <8 x i8> [[VZIP1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope !120 +// CHECK: store <8 x i8> [[VZIP1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope // CHECK: ret void uint8x8x2_t test_vzip_u8(uint8x8_t a, uint8x8_t b) { return vzip_u8(a, b); @@ -20965,10 +20965,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to 
<4 x i16>* // CHECK: [[VZIP_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> -// CHECK: store <4 x i16> [[VZIP_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope !123 +// CHECK: store <4 x i16> [[VZIP_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i16>, <4 x i16>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> -// CHECK: store <4 x i16> [[VZIP1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope !123 +// CHECK: store <4 x i16> [[VZIP1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope // CHECK: ret void uint16x4x2_t test_vzip_u16(uint16x4_t a, uint16x4_t b) { return vzip_u16(a, b); @@ -20980,10 +20980,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <2 x i32> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <2 x i32>* // CHECK: [[VZIP_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> -// CHECK: store <2 x i32> [[VZIP_I]], <2 x i32>* [[TMP3]], align 4, !alias.scope !126 +// CHECK: store <2 x i32> [[VZIP_I]], <2 x i32>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <2 x i32>, <2 x i32>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <2 x i32> %a, <2 x i32> %b, <2 x i32> -// CHECK: store <2 x i32> [[VZIP1_I]], <2 x i32>* [[TMP4]], align 4, !alias.scope !126 +// CHECK: store <2 x i32> [[VZIP1_I]], <2 x i32>* [[TMP4]], align 4, !alias.scope // CHECK: ret void uint32x2x2_t test_vzip_u32(uint32x2_t a, uint32x2_t b) { return vzip_u32(a, b); @@ -20995,10 +20995,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <2 x float> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <2 x float>* // CHECK: [[VZIP_I:%.*]] = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> -// CHECK: store <2 x float> [[VZIP_I]], <2 x float>* [[TMP3]], align 4, !alias.scope !129 +// CHECK: store <2 x float> [[VZIP_I]], <2 x float>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <2 x float>, <2 x 
float>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <2 x float> %a, <2 x float> %b, <2 x i32> -// CHECK: store <2 x float> [[VZIP1_I]], <2 x float>* [[TMP4]], align 4, !alias.scope !129 +// CHECK: store <2 x float> [[VZIP1_I]], <2 x float>* [[TMP4]], align 4, !alias.scope // CHECK: ret void float32x2x2_t test_vzip_f32(float32x2_t a, float32x2_t b) { return vzip_f32(a, b); @@ -21008,10 +21008,10 @@ // CHECK: [[TMP0:%.*]] = bitcast %struct.poly8x8x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <8 x i8>* // CHECK: [[VZIP_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> -// CHECK: store <8 x i8> [[VZIP_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope !132 +// CHECK: store <8 x i8> [[VZIP_I]], <8 x i8>* [[TMP1]], align 4, !alias.scope // CHECK: [[TMP2:%.*]] = getelementptr inbounds <8 x i8>, <8 x i8>* [[TMP1]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> -// CHECK: store <8 x i8> [[VZIP1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope !132 +// CHECK: store <8 x i8> [[VZIP1_I]], <8 x i8>* [[TMP2]], align 4, !alias.scope // CHECK: ret void poly8x8x2_t test_vzip_p8(poly8x8_t a, poly8x8_t b) { return vzip_p8(a, b); @@ -21023,10 +21023,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <4 x i16> %b to <8 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i16>* // CHECK: [[VZIP_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> -// CHECK: store <4 x i16> [[VZIP_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope !135 +// CHECK: store <4 x i16> [[VZIP_I]], <4 x i16>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i16>, <4 x i16>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <4 x i16> %a, <4 x i16> %b, <4 x i32> -// CHECK: store <4 x i16> [[VZIP1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope !135 +// CHECK: store <4 x i16> [[VZIP1_I]], <4 x i16>* [[TMP4]], align 4, !alias.scope // CHECK: ret void poly16x4x2_t 
test_vzip_p16(poly16x4_t a, poly16x4_t b) { return vzip_p16(a, b); @@ -21036,10 +21036,10 @@ // CHECK: [[TMP0:%.*]] = bitcast %struct.int8x16x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>* // CHECK: [[VZIP_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> -// CHECK: store <16 x i8> [[VZIP_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope !138 +// CHECK: store <16 x i8> [[VZIP_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope // CHECK: [[TMP2:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[TMP1]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> -// CHECK: store <16 x i8> [[VZIP1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope !138 +// CHECK: store <16 x i8> [[VZIP1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope // CHECK: ret void int8x16x2_t test_vzipq_s8(int8x16_t a, int8x16_t b) { return vzipq_s8(a, b); @@ -21051,10 +21051,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>* // CHECK: [[VZIP_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> -// CHECK: store <8 x i16> [[VZIP_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope !141 +// CHECK: store <8 x i16> [[VZIP_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> -// CHECK: store <8 x i16> [[VZIP1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope !141 +// CHECK: store <8 x i16> [[VZIP1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope // CHECK: ret void int16x8x2_t test_vzipq_s16(int16x8_t a, int16x8_t b) { return vzipq_s16(a, b); @@ -21066,10 +21066,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i32>* // CHECK: [[VZIP_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> -// CHECK: store <4 x i32> 
[[VZIP_I]], <4 x i32>* [[TMP3]], align 4, !alias.scope !144 +// CHECK: store <4 x i32> [[VZIP_I]], <4 x i32>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> -// CHECK: store <4 x i32> [[VZIP1_I]], <4 x i32>* [[TMP4]], align 4, !alias.scope !144 +// CHECK: store <4 x i32> [[VZIP1_I]], <4 x i32>* [[TMP4]], align 4, !alias.scope // CHECK: ret void int32x4x2_t test_vzipq_s32(int32x4_t a, int32x4_t b) { return vzipq_s32(a, b); @@ -21079,10 +21079,10 @@ // CHECK: [[TMP0:%.*]] = bitcast %struct.uint8x16x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>* // CHECK: [[VZIP_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> -// CHECK: store <16 x i8> [[VZIP_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope !147 +// CHECK: store <16 x i8> [[VZIP_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope // CHECK: [[TMP2:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[TMP1]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> -// CHECK: store <16 x i8> [[VZIP1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope !147 +// CHECK: store <16 x i8> [[VZIP1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope // CHECK: ret void uint8x16x2_t test_vzipq_u8(uint8x16_t a, uint8x16_t b) { return vzipq_u8(a, b); @@ -21094,10 +21094,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>* // CHECK: [[VZIP_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> -// CHECK: store <8 x i16> [[VZIP_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope !150 +// CHECK: store <8 x i16> [[VZIP_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> -// CHECK: 
store <8 x i16> [[VZIP1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope !150 +// CHECK: store <8 x i16> [[VZIP1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope // CHECK: ret void uint16x8x2_t test_vzipq_u16(uint16x8_t a, uint16x8_t b) { return vzipq_u16(a, b); @@ -21109,10 +21109,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <4 x i32> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x i32>* // CHECK: [[VZIP_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> -// CHECK: store <4 x i32> [[VZIP_I]], <4 x i32>* [[TMP3]], align 4, !alias.scope !153 +// CHECK: store <4 x i32> [[VZIP_I]], <4 x i32>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> -// CHECK: store <4 x i32> [[VZIP1_I]], <4 x i32>* [[TMP4]], align 4, !alias.scope !153 +// CHECK: store <4 x i32> [[VZIP1_I]], <4 x i32>* [[TMP4]], align 4, !alias.scope // CHECK: ret void uint32x4x2_t test_vzipq_u32(uint32x4_t a, uint32x4_t b) { return vzipq_u32(a, b); @@ -21124,10 +21124,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <4 x float> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <4 x float>* // CHECK: [[VZIP_I:%.*]] = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> -// CHECK: store <4 x float> [[VZIP_I]], <4 x float>* [[TMP3]], align 4, !alias.scope !156 +// CHECK: store <4 x float> [[VZIP_I]], <4 x float>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <4 x float>, <4 x float>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> -// CHECK: store <4 x float> [[VZIP1_I]], <4 x float>* [[TMP4]], align 4, !alias.scope !156 +// CHECK: store <4 x float> [[VZIP1_I]], <4 x float>* [[TMP4]], align 4, !alias.scope // CHECK: ret void float32x4x2_t test_vzipq_f32(float32x4_t a, float32x4_t b) { return vzipq_f32(a, b); @@ -21137,10 +21137,10 @@ // CHECK: 
[[TMP0:%.*]] = bitcast %struct.poly8x16x2_t* [[AGG_RESULT]] to i8* // CHECK: [[TMP1:%.*]] = bitcast i8* [[TMP0]] to <16 x i8>* // CHECK: [[VZIP_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> -// CHECK: store <16 x i8> [[VZIP_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope !159 +// CHECK: store <16 x i8> [[VZIP_I]], <16 x i8>* [[TMP1]], align 4, !alias.scope // CHECK: [[TMP2:%.*]] = getelementptr inbounds <16 x i8>, <16 x i8>* [[TMP1]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> -// CHECK: store <16 x i8> [[VZIP1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope !159 +// CHECK: store <16 x i8> [[VZIP1_I]], <16 x i8>* [[TMP2]], align 4, !alias.scope // CHECK: ret void poly8x16x2_t test_vzipq_p8(poly8x16_t a, poly8x16_t b) { return vzipq_p8(a, b); @@ -21152,10 +21152,10 @@ // CHECK: [[TMP2:%.*]] = bitcast <8 x i16> %b to <16 x i8> // CHECK: [[TMP3:%.*]] = bitcast i8* [[TMP0]] to <8 x i16>* // CHECK: [[VZIP_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> -// CHECK: store <8 x i16> [[VZIP_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope !162 +// CHECK: store <8 x i16> [[VZIP_I]], <8 x i16>* [[TMP3]], align 4, !alias.scope // CHECK: [[TMP4:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[TMP3]], i32 1 // CHECK: [[VZIP1_I:%.*]] = shufflevector <8 x i16> %a, <8 x i16> %b, <8 x i32> -// CHECK: store <8 x i16> [[VZIP1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope !162 +// CHECK: store <8 x i16> [[VZIP1_I]], <8 x i16>* [[TMP4]], align 4, !alias.scope // CHECK: ret void poly16x8x2_t test_vzipq_p16(poly16x8_t a, poly16x8_t b) { return vzipq_p16(a, b); diff --git a/clang/test/CodeGen/armv7k-abi.c b/clang/test/CodeGen/armv7k-abi.c --- a/clang/test/CodeGen/armv7k-abi.c +++ b/clang/test/CodeGen/armv7k-abi.c @@ -72,14 +72,14 @@ short y; } PaddedSmallStruct; -// CHECK: define i32 @return_padded_small_struct() +// CHECK: define partialinit i32 @return_padded_small_struct() PaddedSmallStruct 
return_padded_small_struct() {} typedef struct { char arr[7]; } OddlySizedStruct; -// CHECK: define [2 x i32] @return_oddly_sized_struct() +// CHECK: define partialinit [2 x i32] @return_oddly_sized_struct() OddlySizedStruct return_oddly_sized_struct() {} // CHECK: define <4 x float> @test_va_arg_vec(i8* %l) diff --git a/clang/test/CodeGen/catch-implicit-integer-sign-changes-incdec.c b/clang/test/CodeGen/catch-implicit-integer-sign-changes-incdec.c --- a/clang/test/CodeGen/catch-implicit-integer-sign-changes-incdec.c +++ b/clang/test/CodeGen/catch-implicit-integer-sign-changes-incdec.c @@ -34,7 +34,7 @@ // CHECK-SANITIZE-NEXT: [[X_PROMOTED:%.*]] = zext i16 [[X_RELOADED]] to i32 // CHECK-SANITIZE-NEXT: [[INC:%.*]] = add i32 [[X_PROMOTED]], 1 // CHECK-SANITIZE-NEXT: [[X_PROMOTED_DEMOTED:%.*]] = trunc i32 [[INC]] to i16 -// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize !2 +// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize // CHECK-SANITIZE-NEXT: [[SIGNCHANGECHECK:%.*]] = icmp eq i1 [[SRC_INC_NEGATIVITYCHECK]], false, !nosanitize // CHECK-SANITIZE-NEXT: br i1 [[SIGNCHANGECHECK]], label %[[CONT:.*]], label %[[HANDLER_IMPLICIT_X_PROMOTEDERSION:[^,]+]],{{.*}} !nosanitize // CHECK-SANITIZE: [[HANDLER_IMPLICIT_X_PROMOTEDERSION]]: @@ -69,7 +69,7 @@ // CHECK-SANITIZE-NEXT: [[X_PROMOTED:%.*]] = zext i16 [[X_RELOADED]] to i32 // CHECK-SANITIZE-NEXT: [[INC:%.*]] = add i32 [[X_PROMOTED]], -1 // CHECK-SANITIZE-NEXT: [[X_PROMOTED_DEMOTED:%.*]] = trunc i32 [[INC]] to i16 -// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize !2 +// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize // CHECK-SANITIZE-NEXT: [[SIGNCHANGECHECK:%.*]] = icmp eq i1 [[SRC_INC_NEGATIVITYCHECK]], false, !nosanitize // CHECK-SANITIZE-NEXT: br i1 [[SIGNCHANGECHECK]], label %[[CONT:.*]], label 
%[[HANDLER_IMPLICIT_X_PROMOTEDERSION:[^,]+]],{{.*}} !nosanitize // CHECK-SANITIZE: [[HANDLER_IMPLICIT_X_PROMOTEDERSION]]: @@ -105,7 +105,7 @@ // CHECK-SANITIZE-NEXT: [[X_PROMOTED:%.*]] = zext i16 [[X_RELOADED]] to i32 // CHECK-SANITIZE-NEXT: [[INC:%.*]] = add i32 [[X_PROMOTED]], 1 // CHECK-SANITIZE-NEXT: [[X_PROMOTED_DEMOTED:%.*]] = trunc i32 [[INC]] to i16 -// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize !2 +// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize // CHECK-SANITIZE-NEXT: [[SIGNCHANGECHECK:%.*]] = icmp eq i1 [[SRC_INC_NEGATIVITYCHECK]], false, !nosanitize // CHECK-SANITIZE-NEXT: br i1 [[SIGNCHANGECHECK]], label %[[CONT:.*]], label %[[HANDLER_IMPLICIT_X_PROMOTEDERSION:[^,]+]],{{.*}} !nosanitize // CHECK-SANITIZE: [[HANDLER_IMPLICIT_X_PROMOTEDERSION]]: @@ -141,7 +141,7 @@ // CHECK-SANITIZE-NEXT: [[X_PROMOTED:%.*]] = zext i16 [[X_RELOADED]] to i32 // CHECK-SANITIZE-NEXT: [[INC:%.*]] = add i32 [[X_PROMOTED]], -1 // CHECK-SANITIZE-NEXT: [[X_PROMOTED_DEMOTED:%.*]] = trunc i32 [[INC]] to i16 -// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize !2 +// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize // CHECK-SANITIZE-NEXT: [[SIGNCHANGECHECK:%.*]] = icmp eq i1 [[SRC_INC_NEGATIVITYCHECK]], false, !nosanitize // CHECK-SANITIZE-NEXT: br i1 [[SIGNCHANGECHECK]], label %[[CONT:.*]], label %[[HANDLER_IMPLICIT_X_PROMOTEDERSION:[^,]+]],{{.*}} !nosanitize // CHECK-SANITIZE: [[HANDLER_IMPLICIT_X_PROMOTEDERSION]]: @@ -177,8 +177,8 @@ // CHECK-SANITIZE-NEXT: [[X_PROMOTED:%.*]] = sext i16 [[X_RELOADED]] to i32 // CHECK-SANITIZE-NEXT: [[INC:%.*]] = add i32 [[X_PROMOTED]], 1 // CHECK-SANITIZE-NEXT: [[X_PROMOTED_DEMOTED:%.*]] = trunc i32 [[INC]] to i16 -// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize !2 -// CHECK-SANITIZE-NEXT: [[DST_NEGATIVITYCHECK:%.*]] = 
icmp slt i16 [[X_PROMOTED_DEMOTED]], 0, !nosanitize !2 +// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize +// CHECK-SANITIZE-NEXT: [[DST_NEGATIVITYCHECK:%.*]] = icmp slt i16 [[X_PROMOTED_DEMOTED]], 0, !nosanitize // CHECK-SANITIZE-NEXT: [[SIGNCHANGECHECK:%.*]] = icmp eq i1 [[SRC_INC_NEGATIVITYCHECK]], [[DST_NEGATIVITYCHECK]], !nosanitize // CHECK-SANITIZE-NEXT: br i1 [[SIGNCHANGECHECK]], label %[[CONT:.*]], label %[[HANDLER_IMPLICIT_X_PROMOTEDERSION:[^,]+]],{{.*}} !nosanitize // CHECK-SANITIZE: [[HANDLER_IMPLICIT_X_PROMOTEDERSION]]: @@ -213,8 +213,8 @@ // CHECK-SANITIZE-NEXT: [[X_PROMOTED:%.*]] = sext i16 [[X_RELOADED]] to i32 // CHECK-SANITIZE-NEXT: [[INC:%.*]] = add i32 [[X_PROMOTED]], -1 // CHECK-SANITIZE-NEXT: [[X_PROMOTED_DEMOTED:%.*]] = trunc i32 [[INC]] to i16 -// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize !2 -// CHECK-SANITIZE-NEXT: [[DST_NEGATIVITYCHECK:%.*]] = icmp slt i16 [[X_PROMOTED_DEMOTED]], 0, !nosanitize !2 +// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize +// CHECK-SANITIZE-NEXT: [[DST_NEGATIVITYCHECK:%.*]] = icmp slt i16 [[X_PROMOTED_DEMOTED]], 0, !nosanitize // CHECK-SANITIZE-NEXT: [[SIGNCHANGECHECK:%.*]] = icmp eq i1 [[SRC_INC_NEGATIVITYCHECK]], [[DST_NEGATIVITYCHECK]], !nosanitize // CHECK-SANITIZE-NEXT: br i1 [[SIGNCHANGECHECK]], label %[[CONT:.*]], label %[[HANDLER_IMPLICIT_X_PROMOTEDERSION:[^,]+]],{{.*}} !nosanitize // CHECK-SANITIZE: [[HANDLER_IMPLICIT_X_PROMOTEDERSION]]: @@ -250,8 +250,8 @@ // CHECK-SANITIZE-NEXT: [[X_PROMOTED:%.*]] = sext i16 [[X_RELOADED]] to i32 // CHECK-SANITIZE-NEXT: [[INC:%.*]] = add i32 [[X_PROMOTED]], 1 // CHECK-SANITIZE-NEXT: [[X_PROMOTED_DEMOTED:%.*]] = trunc i32 [[INC]] to i16 -// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize !2 -// CHECK-SANITIZE-NEXT: [[DST_NEGATIVITYCHECK:%.*]] = icmp slt i16 [[X_PROMOTED_DEMOTED]], 0, 
!nosanitize !2 +// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize +// CHECK-SANITIZE-NEXT: [[DST_NEGATIVITYCHECK:%.*]] = icmp slt i16 [[X_PROMOTED_DEMOTED]], 0, !nosanitize // CHECK-SANITIZE-NEXT: [[SIGNCHANGECHECK:%.*]] = icmp eq i1 [[SRC_INC_NEGATIVITYCHECK]], [[DST_NEGATIVITYCHECK]], !nosanitize // CHECK-SANITIZE-NEXT: br i1 [[SIGNCHANGECHECK]], label %[[CONT:.*]], label %[[HANDLER_IMPLICIT_X_PROMOTEDERSION:[^,]+]],{{.*}} !nosanitize // CHECK-SANITIZE: [[HANDLER_IMPLICIT_X_PROMOTEDERSION]]: @@ -287,8 +287,8 @@ // CHECK-SANITIZE-NEXT: [[X_PROMOTED:%.*]] = sext i16 [[X_RELOADED]] to i32 // CHECK-SANITIZE-NEXT: [[INC:%.*]] = add i32 [[X_PROMOTED]], -1 // CHECK-SANITIZE-NEXT: [[X_PROMOTED_DEMOTED:%.*]] = trunc i32 [[INC]] to i16 -// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize !2 -// CHECK-SANITIZE-NEXT: [[DST_NEGATIVITYCHECK:%.*]] = icmp slt i16 [[X_PROMOTED_DEMOTED]], 0, !nosanitize !2 +// CHECK-SANITIZE-NEXT: [[SRC_INC_NEGATIVITYCHECK:%.*]] = icmp slt i32 [[INC]], 0, !nosanitize +// CHECK-SANITIZE-NEXT: [[DST_NEGATIVITYCHECK:%.*]] = icmp slt i16 [[X_PROMOTED_DEMOTED]], 0, !nosanitize // CHECK-SANITIZE-NEXT: [[SIGNCHANGECHECK:%.*]] = icmp eq i1 [[SRC_INC_NEGATIVITYCHECK]], [[DST_NEGATIVITYCHECK]], !nosanitize // CHECK-SANITIZE-NEXT: br i1 [[SIGNCHANGECHECK]], label %[[CONT:.*]], label %[[HANDLER_IMPLICIT_X_PROMOTEDERSION:[^,]+]],{{.*}} !nosanitize // CHECK-SANITIZE: [[HANDLER_IMPLICIT_X_PROMOTEDERSION]]: diff --git a/clang/test/CodeGen/cmse-clear-arg.c b/clang/test/CodeGen/cmse-clear-arg.c --- a/clang/test/CodeGen/cmse-clear-arg.c +++ b/clang/test/CodeGen/cmse-clear-arg.c @@ -73,7 +73,7 @@ // CHECK-LE: %cmse.clear1 = and i32 {{.*}}, 134215708 // CHECK-BE: %cmse.clear1 = and i32 {{.*}}, 941621216 // CHECK: %[[R1:.*]] = insertvalue [2 x i32] %[[R0]], i32 %cmse.clear1, 1 -// CHECK: call {{.*}} void %[[FN]]([2 x i32] %[[R1]]) +// CHECK: call {{.*}} void %[[FN]]([2 x i32] 
partialinit %[[R1]]) void __attribute__((cmse_nonsecure_call)) (*g15_1)(int, int, int, T15); void f15_1() { @@ -87,7 +87,7 @@ // CHECK-LE: %cmse.clear1 = and i32 {{.*}}, 134215708 // CHECK-BE: %cmse.clear1 = and i32 {{.*}}, 941621216 // CHECK: %[[R1:.*]] = insertvalue [2 x i32] %[[R0]], i32 %cmse.clear1, 1 -// CHECK: call {{.*}} void %[[FN]](i32 0, i32 1, i32 2, [2 x i32] %[[R1]]) +// CHECK: call {{.*}} void %[[FN]](i32 0, i32 1, i32 2, [2 x i32] partialinit %[[R1]]) // LE: 11111111 ........ 11111111 11111111 1111.... ...11111 ........ .111111. // LE: 0xff00fffff01f007e/9079291968726434047 @@ -111,7 +111,7 @@ // CHECK-LE: %cmse.clear = and i64 {{.*}}, 9079291968726434047 // CHECK-BE: %cmse.clear = and i64 {{.*}}, -71776123088273282 // CHECK: %[[R:.*]] = insertvalue [1 x i64] undef, i64 %cmse.clear, 0 -// CHECK: call {{.*}} void %0([1 x i64] %[[R]]) +// CHECK: call {{.*}} void %0([1 x i64] partialinit %[[R]]) // LE0: 1111..11 .......1 1111..11 .......1 1111..11 .......1 1111..11 .......1 @@ -150,7 +150,7 @@ // CHECK-LE: %cmse.clear3 = and i32 {{.*}}, 16777215 // CHECK-BE: %cmse.clear3 = and i32 {{.*}}, -256 // CHECK: %[[R3:.*]] = insertvalue [4 x i32] %[[R2]], i32 %cmse.clear3, 3 -// CHECK: call {{.*}} void %[[FN]]([4 x i32] %[[R3]]) +// CHECK: call {{.*}} void %[[FN]]([4 x i32] partialinit %[[R3]]) // LE: 11111111 11111111 ..111... ..111... 0x3838ffff/943259647 // BE: 11111111 11111111 ...111.. ...111.. 
0xffff1c1c/-58340 @@ -171,7 +171,7 @@ // CHECK-LE: %cmse.clear = and i32 {{.*}}, 943259647 // CHECK-BE: %cmse.clear = and i32 {{.*}}, -58340 // CHECK: %[[R:.*]] = insertvalue [1 x i32] undef, i32 %cmse.clear, 0 -// CHECK: call {{.*}} void %[[FN]]([1 x i32] %[[R]]) +// CHECK: call {{.*}} void %[[FN]]([1 x i32] partialinit %[[R]]) typedef struct T20 { diff --git a/clang/test/CodeGen/cmse-clear-fp16.c b/clang/test/CodeGen/cmse-clear-fp16.c --- a/clang/test/CodeGen/cmse-clear-fp16.c +++ b/clang/test/CodeGen/cmse-clear-fp16.c @@ -44,16 +44,16 @@ // CHECK-NOPT-SOFT: %[[V0:.*]] = load i32 // CHECK-NOPT-SOFT: %[[V1:.*]] = and i32 %[[V0]], 65535 -// CHECK-NOPT-SOFT: call {{.*}} void {{.*}}(i32 %[[V1]]) +// CHECK-NOPT-SOFT: call {{.*}} void {{.*}}(i32 partialinit %[[V1]]) // CHECK-OPT-SOFT: %[[V1:.*]] = zext i16 {{.*}} to i32 -// CHECK-OPT-SOFT: call {{.*}} void {{.*}}(i32 %[[V1]]) +// CHECK-OPT-SOFT: call {{.*}} void {{.*}}(i32 partialinit %[[V1]]) // CHECK-NOPT-HARD: %[[V0:.*]] = bitcast float {{.*}} to i32 // CHECK-NOPT-HARD: %[[V1:.*]] = and i32 %[[V0]], 65535 // CHECK-NOPT-HARD: %[[V2:.*]] = bitcast i32 %[[V1]] to float -// CHECK-NOPT-HARD: call {{.*}}(float %[[V2]]) +// CHECK-NOPT-HARD: call {{.*}}(float partialinit %[[V2]]) // CHECK-OPT-HARD: %[[V0:.*]] = zext i16 {{.*}} to i32 // CHECK-OPT-HARD: %[[V1:.*]] = bitcast i32 %[[V0]] to float -// CHECK-OPT-HARD: call {{.*}}(float %[[V1]]) +// CHECK-OPT-HARD: call {{.*}}(float partialinit %[[V1]]) diff --git a/clang/test/CodeGen/ext-int-cc.c b/clang/test/CodeGen/ext-int-cc.c --- a/clang/test/CodeGen/ext-int-cc.c +++ b/clang/test/CodeGen/ext-int-cc.c @@ -61,7 +61,7 @@ // ARM: define arm_aapcscc void @ParamPassing(i129* byval(i129) align 8 %{{.+}}, i128* byval(i128) align 8 %{{.+}}, i64 %{{.+}}) void ParamPassing2(_ExtInt(129) a, _ExtInt(127) b, _ExtInt(63) c) {} -// LIN64: define void @ParamPassing2(i129* byval(i129) align 8 %{{.+}}, i64 %{{.+}}, i64 %{{.+}}, i64 %{{.+}}) +// LIN64: define void @ParamPassing2(i129* 
byval(i129) align 8 %{{.+}}, i64 partialinit %{{.+}}, i64 partialinit %{{.+}}, i64 partialinit %{{.+}}) // WIN64: define dso_local void @ParamPassing2(i129* %{{.+}}, i127* %{{.+}}, i63 %{{.+}}) // LIN32: define void @ParamPassing2(i129* %{{.+}}, i127* %{{.+}}, i63 %{{.+}}) // WIN32: define dso_local void @ParamPassing2(i129* %{{.+}}, i127* %{{.+}}, i63 %{{.+}}) @@ -120,7 +120,7 @@ // ARM: define arm_aapcscc void @ParamPassing3(i15 signext %{{.+}}, i31 signext %{{.+}}) _ExtInt(63) ReturnPassing(){} -// LIN64: define i64 @ReturnPassing( +// LIN64: define partialinit i64 @ReturnPassing( // WIN64: define dso_local i63 @ReturnPassing( // LIN32: define i63 @ReturnPassing( // WIN32: define dso_local i63 @ReturnPassing( @@ -178,7 +178,7 @@ // ARM: define arm_aapcscc i64 @ReturnPassing2( _ExtInt(127) ReturnPassing3(){} -// LIN64: define { i64, i64 } @ReturnPassing3( +// LIN64: define partialinit { i64, i64 } @ReturnPassing3( // WIN64: define dso_local void @ReturnPassing3(i127* noalias sret // LIN32: define void @ReturnPassing3(i127* noalias sret // WIN32: define dso_local void @ReturnPassing3(i127* noalias sret diff --git a/clang/test/CodeGen/lanai-arguments.c b/clang/test/CodeGen/lanai-arguments.c --- a/clang/test/CodeGen/lanai-arguments.c +++ b/clang/test/CodeGen/lanai-arguments.c @@ -62,7 +62,7 @@ char b; }; // Unions should be passed inreg. -// CHECK: define void @f9(i32 inreg %s.coerce) +// CHECK: define void @f9(i32 inreg partialinit %s.coerce) void f9(union simple_union s) {} typedef struct { @@ -71,5 +71,5 @@ int b8 : 8; } bitfield1; // Bitfields should be passed inreg. 
-// CHECK: define void @f10(i32 inreg %bf1.coerce) +// CHECK: define void @f10(i32 inreg partialinit %bf1.coerce) void f10(bitfield1 bf1) {} diff --git a/clang/test/CodeGen/mips-byval-arg.c b/clang/test/CodeGen/mips-byval-arg.c --- a/clang/test/CodeGen/mips-byval-arg.c +++ b/clang/test/CodeGen/mips-byval-arg.c @@ -8,7 +8,7 @@ extern void foo2(S0); // O32-LABEL: define void @foo1(i32 inreg %a0.coerce0, i32 inreg %a0.coerce1, i32 inreg %a0.coerce2) -// N64-LABEL: define void @foo1(i64 inreg %a0.coerce0, i32 inreg %a0.coerce1) +// N64-LABEL: define void @foo1(i64 inreg partialinit %a0.coerce0, i32 inreg partialinit %a0.coerce1) void foo1(S0 a0) { foo2(a0); diff --git a/clang/test/CodeGen/mips64-class-return.cpp b/clang/test/CodeGen/mips64-class-return.cpp --- a/clang/test/CodeGen/mips64-class-return.cpp +++ b/clang/test/CodeGen/mips64-class-return.cpp @@ -24,12 +24,12 @@ extern D1 gd1; extern D2 gd2; -// CHECK: define inreg { i64, i64 } @_Z4foo1v() +// CHECK: define inreg partialinit { i64, i64 } @_Z4foo1v() D0 foo1(void) { return gd0; } -// CHECK: define inreg { double, float } @_Z4foo2v() +// CHECK: define inreg partialinit { double, float } @_Z4foo2v() D1 foo2(void) { return gd1; } @@ -39,7 +39,7 @@ gd2 = a0; } -// CHECK-LABEL: define void @_Z4foo42D0(i64 inreg %a0.coerce0, i64 inreg %a0.coerce1) +// CHECK-LABEL: define void @_Z4foo42D0(i64 inreg partialinit %a0.coerce0, i64 inreg partialinit %a0.coerce1) void foo4(D0 a0) { gd0 = a0; } diff --git a/clang/test/CodeGen/mips64-padding-arg.c b/clang/test/CodeGen/mips64-padding-arg.c --- a/clang/test/CodeGen/mips64-padding-arg.c +++ b/clang/test/CodeGen/mips64-padding-arg.c @@ -9,9 +9,9 @@ // Insert padding to ensure arguments of type S0 are aligned to 16-byte boundaries. 
-// N64-LABEL: define void @foo1(i32 signext %a0, i64 %0, double inreg %a1.coerce0, i64 inreg %a1.coerce1, i64 inreg %a1.coerce2, i64 inreg %a1.coerce3, double inreg %a2.coerce0, i64 inreg %a2.coerce1, i64 inreg %a2.coerce2, i64 inreg %a2.coerce3, i32 signext %b, i64 %1, double inreg %a3.coerce0, i64 inreg %a3.coerce1, i64 inreg %a3.coerce2, i64 inreg %a3.coerce3) -// N64: tail call void @foo2(i32 signext 1, i32 signext 2, i32 signext %a0, i64 undef, double inreg %a1.coerce0, i64 inreg %a1.coerce1, i64 inreg %a1.coerce2, i64 inreg %a1.coerce3, double inreg %a2.coerce0, i64 inreg %a2.coerce1, i64 inreg %a2.coerce2, i64 inreg %a2.coerce3, i32 signext 3, i64 undef, double inreg %a3.coerce0, i64 inreg %a3.coerce1, i64 inreg %a3.coerce2, i64 inreg %a3.coerce3) -// N64: declare void @foo2(i32 signext, i32 signext, i32 signext, i64, double inreg, i64 inreg, i64 inreg, i64 inreg, double inreg, i64 inreg, i64 inreg, i64 inreg, i32 signext, i64, double inreg, i64 inreg, i64 inreg, i64 inreg) +// N64-LABEL: define void @foo1(i32 signext %a0, i64 %0, double inreg partialinit %a1.coerce0, i64 inreg partialinit %a1.coerce1, i64 inreg partialinit %a1.coerce2, i64 inreg partialinit %a1.coerce3, double inreg partialinit %a2.coerce0, i64 inreg partialinit %a2.coerce1, i64 inreg partialinit %a2.coerce2, i64 inreg partialinit %a2.coerce3, i32 signext %b, i64 %1, double inreg partialinit %a3.coerce0, i64 inreg partialinit %a3.coerce1, i64 inreg partialinit %a3.coerce2, i64 inreg partialinit %a3.coerce3) +// N64: tail call void @foo2(i32 signext 1, i32 signext 2, i32 signext %a0, i64 undef, double inreg partialinit %a1.coerce0, i64 inreg partialinit %a1.coerce1, i64 inreg partialinit %a1.coerce2, i64 inreg partialinit %a1.coerce3, double inreg partialinit %a2.coerce0, i64 inreg partialinit %a2.coerce1, i64 inreg partialinit %a2.coerce2, i64 inreg partialinit %a2.coerce3, i32 signext 3, i64 undef, double inreg partialinit %a3.coerce0, i64 inreg partialinit %a3.coerce1, i64 inreg 
partialinit %a3.coerce2, i64 inreg partialinit %a3.coerce3) +// N64: declare void @foo2(i32 signext, i32 signext, i32 signext, i64, double inreg partialinit, i64 inreg partialinit, i64 inreg partialinit, i64 inreg partialinit, double inreg partialinit, i64 inreg partialinit, i64 inreg partialinit, i64 inreg partialinit, i32 signext, i64, double inreg partialinit, i64 inreg partialinit, i64 inreg partialinit, i64 inreg partialinit) extern void foo2(int, int, int, S0, S0, int, S0); diff --git a/clang/test/CodeGen/ppc32-and-aix-struct-return.c b/clang/test/CodeGen/ppc32-and-aix-struct-return.c --- a/clang/test/CodeGen/ppc32-and-aix-struct-return.c +++ b/clang/test/CodeGen/ppc32-and-aix-struct-return.c @@ -84,7 +84,7 @@ Seven ret7(void) { return (Seven){"abcdefg"}; } // CHECK-AIX-LABEL: define void @ret8(%struct.Eight* noalias sret {{[^,]*}}) -// CHECK-SVR4-LABEL: define i64 @ret8() +// CHECK-SVR4-LABEL: define partialinit i64 @ret8() Eight ret8(void) { return (Eight){123, 'a'}; } // CHECK-AIX-LABEL: define void @ret9(%struct.Nine* noalias sret {{[^,]*}}) diff --git a/clang/test/CodeGen/ppc64-align-struct.c b/clang/test/CodeGen/ppc64-align-struct.c --- a/clang/test/CodeGen/ppc64-align-struct.c +++ b/clang/test/CodeGen/ppc64-align-struct.c @@ -15,17 +15,17 @@ { } -// CHECK: define void @test2(i32 signext %x, [1 x i128] %y.coerce) +// CHECK: define void @test2(i32 signext %x, [1 x i128] partialinit %y.coerce) void test2 (int x, struct test2 y) { } -// CHECK: define void @test3(i32 signext %x, [2 x i128] %y.coerce) +// CHECK: define void @test3(i32 signext %x, [2 x i128] partialinit %y.coerce) void test3 (int x, struct test3 y) { } -// CHECK: define void @test4(i32 signext %x, [2 x i64] %y.coerce) +// CHECK: define void @test4(i32 signext %x, [2 x i64] partialinit %y.coerce) void test4 (int x, struct test4 y) { } diff --git a/clang/test/CodeGen/ppc64-soft-float.c b/clang/test/CodeGen/ppc64-soft-float.c --- a/clang/test/CodeGen/ppc64-soft-float.c +++ 
b/clang/test/CodeGen/ppc64-soft-float.c @@ -37,35 +37,35 @@ // CHECK-BE: define void @func_f2(%struct.f2* noalias sret align 4 %agg.result, i64 %x.coerce) struct f2 func_f2(struct f2 x) { return x; } -// CHECK-LE: define { i64, i64 } @func_f3([2 x i64] %x.coerce) -// CHECK-BE: define void @func_f3(%struct.f3* noalias sret align 4 %agg.result, [2 x i64] %x.coerce) +// CHECK-LE: define partialinit { i64, i64 } @func_f3([2 x i64] partialinit %x.coerce) +// CHECK-BE: define void @func_f3(%struct.f3* noalias sret align 4 %agg.result, [2 x i64] partialinit %x.coerce) struct f3 func_f3(struct f3 x) { return x; } // CHECK-LE: define { i64, i64 } @func_f4([2 x i64] %x.coerce) // CHECK-BE: define void @func_f4(%struct.f4* noalias sret align 4 %agg.result, [2 x i64] %x.coerce) struct f4 func_f4(struct f4 x) { return x; } -// CHECK: define void @func_f5(%struct.f5* noalias sret align 4 %agg.result, [3 x i64] %x.coerce) +// CHECK: define void @func_f5(%struct.f5* noalias sret align 4 %agg.result, [3 x i64] partialinit %x.coerce) struct f5 func_f5(struct f5 x) { return x; } // CHECK: define void @func_f6(%struct.f6* noalias sret align 4 %agg.result, [3 x i64] %x.coerce) struct f6 func_f6(struct f6 x) { return x; } -// CHECK: define void @func_f7(%struct.f7* noalias sret align 4 %agg.result, [4 x i64] %x.coerce) +// CHECK: define void @func_f7(%struct.f7* noalias sret align 4 %agg.result, [4 x i64] partialinit %x.coerce) struct f7 func_f7(struct f7 x) { return x; } // CHECK: define void @func_f8(%struct.f8* noalias sret align 4 %agg.result, [4 x i64] %x.coerce) struct f8 func_f8(struct f8 x) { return x; } -// CHECK: define void @func_f9(%struct.f9* noalias sret align 4 %agg.result, [5 x i64] %x.coerce) +// CHECK: define void @func_f9(%struct.f9* noalias sret align 4 %agg.result, [5 x i64] partialinit %x.coerce) struct f9 func_f9(struct f9 x) { return x; } // CHECK-LE: define i64 @func_fab(i64 %x.coerce) // CHECK-BE: define void @func_fab(%struct.fab* noalias sret align 4 
%agg.result, i64 %x.coerce) struct fab func_fab(struct fab x) { return x; } -// CHECK-LE: define { i64, i64 } @func_fabc([2 x i64] %x.coerce) -// CHECK-BE: define void @func_fabc(%struct.fabc* noalias sret align 4 %agg.result, [2 x i64] %x.coerce) +// CHECK-LE: define partialinit { i64, i64 } @func_fabc([2 x i64] partialinit %x.coerce) +// CHECK-BE: define void @func_fabc(%struct.fabc* noalias sret align 4 %agg.result, [2 x i64] partialinit %x.coerce) struct fabc func_fabc(struct fabc x) { return x; } // CHECK-LE: define { i64, i64 } @func_f2a2b([2 x i64] %x.coerce) @@ -94,8 +94,8 @@ // CHECK: %[[TMP2:[^ ]+]] = bitcast [2 x i64]* %[[TMP1]] to i8* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[TMP2]], i8* align 4 bitcast (%struct.f3* @global_f3 to i8*), i64 12, i1 false) // CHECK: %[[TMP3:[^ ]+]] = load [2 x i64], [2 x i64]* %[[TMP1]] -// CHECK-LE: call { i64, i64 } @func_f3([2 x i64] %[[TMP3]]) -// CHECK-BE: call void @func_f3(%struct.f3* sret align 4 %[[TMP0]], [2 x i64] %[[TMP3]]) +// CHECK-LE: call partialinit { i64, i64 } @func_f3([2 x i64] partialinit %[[TMP3]]) +// CHECK-BE: call void @func_f3(%struct.f3* sret align 4 %[[TMP0]], [2 x i64] partialinit %[[TMP3]]) struct f3 global_f3; void call_f3(void) { global_f3 = func_f3(global_f3); } @@ -113,7 +113,7 @@ // CHECK: %[[TMP2:[^ ]+]] = bitcast [3 x i64]* %[[TMP1]] to i8* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[TMP2]], i8* align 4 bitcast (%struct.f5* @global_f5 to i8*), i64 20, i1 false) // CHECK: %[[TMP3:[^ ]+]] = load [3 x i64], [3 x i64]* %[[TMP1]] -// CHECK: call void @func_f5(%struct.f5* sret align 4 %[[TMP0]], [3 x i64] %[[TMP3]]) +// CHECK: call void @func_f5(%struct.f5* sret align 4 %[[TMP0]], [3 x i64] partialinit %[[TMP3]]) struct f5 global_f5; void call_f5(void) { global_f5 = func_f5(global_f5); } @@ -130,7 +130,7 @@ // CHECK: %[[TMP2:[^ ]+]] = bitcast [4 x i64]* %[[TMP1]] to i8* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[TMP2]], i8* align 4 
bitcast (%struct.f7* @global_f7 to i8*), i64 28, i1 false) // CHECK: %[[TMP3:[^ ]+]] = load [4 x i64], [4 x i64]* %[[TMP1]], align 8 -// CHECK: call void @func_f7(%struct.f7* sret align 4 %[[TMP0]], [4 x i64] %[[TMP3]]) +// CHECK: call void @func_f7(%struct.f7* sret align 4 %[[TMP0]], [4 x i64] partialinit %[[TMP3]]) struct f7 global_f7; void call_f7(void) { global_f7 = func_f7(global_f7); } @@ -146,7 +146,7 @@ // CHECK: %[[TMP2:[^ ]+]] = bitcast [5 x i64]* %[[TMP1]] to i8* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[TMP2]], i8* align 4 bitcast (%struct.f9* @global_f9 to i8*), i64 36, i1 false) // CHECK: %[[TMP3:[^ ]+]] = load [5 x i64], [5 x i64]* %[[TMP1]] -// CHECK: call void @func_f9(%struct.f9* sret align 4 %{{[^ ]+}}, [5 x i64] %[[TMP3]]) +// CHECK: call void @func_f9(%struct.f9* sret align 4 %{{[^ ]+}}, [5 x i64] partialinit %[[TMP3]]) struct f9 global_f9; void call_f9(void) { global_f9 = func_f9(global_f9); } @@ -164,8 +164,8 @@ // CHECK: %[[TMP2:[^ ]+]] = bitcast [2 x i64]* %[[TMP0]] to i8* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[TMP2]], i8* align 4 bitcast (%struct.fabc* @global_fabc to i8*), i64 12, i1 false) // CHECK: %[[TMP3:[^ ]+]] = load [2 x i64], [2 x i64]* %[[TMP0]], align 8 -// CHECK-LE: %call = call { i64, i64 } @func_fabc([2 x i64] %[[TMP3]]) -// CHECK-BE: call void @func_fabc(%struct.fabc* sret align 4 %[[TMPX]], [2 x i64] %[[TMP3]]) +// CHECK-LE: %call = call partialinit { i64, i64 } @func_fabc([2 x i64] partialinit %[[TMP3]]) +// CHECK-BE: call void @func_fabc(%struct.fabc* sret align 4 %[[TMPX]], [2 x i64] partialinit %[[TMP3]]) struct fabc global_fabc; void call_fabc(void) { global_fabc = func_fabc(global_fabc); } diff --git a/clang/test/CodeGen/ppc64-vector.c b/clang/test/CodeGen/ppc64-vector.c --- a/clang/test/CodeGen/ppc64-vector.c +++ b/clang/test/CodeGen/ppc64-vector.c @@ -15,7 +15,7 @@ return x; } -// CHECK: define i64 @test_v3i16(i64 %x.coerce) +// CHECK: define partialinit i64 @test_v3i16(i64 
partialinit %x.coerce) v3i16 test_v3i16(v3i16 x) { return x; diff --git a/clang/test/CodeGen/ppc64le-aggregates.c b/clang/test/CodeGen/ppc64le-aggregates.c --- a/clang/test/CodeGen/ppc64le-aggregates.c +++ b/clang/test/CodeGen/ppc64le-aggregates.c @@ -41,7 +41,7 @@ // CHECK: define [8 x float] @func_f8([8 x float] %x.coerce) struct f8 func_f8(struct f8 x) { return x; } -// CHECK: define void @func_f9(%struct.f9* noalias sret align 4 %agg.result, [5 x i64] %x.coerce) +// CHECK: define void @func_f9(%struct.f9* noalias sret align 4 %agg.result, [5 x i64] partialinit %x.coerce) struct f9 func_f9(struct f9 x) { return x; } // CHECK: define [2 x float] @func_fab([2 x float] %x.coerce) @@ -106,7 +106,7 @@ // CHECK: %[[TMP2:[^ ]+]] = bitcast [5 x i64]* %[[TMP1]] to i8* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[TMP2]], i8* align 4 bitcast (%struct.f9* @global_f9 to i8*), i64 36, i1 false) // CHECK: %[[TMP3:[^ ]+]] = load [5 x i64], [5 x i64]* %[[TMP1]] -// CHECK: call void @func_f9(%struct.f9* sret align 4 %{{[^ ]+}}, [5 x i64] %[[TMP3]]) +// CHECK: call void @func_f9(%struct.f9* sret align 4 %{{[^ ]+}}, [5 x i64] partialinit %[[TMP3]]) struct f9 global_f9; void call_f9(void) { global_f9 = func_f9(global_f9); } @@ -255,84 +255,84 @@ struct v3fab { float3 a; float3 b; }; struct v3fabc { float3 a; float3 b; float3 c; }; -// CHECK: define [1 x <4 x float>] @func_v3f1(<3 x float> inreg %x.coerce) +// CHECK: define partialinit [1 x <4 x float>] @func_v3f1(<3 x float> inreg partialinit %x.coerce) struct v3f1 func_v3f1(struct v3f1 x) { return x; } -// CHECK: define [2 x <4 x float>] @func_v3f2([2 x <4 x float>] %x.coerce) +// CHECK: define partialinit [2 x <4 x float>] @func_v3f2([2 x <4 x float>] partialinit %x.coerce) struct v3f2 func_v3f2(struct v3f2 x) { return x; } -// CHECK: define [3 x <4 x float>] @func_v3f3([3 x <4 x float>] %x.coerce) +// CHECK: define partialinit [3 x <4 x float>] @func_v3f3([3 x <4 x float>] partialinit %x.coerce) struct v3f3 
func_v3f3(struct v3f3 x) { return x; } -// CHECK: define [4 x <4 x float>] @func_v3f4([4 x <4 x float>] %x.coerce) +// CHECK: define partialinit [4 x <4 x float>] @func_v3f4([4 x <4 x float>] partialinit %x.coerce) struct v3f4 func_v3f4(struct v3f4 x) { return x; } -// CHECK: define [5 x <4 x float>] @func_v3f5([5 x <4 x float>] %x.coerce) +// CHECK: define partialinit [5 x <4 x float>] @func_v3f5([5 x <4 x float>] partialinit %x.coerce) struct v3f5 func_v3f5(struct v3f5 x) { return x; } -// CHECK: define [6 x <4 x float>] @func_v3f6([6 x <4 x float>] %x.coerce) +// CHECK: define partialinit [6 x <4 x float>] @func_v3f6([6 x <4 x float>] partialinit %x.coerce) struct v3f6 func_v3f6(struct v3f6 x) { return x; } -// CHECK: define [7 x <4 x float>] @func_v3f7([7 x <4 x float>] %x.coerce) +// CHECK: define partialinit [7 x <4 x float>] @func_v3f7([7 x <4 x float>] partialinit %x.coerce) struct v3f7 func_v3f7(struct v3f7 x) { return x; } -// CHECK: define [8 x <4 x float>] @func_v3f8([8 x <4 x float>] %x.coerce) +// CHECK: define partialinit [8 x <4 x float>] @func_v3f8([8 x <4 x float>] partialinit %x.coerce) struct v3f8 func_v3f8(struct v3f8 x) { return x; } // CHECK: define void @func_v3f9(%struct.v3f9* noalias sret align 16 %agg.result, %struct.v3f9* byval(%struct.v3f9) align 16 %x) struct v3f9 func_v3f9(struct v3f9 x) { return x; } -// CHECK: define [2 x <4 x float>] @func_v3fab([2 x <4 x float>] %x.coerce) +// CHECK: define partialinit [2 x <4 x float>] @func_v3fab([2 x <4 x float>] partialinit %x.coerce) struct v3fab func_v3fab(struct v3fab x) { return x; } -// CHECK: define [3 x <4 x float>] @func_v3fabc([3 x <4 x float>] %x.coerce) +// CHECK: define partialinit [3 x <4 x float>] @func_v3fabc([3 x <4 x float>] partialinit %x.coerce) struct v3fabc func_v3fabc(struct v3fabc x) { return x; } // CHECK-LABEL: @call_v3f1 // CHECK: %[[TMP:[^ ]+]] = load <3 x float>, <3 x float>* getelementptr inbounds (%struct.v3f1, %struct.v3f1* @global_v3f1, i32 0, i32 0, i32 0), 
align 1 -// CHECK: call [1 x <4 x float>] @func_v3f1(<3 x float> inreg %[[TMP]]) +// CHECK: call partialinit [1 x <4 x float>] @func_v3f1(<3 x float> inreg partialinit %[[TMP]]) struct v3f1 global_v3f1; void call_v3f1(void) { global_v3f1 = func_v3f1(global_v3f1); } // CHECK-LABEL: @call_v3f2 // CHECK: %[[TMP:[^ ]+]] = load [2 x <4 x float>], [2 x <4 x float>]* bitcast (%struct.v3f2* @global_v3f2 to [2 x <4 x float>]*), align 16 -// CHECK: call [2 x <4 x float>] @func_v3f2([2 x <4 x float>] %[[TMP]]) +// CHECK: call partialinit [2 x <4 x float>] @func_v3f2([2 x <4 x float>] partialinit %[[TMP]]) struct v3f2 global_v3f2; void call_v3f2(void) { global_v3f2 = func_v3f2(global_v3f2); } // CHECK-LABEL: @call_v3f3 // CHECK: %[[TMP:[^ ]+]] = load [3 x <4 x float>], [3 x <4 x float>]* bitcast (%struct.v3f3* @global_v3f3 to [3 x <4 x float>]*), align 16 -// CHECK: call [3 x <4 x float>] @func_v3f3([3 x <4 x float>] %[[TMP]]) +// CHECK: call partialinit [3 x <4 x float>] @func_v3f3([3 x <4 x float>] partialinit %[[TMP]]) struct v3f3 global_v3f3; void call_v3f3(void) { global_v3f3 = func_v3f3(global_v3f3); } // CHECK-LABEL: @call_v3f4 // CHECK: %[[TMP:[^ ]+]] = load [4 x <4 x float>], [4 x <4 x float>]* bitcast (%struct.v3f4* @global_v3f4 to [4 x <4 x float>]*), align 16 -// CHECK: call [4 x <4 x float>] @func_v3f4([4 x <4 x float>] %[[TMP]]) +// CHECK: call partialinit [4 x <4 x float>] @func_v3f4([4 x <4 x float>] partialinit %[[TMP]]) struct v3f4 global_v3f4; void call_v3f4(void) { global_v3f4 = func_v3f4(global_v3f4); } // CHECK-LABEL: @call_v3f5 // CHECK: %[[TMP:[^ ]+]] = load [5 x <4 x float>], [5 x <4 x float>]* bitcast (%struct.v3f5* @global_v3f5 to [5 x <4 x float>]*), align 16 -// CHECK: call [5 x <4 x float>] @func_v3f5([5 x <4 x float>] %[[TMP]]) +// CHECK: call partialinit [5 x <4 x float>] @func_v3f5([5 x <4 x float>] partialinit %[[TMP]]) struct v3f5 global_v3f5; void call_v3f5(void) { global_v3f5 = func_v3f5(global_v3f5); } // CHECK-LABEL: @call_v3f6 // CHECK: 
%[[TMP:[^ ]+]] = load [6 x <4 x float>], [6 x <4 x float>]* bitcast (%struct.v3f6* @global_v3f6 to [6 x <4 x float>]*), align 16 -// CHECK: call [6 x <4 x float>] @func_v3f6([6 x <4 x float>] %[[TMP]]) +// CHECK: call partialinit [6 x <4 x float>] @func_v3f6([6 x <4 x float>] partialinit %[[TMP]]) struct v3f6 global_v3f6; void call_v3f6(void) { global_v3f6 = func_v3f6(global_v3f6); } // CHECK-LABEL: @call_v3f7 // CHECK: %[[TMP:[^ ]+]] = load [7 x <4 x float>], [7 x <4 x float>]* bitcast (%struct.v3f7* @global_v3f7 to [7 x <4 x float>]*), align 16 -// CHECK: call [7 x <4 x float>] @func_v3f7([7 x <4 x float>] %[[TMP]]) +// CHECK: call partialinit [7 x <4 x float>] @func_v3f7([7 x <4 x float>] partialinit %[[TMP]]) struct v3f7 global_v3f7; void call_v3f7(void) { global_v3f7 = func_v3f7(global_v3f7); } // CHECK-LABEL: @call_v3f8 // CHECK: %[[TMP:[^ ]+]] = load [8 x <4 x float>], [8 x <4 x float>]* bitcast (%struct.v3f8* @global_v3f8 to [8 x <4 x float>]*), align 16 -// CHECK: call [8 x <4 x float>] @func_v3f8([8 x <4 x float>] %[[TMP]]) +// CHECK: call partialinit [8 x <4 x float>] @func_v3f8([8 x <4 x float>] partialinit %[[TMP]]) struct v3f8 global_v3f8; void call_v3f8(void) { global_v3f8 = func_v3f8(global_v3f8); } @@ -343,13 +343,13 @@ // CHECK-LABEL: @call_v3fab // CHECK: %[[TMP:[^ ]+]] = load [2 x <4 x float>], [2 x <4 x float>]* bitcast (%struct.v3fab* @global_v3fab to [2 x <4 x float>]*), align 16 -// CHECK: call [2 x <4 x float>] @func_v3fab([2 x <4 x float>] %[[TMP]]) +// CHECK: call partialinit [2 x <4 x float>] @func_v3fab([2 x <4 x float>] partialinit %[[TMP]]) struct v3fab global_v3fab; void call_v3fab(void) { global_v3fab = func_v3fab(global_v3fab); } // CHECK-LABEL: @call_v3fabc // CHECK: %[[TMP:[^ ]+]] = load [3 x <4 x float>], [3 x <4 x float>]* bitcast (%struct.v3fabc* @global_v3fabc to [3 x <4 x float>]*), align 16 -// CHECK: call [3 x <4 x float>] @func_v3fabc([3 x <4 x float>] %[[TMP]]) +// CHECK: call partialinit [3 x <4 x float>] 
@func_v3fabc([3 x <4 x float>] partialinit %[[TMP]]) struct v3fabc global_v3fabc; void call_v3fabc(void) { global_v3fabc = func_v3fabc(global_v3fabc); } @@ -408,7 +408,7 @@ return (struct s8) { 17, 18, 19, 20, 21, 22, 23, 24 }; } -// CHECK: define { i64, i64 } @ret_s9() +// CHECK: define partialinit { i64, i64 } @ret_s9() struct s9 ret_s9() { return (struct s9) { 17, 18, 19, 20, 21, 22, 23, 24, 25 }; } diff --git a/clang/test/CodeGen/regcall.c b/clang/test/CodeGen/regcall.c --- a/clang/test/CodeGen/regcall.c +++ b/clang/test/CodeGen/regcall.c @@ -107,10 +107,10 @@ struct OddSizeHVA { v3f32 x, y; }; void __regcall odd_size_hva(struct OddSizeHVA a) {} -// Win32: define dso_local x86_regcallcc void @__regcall3__odd_size_hva(<3 x float> %a.0, <3 x float> %a.1) -// Win64: define dso_local x86_regcallcc void @__regcall3__odd_size_hva(<3 x float> %a.0, <3 x float> %a.1) -// Lin32: define x86_regcallcc void @__regcall3__odd_size_hva(<3 x float> %a.0, <3 x float> %a.1) -// Lin64: define x86_regcallcc void @__regcall3__odd_size_hva(<3 x float> %a.coerce0, <3 x float> %a.coerce1) +// Win32: define dso_local x86_regcallcc void @__regcall3__odd_size_hva(<3 x float> partialinit %a.0, <3 x float> partialinit %a.1) +// Win64: define dso_local x86_regcallcc void @__regcall3__odd_size_hva(<3 x float> partialinit %a.0, <3 x float> partialinit %a.1) +// Lin32: define x86_regcallcc void @__regcall3__odd_size_hva(<3 x float> partialinit %a.0, <3 x float> partialinit %a.1) +// Lin64: define x86_regcallcc void @__regcall3__odd_size_hva(<3 x float> partialinit %a.coerce0, <3 x float> partialinit %a.coerce1) struct HFA6 { __m128 f[4]; }; struct HFA6 __regcall ret_reg_reused(struct HFA6 a, struct HFA6 b, struct HFA6 c, struct HFA6 d){ struct HFA6 h; return h;} diff --git a/clang/test/CodeGen/renderscript.c b/clang/test/CodeGen/renderscript.c --- a/clang/test/CodeGen/renderscript.c +++ b/clang/test/CodeGen/renderscript.c @@ -40,8 +40,8 @@ // CHECK-RS64: void @argChar3([3 x i8] %s.coerce) void 
argChar3(sChar3 s) {} -// CHECK-RS32: void @argShortChar([2 x i16] %s.coerce) -// CHECK-RS64: void @argShortChar([2 x i16] %s.coerce) +// CHECK-RS32: void @argShortChar([2 x i16] partialinit %s.coerce) +// CHECK-RS64: void @argShortChar([2 x i16] partialinit %s.coerce) void argShortChar(sShortChar s) {} // ============================================================================= @@ -53,8 +53,8 @@ // CHECK-RS64: [3 x i8] @retChar3() sChar3 retChar3() { sChar3 r; return r; } -// CHECK-RS32: [2 x i16] @retShortChar() -// CHECK-RS64: [2 x i16] @retShortChar() +// CHECK-RS32: partialinit [2 x i16] @retShortChar() +// CHECK-RS64: partialinit [2 x i16] @retShortChar() sShortChar retShortChar() { sShortChar r; return r; } // ============================================================================= @@ -66,16 +66,16 @@ typedef struct {int i; short s; char c; } sIntShortChar; typedef struct {long l; int i; } sLongInt; -// CHECK-RS32: void @argShortCharShort([3 x i16] %s.coerce) -// CHECK-RS64: void @argShortCharShort([3 x i16] %s.coerce) +// CHECK-RS32: void @argShortCharShort([3 x i16] partialinit %s.coerce) +// CHECK-RS64: void @argShortCharShort([3 x i16] partialinit %s.coerce) void argShortCharShort(sShortCharShort s) {} -// CHECK-RS32: void @argIntShortChar([2 x i32] %s.coerce) -// CHECK-RS64: void @argIntShortChar([2 x i32] %s.coerce) +// CHECK-RS32: void @argIntShortChar([2 x i32] partialinit %s.coerce) +// CHECK-RS64: void @argIntShortChar([2 x i32] partialinit %s.coerce) void argIntShortChar(sIntShortChar s) {} -// CHECK-RS32: void @argLongInt([2 x i64] %s.coerce) -// CHECK-RS64: void @argLongInt([2 x i64] %s.coerce) +// CHECK-RS32: void @argLongInt([2 x i64] partialinit %s.coerce) +// CHECK-RS64: void @argLongInt([2 x i64] partialinit %s.coerce) void argLongInt(sLongInt s) {} // ============================================================================= @@ -107,7 +107,7 @@ // CHECK-RS64: void @argInt5(%struct.sInt5* %s) void argInt5(sInt5 s) {} -// 
CHECK-RS32: void @argLong2Char([3 x i64] %s.coerce) +// CHECK-RS32: void @argLong2Char([3 x i64] partialinit %s.coerce) // CHECK-RS64: void @argLong2Char(%struct.sLong2Char* %s) void argLong2Char(sLong2Char s) {} diff --git a/clang/test/CodeGen/riscv32-ilp32f-ilp32d-abi.c b/clang/test/CodeGen/riscv32-ilp32f-ilp32d-abi.c --- a/clang/test/CodeGen/riscv32-ilp32f-ilp32d-abi.c +++ b/clang/test/CodeGen/riscv32-ilp32f-ilp32d-abi.c @@ -136,11 +136,11 @@ return (struct float_int8_zbf_s){1.0, 2}; } -// CHECK: define void @f_float_int8_s_arg_insufficient_gprs(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, [2 x i32] %i.coerce) +// CHECK: define void @f_float_int8_s_arg_insufficient_gprs(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, [2 x i32] partialinit %i.coerce) void f_float_int8_s_arg_insufficient_gprs(int a, int b, int c, int d, int e, int f, int g, int h, struct float_int8_s i) {} -// CHECK: define void @f_struct_float_int8_insufficient_fprs(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h, [2 x i32] %i.coerce) +// CHECK: define void @f_struct_float_int8_insufficient_fprs(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h, [2 x i32] partialinit %i.coerce) void f_struct_float_int8_insufficient_fprs(float a, float b, float c, float d, float e, float f, float g, float h, struct float_int8_s i) {} @@ -253,10 +253,10 @@ struct char_char_float_s { char a; char b; float c; }; -// CHECK-LABEL: define void @f_char_char_float_s_arg([2 x i32] %a.coerce) +// CHECK-LABEL: define void @f_char_char_float_s_arg([2 x i32] partialinit %a.coerce) void f_char_char_float_s_arg(struct char_char_float_s a) {} -// CHECK: define [2 x i32] @f_ret_char_char_float_s() +// CHECK: define partialinit [2 x i32] @f_ret_char_char_float_s() struct char_char_float_s f_ret_char_char_float_s() { return (struct char_char_float_s){1, 2, 3.0}; } diff --git a/clang/test/CodeGen/riscv64-lp64d-abi.c 
b/clang/test/CodeGen/riscv64-lp64d-abi.c --- a/clang/test/CodeGen/riscv64-lp64d-abi.c +++ b/clang/test/CodeGen/riscv64-lp64d-abi.c @@ -143,11 +143,11 @@ return (struct double_int8_zbf_s){1.0, 2}; } -// CHECK: define void @f_double_int8_s_arg_insufficient_gprs(i32 signext %a, i32 signext %b, i32 signext %c, i32 signext %d, i32 signext %e, i32 signext %f, i32 signext %g, i32 signext %h, [2 x i64] %i.coerce) +// CHECK: define void @f_double_int8_s_arg_insufficient_gprs(i32 signext %a, i32 signext %b, i32 signext %c, i32 signext %d, i32 signext %e, i32 signext %f, i32 signext %g, i32 signext %h, [2 x i64] partialinit %i.coerce) void f_double_int8_s_arg_insufficient_gprs(int a, int b, int c, int d, int e, int f, int g, int h, struct double_int8_s i) {} -// CHECK: define void @f_struct_double_int8_insufficient_fprs(float %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, [2 x i64] %i.coerce) +// CHECK: define void @f_struct_double_int8_insufficient_fprs(float %a, double %b, double %c, double %d, double %e, double %f, double %g, double %h, [2 x i64] partialinit %i.coerce) void f_struct_double_int8_insufficient_fprs(float a, double b, double c, double d, double e, double f, double g, double h, struct double_int8_s i) {} @@ -250,10 +250,10 @@ struct char_char_double_s { char a; char b; double c; }; -// CHECK-LABEL: define void @f_char_char_double_s_arg([2 x i64] %a.coerce) +// CHECK-LABEL: define void @f_char_char_double_s_arg([2 x i64] partialinit %a.coerce) void f_char_char_double_s_arg(struct char_char_double_s a) {} -// CHECK: define [2 x i64] @f_ret_char_char_double_s() +// CHECK: define partialinit [2 x i64] @f_ret_char_char_double_s() struct char_char_double_s f_ret_char_char_double_s() { return (struct char_char_double_s){1, 2, 3.0}; } diff --git a/clang/test/CodeGen/riscv64-lp64f-lp64d-abi.c b/clang/test/CodeGen/riscv64-lp64f-lp64d-abi.c --- a/clang/test/CodeGen/riscv64-lp64f-lp64d-abi.c +++ 
b/clang/test/CodeGen/riscv64-lp64f-lp64d-abi.c @@ -136,11 +136,11 @@ return (struct float_int8_zbf_s){1.0, 2}; } -// CHECK: define void @f_float_int8_s_arg_insufficient_gprs(i32 signext %a, i32 signext %b, i32 signext %c, i32 signext %d, i32 signext %e, i32 signext %f, i32 signext %g, i32 signext %h, i64 %i.coerce) +// CHECK: define void @f_float_int8_s_arg_insufficient_gprs(i32 signext %a, i32 signext %b, i32 signext %c, i32 signext %d, i32 signext %e, i32 signext %f, i32 signext %g, i32 signext %h, i64 partialinit %i.coerce) void f_float_int8_s_arg_insufficient_gprs(int a, int b, int c, int d, int e, int f, int g, int h, struct float_int8_s i) {} -// CHECK: define void @f_struct_float_int8_insufficient_fprs(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h, i64 %i.coerce) +// CHECK: define void @f_struct_float_int8_insufficient_fprs(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h, i64 partialinit %i.coerce) void f_struct_float_int8_insufficient_fprs(float a, float b, float c, float d, float e, float f, float g, float h, struct float_int8_s i) {} @@ -262,20 +262,20 @@ struct int_float_int_s { int a; float b; int c; }; -// CHECK: define void @f_int_float_int_s_arg([2 x i64] %a.coerce) +// CHECK: define void @f_int_float_int_s_arg([2 x i64] partialinit %a.coerce) void f_int_float_int_s_arg(struct int_float_int_s a) {} -// CHECK: define [2 x i64] @f_ret_int_float_int_s() +// CHECK: define partialinit [2 x i64] @f_ret_int_float_int_s() struct int_float_int_s f_ret_int_float_int_s() { return (struct int_float_int_s){1, 2.0, 3}; } struct char_char_float_s { char a; char b; float c; }; -// CHECK-LABEL: define void @f_char_char_float_s_arg(i64 %a.coerce) +// CHECK-LABEL: define void @f_char_char_float_s_arg(i64 partialinit %a.coerce) void f_char_char_float_s_arg(struct char_char_float_s a) {} -// CHECK: define i64 @f_ret_char_char_float_s() +// CHECK: define partialinit i64 @f_ret_char_char_float_s() struct 
char_char_float_s f_ret_char_char_float_s() { return (struct char_char_float_s){1, 2, 3.0}; } @@ -285,10 +285,10 @@ union float_u { float a; }; -// CHECK: define void @f_float_u_arg(i64 %a.coerce) +// CHECK: define void @f_float_u_arg(i64 partialinit %a.coerce) void f_float_u_arg(union float_u a) {} -// CHECK: define i64 @f_ret_float_u() +// CHECK: define partialinit i64 @f_ret_float_u() union float_u f_ret_float_u() { return (union float_u){1.0}; } diff --git a/clang/test/CodeGen/sparcv9-abi.c b/clang/test/CodeGen/sparcv9-abi.c --- a/clang/test/CodeGen/sparcv9-abi.c +++ b/clang/test/CodeGen/sparcv9-abi.c @@ -89,7 +89,7 @@ double b; }; -// CHECK: define { i64, double } @f_mixed2(i64 %x.coerce0, double %x.coerce1) +// CHECK: define partialinit { i64, double } @f_mixed2(i64 partialinit %x.coerce0, double partialinit %x.coerce1) // CHECK: store i64 %x.coerce0 // CHECK: store double %x.coerce1 struct mixed2 f_mixed2(struct mixed2 x) { @@ -103,7 +103,7 @@ char a; }; -// CHECK-LABEL: define i64 @f_tiny(i64 %x.coerce) +// CHECK-LABEL: define partialinit i64 @f_tiny(i64 partialinit %x.coerce) // CHECK: %[[HB:[^ ]+]] = lshr i64 %x.coerce, 56 // CHECK: = trunc i64 %[[HB]] to i8 struct tiny f_tiny(struct tiny x) { @@ -114,7 +114,7 @@ // CHECK-LABEL: define void @call_tiny() // CHECK: %[[XV:[^ ]+]] = zext i8 %{{[^ ]+}} to i64 // CHECK: %[[HB:[^ ]+]] = shl i64 %[[XV]], 56 -// CHECK: = call i64 @f_tiny(i64 %[[HB]]) +// CHECK: = call partialinit i64 @f_tiny(i64 partialinit %[[HB]]) void call_tiny() { struct tiny x = { 1 }; f_tiny(x); diff --git a/clang/test/CodeGen/systemz-abi-vector.c b/clang/test/CodeGen/systemz-abi-vector.c --- a/clang/test/CodeGen/systemz-abi-vector.c +++ b/clang/test/CodeGen/systemz-abi-vector.c @@ -190,8 +190,8 @@ struct agg_novector4 { v4i8 a __attribute__((aligned (8))); }; struct agg_novector4 pass_agg_novector4(struct agg_novector4 arg) { return arg; } -// CHECK-LABEL: define void @pass_agg_novector4(%struct.agg_novector4* noalias sret align 8 %{{.*}}, 
i64 %{{.*}}) -// CHECK-VECTOR-LABEL: define void @pass_agg_novector4(%struct.agg_novector4* noalias sret align 8 %{{.*}}, i64 %{{.*}}) +// CHECK-LABEL: define void @pass_agg_novector4(%struct.agg_novector4* noalias sret align 8 %{{.*}}, i64 partialinit %{{.*}}) +// CHECK-VECTOR-LABEL: define void @pass_agg_novector4(%struct.agg_novector4* noalias sret align 8 %{{.*}}, i64 partialinit %{{.*}}) // Accessing variable argument lists diff --git a/clang/test/CodeGen/systemz-abi.c b/clang/test/CodeGen/systemz-abi.c --- a/clang/test/CodeGen/systemz-abi.c +++ b/clang/test/CodeGen/systemz-abi.c @@ -132,8 +132,8 @@ struct agg_float_a8 { float a __attribute__((aligned (8))); }; struct agg_float_a8 pass_agg_float_a8(struct agg_float_a8 arg) { return arg; } -// HARD-FLOAT-LABEL: define void @pass_agg_float_a8(%struct.agg_float_a8* noalias sret align 8 %{{.*}}, double %{{.*}}) -// SOFT-FLOAT-LABEL: define void @pass_agg_float_a8(%struct.agg_float_a8* noalias sret align 8 %{{.*}}, i64 %{{.*}}) +// HARD-FLOAT-LABEL: define void @pass_agg_float_a8(%struct.agg_float_a8* noalias sret align 8 %{{.*}}, double partialinit %{{.*}}) +// SOFT-FLOAT-LABEL: define void @pass_agg_float_a8(%struct.agg_float_a8* noalias sret align 8 %{{.*}}, i64 partialinit %{{.*}}) struct agg_float_a16 { float a __attribute__((aligned (16))); }; struct agg_float_a16 pass_agg_float_a16(struct agg_float_a16 arg) { return arg; } diff --git a/clang/test/CodeGen/tbaa.cpp b/clang/test/CodeGen/tbaa.cpp --- a/clang/test/CodeGen/tbaa.cpp +++ b/clang/test/CodeGen/tbaa.cpp @@ -281,7 +281,7 @@ // NEW-PATH-DAG: [[TYPE_short:!.*]] = !{[[TYPE_char]], i64 2, !"short"} // NEW-PATH-DAG: [[TYPE_int:!.*]] = !{[[TYPE_char]], i64 4, !"int"} // NEW-PATH-DAG: [[TAG_i32:!.*]] = !{[[TYPE_int]], [[TYPE_int]], i64 0, i64 4} -// NEW-PATH-DAG: [[TYPE_A:!.*]] = !{[[TYPE_char]], i64 16, !"_ZTS7StructA", [[TYPE_short]], i64 0, i64 2, [[TYPE_int]], i64 4, i64 4, !12, i64 8, i64 2, [[TYPE_int]], i64 12, i64 4} +// NEW-PATH-DAG: [[TYPE_A:!.*]] = 
!{[[TYPE_char]], i64 16, !"_ZTS7StructA", [[TYPE_short]], i64 0, i64 2, [[TYPE_int]], i64 4, i64 4, [[TYPE_short]], i64 8, i64 2, [[TYPE_int]], i64 12, i64 4} // NEW-PATH-DAG: [[TAG_A_f16]] = !{[[TYPE_A]], [[TYPE_short]], i64 0, i64 2} // NEW-PATH-DAG: [[TAG_A_f32]] = !{[[TYPE_A]], [[TYPE_int]], i64 4, i64 4} // NEW-PATH-DAG: [[TYPE_B:!.*]] = !{[[TYPE_char]], i64 24, !"_ZTS7StructB", [[TYPE_short]], i64 0, i64 2, [[TYPE_A]], i64 4, i64 16, [[TYPE_int]], i64 20, i64 4} diff --git a/clang/test/CodeGen/vectorcall.c b/clang/test/CodeGen/vectorcall.c --- a/clang/test/CodeGen/vectorcall.c +++ b/clang/test/CodeGen/vectorcall.c @@ -97,8 +97,8 @@ struct OddSizeHVA { v3f32 x, y; }; void __vectorcall odd_size_hva(struct OddSizeHVA a) {} -// X32: define dso_local x86_vectorcallcc void @"\01odd_size_hva@@32"(%struct.OddSizeHVA inreg %a.coerce) -// X64: define dso_local x86_vectorcallcc void @"\01odd_size_hva@@32"(%struct.OddSizeHVA inreg %a.coerce) +// X32: define dso_local x86_vectorcallcc void @"\01odd_size_hva@@32"(%struct.OddSizeHVA inreg partialinit %a.coerce) +// X64: define dso_local x86_vectorcallcc void @"\01odd_size_hva@@32"(%struct.OddSizeHVA inreg partialinit %a.coerce) // The Vectorcall ABI only allows passing the first 6 items in registers in x64, so this shouldn't // consider 'p7' as a register. Instead p5 gets put into the register on the second pass. diff --git a/clang/test/CodeGen/wasm-arguments.c b/clang/test/CodeGen/wasm-arguments.c --- a/clang/test/CodeGen/wasm-arguments.c +++ b/clang/test/CodeGen/wasm-arguments.c @@ -99,7 +99,7 @@ // Unions should be passed as byval structs. 
// WEBASSEMBLY32: define void @union_arg(%union.simple_union* byval(%union.simple_union) align 4 %s) // WEBASSEMBLY64: define void @union_arg(%union.simple_union* byval(%union.simple_union) align 4 %s) -// EXPERIMENTAL-MV: define void @union_arg(i32 %s.0) +// EXPERIMENTAL-MV: define void @union_arg(i32 partialinit %s.0) void union_arg(union simple_union s) {} // Unions should be returned sret and not simplified by the frontend. @@ -109,7 +109,7 @@ // WEBASSEMBLY64: ret void // The experimental multivalue ABI returns them by value, though. -// EXPERIMENTAL-MV: define %union.simple_union @union_ret() +// EXPERIMENTAL-MV: define partialinit %union.simple_union @union_ret() // EXPERIMENTAL-MV: ret %union.simple_union %0 union simple_union union_ret() { union simple_union bar; @@ -133,7 +133,7 @@ // WEBASSEMBLY64: define void @bitfield_ret(%struct.bitfield1* noalias sret align 4 %agg.result) // Except, of course, in the experimental multivalue ABI -// EXPERIMENTAL-MV: define %struct.bitfield1 @bitfield_ret() +// EXPERIMENTAL-MV: define partialinit %struct.bitfield1 @bitfield_ret() bitfield1 bitfield_ret() { bitfield1 baz; return baz; diff --git a/clang/test/CodeGen/x86_32-arguments-darwin.c b/clang/test/CodeGen/x86_32-arguments-darwin.c --- a/clang/test/CodeGen/x86_32-arguments-darwin.c +++ b/clang/test/CodeGen/x86_32-arguments-darwin.c @@ -48,7 +48,7 @@ // This should be passed just as s8. -// CHECK-LABEL: define i64 @f9_1() +// CHECK-LABEL: define partialinit i64 @f9_1() // FIXME: llvm-gcc expands this, this may have some value for the // backend in terms of optimization but doesn't change the ABI. 
@@ -126,13 +126,13 @@ // CHECK-LABEL: define i16 @f29() struct s29 { struct { } a[1]; char b; char c; } f29(void) { while (1) {} } -// CHECK-LABEL: define i16 @f30() +// CHECK-LABEL: define partialinit i16 @f30() struct s30 { char a; char b : 4; } f30(void) { while (1) {} } // CHECK-LABEL: define float @f31() struct s31 { char : 0; float b; char : 0; } f31(void) { while (1) {} } -// CHECK-LABEL: define i32 @f32() +// CHECK-LABEL: define partialinit i32 @f32() struct s32 { char a; unsigned : 0; } f32(void) { while (1) {} } // CHECK-LABEL: define float @f33() @@ -285,7 +285,7 @@ union u58 {}; void f58(union u58 x) {} -// CHECK-LABEL: define i64 @f59() +// CHECK-LABEL: define partialinit i64 @f59() struct s59 { float x __attribute((aligned(8))); }; struct s59 f59() { while (1) {} } diff --git a/clang/test/CodeGen/x86_32-arguments-iamcu.c b/clang/test/CodeGen/x86_32-arguments-iamcu.c --- a/clang/test/CodeGen/x86_32-arguments-iamcu.c +++ b/clang/test/CodeGen/x86_32-arguments-iamcu.c @@ -37,7 +37,7 @@ // CHECK-LABEL: define void @smallStructs(i32 %st1.coerce, i32 %st2.coerce, i32 %st3.coerce) void smallStructs(st4_t st1, st4_t st2, st4_t st3) {} -// CHECK-LABEL: define void @paddedStruct(i32 %i1, i32 %st.coerce0, i32 %st.coerce1, i32 %st4.0) +// CHECK-LABEL: define void @paddedStruct(i32 %i1, i32 partialinit %st.coerce0, i32 partialinit %st.coerce1, i32 %st4.0) void paddedStruct(int i1, st5_t st, st4_t st4) {} // CHECK-LABEL: define void @largeStructBegin(%struct.st12_t* byval(%struct.st12_t) align 4 %st) @@ -49,13 +49,13 @@ // CHECK-LABEL: define void @largeStructEnd(i32 %i1, i32 %i2, i32 %i3, i32 %st.0, i32 %st.1, i32 %st.2) void largeStructEnd(int i1, int i2, int i3, st12_t st) {} -// CHECK-LABEL: define i24 @retNonPow2Struct(i32 %r.coerce) +// CHECK-LABEL: define i24 @retNonPow2Struct(i32 partialinit %r.coerce) st3_t retNonPow2Struct(st3_t r) { return r; } // CHECK-LABEL: define i32 @retSmallStruct(i32 %r.coerce) st4_t retSmallStruct(st4_t r) { return r; } -// 
CHECK-LABEL: define i64 @retPaddedStruct(i32 %r.coerce0, i32 %r.coerce1) +// CHECK-LABEL: define partialinit i64 @retPaddedStruct(i32 partialinit %r.coerce0, i32 partialinit %r.coerce1) st5_t retPaddedStruct(st5_t r) { return r; } // CHECK-LABEL: define void @retLargeStruct(%struct.st12_t* noalias sret align 4 %agg.result, i32 %i1, %struct.st12_t* byval(%struct.st12_t) align 4 %r) diff --git a/clang/test/CodeGen/x86_64-arguments-darwin.c b/clang/test/CodeGen/x86_64-arguments-darwin.c --- a/clang/test/CodeGen/x86_64-arguments-darwin.c +++ b/clang/test/CodeGen/x86_64-arguments-darwin.c @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin -emit-llvm -o - %s| FileCheck %s // rdar://9122143 -// CHECK: declare void @func(i64, double) +// CHECK: declare void @func(i64 partialinit, double partialinit) typedef struct _str { union { long double a; diff --git a/clang/test/CodeGen/x86_64-arguments-nacl.c b/clang/test/CodeGen/x86_64-arguments-nacl.c --- a/clang/test/CodeGen/x86_64-arguments-nacl.c +++ b/clang/test/CodeGen/x86_64-arguments-nacl.c @@ -15,13 +15,13 @@ union PP_VarValue value; }; -// CHECK: define { i64, i64 } @f0() +// CHECK: define partialinit { i64, i64 } @f0() struct PP_Var f0() { struct PP_Var result = { 0, 0, 0 }; return result; } -// CHECK-LABEL: define void @f1(i64 %p1.coerce0, i64 %p1.coerce1) +// CHECK-LABEL: define void @f1(i64 partialinit %p1.coerce0, i64 partialinit %p1.coerce1) void f1(struct PP_Var p1) { while(1) {} } // long doubles are 64 bits on NaCl @@ -34,8 +34,8 @@ void f6(char a0, short a1, int a2, long long a3, void *a4) { } -// CHECK-LABEL: define i64 @f8_1() -// CHECK-LABEL: define void @f8_2(i64 %a0.coerce) +// CHECK-LABEL: define partialinit i64 @f8_1() +// CHECK-LABEL: define void @f8_2(i64 partialinit %a0.coerce) union u8 { long double a; int b; @@ -50,11 +50,11 @@ struct s10 { int a; int b; int : 0; }; void f10(struct s10 a0) {} -// CHECK-LABEL: define double @f11() +// CHECK-LABEL: define partialinit double @f11() union { 
long double a; float b; } f11() { while (1) {} } -// CHECK-LABEL: define i32 @f12_0() -// CHECK-LABEL: define void @f12_1(i32 %a0.coerce) +// CHECK-LABEL: define partialinit i32 @f12_0() +// CHECK-LABEL: define void @f12_1(i32 partialinit %a0.coerce) struct s12 { int a __attribute__((aligned(16))); }; struct s12 f12_0(void) { while (1) {} } void f12_1(struct s12 a0) {} @@ -76,7 +76,7 @@ void f20(struct s20 x) {} -// CHECK: declare void @func(i64) +// CHECK: declare void @func(i64 partialinit) typedef struct _str { union { long double a; diff --git a/clang/test/CodeGen/x86_64-arguments.c b/clang/test/CodeGen/x86_64-arguments.c --- a/clang/test/CodeGen/x86_64-arguments.c +++ b/clang/test/CodeGen/x86_64-arguments.c @@ -66,8 +66,8 @@ // CHECK-LABEL: define void @f11(%union.anon* noalias sret align 16 %agg.result) union { long double a; float b; } f11() { while (1) {} } -// CHECK-LABEL: define i32 @f12_0() -// CHECK-LABEL: define void @f12_1(i32 %a0.coerce) +// CHECK-LABEL: define partialinit i32 @f12_0() +// CHECK-LABEL: define void @f12_1(i32 partialinit %a0.coerce) struct s12 { int a __attribute__((aligned(16))); }; struct s12 f12_0(void) { while (1) {} } void f12_1(struct s12 a0) {} @@ -144,7 +144,7 @@ void f23(int A, struct f23S B) { - // CHECK-LABEL: define void @f23(i32 %A, i64 %B.coerce0, i32 %B.coerce1) + // CHECK-LABEL: define void @f23(i32 %A, i64 partialinit %B.coerce0, i32 partialinit %B.coerce1) } struct f24s { long a; int b; }; @@ -152,7 +152,7 @@ struct f23S f24(struct f23S *X, struct f24s *P2) { return *X; - // CHECK: define { i64, i32 } @f24(%struct.f23S* %X, %struct.f24s* %P2) + // CHECK: define partialinit { i64, i32 } @f24(%struct.f23S* %X, %struct.f24s* %P2) } // rdar://8248065 @@ -216,7 +216,7 @@ int y; }; void f28(struct f28c C) { - // CHECK-LABEL: define void @f28(double %C.coerce0, i32 %C.coerce1) + // CHECK-LABEL: define void @f28(double partialinit %C.coerce0, i32 partialinit %C.coerce1) } struct f29a { @@ -227,20 +227,20 @@ }; void 
f29a(struct f29a A) { - // CHECK-LABEL: define void @f29a(double %A.coerce0, i32 %A.coerce1) + // CHECK-LABEL: define void @f29a(double partialinit %A.coerce0, i32 partialinit %A.coerce1) } // rdar://8249586 struct S0 { char f0[8]; char f2; char f3; char f4; }; void f30(struct S0 p_4) { - // CHECK-LABEL: define void @f30(i64 %p_4.coerce0, i24 %p_4.coerce1) + // CHECK-LABEL: define void @f30(i64 partialinit %p_4.coerce0, i24 partialinit %p_4.coerce1) } // Pass the third element as a float when followed by tail padding. // rdar://8251384 struct f31foo { float a, b, c; }; float f31(struct f31foo X) { - // CHECK-LABEL: define float @f31(<2 x float> %X.coerce0, float %X.coerce1) + // CHECK-LABEL: define float @f31(<2 x float> partialinit %X.coerce0, float partialinit %X.coerce1) return X.c; } diff --git a/clang/test/CodeGenCUDA/flush-denormals.cu b/clang/test/CodeGenCUDA/flush-denormals.cu --- a/clang/test/CodeGenCUDA/flush-denormals.cu +++ b/clang/test/CodeGenCUDA/flush-denormals.cu @@ -44,8 +44,8 @@ // FTZ: attributes #0 = {{.*}} "denormal-fp-math-f32"="preserve-sign,preserve-sign" // NOFTZ-NOT: "denormal-fp-math-f32" -// PTXFTZ:!llvm.module.flags = !{{{.*}}[[MODFLAG:![0-9]+]]} -// PTXFTZ:[[MODFLAG]] = !{i32 4, !"nvvm-reflect-ftz", i32 1} +// PTXFTZ:!llvm.module.flags = !{ +// PTXFTZ:[[MODFLAG:![0-9]+]] = !{i32 4, !"nvvm-reflect-ftz", i32 1} -// PTXNOFTZ:!llvm.module.flags = !{{{.*}}[[MODFLAG:![0-9]+]]} -// PTXNOFTZ:[[MODFLAG]] = !{i32 4, !"nvvm-reflect-ftz", i32 0} +// PTXNOFTZ:!llvm.module.flags = !{ +// PTXNOFTZ:[[MODFLAG:![0-9]+]] = !{i32 4, !"nvvm-reflect-ftz", i32 0} diff --git a/clang/test/CodeGenCXX/aarch64-arguments.cpp b/clang/test/CodeGenCXX/aarch64-arguments.cpp --- a/clang/test/CodeGenCXX/aarch64-arguments.cpp +++ b/clang/test/CodeGenCXX/aarch64-arguments.cpp @@ -1,5 +1,5 @@ // RUN: %clang_cc1 -triple arm64-none-linux -emit-llvm -w -o - %s | FileCheck -check-prefix=PCS %s -// PCS: define void @{{.*}}(i8 %a +// PCS: define void @{{.*}}(i8 partialinit %a 
struct s0 {}; void f0(s0 a) {} diff --git a/clang/test/CodeGenCXX/conditional-temporaries.cpp b/clang/test/CodeGenCXX/conditional-temporaries.cpp --- a/clang/test/CodeGenCXX/conditional-temporaries.cpp +++ b/clang/test/CodeGenCXX/conditional-temporaries.cpp @@ -64,8 +64,8 @@ bool success() { // CHECK-LEGACY-OPT: ret i1 true // X64-NEWPM-OPT: ret i1 true - // AMDGCN-NEWPM-OPT: [[CTORS:%.*]] = load i32, i32* addrspacecast (i32 addrspace(1)* @_ZN12_GLOBAL__N_19ctorcallsE to i32*), align 4, !tbaa !2 - // AMDGCN-NEWPM-OPT: [[DTORS:%.*]] = load i32, i32* addrspacecast (i32 addrspace(1)* @_ZN12_GLOBAL__N_19dtorcallsE to i32*), align 4, !tbaa !2 + // AMDGCN-NEWPM-OPT: [[CTORS:%.*]] = load i32, i32* addrspacecast (i32 addrspace(1)* @_ZN12_GLOBAL__N_19ctorcallsE to i32*), align 4, !tbaa + // AMDGCN-NEWPM-OPT: [[DTORS:%.*]] = load i32, i32* addrspacecast (i32 addrspace(1)* @_ZN12_GLOBAL__N_19dtorcallsE to i32*), align 4, !tbaa // AMDGCN-NEWPM-OPT: %cmp = icmp eq i32 [[CTORS]], [[DTORS]] // AMDGCN-NEWPM-OPT: ret i1 %cmp return ctorcalls == dtorcalls; diff --git a/clang/test/CodeGenCXX/ext-int.cpp b/clang/test/CodeGenCXX/ext-int.cpp --- a/clang/test/CodeGenCXX/ext-int.cpp +++ b/clang/test/CodeGenCXX/ext-int.cpp @@ -106,13 +106,13 @@ } unsigned _ExtInt(33) ManglingTestRetParam(unsigned _ExtInt(33) Param) { -// LIN: define i64 @_Z20ManglingTestRetParamU7_ExtIntILi33EEj(i64 % +// LIN: define partialinit i64 @_Z20ManglingTestRetParamU7_ExtIntILi33EEj(i64 partialinit % // WIN: define dso_local i33 @"?ManglingTestRetParam@@YAU?$_UExtInt@$0CB@@__clang@@U12@@Z"(i33 return 0; } _ExtInt(33) ManglingTestRetParam(_ExtInt(33) Param) { -// LIN: define i64 @_Z20ManglingTestRetParamU7_ExtIntILi33EEi(i64 % +// LIN: define partialinit i64 @_Z20ManglingTestRetParamU7_ExtIntILi33EEi(i64 partialinit % // WIN: define dso_local i33 @"?ManglingTestRetParam@@YAU?$_ExtInt@$0CB@@__clang@@U12@@Z"(i33 return 0; } diff --git a/clang/test/CodeGenCXX/homogeneous-aggregates.cpp 
b/clang/test/CodeGenCXX/homogeneous-aggregates.cpp --- a/clang/test/CodeGenCXX/homogeneous-aggregates.cpp +++ b/clang/test/CodeGenCXX/homogeneous-aggregates.cpp @@ -38,8 +38,8 @@ struct I3 : Base2 {}; struct D5 : I1, I2, I3 {}; // homogeneous aggregate -// PPC: define void @_Z7func_D12D1(%struct.D1* noalias sret align 8 %agg.result, [3 x i64] %x.coerce) -// ARM32: define arm_aapcs_vfpcc void @_Z7func_D12D1(%struct.D1* noalias sret align 8 %agg.result, [3 x i64] %x.coerce) +// PPC: define void @_Z7func_D12D1(%struct.D1* noalias sret align 8 %agg.result, [3 x i64] partialinit %x.coerce) +// ARM32: define arm_aapcs_vfpcc void @_Z7func_D12D1(%struct.D1* noalias sret align 8 %agg.result, [3 x i64] partialinit %x.coerce) // ARM64: define void @_Z7func_D12D1(%struct.D1* noalias sret align 8 %agg.result, %struct.D1* %x) // X64: define dso_local x86_vectorcallcc void @"\01_Z7func_D12D1@@24"(%struct.D1* noalias sret align 8 %agg.result, %struct.D1* %x) D1 CC func_D1(D1 x) { return x; } @@ -50,8 +50,8 @@ // X64: define dso_local x86_vectorcallcc %struct.D2 @"\01_Z7func_D22D2@@24"(%struct.D2 inreg %x.coerce) D2 CC func_D2(D2 x) { return x; } -// PPC: define void @_Z7func_D32D3(%struct.D3* noalias sret align 8 %agg.result, [4 x i64] %x.coerce) -// ARM32: define arm_aapcs_vfpcc void @_Z7func_D32D3(%struct.D3* noalias sret align 8 %agg.result, [4 x i64] %x.coerce) +// PPC: define void @_Z7func_D32D3(%struct.D3* noalias sret align 8 %agg.result, [4 x i64] partialinit %x.coerce) +// ARM32: define arm_aapcs_vfpcc void @_Z7func_D32D3(%struct.D3* noalias sret align 8 %agg.result, [4 x i64] partialinit %x.coerce) // ARM64: define void @_Z7func_D32D3(%struct.D3* noalias sret align 8 %agg.result, %struct.D3* %x) D3 CC func_D3(D3 x) { return x; } diff --git a/clang/test/CodeGenCXX/inline-functions.cpp b/clang/test/CodeGenCXX/inline-functions.cpp --- a/clang/test/CodeGenCXX/inline-functions.cpp +++ b/clang/test/CodeGenCXX/inline-functions.cpp @@ -146,5 +146,5 @@ __attribute__((used)) 
inline S Foo() { return S(); } // NORMAL-LABEL: define linkonce_odr void @_ZN7PR229593FooEv( -// MSVCCOMPAT-LABEL: define linkonce_odr dso_local i8 @"?Foo@PR22959@@YA?AU?$S@H@1@XZ"( +// MSVCCOMPAT-LABEL: define linkonce_odr dso_local partialinit i8 @"?Foo@PR22959@@YA?AU?$S@H@1@XZ"( } diff --git a/clang/test/CodeGenCXX/microsoft-abi-sret-and-byval.cpp b/clang/test/CodeGenCXX/microsoft-abi-sret-and-byval.cpp --- a/clang/test/CodeGenCXX/microsoft-abi-sret-and-byval.cpp +++ b/clang/test/CodeGenCXX/microsoft-abi-sret-and-byval.cpp @@ -169,7 +169,7 @@ // WIN64: define dso_local void @"?small_arg_with_dtor@@YAXUSmallWithDtor@@@Z"(i32 %s.coerce) {{.*}} { // WIN64: call void @"??1SmallWithDtor@@QEAA@XZ" // WIN64: } -// WOA64: define dso_local void @"?small_arg_with_dtor@@YAXUSmallWithDtor@@@Z"(i64 %s.coerce) {{.*}} { +// WOA64: define dso_local void @"?small_arg_with_dtor@@YAXUSmallWithDtor@@@Z"(i64 partialinit %s.coerce) {{.*}} { // WOA64: call void @"??1SmallWithDtor@@QEAA@XZ"(%struct.SmallWithDtor* %s) // WOA64: } @@ -181,7 +181,7 @@ // Test that the eligible non-aggregate is passed directly, but returned // indirectly on ARM64 Windows. 
-// WOA64: define dso_local void @"?small_arg_with_private_member@@YA?AUSmallWithPrivate@@U1@@Z"(%struct.SmallWithPrivate* inreg noalias sret align 4 %agg.result, i64 %s.coerce) {{.*}} { +// WOA64: define dso_local void @"?small_arg_with_private_member@@YA?AUSmallWithPrivate@@U1@@Z"(%struct.SmallWithPrivate* inreg noalias sret align 4 %agg.result, i64 partialinit %s.coerce) {{.*}} { SmallWithPrivate small_arg_with_private_member(SmallWithPrivate s) { return s; } void call_small_arg_with_dtor() { @@ -303,12 +303,12 @@ void thiscall_method_arg(Empty s) {} // LINUX: define {{.*}} void @_ZN5Class19thiscall_method_argE5Empty(%class.Class* %this) // WIN32: define {{.*}} void @"?thiscall_method_arg@Class@@QAEXUEmpty@@@Z"(%class.Class* %this, %struct.Empty* byval(%struct.Empty) align 4 %s) - // WIN64: define linkonce_odr dso_local void @"?thiscall_method_arg@Class@@QEAAXUEmpty@@@Z"(%class.Class* %this, i8 %s.coerce) + // WIN64: define linkonce_odr dso_local void @"?thiscall_method_arg@Class@@QEAAXUEmpty@@@Z"(%class.Class* %this, i8 partialinit %s.coerce) void thiscall_method_arg(EmptyWithCtor s) {} // LINUX: define {{.*}} void @_ZN5Class19thiscall_method_argE13EmptyWithCtor(%class.Class* %this) // WIN32: define {{.*}} void @"?thiscall_method_arg@Class@@QAEXUEmptyWithCtor@@@Z"(%class.Class* %this, %struct.EmptyWithCtor* byval(%struct.EmptyWithCtor) align 4 %s) - // WIN64: define linkonce_odr dso_local void @"?thiscall_method_arg@Class@@QEAAXUEmptyWithCtor@@@Z"(%class.Class* %this, i8 %s.coerce) + // WIN64: define linkonce_odr dso_local void @"?thiscall_method_arg@Class@@QEAAXUEmptyWithCtor@@@Z"(%class.Class* %this, i8 partialinit %s.coerce) void thiscall_method_arg(Small s) {} // LINUX: define {{.*}} void @_ZN5Class19thiscall_method_argE5Small(%class.Class* %this, i32 %s.0) diff --git a/clang/test/CodeGenCXX/pragma-followup_inner.cpp b/clang/test/CodeGenCXX/pragma-followup_inner.cpp --- a/clang/test/CodeGenCXX/pragma-followup_inner.cpp +++ 
b/clang/test/CodeGenCXX/pragma-followup_inner.cpp @@ -21,7 +21,7 @@ // CHECK-DAG: ![[ACCESSGROUP_2:[0-9]+]] = distinct !{} // CHECK-DAG: ![[INNERLOOP_3:[0-9]+]] = distinct !{![[INNERLOOP_3:[0-9]+]], ![[PARALLEL_ACCESSES_4:[0-9]+]], ![[DISTRIBUTE_5:[0-9]+]], ![[DISTRIBUTE_FOLLOWUP_6:[0-9]+]]} -// CHECK-DAG: ![[PARALLEL_ACCESSES_4:[0-9]+]] = !{!"llvm.loop.parallel_accesses", !2} +// CHECK-DAG: ![[PARALLEL_ACCESSES_4:[0-9]+]] = !{!"llvm.loop.parallel_accesses", ![[ACCESSGROUP_2]]} // CHECK-DAG: ![[DISTRIBUTE_5:[0-9]+]] = !{!"llvm.loop.distribute.enable", i1 true} // CHECK-DAG: ![[DISTRIBUTE_FOLLOWUP_6:[0-9]+]] = !{!"llvm.loop.distribute.followup_all", ![[LOOP_7:[0-9]+]]} @@ -30,7 +30,7 @@ // CHECK-DAG: ![[OUTERLOOP_9:[0-9]+]] = distinct !{![[OUTERLOOP_9:[0-9]+]], ![[UNROLLANDJAM_COUNT_10:[0-9]+]], ![[UNROLLANDJAM_FOLLOWUPINNER_11:[0-9]+]]} // CHECK-DAG: ![[UNROLLANDJAM_COUNT_10:[0-9]+]] = !{!"llvm.loop.unroll_and_jam.count", i32 4} -// CHECK-DAG: ![[UNROLLANDJAM_FOLLOWUPINNER_11:[0-9]+]] = !{!"llvm.loop.unroll_and_jam.followup_inner", !12} +// CHECK-DAG: ![[UNROLLANDJAM_FOLLOWUPINNER_11:[0-9]+]] = !{!"llvm.loop.unroll_and_jam.followup_inner", ![[LOOP_12:[0-9]+]]} // CHECK-DAG: ![[LOOP_12:[0-9]+]] = distinct !{![[LOOP_12:[0-9]+]], ![[PARALLEL_ACCESSES_4:[0-9]+]], ![[ISVECTORIZED_13:[0-9]+]], ![[UNROLL_COUNT_13:[0-9]+]], ![[UNROLL_FOLLOWUP_14:[0-9]+]]} // CHECK-DAG: ![[ISVECTORIZED_13:[0-9]+]] = !{!"llvm.loop.isvectorized"} diff --git a/clang/test/CodeGenCXX/pragma-loop-predicate.cpp b/clang/test/CodeGenCXX/pragma-loop-predicate.cpp --- a/clang/test/CodeGenCXX/pragma-loop-predicate.cpp +++ b/clang/test/CodeGenCXX/pragma-loop-predicate.cpp @@ -59,18 +59,18 @@ } -// CHECK: ![[LOOP0]] = distinct !{![[LOOP0]], !3} -// CHECK-NEXT: !3 = !{!"llvm.loop.vectorize.enable", i1 true} +// CHECK: ![[LOOP0]] = distinct !{![[LOOP0]], ![[LOOP0_MEMB:[0-9]+]]} +// CHECK-NEXT: ![[LOOP0_MEMB]] = !{!"llvm.loop.vectorize.enable", i1 true} -// CHECK-NEXT: ![[LOOP1]] = distinct !{![[LOOP1]], 
!5, !3} -// CHECK-NEXT: !5 = !{!"llvm.loop.vectorize.predicate.enable", i1 true} +// CHECK-NEXT: ![[LOOP1]] = distinct !{![[LOOP1]], ![[LOOP1_MEMB:[0-9]+]], ![[LOOP0_MEMB]]} +// CHECK-NEXT: ![[LOOP1_MEMB]] = !{!"llvm.loop.vectorize.predicate.enable", i1 true} -// CHECK-NEXT: ![[LOOP2]] = distinct !{![[LOOP2]], !7, !3} -// CHECK-NEXT: !7 = !{!"llvm.loop.vectorize.predicate.enable", i1 false} +// CHECK-NEXT: ![[LOOP2]] = distinct !{![[LOOP2]], ![[LOOP2_MEMB:[0-9]+]], ![[LOOP0_MEMB]]} +// CHECK-NEXT: ![[LOOP2_MEMB]] = !{!"llvm.loop.vectorize.predicate.enable", i1 false} -// CHECK-NEXT: ![[LOOP3]] = distinct !{![[LOOP3]], !5, !3} +// CHECK-NEXT: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOP3_MEMB:[0-9]+]], ![[LOOP0_MEMB]]} -// CHECK-NEXT: ![[LOOP4]] = distinct !{![[LOOP4]], !10} -// CHECK-NEXT: !10 = !{!"llvm.loop.vectorize.width", i32 1} +// CHECK-NEXT: ![[LOOP4]] = distinct !{![[LOOP4]], ![[LOOP4_MEMB:[0-9]+]]} +// CHECK-NEXT: ![[LOOP4_MEMB]] = !{!"llvm.loop.vectorize.width", i32 1} -// CHECK-NEXT: ![[LOOP5]] = distinct !{![[LOOP5]], !10} +// CHECK-NEXT: ![[LOOP5]] = distinct !{![[LOOP5]], ![[LOOP4_MEMB]]} diff --git a/clang/test/CodeGenCXX/trivial_abi.cpp b/clang/test/CodeGenCXX/trivial_abi.cpp --- a/clang/test/CodeGenCXX/trivial_abi.cpp +++ b/clang/test/CodeGenCXX/trivial_abi.cpp @@ -173,7 +173,7 @@ testReturnLarge(); } -// CHECK: define i64 @_Z20testReturnHasTrivialv() +// CHECK: define partialinit i64 @_Z20testReturnHasTrivialv() // CHECK: %[[RETVAL:.*]] = alloca %[[STRUCT_TRIVIAL:.*]], align 4 // CHECK: %[[COERCE_DIVE:.*]] = getelementptr inbounds %[[STRUCT_TRIVIAL]], %[[STRUCT_TRIVIAL]]* %[[RETVAL]], i32 0, i32 0 // CHECK: %[[V0:.*]] = load i32, i32* %[[COERCE_DIVE]], align 4 diff --git a/clang/test/CodeGenCXX/wasm-args-returns.cpp b/clang/test/CodeGenCXX/wasm-args-returns.cpp --- a/clang/test/CodeGenCXX/wasm-args-returns.cpp +++ b/clang/test/CodeGenCXX/wasm-args-returns.cpp @@ -93,12 +93,12 @@ int d : 3; }; test(one_bitfield); -// CHECK: define i32 
@_Z7forward12one_bitfield(i32 returned %{{.*}}) +// CHECK: define partialinit i32 @_Z7forward12one_bitfield(i32 partialinit returned %{{.*}}) // // CHECK: define void @_Z17test_one_bitfieldv() -// CHECK: %[[call:.*]] = call i32 @_Z16def_one_bitfieldv() -// CHECK: call void @_Z3use12one_bitfield(i32 %[[call]]) +// CHECK: %[[call:.*]] = call partialinit i32 @_Z16def_one_bitfieldv() +// CHECK: call void @_Z3use12one_bitfield(i32 partialinit %[[call]]) // CHECK: ret void // -// CHECK: declare void @_Z3use12one_bitfield(i32) -// CHECK: declare i32 @_Z16def_one_bitfieldv() +// CHECK: declare void @_Z3use12one_bitfield(i32 partialinit) +// CHECK: declare partialinit i32 @_Z16def_one_bitfieldv() diff --git a/clang/test/CodeGenCXX/x86_64-arguments-nacl-x32.cpp b/clang/test/CodeGenCXX/x86_64-arguments-nacl-x32.cpp --- a/clang/test/CodeGenCXX/x86_64-arguments-nacl-x32.cpp +++ b/clang/test/CodeGenCXX/x86_64-arguments-nacl-x32.cpp @@ -27,7 +27,7 @@ // A struct with <= 8 bytes before a member function pointer should still // be allowed in registers, since the member function pointer is only 8 bytes. 
-// CHECK-LABEL: define void @{{.*}}f_struct_with_mfp_0{{.*}}(i64 %a.coerce0, i32 %a.coerce1) +// CHECK-LABEL: define void @{{.*}}f_struct_with_mfp_0{{.*}}(i64 partialinit %a.coerce0, i32 partialinit %a.coerce1) struct struct_with_mfp_0 { char *a; test_struct_mfp b; }; void f_struct_with_mfp_0(struct_with_mfp_0 a) { (void)a; } @@ -46,12 +46,12 @@ /* Struct containing an empty struct */ typedef struct { int* a; test_struct x; double *b; } struct_with_empty; -// CHECK-LABEL: define void @{{.*}}f_pass_struct_with_empty{{.*}}(i64 %x{{.*}}, double* %x +// CHECK-LABEL: define void @{{.*}}f_pass_struct_with_empty{{.*}}(i64 partialinit %x{{.*}}, double* partialinit %x void f_pass_struct_with_empty(struct_with_empty x) { (void) x; } -// CHECK-LABEL: define { i64, double* } @{{.*}}f_return_struct_with_empty +// CHECK-LABEL: define partialinit { i64, double* } @{{.*}}f_return_struct_with_empty struct_with_empty f_return_struct_with_empty() { return {0, {}, 0}; } diff --git a/clang/test/CodeGenCXX/x86_64-arguments.cpp b/clang/test/CodeGenCXX/x86_64-arguments.cpp --- a/clang/test/CodeGenCXX/x86_64-arguments.cpp +++ b/clang/test/CodeGenCXX/x86_64-arguments.cpp @@ -3,7 +3,7 @@ // Basic base class test. struct f0_s0 { unsigned a; }; struct f0_s1 : public f0_s0 { void *b; }; -// CHECK-LABEL: define void @_Z2f05f0_s1(i32 %a0.coerce0, i8* %a0.coerce1) +// CHECK-LABEL: define void @_Z2f05f0_s1(i32 partialinit %a0.coerce0, i8* partialinit %a0.coerce1) void f0(f0_s1 a0) { } // Check with two eight-bytes in base class. @@ -15,11 +15,11 @@ // Check with two eight-bytes in base class and merge. 
struct f2_s0 { unsigned a; unsigned b; float c; }; struct f2_s1 : public f2_s0 { char d;}; -// CHECK-LABEL: define void @_Z2f25f2_s1(i64 %a0.coerce0, i64 %a0.coerce1) +// CHECK-LABEL: define void @_Z2f25f2_s1(i64 partialinit %a0.coerce0, i64 partialinit %a0.coerce1) void f2(f2_s1 a0) { } // PR5831 -// CHECK-LABEL: define void @_Z2f34s3_1(i64 %x.coerce) +// CHECK-LABEL: define void @_Z2f34s3_1(i64 partialinit %x.coerce) struct s3_0 {}; struct s3_1 { struct s3_0 a; long b; }; void f3(struct s3_1 x) {} @@ -87,7 +87,7 @@ B1 b1; }; - // CHECK-LABEL: define i8* @_ZN6PR51793barENS_2B2E(i32* %b2.coerce) + // CHECK-LABEL: define i8* @_ZN6PR51793barENS_2B2E(i32* partialinit %b2.coerce) const void *bar(B2 b2) { return b2.b1.pa; } @@ -129,7 +129,7 @@ int test(outer x) { return x.x + x.f; } - // CHECK-LABEL: define i32 @_ZN5test64testENS_5outerE(i64 %x.coerce0, i32 %x.coerce1) + // CHECK-LABEL: define i32 @_ZN5test64testENS_5outerE(i64 partialinit %x.coerce0, i32 partialinit %x.coerce1) } namespace test7 { diff --git a/clang/test/CodeGenCoroutines/microsoft-abi-operator-coawait.cpp b/clang/test/CodeGenCoroutines/microsoft-abi-operator-coawait.cpp --- a/clang/test/CodeGenCoroutines/microsoft-abi-operator-coawait.cpp +++ b/clang/test/CodeGenCoroutines/microsoft-abi-operator-coawait.cpp @@ -19,7 +19,7 @@ B b; // CHECK: call void @"??__LA@@QEAA?AUno_suspend@@XZ"( a.operator co_await(); - // CHECK-NEXT: call i8 @"??__L@YA?AUno_suspend@@AEBUB@@@Z"( + // CHECK-NEXT: call {{.*}} i8 @"??__L@YA?AUno_suspend@@AEBUB@@@Z"( operator co_await(b); } diff --git a/clang/test/CodeGenObjC/arc.m b/clang/test/CodeGenObjC/arc.m --- a/clang/test/CodeGenObjC/arc.m +++ b/clang/test/CodeGenObjC/arc.m @@ -1563,13 +1563,13 @@ // CHECK: %[[V0:.*]] = call i8* @llvm.objc.retain(i8* %[[A]]) // CHECK: %[[V1:.*]] = call i8* @llvm.objc.retain(i8* %[[B]]) #2 // CHECK: %[[ARRAYINIT_BEGIN:.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* %[[T]], i64 0, i64 0 -// CHECK: %[[V3:.*]] = load i8*, i8** %[[A_ADDR]], 
align 8, !tbaa !7 +// CHECK: %[[V3:.*]] = load i8*, i8** %[[A_ADDR]], align 8, !tbaa // CHECK: %[[V4:.*]] = call i8* @llvm.objc.retain(i8* %[[V3]]) #2 -// CHECK: store i8* %[[V4]], i8** %[[ARRAYINIT_BEGIN]], align 8, !tbaa !7 +// CHECK: store i8* %[[V4]], i8** %[[ARRAYINIT_BEGIN]], align 8, !tbaa // CHECK: %[[ARRAYINIT_ELEMENT:.*]] = getelementptr inbounds i8*, i8** %[[ARRAYINIT_BEGIN]], i64 1 -// CHECK: %[[V5:.*]] = load i8*, i8** %[[B_ADDR]], align 8, !tbaa !7 +// CHECK: %[[V5:.*]] = load i8*, i8** %[[B_ADDR]], align 8, !tbaa // CHECK: %[[V6:.*]] = call i8* @llvm.objc.retain(i8* %[[V5]]) #2 -// CHECK: store i8* %[[V6]], i8** %[[ARRAYINIT_ELEMENT]], align 8, !tbaa !7 +// CHECK: store i8* %[[V6]], i8** %[[ARRAYINIT_ELEMENT]], align 8, !tbaa // CHECK: %[[ARRAY_BEGIN:.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* %[[T]], i32 0, i32 0 // CHECK: %[[V7:.*]] = getelementptr inbounds i8*, i8** %[[ARRAY_BEGIN]], i64 2 @@ -1578,14 +1578,14 @@ // CHECK: %[[ARRAYDESTROY_ELEMENTPAST:.*]] = phi i8** [ %[[V7]], %{{.*}} ], [ %[[ARRAYDESTROY_ELEMENT:.*]], %{{.*}} ] // CHECK: %[[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds i8*, i8** %[[ARRAYDESTROY_ELEMENTPAST]], i64 -1 // CHECK: %[[V8:.*]] = load i8*, i8** %[[ARRAYDESTROY_ELEMENT]], align 8 -// CHECK: call void @llvm.objc.release(i8* %[[V8]]) #2, !clang.imprecise_release !10 +// CHECK: call void @llvm.objc.release(i8* %[[V8]]) #2, !clang.imprecise_release // CHECK-NOT: call void @llvm.objc.release // CHECK: %[[V10:.*]] = load i8*, i8** %[[B_ADDR]], align 8 -// CHECK: call void @llvm.objc.release(i8* %[[V10]]) #2, !clang.imprecise_release !10 +// CHECK: call void @llvm.objc.release(i8* %[[V10]]) #2, !clang.imprecise_release // CHECK: %[[V11:.*]] = load i8*, i8** %[[A_ADDR]], align 8 -// CHECK: call void @llvm.objc.release(i8* %[[V11]]) #2, !clang.imprecise_release !10 +// CHECK: call void @llvm.objc.release(i8* %[[V11]]) #2, !clang.imprecise_release void test72(id a, id b) { __strong id t[] = (__strong id[]){a, b}; diff 
--git a/clang/test/CodeGenObjC/nontrivial-c-struct-exception.m b/clang/test/CodeGenObjC/nontrivial-c-struct-exception.m --- a/clang/test/CodeGenObjC/nontrivial-c-struct-exception.m +++ b/clang/test/CodeGenObjC/nontrivial-c-struct-exception.m @@ -16,12 +16,12 @@ // CHECK: define void @testStrongException() // CHECK: %[[AGG_TMP:.*]] = alloca %[[STRUCT_STRONG]], align 8 // CHECK: %[[AGG_TMP1:.*]] = alloca %[[STRUCT_STRONG]], align 8 -// CHECK: %[[CALL:.*]] = call [2 x i64] @genStrong() +// CHECK: %[[CALL:.*]] = call partialinit [2 x i64] @genStrong() // CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONG]]* %[[AGG_TMP]] to [2 x i64]* // CHECK: store [2 x i64] %[[CALL]], [2 x i64]* %[[V0]], align 8 -// CHECK: invoke [2 x i64] @genStrong() +// CHECK: invoke partialinit [2 x i64] @genStrong() -// CHECK: call void @calleeStrong([2 x i64] %{{.*}}, [2 x i64] %{{.*}}) +// CHECK: call void @calleeStrong([2 x i64] partialinit %{{.*}}, [2 x i64] partialinit %{{.*}}) // CHECK-NEXT: ret void // CHECK: landingpad { i8*, i32 } diff --git a/clang/test/CodeGenObjC/strong-in-c-struct.m b/clang/test/CodeGenObjC/strong-in-c-struct.m --- a/clang/test/CodeGenObjC/strong-in-c-struct.m +++ b/clang/test/CodeGenObjC/strong-in-c-struct.m @@ -422,7 +422,7 @@ *p = getStrongOuter2(); } -// CHECK: define void @test_parameter_StrongSmall([2 x i64] %[[A_COERCE:.*]]) +// CHECK: define void @test_parameter_StrongSmall([2 x i64] partialinit %[[A_COERCE:.*]]) // CHECK: %[[A:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 // CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to [2 x i64]* // CHECK: store [2 x i64] %[[A_COERCE]], [2 x i64]* %[[V0]], align 8 @@ -433,7 +433,7 @@ void test_parameter_StrongSmall(StrongSmall a) { } -// CHECK: define void @test_argument_StrongSmall([2 x i64] %[[A_COERCE:.*]]) +// CHECK: define void @test_argument_StrongSmall([2 x i64] partialinit %[[A_COERCE:.*]]) // CHECK: %[[A:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 // CHECK: %[[TEMP_LVALUE:.*]] = alloca 
%[[STRUCT_STRONGSMALL]], align 8 // CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to [2 x i64]* @@ -443,7 +443,7 @@ // CHECK: call void @__copy_constructor_8_8_t0w4_s8(i8** %[[V1]], i8** %[[V2]]) // CHECK: %[[V3:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[TEMP_LVALUE]] to [2 x i64]* // CHECK: %[[V4:.*]] = load [2 x i64], [2 x i64]* %[[V3]], align 8 -// CHECK: call void @calleeStrongSmall([2 x i64] %[[V4]]) +// CHECK: call void @calleeStrongSmall([2 x i64] partialinit %[[V4]]) // CHECK: %[[V5:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to i8** // CHECK: call void @__destructor_8_s8(i8** %[[V5]]) // CHECK: ret void @@ -452,7 +452,7 @@ calleeStrongSmall(a); } -// CHECK: define [2 x i64] @test_return_StrongSmall([2 x i64] %[[A_COERCE:.*]]) +// CHECK: define partialinit [2 x i64] @test_return_StrongSmall([2 x i64] partialinit %[[A_COERCE:.*]]) // CHECK: %[[RETVAL:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 // CHECK: %[[A:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 // CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to [2 x i64]* @@ -472,7 +472,7 @@ // CHECK: define void @test_destructor_ignored_result() // CHECK: %[[COERCE:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 -// CHECK: %[[CALL:.*]] = call [2 x i64] @getStrongSmall() +// CHECK: %[[CALL:.*]] = call partialinit [2 x i64] @getStrongSmall() // CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[COERCE]] to [2 x i64]* // CHECK: store [2 x i64] %[[CALL]], [2 x i64]* %[[V0]], align 8 // CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[COERCE]] to i8** @@ -485,7 +485,7 @@ // CHECK: define void @test_destructor_ignored_result2(%{{.*}}* %[[C:.*]]) // CHECK: %[[TMP:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 -// CHECK: %[[CALL:.*]] = call [2 x i64]{{.*}}@objc_msgSend +// CHECK: %[[CALL:.*]] = call partialinit [2 x i64]{{.*}}@objc_msgSend // CHECK: %[[V5:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[TMP]] to [2 x i64]* // CHECK: store [2 x i64] %[[CALL]], [2 x i64]* %[[V5]], 
align 8 // CHECK: %[[V6:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[TMP]] to i8** diff --git a/clang/test/CodeGenObjCXX/property-object-conditional-exp.mm b/clang/test/CodeGenObjCXX/property-object-conditional-exp.mm --- a/clang/test/CodeGenObjCXX/property-object-conditional-exp.mm +++ b/clang/test/CodeGenObjCXX/property-object-conditional-exp.mm @@ -22,7 +22,7 @@ CGRect dataRect; CGRect virtualBounds; -// CHECK: [[SRC:%.*]] = call { i8*, i32 } bitcast (i8* (i8*, i8*, ...)* @objc_msgSend +// CHECK: [[SRC:%.*]] = call partialinit { i8*, i32 } bitcast (i8* (i8*, i8*, ...)* @objc_msgSend // CHECK-NEXT: bitcast // CHECK-NEXT:getelementptr inbounds { i8*, i32 }, { i8*, i32 }* [[SRC:%.*]] // CHECK-NEXT:extractvalue diff --git a/clang/test/CodeGenOpenCL/amdgpu-abi-struct-coerce.cl b/clang/test/CodeGenOpenCL/amdgpu-abi-struct-coerce.cl --- a/clang/test/CodeGenOpenCL/amdgpu-abi-struct-coerce.cl +++ b/clang/test/CodeGenOpenCL/amdgpu-abi-struct-coerce.cl @@ -228,7 +228,7 @@ // CHECK: void @kernel_struct_arg(%struct.struct_arg %arg1.coerce) __kernel void kernel_struct_arg(struct_arg_t arg1) { } -// CHECK: void @kernel_struct_padding_arg(%struct.struct_padding_arg %arg1.coerce) +// CHECK: void @kernel_struct_padding_arg(%struct.struct_padding_arg partialinit %arg1.coerce) __kernel void kernel_struct_padding_arg(struct_padding_arg arg1) { } // CHECK: void @kernel_test_struct_of_arrays_arg(%struct.struct_of_arrays_arg %arg1.coerce) @@ -243,10 +243,10 @@ // CHECK: void @kernel_single_array_element_struct_arg(%struct.single_array_element_struct_arg %arg1.coerce) __kernel void kernel_single_array_element_struct_arg(single_array_element_struct_arg_t arg1) { } -// CHECK: void @kernel_single_struct_element_struct_arg(%struct.single_struct_element_struct_arg %arg1.coerce) +// CHECK: void @kernel_single_struct_element_struct_arg(%struct.single_struct_element_struct_arg partialinit %arg1.coerce) __kernel void kernel_single_struct_element_struct_arg(single_struct_element_struct_arg_t arg1) 
{ } -// CHECK: void @kernel_different_size_type_pair_arg(%struct.different_size_type_pair %arg1.coerce) +// CHECK: void @kernel_different_size_type_pair_arg(%struct.different_size_type_pair partialinit %arg1.coerce) __kernel void kernel_different_size_type_pair_arg(different_size_type_pair arg1) { } // CHECK: define void @func_f32_arg(float %arg) @@ -279,7 +279,7 @@ // CHECK: void @func_struct_arg(i32 %arg1.coerce0, float %arg1.coerce1, i32 %arg1.coerce2) void func_struct_arg(struct_arg_t arg1) { } -// CHECK: void @func_struct_padding_arg(i8 %arg1.coerce0, i64 %arg1.coerce1) +// CHECK: void @func_struct_padding_arg(i8 partialinit %arg1.coerce0, i64 partialinit %arg1.coerce1) void func_struct_padding_arg(struct_padding_arg arg1) { } // CHECK: define void @func_struct_char_x8([2 x i32] %arg.coerce) @@ -288,7 +288,7 @@ // CHECK: define void @func_struct_char_x4(i32 %arg.coerce) void func_struct_char_x4(struct_char_x4 arg) { } -// CHECK: define void @func_struct_char_x3(i32 %arg.coerce) +// CHECK: define void @func_struct_char_x3(i32 partialinit %arg.coerce) void func_struct_char_x3(struct_char_x3 arg) { } // CHECK: define void @func_struct_char_x2(i16 %arg.coerce) @@ -303,10 +303,10 @@ // CHECK: void @func_single_array_element_struct_arg([4 x i32] %arg1.coerce) void func_single_array_element_struct_arg(single_array_element_struct_arg_t arg1) { } -// CHECK: void @func_single_struct_element_struct_arg(%struct.inner %arg1.coerce) +// CHECK: void @func_single_struct_element_struct_arg(%struct.inner partialinit %arg1.coerce) void func_single_struct_element_struct_arg(single_struct_element_struct_arg_t arg1) { } -// CHECK: void @func_different_size_type_pair_arg(i64 %arg1.coerce0, i32 %arg1.coerce1) +// CHECK: void @func_different_size_type_pair_arg(i64 partialinit %arg1.coerce0, i32 partialinit %arg1.coerce1) void func_different_size_type_pair_arg(different_size_type_pair arg1) { } // CHECK: void @func_flexible_array_arg(%struct.flexible_array addrspace(5)* nocapture 
byval(%struct.flexible_array) align 4 %arg) @@ -349,7 +349,7 @@ return s; } -// CHECK: define %struct.struct_padding_arg @func_struct_padding_ret() +// CHECK: define partialinit %struct.struct_padding_arg @func_struct_padding_ret() // CHECK: ret %struct.struct_padding_arg zeroinitializer struct_padding_arg func_struct_padding_ret() { @@ -373,7 +373,7 @@ return s; } -// CHECK: define i32 @func_struct_char_x3_ret() +// CHECK: define partialinit i32 @func_struct_char_x3_ret() // CHECK: ret i32 0 struct_char_x3 func_struct_char_x3_ret() { @@ -433,7 +433,7 @@ return u; } -// CHECK: define %struct.different_size_type_pair @func_different_size_type_pair_ret() +// CHECK: define partialinit %struct.different_size_type_pair @func_different_size_type_pair_ret() different_size_type_pair func_different_size_type_pair_ret() { different_size_type_pair s = { 0 }; @@ -454,19 +454,19 @@ void func_reg_state_hi(int4 arg0, int4 arg1, int4 arg2, int arg3, int arg4, struct_arg_t s) { } // XXX - Why don't the inner structs flatten? 
-// CHECK: define void @func_reg_state_num_regs_nested_struct(<4 x i32> %arg0, i32 %arg1, i32 %arg2.coerce0, %struct.nested %arg2.coerce1, i32 %arg3.coerce0, %struct.nested %arg3.coerce1, %struct.num_regs_nested_struct addrspace(5)* nocapture byval(%struct.num_regs_nested_struct) align 8 %arg4) +// CHECK: define void @func_reg_state_num_regs_nested_struct(<4 x i32> %arg0, i32 %arg1, i32 partialinit %arg2.coerce0, %struct.nested partialinit %arg2.coerce1, i32 partialinit %arg3.coerce0, %struct.nested partialinit %arg3.coerce1, %struct.num_regs_nested_struct addrspace(5)* nocapture byval(%struct.num_regs_nested_struct) align 8 %arg4) void func_reg_state_num_regs_nested_struct(int4 arg0, int arg1, num_regs_nested_struct arg2, num_regs_nested_struct arg3, num_regs_nested_struct arg4) { } -// CHECK: define void @func_double_nested_struct_arg(<4 x i32> %arg0, i32 %arg1, i32 %arg2.coerce0, %struct.double_nested %arg2.coerce1, i16 %arg2.coerce2) +// CHECK: define void @func_double_nested_struct_arg(<4 x i32> %arg0, i32 %arg1, i32 partialinit %arg2.coerce0, %struct.double_nested partialinit %arg2.coerce1, i16 partialinit %arg2.coerce2) void func_double_nested_struct_arg(int4 arg0, int arg1, double_nested_struct arg2) { } -// CHECK: define %struct.double_nested_struct @func_double_nested_struct_ret(<4 x i32> %arg0, i32 %arg1) +// CHECK: define partialinit %struct.double_nested_struct @func_double_nested_struct_ret(<4 x i32> %arg0, i32 %arg1) double_nested_struct func_double_nested_struct_ret(int4 arg0, int arg1) { double_nested_struct s = { 0 }; return s; } -// CHECK: define void @func_large_struct_padding_arg_direct(i8 %arg.coerce0, i32 %arg.coerce1, i8 %arg.coerce2, i32 %arg.coerce3, i8 %arg.coerce4, i8 %arg.coerce5, i16 %arg.coerce6, i16 %arg.coerce7, [3 x i8] %arg.coerce8, i64 %arg.coerce9, i32 %arg.coerce10, i8 %arg.coerce11, i32 %arg.coerce12, i16 %arg.coerce13, i8 %arg.coerce14) +// CHECK: define void @func_large_struct_padding_arg_direct(i8 partialinit %arg.coerce0, 
i32 partialinit %arg.coerce1, i8 partialinit %arg.coerce2, i32 partialinit %arg.coerce3, i8 partialinit %arg.coerce4, i8 partialinit %arg.coerce5, i16 partialinit %arg.coerce6, i16 partialinit %arg.coerce7, [3 x i8] partialinit %arg.coerce8, i64 partialinit %arg.coerce9, i32 partialinit %arg.coerce10, i8 partialinit %arg.coerce11, i32 partialinit %arg.coerce12, i16 partialinit %arg.coerce13, i8 partialinit %arg.coerce14) void func_large_struct_padding_arg_direct(large_struct_padding arg) { } // CHECK: define void @func_large_struct_padding_arg_store(%struct.large_struct_padding addrspace(1)* nocapture %out, %struct.large_struct_padding addrspace(5)* nocapture readonly byval(%struct.large_struct_padding) align 8 %arg) @@ -479,7 +479,7 @@ // Function signature from blender, nothing should be passed byval. The v3i32 // should not count as 4 passed registers. -// CHECK: define void @v3i32_pair_reg_count(%struct.int3_pair addrspace(5)* nocapture %arg0, <3 x i32> %arg1.coerce0, <3 x i32> %arg1.coerce1, <3 x i32> %arg2, <3 x i32> %arg3.coerce0, <3 x i32> %arg3.coerce1, <3 x i32> %arg4, float %arg5) +// CHECK: define void @v3i32_pair_reg_count(%struct.int3_pair addrspace(5)* nocapture %arg0, <3 x i32> partialinit %arg1.coerce0, <3 x i32> partialinit %arg1.coerce1, <3 x i32> %arg2, <3 x i32> partialinit %arg3.coerce0, <3 x i32> partialinit %arg3.coerce1, <3 x i32> %arg4, float %arg5) void v3i32_pair_reg_count(int3_pair *arg0, int3_pair arg1, int3 arg2, int3_pair arg3, int3 arg4, float arg5) { } // Each short4 should fit pack into 2 registers. 
diff --git a/clang/test/OpenMP/nvptx_unsupported_type_codegen.cpp b/clang/test/OpenMP/nvptx_unsupported_type_codegen.cpp --- a/clang/test/OpenMP/nvptx_unsupported_type_codegen.cpp +++ b/clang/test/OpenMP/nvptx_unsupported_type_codegen.cpp @@ -38,7 +38,7 @@ void foo(T a = T()) { return; } -// CHECK: define{{ hidden | }}[6 x i64] @{{.+}}bar{{.+}}() +// CHECK: define{{ hidden | }}partialinit [6 x i64] @{{.+}}bar{{.+}}() T bar() { // CHECK: bitcast [[T]]* %{{.+}} to [6 x i64]* // CHECK-NEXT: load [6 x i64], [6 x i64]* %{{.+}}, @@ -47,7 +47,7 @@ } // CHECK: define{{ hidden | }}void @{{.+}}baz{{.+}}() void baz() { -// CHECK: call [6 x i64] @{{.+}}bar{{.+}}() +// CHECK: call partialinit [6 x i64] @{{.+}}bar{{.+}}() // CHECK-NEXT: bitcast [[T]]* %{{.+}} to [6 x i64]* // CHECK-NEXT: store [6 x i64] %{{.+}}, [6 x i64]* %{{.+}}, T t = bar(); @@ -58,7 +58,7 @@ void foo1(T1 a = T1()) { return; } -// CHECK: define{{ hidden | }}[[T1]] @{{.+}}bar1{{.+}}() +// CHECK: define{{ hidden | }}partialinit [[T1]] @{{.+}}bar1{{.+}}() T1 bar1() { // CHECK: load [[T1]], [[T1]]* // CHECK-NEXT: ret [[T1]] @@ -66,7 +66,7 @@ } // CHECK: define{{ hidden | }}void @{{.+}}baz1{{.+}}() void baz1() { -// CHECK: call [[T1]] @{{.+}}bar1{{.+}}() +// CHECK: call partialinit [[T1]] @{{.+}}bar1{{.+}}() T1 t = bar1(); } #pragma omp end declare target diff --git a/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/llvm/include/llvm/Bitcode/LLVMBitCodes.h --- a/llvm/include/llvm/Bitcode/LLVMBitCodes.h +++ b/llvm/include/llvm/Bitcode/LLVMBitCodes.h @@ -640,6 +640,7 @@ ATTR_KIND_PREALLOCATED = 65, ATTR_KIND_NO_MERGE = 66, ATTR_KIND_NULL_POINTER_IS_VALID = 67, + ATTR_KIND_PARTIALINIT = 68, }; enum ComdatSelectionKindCodes { diff --git a/llvm/include/llvm/IR/Attributes.td b/llvm/include/llvm/IR/Attributes.td --- a/llvm/include/llvm/IR/Attributes.td +++ b/llvm/include/llvm/IR/Attributes.td @@ -39,6 +39,9 @@ /// Pass structure by value. 
def ByVal : TypeAttr<"byval">; +/// Structure contains necessarily uninitialized bits +def PartialInit : EnumAttr<"partialinit">; + /// Marks function as being in a cold path. def Cold : EnumAttr<"cold">; diff --git a/llvm/lib/AsmParser/LLLexer.cpp b/llvm/lib/AsmParser/LLLexer.cpp --- a/llvm/lib/AsmParser/LLLexer.cpp +++ b/llvm/lib/AsmParser/LLLexer.cpp @@ -696,6 +696,7 @@ KEYWORD(writeonly); KEYWORD(zeroext); KEYWORD(immarg); + KEYWORD(partialinit); KEYWORD(type); KEYWORD(opaque); diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp --- a/llvm/lib/AsmParser/LLParser.cpp +++ b/llvm/lib/AsmParser/LLParser.cpp @@ -1371,6 +1371,7 @@ case lltok::kw_inalloca: case lltok::kw_nest: case lltok::kw_noalias: + case lltok::kw_partialinit: case lltok::kw_nocapture: case lltok::kw_nonnull: case lltok::kw_returned: @@ -1674,6 +1675,7 @@ case lltok::kw_inalloca: B.addAttribute(Attribute::InAlloca); break; case lltok::kw_inreg: B.addAttribute(Attribute::InReg); break; case lltok::kw_nest: B.addAttribute(Attribute::Nest); break; + case lltok::kw_partialinit: B.addAttribute(Attribute::PartialInit); break; case lltok::kw_noalias: B.addAttribute(Attribute::NoAlias); break; case lltok::kw_nocapture: B.addAttribute(Attribute::NoCapture); break; case lltok::kw_nofree: B.addAttribute(Attribute::NoFree); break; @@ -1771,6 +1773,7 @@ } case lltok::kw_inreg: B.addAttribute(Attribute::InReg); break; case lltok::kw_noalias: B.addAttribute(Attribute::NoAlias); break; + case lltok::kw_partialinit: B.addAttribute(Attribute::PartialInit); break; case lltok::kw_nonnull: B.addAttribute(Attribute::NonNull); break; case lltok::kw_signext: B.addAttribute(Attribute::SExt); break; case lltok::kw_zeroext: B.addAttribute(Attribute::ZExt); break; diff --git a/llvm/lib/AsmParser/LLToken.h b/llvm/lib/AsmParser/LLToken.h --- a/llvm/lib/AsmParser/LLToken.h +++ b/llvm/lib/AsmParser/LLToken.h @@ -196,6 +196,7 @@ kw_naked, kw_nest, kw_noalias, + kw_partialinit, kw_nobuiltin, kw_nocapture, 
kw_noduplicate, diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp --- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp @@ -1532,6 +1532,8 @@ return Attribute::SanitizeMemTag; case bitc::ATTR_KIND_PREALLOCATED: return Attribute::Preallocated; + case bitc::ATTR_KIND_PARTIALINIT: + return Attribute::PartialInit; } } diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp --- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp +++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -731,6 +731,8 @@ return bitc::ATTR_KIND_SANITIZE_MEMTAG; case Attribute::Preallocated: return bitc::ATTR_KIND_PREALLOCATED; + case Attribute::PartialInit: + return bitc::ATTR_KIND_PARTIALINIT; case Attribute::EndAttrKinds: llvm_unreachable("Can not encode end-attribute kinds marker."); case Attribute::None: diff --git a/llvm/lib/IR/Attributes.cpp b/llvm/lib/IR/Attributes.cpp --- a/llvm/lib/IR/Attributes.cpp +++ b/llvm/lib/IR/Attributes.cpp @@ -443,6 +443,8 @@ return "cold"; if (hasAttribute(Attribute::ImmArg)) return "immarg"; + if (hasAttribute(Attribute::PartialInit)) + return "partialinit"; if (hasAttribute(Attribute::ByVal)) { std::string Result; diff --git a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp --- a/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp +++ b/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp @@ -134,6 +134,12 @@ SmallVector ArgAttrVec; AttributeList PAL = F->getAttributes(); + // The attribute list we'll use for promoted arguments. We mark them + // PartialInit because there's no way of knowing the data's initialization + // state on the other side of a pointer or reference. 
+ AttributeSet PartialInitAttrSet = + AttributeSet().addAttribute(F->getContext(), Attribute::PartialInit); + // First, determine the new argument list unsigned ArgNo = 0; for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; @@ -144,7 +150,7 @@ StructType *STy = cast(AgTy); Params.insert(Params.end(), STy->element_begin(), STy->element_end()); ArgAttrVec.insert(ArgAttrVec.end(), STy->getNumElements(), - AttributeSet()); + PartialInitAttrSet); ++NumByValArgsPromoted; } else if (!ArgsToPromote.count(&*I)) { // Unchanged argument @@ -198,7 +204,7 @@ Params.push_back(GetElementPtrInst::getIndexedType( cast(I->getType())->getElementType(), ArgIndex.second)); - ArgAttrVec.push_back(AttributeSet()); + ArgAttrVec.push_back(PartialInitAttrSet); assert(Params.back()); } @@ -267,7 +273,7 @@ // TODO: Tell AA about the new values? Args.push_back(IRB.CreateLoad(STy->getElementType(i), Idx, Idx->getName() + ".val")); - ArgAttrVec.push_back(AttributeSet()); + ArgAttrVec.push_back(PartialInitAttrSet); } } else if (!I->use_empty()) { // Non-dead argument: insert GEPs and loads as appropriate. 
@@ -310,7 +316,7 @@ newLoad->setAAMetadata(AAInfo); Args.push_back(newLoad); - ArgAttrVec.push_back(AttributeSet()); + ArgAttrVec.push_back(PartialInitAttrSet); } } diff --git a/llvm/lib/Transforms/Utils/CodeExtractor.cpp b/llvm/lib/Transforms/Utils/CodeExtractor.cpp --- a/llvm/lib/Transforms/Utils/CodeExtractor.cpp +++ b/llvm/lib/Transforms/Utils/CodeExtractor.cpp @@ -929,6 +929,7 @@ case Attribute::StrictFP: case Attribute::UWTable: case Attribute::NoCfCheck: + case Attribute::PartialInit: break; } diff --git a/llvm/test/Transforms/ArgumentPromotion/2008-02-01-ReturnAttrs.ll b/llvm/test/Transforms/ArgumentPromotion/2008-02-01-ReturnAttrs.ll --- a/llvm/test/Transforms/ArgumentPromotion/2008-02-01-ReturnAttrs.ll +++ b/llvm/test/Transforms/ArgumentPromotion/2008-02-01-ReturnAttrs.ll @@ -3,7 +3,7 @@ define internal i32 @deref(i32* %x) nounwind { ; CHECK-LABEL: define {{[^@]+}}@deref -; CHECK-SAME: (i32 [[X_VAL:%.*]]) +; CHECK-SAME: (i32 partialinit [[X_VAL:%.*]]) ; CHECK-NEXT: entry: ; CHECK-NEXT: ret i32 [[X_VAL]] ; @@ -19,7 +19,7 @@ ; CHECK-NEXT: [[X_ADDR:%.*]] = alloca i32 ; CHECK-NEXT: store i32 [[X]], i32* [[X_ADDR]], align 4 ; CHECK-NEXT: [[X_ADDR_VAL:%.*]] = load i32, i32* [[X_ADDR]], align 4 -; CHECK-NEXT: [[TMP1:%.*]] = call i32 @deref(i32 [[X_ADDR_VAL]]) +; CHECK-NEXT: [[TMP1:%.*]] = call i32 @deref(i32 partialinit [[X_ADDR_VAL]]) ; CHECK-NEXT: ret i32 [[TMP1]] ; entry: diff --git a/llvm/test/Transforms/ArgumentPromotion/X86/attributes.ll b/llvm/test/Transforms/ArgumentPromotion/X86/attributes.ll --- a/llvm/test/Transforms/ArgumentPromotion/X86/attributes.ll +++ b/llvm/test/Transforms/ArgumentPromotion/X86/attributes.ll @@ -46,7 +46,7 @@ define internal fastcc void @promote_avx2(<4 x i64>* %arg, <4 x i64>* readonly %arg1) #0 { ; CHECK-LABEL: define {{[^@]+}}@promote_avx2 -; CHECK-SAME: (<4 x i64>* [[ARG:%.*]], <4 x i64> [[ARG1_VAL:%.*]]) +; CHECK-SAME: (<4 x i64>* [[ARG:%.*]], <4 x i64> partialinit [[ARG1_VAL:%.*]]) ; CHECK-NEXT: bb: ; CHECK-NEXT: store <4 
x i64> [[ARG1_VAL]], <4 x i64>* [[ARG]] ; CHECK-NEXT: ret void @@ -66,7 +66,7 @@ ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i64>* [[TMP]] to i8* ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 32 [[TMP3]], i8 0, i64 32, i1 false) ; CHECK-NEXT: [[TMP_VAL:%.*]] = load <4 x i64>, <4 x i64>* [[TMP]] -; CHECK-NEXT: call fastcc void @promote_avx2(<4 x i64>* [[TMP2]], <4 x i64> [[TMP_VAL]]) +; CHECK-NEXT: call fastcc void @promote_avx2(<4 x i64>* [[TMP2]], <4 x i64> partialinit [[TMP_VAL]]) ; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i64>, <4 x i64>* [[TMP2]], align 32 ; CHECK-NEXT: store <4 x i64> [[TMP4]], <4 x i64>* [[ARG]], align 2 ; CHECK-NEXT: ret void diff --git a/llvm/test/Transforms/ArgumentPromotion/X86/min-legal-vector-width.ll b/llvm/test/Transforms/ArgumentPromotion/X86/min-legal-vector-width.ll --- a/llvm/test/Transforms/ArgumentPromotion/X86/min-legal-vector-width.ll +++ b/llvm/test/Transforms/ArgumentPromotion/X86/min-legal-vector-width.ll @@ -9,7 +9,7 @@ ; This should promote define internal fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer512(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #0 { ; CHECK-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer512_call_avx512_legal512_prefer512 -; CHECK-SAME: (<8 x i64>* [[ARG:%.*]], <8 x i64> [[ARG1_VAL:%.*]]) +; CHECK-SAME: (<8 x i64>* [[ARG:%.*]], <8 x i64> partialinit [[ARG1_VAL:%.*]]) ; CHECK-NEXT: bb: ; CHECK-NEXT: store <8 x i64> [[ARG1_VAL]], <8 x i64>* [[ARG]] ; CHECK-NEXT: ret void @@ -29,7 +29,7 @@ ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64>* [[TMP]] to i8* ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 32 [[TMP3]], i8 0, i64 32, i1 false) ; CHECK-NEXT: [[TMP_VAL:%.*]] = load <8 x i64>, <8 x i64>* [[TMP]] -; CHECK-NEXT: call fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer512(<8 x i64>* [[TMP2]], <8 x i64> [[TMP_VAL]]) +; CHECK-NEXT: call fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer512(<8 x i64>* [[TMP2]], <8 x 
i64> partialinit [[TMP_VAL]]) ; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[TMP2]], align 32 ; CHECK-NEXT: store <8 x i64> [[TMP4]], <8 x i64>* [[ARG]], align 2 ; CHECK-NEXT: ret void @@ -48,7 +48,7 @@ ; This should promote define internal fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #1 { ; CHECK-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer256_call_avx512_legal512_prefer256 -; CHECK-SAME: (<8 x i64>* [[ARG:%.*]], <8 x i64> [[ARG1_VAL:%.*]]) +; CHECK-SAME: (<8 x i64>* [[ARG:%.*]], <8 x i64> partialinit [[ARG1_VAL:%.*]]) ; CHECK-NEXT: bb: ; CHECK-NEXT: store <8 x i64> [[ARG1_VAL]], <8 x i64>* [[ARG]] ; CHECK-NEXT: ret void @@ -68,7 +68,7 @@ ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64>* [[TMP]] to i8* ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 32 [[TMP3]], i8 0, i64 32, i1 false) ; CHECK-NEXT: [[TMP_VAL:%.*]] = load <8 x i64>, <8 x i64>* [[TMP]] -; CHECK-NEXT: call fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer256(<8 x i64>* [[TMP2]], <8 x i64> [[TMP_VAL]]) +; CHECK-NEXT: call fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer256(<8 x i64>* [[TMP2]], <8 x i64> partialinit [[TMP_VAL]]) ; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[TMP2]], align 32 ; CHECK-NEXT: store <8 x i64> [[TMP4]], <8 x i64>* [[ARG]], align 2 ; CHECK-NEXT: ret void @@ -87,7 +87,7 @@ ; This should promote define internal fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #1 { ; CHECK-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer512_call_avx512_legal512_prefer256 -; CHECK-SAME: (<8 x i64>* [[ARG:%.*]], <8 x i64> [[ARG1_VAL:%.*]]) +; CHECK-SAME: (<8 x i64>* [[ARG:%.*]], <8 x i64> partialinit [[ARG1_VAL:%.*]]) ; CHECK-NEXT: bb: ; CHECK-NEXT: store <8 x i64> [[ARG1_VAL]], <8 x i64>* [[ARG]] ; CHECK-NEXT: ret void @@ -107,7 +107,7 @@ ; CHECK-NEXT: 
[[TMP3:%.*]] = bitcast <8 x i64>* [[TMP]] to i8* ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 32 [[TMP3]], i8 0, i64 32, i1 false) ; CHECK-NEXT: [[TMP_VAL:%.*]] = load <8 x i64>, <8 x i64>* [[TMP]] -; CHECK-NEXT: call fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer256(<8 x i64>* [[TMP2]], <8 x i64> [[TMP_VAL]]) +; CHECK-NEXT: call fastcc void @callee_avx512_legal512_prefer512_call_avx512_legal512_prefer256(<8 x i64>* [[TMP2]], <8 x i64> partialinit [[TMP_VAL]]) ; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[TMP2]], align 32 ; CHECK-NEXT: store <8 x i64> [[TMP4]], <8 x i64>* [[ARG]], align 2 ; CHECK-NEXT: ret void @@ -126,7 +126,7 @@ ; This should promote define internal fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer512(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #0 { ; CHECK-LABEL: define {{[^@]+}}@callee_avx512_legal512_prefer256_call_avx512_legal512_prefer512 -; CHECK-SAME: (<8 x i64>* [[ARG:%.*]], <8 x i64> [[ARG1_VAL:%.*]]) +; CHECK-SAME: (<8 x i64>* [[ARG:%.*]], <8 x i64> partialinit [[ARG1_VAL:%.*]]) ; CHECK-NEXT: bb: ; CHECK-NEXT: store <8 x i64> [[ARG1_VAL]], <8 x i64>* [[ARG]] ; CHECK-NEXT: ret void @@ -146,7 +146,7 @@ ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64>* [[TMP]] to i8* ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 32 [[TMP3]], i8 0, i64 32, i1 false) ; CHECK-NEXT: [[TMP_VAL:%.*]] = load <8 x i64>, <8 x i64>* [[TMP]] -; CHECK-NEXT: call fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer512(<8 x i64>* [[TMP2]], <8 x i64> [[TMP_VAL]]) +; CHECK-NEXT: call fastcc void @callee_avx512_legal512_prefer256_call_avx512_legal512_prefer512(<8 x i64>* [[TMP2]], <8 x i64> partialinit [[TMP_VAL]]) ; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[TMP2]], align 32 ; CHECK-NEXT: store <8 x i64> [[TMP4]], <8 x i64>* [[ARG]], align 2 ; CHECK-NEXT: ret void @@ -243,7 +243,7 @@ ; This should promote define internal fastcc void 
@callee_avx2_legal256_prefer256_call_avx2_legal512_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #3 { ; CHECK-LABEL: define {{[^@]+}}@callee_avx2_legal256_prefer256_call_avx2_legal512_prefer256 -; CHECK-SAME: (<8 x i64>* [[ARG:%.*]], <8 x i64> [[ARG1_VAL:%.*]]) +; CHECK-SAME: (<8 x i64>* [[ARG:%.*]], <8 x i64> partialinit [[ARG1_VAL:%.*]]) ; CHECK-NEXT: bb: ; CHECK-NEXT: store <8 x i64> [[ARG1_VAL]], <8 x i64>* [[ARG]] ; CHECK-NEXT: ret void @@ -263,7 +263,7 @@ ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64>* [[TMP]] to i8* ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 32 [[TMP3]], i8 0, i64 32, i1 false) ; CHECK-NEXT: [[TMP_VAL:%.*]] = load <8 x i64>, <8 x i64>* [[TMP]] -; CHECK-NEXT: call fastcc void @callee_avx2_legal256_prefer256_call_avx2_legal512_prefer256(<8 x i64>* [[TMP2]], <8 x i64> [[TMP_VAL]]) +; CHECK-NEXT: call fastcc void @callee_avx2_legal256_prefer256_call_avx2_legal512_prefer256(<8 x i64>* [[TMP2]], <8 x i64> partialinit [[TMP_VAL]]) ; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[TMP2]], align 32 ; CHECK-NEXT: store <8 x i64> [[TMP4]], <8 x i64>* [[ARG]], align 2 ; CHECK-NEXT: ret void @@ -282,7 +282,7 @@ ; This should promote define internal fastcc void @callee_avx2_legal512_prefer256_call_avx2_legal256_prefer256(<8 x i64>* %arg, <8 x i64>* readonly %arg1) #4 { ; CHECK-LABEL: define {{[^@]+}}@callee_avx2_legal512_prefer256_call_avx2_legal256_prefer256 -; CHECK-SAME: (<8 x i64>* [[ARG:%.*]], <8 x i64> [[ARG1_VAL:%.*]]) +; CHECK-SAME: (<8 x i64>* [[ARG:%.*]], <8 x i64> partialinit [[ARG1_VAL:%.*]]) ; CHECK-NEXT: bb: ; CHECK-NEXT: store <8 x i64> [[ARG1_VAL]], <8 x i64>* [[ARG]] ; CHECK-NEXT: ret void @@ -302,7 +302,7 @@ ; CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i64>* [[TMP]] to i8* ; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 32 [[TMP3]], i8 0, i64 32, i1 false) ; CHECK-NEXT: [[TMP_VAL:%.*]] = load <8 x i64>, <8 x i64>* [[TMP]] -; CHECK-NEXT: call fastcc void 
@callee_avx2_legal512_prefer256_call_avx2_legal256_prefer256(<8 x i64>* [[TMP2]], <8 x i64> [[TMP_VAL]]) +; CHECK-NEXT: call fastcc void @callee_avx2_legal512_prefer256_call_avx2_legal256_prefer256(<8 x i64>* [[TMP2]], <8 x i64> partialinit [[TMP_VAL]]) ; CHECK-NEXT: [[TMP4:%.*]] = load <8 x i64>, <8 x i64>* [[TMP2]], align 32 ; CHECK-NEXT: store <8 x i64> [[TMP4]], <8 x i64>* [[ARG]], align 2 ; CHECK-NEXT: ret void @@ -321,7 +321,7 @@ ; If the arguments are scalar, its ok to promote. define internal i32 @scalar_callee_avx512_legal256_prefer256_call_avx512_legal512_prefer256(i32* %X, i32* %Y) #2 { ; CHECK-LABEL: define {{[^@]+}}@scalar_callee_avx512_legal256_prefer256_call_avx512_legal512_prefer256 -; CHECK-SAME: (i32 [[X_VAL:%.*]], i32 [[Y_VAL:%.*]]) +; CHECK-SAME: (i32 partialinit [[X_VAL:%.*]], i32 partialinit [[Y_VAL:%.*]]) ; CHECK-NEXT: [[C:%.*]] = add i32 [[X_VAL]], [[Y_VAL]] ; CHECK-NEXT: ret i32 [[C]] ; @@ -338,7 +338,7 @@ ; CHECK-NEXT: store i32 1, i32* [[A]] ; CHECK-NEXT: [[A_VAL:%.*]] = load i32, i32* [[A]] ; CHECK-NEXT: [[B_VAL:%.*]] = load i32, i32* [[B]] -; CHECK-NEXT: [[C:%.*]] = call i32 @scalar_callee_avx512_legal256_prefer256_call_avx512_legal512_prefer256(i32 [[A_VAL]], i32 [[B_VAL]]) +; CHECK-NEXT: [[C:%.*]] = call i32 @scalar_callee_avx512_legal256_prefer256_call_avx512_legal512_prefer256(i32 partialinit [[A_VAL]], i32 partialinit [[B_VAL]]) ; CHECK-NEXT: ret i32 [[C]] ; %A = alloca i32 @@ -350,7 +350,7 @@ ; If the arguments are scalar, its ok to promote. 
define internal i32 @scalar_callee_avx512_legal512_prefer256_call_avx512_legal256_prefer256(i32* %X, i32* %Y) #2 { ; CHECK-LABEL: define {{[^@]+}}@scalar_callee_avx512_legal512_prefer256_call_avx512_legal256_prefer256 -; CHECK-SAME: (i32 [[X_VAL:%.*]], i32 [[Y_VAL:%.*]]) +; CHECK-SAME: (i32 partialinit [[X_VAL:%.*]], i32 partialinit [[Y_VAL:%.*]]) ; CHECK-NEXT: [[C:%.*]] = add i32 [[X_VAL]], [[Y_VAL]] ; CHECK-NEXT: ret i32 [[C]] ; @@ -367,7 +367,7 @@ ; CHECK-NEXT: store i32 1, i32* [[A]] ; CHECK-NEXT: [[A_VAL:%.*]] = load i32, i32* [[A]] ; CHECK-NEXT: [[B_VAL:%.*]] = load i32, i32* [[B]] -; CHECK-NEXT: [[C:%.*]] = call i32 @scalar_callee_avx512_legal512_prefer256_call_avx512_legal256_prefer256(i32 [[A_VAL]], i32 [[B_VAL]]) +; CHECK-NEXT: [[C:%.*]] = call i32 @scalar_callee_avx512_legal512_prefer256_call_avx512_legal256_prefer256(i32 partialinit [[A_VAL]], i32 partialinit [[B_VAL]]) ; CHECK-NEXT: ret i32 [[C]] ; %A = alloca i32 diff --git a/llvm/test/Transforms/ArgumentPromotion/aggregate-promote.ll b/llvm/test/Transforms/ArgumentPromotion/aggregate-promote.ll --- a/llvm/test/Transforms/ArgumentPromotion/aggregate-promote.ll +++ b/llvm/test/Transforms/ArgumentPromotion/aggregate-promote.ll @@ -7,7 +7,7 @@ define internal i32 @test(%T* %p) { ; CHECK-LABEL: define {{[^@]+}}@test -; CHECK-SAME: (i32 [[P_0_2_VAL:%.*]], i32 [[P_0_3_VAL:%.*]]) +; CHECK-SAME: (i32 partialinit [[P_0_2_VAL:%.*]], i32 partialinit [[P_0_3_VAL:%.*]]) ; CHECK-NEXT: entry: ; CHECK-NEXT: [[V:%.*]] = add i32 [[P_0_3_VAL]], [[P_0_2_VAL]] ; CHECK-NEXT: ret i32 [[V]] @@ -28,7 +28,7 @@ ; CHECK-NEXT: [[G_IDX_VAL:%.*]] = load i32, i32* [[G_IDX]] ; CHECK-NEXT: [[G_IDX1:%.*]] = getelementptr [[T]], %T* @G, i64 0, i32 3 ; CHECK-NEXT: [[G_IDX1_VAL:%.*]] = load i32, i32* [[G_IDX1]] -; CHECK-NEXT: [[V:%.*]] = call i32 @test(i32 [[G_IDX_VAL]], i32 [[G_IDX1_VAL]]) +; CHECK-NEXT: [[V:%.*]] = call i32 @test(i32 partialinit [[G_IDX_VAL]], i32 partialinit [[G_IDX1_VAL]]) ; CHECK-NEXT: ret i32 [[V]] ; entry: diff 
--git a/llvm/test/Transforms/ArgumentPromotion/attrs.ll b/llvm/test/Transforms/ArgumentPromotion/attrs.ll --- a/llvm/test/Transforms/ArgumentPromotion/attrs.ll +++ b/llvm/test/Transforms/ArgumentPromotion/attrs.ll @@ -7,7 +7,7 @@ ; Don't drop 'byval' on %X here. define internal void @f(%struct.ss* byval %b, i32* byval %X, i32 %i) nounwind { ; CHECK-LABEL: define {{[^@]+}}@f -; CHECK-SAME: (i32 [[B_0:%.*]], i64 [[B_1:%.*]], i32* byval [[X:%.*]], i32 [[I:%.*]]) +; CHECK-SAME: (i32 partialinit [[B_0:%.*]], i64 partialinit [[B_1:%.*]], i32* byval [[X:%.*]], i32 [[I:%.*]]) ; CHECK-NEXT: entry: ; CHECK-NEXT: [[B:%.*]] = alloca [[STRUCT_SS:%.*]], align 8 ; CHECK-NEXT: [[DOT0:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[B]], i32 0, i32 0 @@ -46,7 +46,7 @@ ; CHECK-NEXT: [[S_0_VAL:%.*]] = load i32, i32* [[S_0]], align 4 ; CHECK-NEXT: [[S_1:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[S]], i32 0, i32 1 ; CHECK-NEXT: [[S_1_VAL:%.*]] = load i64, i64* [[S_1]], align 4 -; CHECK-NEXT: call void @f(i32 [[S_0_VAL]], i64 [[S_1_VAL]], i32* byval [[X]], i32 zeroext 0) +; CHECK-NEXT: call void @f(i32 partialinit [[S_0_VAL]], i64 partialinit [[S_1_VAL]], i32* byval [[X]], i32 zeroext 0) ; CHECK-NEXT: ret i32 0 ; entry: diff --git a/llvm/test/Transforms/ArgumentPromotion/basictest.ll b/llvm/test/Transforms/ArgumentPromotion/basictest.ll --- a/llvm/test/Transforms/ArgumentPromotion/basictest.ll +++ b/llvm/test/Transforms/ArgumentPromotion/basictest.ll @@ -4,7 +4,7 @@ define internal i32 @test(i32* %X, i32* %Y) { ; CHECK-LABEL: define {{[^@]+}}@test -; CHECK-SAME: (i32 [[X_VAL:%.*]], i32 [[Y_VAL:%.*]]) +; CHECK-SAME: (i32 partialinit [[X_VAL:%.*]], i32 partialinit [[Y_VAL:%.*]]) ; CHECK-NEXT: [[C:%.*]] = add i32 [[X_VAL]], [[Y_VAL]] ; CHECK-NEXT: ret i32 [[C]] ; @@ -16,8 +16,8 @@ define internal i32 @caller(i32* %B) { ; CHECK-LABEL: define {{[^@]+}}@caller -; CHECK-SAME: (i32 [[B_VAL1:%.*]]) -; CHECK-NEXT: [[C:%.*]] = call i32 @test(i32 1, i32 [[B_VAL1]]) +; CHECK-SAME: (i32 
partialinit [[B_VAL1:%.*]]) +; CHECK-NEXT: [[C:%.*]] = call i32 @test(i32 partialinit 1, i32 partialinit [[B_VAL1]]) ; CHECK-NEXT: ret i32 [[C]] ; %A = alloca i32 @@ -28,7 +28,7 @@ define i32 @callercaller() { ; CHECK-LABEL: define {{[^@]+}}@callercaller() -; CHECK-NEXT: [[X:%.*]] = call i32 @caller(i32 2) +; CHECK-NEXT: [[X:%.*]] = call i32 @caller(i32 partialinit 2) ; CHECK-NEXT: ret i32 [[X]] ; %B = alloca i32 diff --git a/llvm/test/Transforms/ArgumentPromotion/byval-2.ll b/llvm/test/Transforms/ArgumentPromotion/byval-2.ll --- a/llvm/test/Transforms/ArgumentPromotion/byval-2.ll +++ b/llvm/test/Transforms/ArgumentPromotion/byval-2.ll @@ -9,7 +9,7 @@ define internal void @f(%struct.ss* byval %b, i32* byval %X) nounwind { ; CHECK-LABEL: define {{[^@]+}}@f -; CHECK-SAME: (i32 [[B_0:%.*]], i64 [[B_1:%.*]], i32* byval [[X:%.*]]) +; CHECK-SAME: (i32 partialinit [[B_0:%.*]], i64 partialinit [[B_1:%.*]], i32* byval [[X:%.*]]) ; CHECK-NEXT: entry: ; CHECK-NEXT: [[B:%.*]] = alloca [[STRUCT_SS:%.*]], align 8 ; CHECK-NEXT: [[DOT0:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[B]], i32 0, i32 0 @@ -46,7 +46,7 @@ ; CHECK-NEXT: [[S_0_VAL:%.*]] = load i32, i32* [[S_0]], align 4 ; CHECK-NEXT: [[S_1:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[S]], i32 0, i32 1 ; CHECK-NEXT: [[S_1_VAL:%.*]] = load i64, i64* [[S_1]], align 4 -; CHECK-NEXT: call void @f(i32 [[S_0_VAL]], i64 [[S_1_VAL]], i32* byval [[X]]) +; CHECK-NEXT: call void @f(i32 partialinit [[S_0_VAL]], i64 partialinit [[S_1_VAL]], i32* byval [[X]]) ; CHECK-NEXT: ret i32 0 ; entry: diff --git a/llvm/test/Transforms/ArgumentPromotion/byval.ll b/llvm/test/Transforms/ArgumentPromotion/byval.ll --- a/llvm/test/Transforms/ArgumentPromotion/byval.ll +++ b/llvm/test/Transforms/ArgumentPromotion/byval.ll @@ -8,7 +8,7 @@ define internal void @f(%struct.ss* byval %b) nounwind { ; CHECK-LABEL: define {{[^@]+}}@f -; CHECK-SAME: (i32 [[B_0:%.*]], i64 [[B_1:%.*]]) +; CHECK-SAME: (i32 partialinit [[B_0:%.*]], i64 partialinit 
[[B_1:%.*]]) ; CHECK-NEXT: entry: ; CHECK-NEXT: [[B:%.*]] = alloca [[STRUCT_SS:%.*]], align 4 ; CHECK-NEXT: [[DOT0:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[B]], i32 0, i32 0 @@ -32,7 +32,7 @@ define internal void @g(%struct.ss* byval align 32 %b) nounwind { ; CHECK-LABEL: define {{[^@]+}}@g -; CHECK-SAME: (i32 [[B_0:%.*]], i64 [[B_1:%.*]]) +; CHECK-SAME: (i32 partialinit [[B_0:%.*]], i64 partialinit [[B_1:%.*]]) ; CHECK-NEXT: entry: ; CHECK-NEXT: [[B:%.*]] = alloca [[STRUCT_SS:%.*]], align 32 ; CHECK-NEXT: [[DOT0:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[B]], i32 0, i32 0 @@ -66,12 +66,12 @@ ; CHECK-NEXT: [[S_0_VAL:%.*]] = load i32, i32* [[S_0]], align 4 ; CHECK-NEXT: [[S_1:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[S]], i32 0, i32 1 ; CHECK-NEXT: [[S_1_VAL:%.*]] = load i64, i64* [[S_1]], align 4 -; CHECK-NEXT: call void @f(i32 [[S_0_VAL]], i64 [[S_1_VAL]]) +; CHECK-NEXT: call void @f(i32 partialinit [[S_0_VAL]], i64 partialinit [[S_1_VAL]]) ; CHECK-NEXT: [[S_01:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[S]], i32 0, i32 0 ; CHECK-NEXT: [[S_01_VAL:%.*]] = load i32, i32* [[S_01]], align 4 ; CHECK-NEXT: [[S_12:%.*]] = getelementptr [[STRUCT_SS]], %struct.ss* [[S]], i32 0, i32 1 ; CHECK-NEXT: [[S_12_VAL:%.*]] = load i64, i64* [[S_12]], align 4 -; CHECK-NEXT: call void @g(i32 [[S_01_VAL]], i64 [[S_12_VAL]]) +; CHECK-NEXT: call void @g(i32 partialinit [[S_01_VAL]], i64 partialinit [[S_12_VAL]]) ; CHECK-NEXT: ret i32 0 ; entry: diff --git a/llvm/test/Transforms/ArgumentPromotion/chained.ll b/llvm/test/Transforms/ArgumentPromotion/chained.ll --- a/llvm/test/Transforms/ArgumentPromotion/chained.ll +++ b/llvm/test/Transforms/ArgumentPromotion/chained.ll @@ -7,7 +7,7 @@ define internal i32 @test(i32** %x) { ; CHECK-LABEL: define {{[^@]+}}@test -; CHECK-SAME: (i32 [[X_VAL_VAL:%.*]]) +; CHECK-SAME: (i32 partialinit [[X_VAL_VAL:%.*]]) ; CHECK-NEXT: entry: ; CHECK-NEXT: ret i32 [[X_VAL_VAL]] ; @@ -22,7 +22,7 @@ ; CHECK-NEXT: entry: ; CHECK-NEXT: 
[[G2_VAL:%.*]] = load i32*, i32** @G2 ; CHECK-NEXT: [[G2_VAL_VAL:%.*]] = load i32, i32* [[G2_VAL]] -; CHECK-NEXT: [[X:%.*]] = call i32 @test(i32 [[G2_VAL_VAL]]) +; CHECK-NEXT: [[X:%.*]] = call i32 @test(i32 partialinit [[G2_VAL_VAL]]) ; CHECK-NEXT: ret i32 [[X]] ; entry: diff --git a/llvm/test/Transforms/ArgumentPromotion/control-flow2.ll b/llvm/test/Transforms/ArgumentPromotion/control-flow2.ll --- a/llvm/test/Transforms/ArgumentPromotion/control-flow2.ll +++ b/llvm/test/Transforms/ArgumentPromotion/control-flow2.ll @@ -6,7 +6,7 @@ define internal i32 @callee(i1 %C, i32* %P) { ; CHECK-LABEL: define {{[^@]+}}@callee -; CHECK-SAME: (i1 [[C:%.*]], i32 [[P_VAL:%.*]]) +; CHECK-SAME: (i1 [[C:%.*]], i32 partialinit [[P_VAL:%.*]]) ; CHECK-NEXT: br i1 [[C]], label [[T:%.*]], label [[F:%.*]] ; CHECK: T: ; CHECK-NEXT: ret i32 17 @@ -28,7 +28,7 @@ ; CHECK-NEXT: [[A:%.*]] = alloca i32 ; CHECK-NEXT: store i32 17, i32* [[A]] ; CHECK-NEXT: [[A_VAL:%.*]] = load i32, i32* [[A]] -; CHECK-NEXT: [[X:%.*]] = call i32 @callee(i1 false, i32 [[A_VAL]]) +; CHECK-NEXT: [[X:%.*]] = call i32 @callee(i1 false, i32 partialinit [[A_VAL]]) ; CHECK-NEXT: ret i32 [[X]] ; %A = alloca i32 ; [#uses=2] diff --git a/llvm/test/Transforms/ArgumentPromotion/dbg.ll b/llvm/test/Transforms/ArgumentPromotion/dbg.ll --- a/llvm/test/Transforms/ArgumentPromotion/dbg.ll +++ b/llvm/test/Transforms/ArgumentPromotion/dbg.ll @@ -6,7 +6,7 @@ define internal void @test(i32** %X) !dbg !2 { ; CHECK-LABEL: define {{[^@]+}}@test -; CHECK-SAME: (i32 [[X_VAL_VAL:%.*]]) !dbg !3 +; CHECK-SAME: (i32 partialinit [[X_VAL_VAL:%.*]]) !dbg !3 ; CHECK-NEXT: call void @sink(i32 [[X_VAL_VAL]]) ; CHECK-NEXT: ret void ; @@ -20,7 +20,7 @@ define internal void @test_byval(%struct.pair* byval %P) { ; CHECK-LABEL: define {{[^@]+}}@test_byval -; CHECK-SAME: (i32 [[P_0:%.*]], i32 [[P_1:%.*]]) +; CHECK-SAME: (i32 partialinit [[P_0:%.*]], i32 partialinit [[P_1:%.*]]) ; CHECK-NEXT: [[P:%.*]] = alloca [[STRUCT_PAIR:%.*]], align 8 ; CHECK-NEXT: 
[[DOT0:%.*]] = getelementptr [[STRUCT_PAIR]], %struct.pair* [[P]], i32 0, i32 0 ; CHECK-NEXT: store i32 [[P_0]], i32* [[DOT0]], align 4 @@ -36,12 +36,12 @@ ; CHECK-SAME: (i32** [[Y:%.*]], %struct.pair* [[P:%.*]]) ; CHECK-NEXT: [[Y_VAL:%.*]] = load i32*, i32** [[Y]], align 8, !dbg !4 ; CHECK-NEXT: [[Y_VAL_VAL:%.*]] = load i32, i32* [[Y_VAL]], align 8, !dbg !4 -; CHECK-NEXT: call void @test(i32 [[Y_VAL_VAL]]), !dbg !4 +; CHECK-NEXT: call void @test(i32 partialinit [[Y_VAL_VAL]]), !dbg !4 ; CHECK-NEXT: [[P_0:%.*]] = getelementptr [[STRUCT_PAIR:%.*]], %struct.pair* [[P]], i32 0, i32 0, !dbg !5 ; CHECK-NEXT: [[P_0_VAL:%.*]] = load i32, i32* [[P_0]], align 4, !dbg !5 ; CHECK-NEXT: [[P_1:%.*]] = getelementptr [[STRUCT_PAIR]], %struct.pair* [[P]], i32 0, i32 1, !dbg !5 ; CHECK-NEXT: [[P_1_VAL:%.*]] = load i32, i32* [[P_1]], align 4, !dbg !5 -; CHECK-NEXT: call void @test_byval(i32 [[P_0_VAL]], i32 [[P_1_VAL]]), !dbg !5 +; CHECK-NEXT: call void @test_byval(i32 partialinit [[P_0_VAL]], i32 partialinit [[P_1_VAL]]), !dbg !5 ; CHECK-NEXT: ret void ; call void @test(i32** %Y), !dbg !1 diff --git a/llvm/test/Transforms/ArgumentPromotion/fp80.ll b/llvm/test/Transforms/ArgumentPromotion/fp80.ll --- a/llvm/test/Transforms/ArgumentPromotion/fp80.ll +++ b/llvm/test/Transforms/ArgumentPromotion/fp80.ll @@ -19,7 +19,7 @@ ; CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @UseLongDoubleUnsafely(%union.u* byval align 16 bitcast (%struct.s* @b to %union.u*)) ; CHECK-NEXT: [[DOT0:%.*]] = getelementptr [[UNION_U:%.*]], %union.u* bitcast (%struct.s* @b to %union.u*), i32 0, i32 0 ; CHECK-NEXT: [[DOT0_VAL:%.*]] = load x86_fp80, x86_fp80* [[DOT0]] -; CHECK-NEXT: [[TMP1:%.*]] = tail call x86_fp80 @UseLongDoubleSafely(x86_fp80 [[DOT0_VAL]]) +; CHECK-NEXT: [[TMP1:%.*]] = tail call x86_fp80 @UseLongDoubleSafely(x86_fp80 partialinit [[DOT0_VAL]]) ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @AccessPaddingOfStruct(%struct.Foo* @a) ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @CaptureAStruct(%struct.Foo* @a) ; CHECK-NEXT: 
ret void @@ -50,7 +50,7 @@ define internal x86_fp80 @UseLongDoubleSafely(%union.u* byval align 16 %arg) { ; CHECK-LABEL: define {{[^@]+}}@UseLongDoubleSafely -; CHECK-SAME: (x86_fp80 [[ARG_0:%.*]]) +; CHECK-SAME: (x86_fp80 partialinit [[ARG_0:%.*]]) ; CHECK-NEXT: [[ARG:%.*]] = alloca [[UNION_U:%.*]], align 16 ; CHECK-NEXT: [[DOT0:%.*]] = getelementptr [[UNION_U]], %union.u* [[ARG]], i32 0, i32 0 ; CHECK-NEXT: store x86_fp80 [[ARG_0]], x86_fp80* [[DOT0]] diff --git a/llvm/test/Transforms/ArgumentPromotion/inalloca.ll b/llvm/test/Transforms/ArgumentPromotion/inalloca.ll --- a/llvm/test/Transforms/ArgumentPromotion/inalloca.ll +++ b/llvm/test/Transforms/ArgumentPromotion/inalloca.ll @@ -9,7 +9,7 @@ ; Argpromote + sroa should change this to passing the two integers by value. define internal i32 @f(%struct.ss* inalloca %s) { ; CHECK-LABEL: define {{[^@]+}}@f -; CHECK-SAME: (i32 [[S_0_0_VAL:%.*]], i32 [[S_0_1_VAL:%.*]]) unnamed_addr +; CHECK-SAME: (i32 partialinit [[S_0_0_VAL:%.*]], i32 partialinit [[S_0_1_VAL:%.*]]) unnamed_addr ; CHECK-NEXT: entry: ; CHECK-NEXT: [[R:%.*]] = add i32 [[S_0_0_VAL]], [[S_0_1_VAL]] ; CHECK-NEXT: ret i32 [[R]] @@ -26,7 +26,7 @@ define i32 @main() { ; CHECK-LABEL: define {{[^@]+}}@main() local_unnamed_addr ; CHECK-NEXT: entry: -; CHECK-NEXT: [[R:%.*]] = call fastcc i32 @f(i32 1, i32 2) +; CHECK-NEXT: [[R:%.*]] = call fastcc i32 @f(i32 partialinit 1, i32 partialinit 2) ; CHECK-NEXT: ret i32 [[R]] ; entry: diff --git a/llvm/test/Transforms/ArgumentPromotion/invalidation.ll b/llvm/test/Transforms/ArgumentPromotion/invalidation.ll --- a/llvm/test/Transforms/ArgumentPromotion/invalidation.ll +++ b/llvm/test/Transforms/ArgumentPromotion/invalidation.ll @@ -13,7 +13,7 @@ define internal i32 @a(i32* %x) { ; CHECK-LABEL: define {{[^@]+}}@a -; CHECK-SAME: (i32 [[X_VAL:%.*]]) +; CHECK-SAME: (i32 partialinit [[X_VAL:%.*]]) ; CHECK-NEXT: entry: ; CHECK-NEXT: ret i32 [[X_VAL]] ; @@ -26,7 +26,7 @@ ; CHECK-LABEL: define {{[^@]+}}@b() ; CHECK-NEXT: entry: ; 
CHECK-NEXT: [[G_VAL:%.*]] = load i32, i32* @G -; CHECK-NEXT: [[V:%.*]] = call i32 @a(i32 [[G_VAL]]) +; CHECK-NEXT: [[V:%.*]] = call i32 @a(i32 partialinit [[G_VAL]]) ; CHECK-NEXT: ret i32 [[V]] ; entry: @@ -38,7 +38,7 @@ ; CHECK-LABEL: define {{[^@]+}}@c() ; CHECK-NEXT: entry: ; CHECK-NEXT: [[G_VAL:%.*]] = load i32, i32* @G -; CHECK-NEXT: [[V1:%.*]] = call i32 @a(i32 [[G_VAL]]) +; CHECK-NEXT: [[V1:%.*]] = call i32 @a(i32 partialinit [[G_VAL]]) ; CHECK-NEXT: [[V2:%.*]] = call i32 @b() ; CHECK-NEXT: [[RESULT:%.*]] = add i32 [[V1]], [[V2]] ; CHECK-NEXT: ret i32 [[RESULT]] diff --git a/llvm/test/Transforms/ArgumentPromotion/pr32917.ll b/llvm/test/Transforms/ArgumentPromotion/pr32917.ll --- a/llvm/test/Transforms/ArgumentPromotion/pr32917.ll +++ b/llvm/test/Transforms/ArgumentPromotion/pr32917.ll @@ -12,7 +12,7 @@ ; CHECK-NEXT: [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to i32* ; CHECK-NEXT: [[DOTIDX:%.*]] = getelementptr i32, i32* [[TMP3]], i64 -1 ; CHECK-NEXT: [[DOTIDX_VAL:%.*]] = load i32, i32* [[DOTIDX]], align 4 -; CHECK-NEXT: call fastcc void @fn1(i32 [[DOTIDX_VAL]]) +; CHECK-NEXT: call fastcc void @fn1(i32 partialinit [[DOTIDX_VAL]]) ; CHECK-NEXT: ret i32 undef ; %1 = load i32, i32* @b, align 4 @@ -24,7 +24,7 @@ define internal fastcc void @fn1(i32* nocapture readonly) unnamed_addr { ; CHECK-LABEL: define {{[^@]+}}@fn1 -; CHECK-SAME: (i32 [[DOT18446744073709551615_VAL:%.*]]) unnamed_addr +; CHECK-SAME: (i32 partialinit [[DOT18446744073709551615_VAL:%.*]]) unnamed_addr ; CHECK-NEXT: store i32 [[DOT18446744073709551615_VAL]], i32* @a, align 4 ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/ArgumentPromotion/profile.ll b/llvm/test/Transforms/ArgumentPromotion/profile.ll --- a/llvm/test/Transforms/ArgumentPromotion/profile.ll +++ b/llvm/test/Transforms/ArgumentPromotion/profile.ll @@ -6,7 +6,7 @@ define void @caller() #0 { ; CHECK-LABEL: define {{[^@]+}}@caller() -; CHECK-NEXT: call void @promote_i32_ptr(i32 42), !prof !0 +; CHECK-NEXT: call void 
@promote_i32_ptr(i32 partialinit 42), !prof !0 ; CHECK-NEXT: ret void ; %x = alloca i32 @@ -17,7 +17,7 @@ define internal void @promote_i32_ptr(i32* %xp) { ; CHECK-LABEL: define {{[^@]+}}@promote_i32_ptr -; CHECK-SAME: (i32 [[XP_VAL:%.*]]) +; CHECK-SAME: (i32 partialinit [[XP_VAL:%.*]]) ; CHECK-NEXT: call void @use_i32(i32 [[XP_VAL]]) ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/ArgumentPromotion/reserve-tbaa.ll b/llvm/test/Transforms/ArgumentPromotion/reserve-tbaa.ll --- a/llvm/test/Transforms/ArgumentPromotion/reserve-tbaa.ll +++ b/llvm/test/Transforms/ArgumentPromotion/reserve-tbaa.ll @@ -16,7 +16,7 @@ define internal fastcc void @fn(i32* nocapture readonly %p1, i64* nocapture readonly %p2) { ; CHECK-LABEL: define {{[^@]+}}@fn -; CHECK-SAME: (i32 [[P1_VAL:%.*]], i64 [[P2_VAL:%.*]]) +; CHECK-SAME: (i32 partialinit [[P1_VAL:%.*]], i64 partialinit [[P2_VAL:%.*]]) ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CONV:%.*]] = trunc i64 [[P2_VAL]] to i32 ; CHECK-NEXT: [[CONV1:%.*]] = trunc i32 [[P1_VAL]] to i8 @@ -41,7 +41,7 @@ ; CHECK-NEXT: store i32 1, i32* [[TMP1]], align 4, !tbaa !5 ; CHECK-NEXT: [[G_VAL:%.*]] = load i32, i32* @g, align 4, !tbaa !5 ; CHECK-NEXT: [[C_VAL:%.*]] = load i64, i64* @c, align 8, !tbaa !7 -; CHECK-NEXT: call fastcc void @fn(i32 [[G_VAL]], i64 [[C_VAL]]) +; CHECK-NEXT: call fastcc void @fn(i32 partialinit [[G_VAL]], i64 partialinit [[C_VAL]]) ; CHECK-NEXT: ret i32 0 ; entry: diff --git a/llvm/test/Transforms/ArgumentPromotion/sret.ll b/llvm/test/Transforms/ArgumentPromotion/sret.ll --- a/llvm/test/Transforms/ArgumentPromotion/sret.ll +++ b/llvm/test/Transforms/ArgumentPromotion/sret.ll @@ -7,7 +7,7 @@ define internal void @add({i32, i32}* %this, i32* sret %r) { ; CHECK-LABEL: define {{[^@]+}}@add -; CHECK-SAME: (i32 [[THIS_0_0_VAL:%.*]], i32 [[THIS_0_1_VAL:%.*]], i32* noalias [[R:%.*]]) +; CHECK-SAME: (i32 partialinit [[THIS_0_0_VAL:%.*]], i32 partialinit [[THIS_0_1_VAL:%.*]], i32* noalias [[R:%.*]]) ; CHECK-NEXT: [[AB:%.*]] = add i32 
[[THIS_0_0_VAL]], [[THIS_0_1_VAL]] ; CHECK-NEXT: store i32 [[AB]], i32* [[R]] ; CHECK-NEXT: ret void @@ -29,7 +29,7 @@ ; CHECK-NEXT: [[PAIR_IDX_VAL:%.*]] = load i32, i32* [[PAIR_IDX]] ; CHECK-NEXT: [[PAIR_IDX1:%.*]] = getelementptr { i32, i32 }, { i32, i32 }* [[PAIR]], i64 0, i32 1 ; CHECK-NEXT: [[PAIR_IDX1_VAL:%.*]] = load i32, i32* [[PAIR_IDX1]] -; CHECK-NEXT: call void @add(i32 [[PAIR_IDX_VAL]], i32 [[PAIR_IDX1_VAL]], i32* noalias [[R]]) +; CHECK-NEXT: call void @add(i32 partialinit [[PAIR_IDX_VAL]], i32 partialinit [[PAIR_IDX1_VAL]], i32* noalias [[R]]) ; CHECK-NEXT: ret void ; %r = alloca i32 diff --git a/llvm/test/Transforms/ArgumentPromotion/tail.ll b/llvm/test/Transforms/ArgumentPromotion/tail.ll --- a/llvm/test/Transforms/ArgumentPromotion/tail.ll +++ b/llvm/test/Transforms/ArgumentPromotion/tail.ll @@ -11,7 +11,7 @@ define internal void @bar(%pair* byval %Data) { ; CHECK-LABEL: define {{[^@]+}}@bar -; CHECK-SAME: (i32 [[DATA_0:%.*]], i32 [[DATA_1:%.*]]) +; CHECK-SAME: (i32 partialinit [[DATA_0:%.*]], i32 partialinit [[DATA_1:%.*]]) ; CHECK-NEXT: [[DATA:%.*]] = alloca [[PAIR:%.*]], align 8 ; CHECK-NEXT: [[DOT0:%.*]] = getelementptr [[PAIR]], %pair* [[DATA]], i32 0, i32 0 ; CHECK-NEXT: store i32 [[DATA_0]], i32* [[DOT0]], align 4 @@ -31,7 +31,7 @@ ; CHECK-NEXT: [[DATA_0_VAL:%.*]] = load i32, i32* [[DATA_0]], align 4 ; CHECK-NEXT: [[DATA_1:%.*]] = getelementptr [[PAIR]], %pair* [[DATA]], i32 0, i32 1 ; CHECK-NEXT: [[DATA_1_VAL:%.*]] = load i32, i32* [[DATA_1]], align 4 -; CHECK-NEXT: call void @bar(i32 [[DATA_0_VAL]], i32 [[DATA_1_VAL]]) +; CHECK-NEXT: call void @bar(i32 partialinit [[DATA_0_VAL]], i32 partialinit [[DATA_1_VAL]]) ; CHECK-NEXT: ret void ; call void @bar(%pair* byval %Data)