Index: include/llvm/IR/Attributes.h =================================================================== --- include/llvm/IR/Attributes.h +++ include/llvm/IR/Attributes.h @@ -95,6 +95,7 @@ uint64_t Bytes); static Attribute getWithDereferenceableOrNullBytes(LLVMContext &Context, uint64_t Bytes); + static Attribute getWithByValTy(LLVMContext &Context, Type *Ty); //===--------------------------------------------------------------------===// // Attribute Accessors @@ -155,6 +156,9 @@ /// dereferenceable_or_null attribute. uint64_t getDereferenceableOrNullBytes() const; + /// \brief Returns the type from the byval attribute. + Type *getByValTy() const; + /// \brief The Attribute is converted to a string of equivalent mnemonic. This /// is, presumably, for writing out the mnemonics for the assembly writer. std::string getAsString(bool InAttrGrp = false) const; @@ -275,6 +279,10 @@ AttributeSet addDereferenceableOrNullAttr(LLVMContext &C, unsigned Index, uint64_t Bytes) const; + /// \brief Add the byval attribute to the attribute set at the given index. + /// Because attribute sets are immutable, this returns a new set. + AttributeSet addByValAttr(LLVMContext &C, unsigned Index, Type *Ty) const; + //===--------------------------------------------------------------------===// // AttributeSet Accessors //===--------------------------------------------------------------------===// @@ -323,6 +331,9 @@ /// unknown). uint64_t getDereferenceableOrNullBytes(unsigned Index) const; + /// \brief Get the type from the byval attribute. + Type *getByValTy(unsigned Index) const; + /// \brief Return the attributes at the index as a string. 
std::string getAsString(unsigned Index, bool InAttrGrp = false) const; @@ -404,19 +415,20 @@ uint64_t StackAlignment; uint64_t DerefBytes; uint64_t DerefOrNullBytes; + Type *ByValTy; public: AttrBuilder() : Attrs(0), Alignment(0), StackAlignment(0), DerefBytes(0), - DerefOrNullBytes(0) {} + DerefOrNullBytes(0), ByValTy(nullptr) {} explicit AttrBuilder(uint64_t Val) : Attrs(0), Alignment(0), StackAlignment(0), DerefBytes(0), - DerefOrNullBytes(0) { + DerefOrNullBytes(0), ByValTy(nullptr) { addRawValue(Val); } AttrBuilder(const Attribute &A) : Attrs(0), Alignment(0), StackAlignment(0), DerefBytes(0), - DerefOrNullBytes(0) { + DerefOrNullBytes(0), ByValTy(nullptr) { addAttribute(A); } AttrBuilder(AttributeSet AS, unsigned Idx); @@ -485,6 +497,10 @@ /// dereferenceable_or_null attribute exists (zero is returned otherwise). uint64_t getDereferenceableOrNullBytes() const { return DerefOrNullBytes; } + /// \brief Retrieve the type from the byval attribute, if the byval attribute + /// exists (nullptr is returned otherwise). + Type *getByValTy() const { return ByValTy; } + /// \brief This turns an int alignment (which must be a power of 2) into the /// form used internally in Attribute. AttrBuilder &addAlignmentAttr(unsigned Align); @@ -501,6 +517,10 @@ /// form used internally in Attribute. AttrBuilder &addDereferenceableOrNullAttr(uint64_t Bytes); + /// \brief This turns a type for the byval attribute into the form used + /// internally in Attribute. + AttrBuilder &addByValAttr(Type *Ty); + /// \brief Return true if the builder contains no target-independent /// attributes. 
bool empty() const { return Attrs.none(); } Index: include/llvm/IR/Type.h =================================================================== --- include/llvm/IR/Type.h +++ include/llvm/IR/Type.h @@ -109,6 +109,7 @@ public: void print(raw_ostream &O, bool IsForDebug = false) const; + void printWithoutBody(raw_ostream &O) const; void dump() const; /// getContext - Return the LLVMContext in which this type was uniqued. Index: lib/AsmParser/LLParser.cpp =================================================================== --- lib/AsmParser/LLParser.cpp +++ lib/AsmParser/LLParser.cpp @@ -1297,7 +1297,20 @@ B.addAlignmentAttr(Alignment); continue; } - case lltok::kw_byval: B.addAttribute(Attribute::ByVal); break; + case lltok::kw_byval: { + Lex.Lex(); + LocTy ParenLoc = Lex.getLoc(); + if (!EatIfPresent(lltok::lparen)) + return Error(ParenLoc, "expected '('"); + Type *Ty; + if (ParseType(Ty)) + return true; + ParenLoc = Lex.getLoc(); + if (!EatIfPresent(lltok::rparen)) + return Error(ParenLoc, "expected ')'"); + B.addByValAttr(Ty); + continue; + } case lltok::kw_dereferenceable: { uint64_t Bytes; if (ParseOptionalDerefAttrBytes(lltok::kw_dereferenceable, Bytes)) Index: lib/Bitcode/Reader/BitcodeReader.cpp =================================================================== --- lib/Bitcode/Reader/BitcodeReader.cpp +++ lib/Bitcode/Reader/BitcodeReader.cpp @@ -848,6 +848,22 @@ } } +static AttributeSet fixByValAttr(AttributeSet Attributes, FunctionType &FTy) { + for (size_t i = 0; i < FTy.getNumParams(); ++i) { + if (!Attributes.hasAttribute(i + 1, Attribute::ByVal)) + continue; + Type *ByValTy = Attributes.getByValTy(i + 1); + Type *ParamPointeeTy = FTy.getParamType(i)->getPointerElementType(); + if (ByValTy == (Type *) -1) { + Attributes = + Attributes.addByValAttr(FTy.getContext(), i + 1, ParamPointeeTy); + } else { + assert(ByValTy == ParamPointeeTy); + } + } + return Attributes; +} + namespace llvm { namespace { /// \brief A class for maintaining the slot number 
definition @@ -1378,7 +1394,10 @@ if (std::error_code EC = parseAttrKind(Record[++i], &Kind)) return EC; - B.addAttribute(Kind); + if (Kind == Attribute::ByVal) + B.addByValAttr((Type *) -1); + else + B.addAttribute(Kind); } else if (Record[i] == 1) { // Integer attribute Attribute::AttrKind Kind; if (std::error_code EC = parseAttrKind(Record[++i], &Kind)) @@ -1395,6 +1414,8 @@ Attribute::AttrKind Kind; if (std::error_code EC = parseAttrKind(Record[++i], &Kind)) return EC; + if (Kind == Attribute::ByVal) + B.addByValAttr(getTypeByID(Record[++i])); } else { // String attribute assert((Record[i] == 3 || Record[i] == 4) && "Invalid attribute group entry"); @@ -3565,7 +3586,7 @@ bool isProto = Record[2]; uint64_t RawLinkage = Record[3]; Func->setLinkage(getDecodedLinkage(RawLinkage)); - Func->setAttributes(getAttributes(Record[4])); + Func->setAttributes(fixByValAttr(getAttributes(Record[4]), *FTy)); unsigned Alignment; if (std::error_code EC = parseAlignmentValue(Record[5], Alignment)) Index: lib/IR/AsmWriter.cpp =================================================================== --- lib/IR/AsmWriter.cpp +++ lib/IR/AsmWriter.cpp @@ -3268,6 +3268,11 @@ } } +void Type::printWithoutBody(raw_ostream &OS) const { + TypePrinting TP; + TP.print(const_cast<Type *>(this), OS); +} + static bool isReferencingMDNode(const Instruction &I) { if (const auto *CI = dyn_cast<CallInst>(&I)) if (Function *F = CI->getCalledFunction()) Index: lib/IR/AttributeImpl.h =================================================================== --- lib/IR/AttributeImpl.h +++ lib/IR/AttributeImpl.h @@ -194,6 +194,7 @@ unsigned getStackAlignment() const; uint64_t getDereferenceableBytes() const; uint64_t getDereferenceableOrNullBytes() const; + Type *getByValTy() const; std::string getAsString(bool InAttrGrp) const; typedef const Attribute *iterator; Index: lib/IR/Attributes.cpp =================================================================== --- lib/IR/Attributes.cpp +++ lib/IR/Attributes.cpp @@ -125,6 +125,11 @@ 
return get(Context, DereferenceableOrNull, Bytes); } +Attribute Attribute::getWithByValTy(LLVMContext &Context, Type *Ty) { + assert(Ty && "Ty must be non-nullptr."); + return get(Context, ByVal, Ty); +} + //===----------------------------------------------------------------------===// // Attribute Accessor Methods //===----------------------------------------------------------------------===// @@ -215,6 +220,12 @@ return pImpl->getValueAsInt(); } +Type *Attribute::getByValTy() const { + assert(hasAttribute(Attribute::ByVal) && + "Trying to get type from non-byval attribute!"); + return pImpl->getValueAsType(); +} + std::string Attribute::getAsString(bool InAttrGrp) const { if (!pImpl) return ""; @@ -226,8 +237,14 @@ return "argmemonly"; if (hasAttribute(Attribute::Builtin)) return "builtin"; - if (hasAttribute(Attribute::ByVal)) - return "byval"; + if (hasAttribute(Attribute::ByVal)) { + std::string Result = "byval("; + raw_string_ostream Tmp(Result); + getValueAsType()->printWithoutBody(Tmp); + Tmp.flush(); + Result += ")"; + return Result; + } if (hasAttribute(Attribute::Convergent)) return "convergent"; if (hasAttribute(Attribute::InaccessibleMemOnly)) @@ -605,6 +622,13 @@ return 0; } +Type *AttributeSetNode::getByValTy() const { + for (iterator I = begin(), E = end(); I != E; ++I) + if (I->hasAttribute(Attribute::ByVal)) + return I->getByValTy(); + return nullptr; +} + std::string AttributeSetNode::getAsString(bool InAttrGrp) const { std::string Str; for (iterator I = begin(), E = end(); I != E; ++I) { @@ -755,6 +779,9 @@ Attr = Attribute::getWithDereferenceableOrNullBytes( C, B.getDereferenceableOrNullBytes()); break; + case Attribute::ByVal: + Attr = Attribute::getWithByValTy(C, B.getByValTy()); + break; default: Attr = Attribute::get(C, Kind); } @@ -1006,6 +1033,13 @@ return addAttributes(C, Index, AttributeSet::get(C, Index, B)); } +AttributeSet AttributeSet::addByValAttr(LLVMContext &C, unsigned Index, + Type *Ty) const { + llvm::AttrBuilder B; + 
B.addByValAttr(Ty); + return addAttributes(C, Index, AttributeSet::get(C, Index, B)); +} + //===----------------------------------------------------------------------===// // AttributeSet Accessor Methods //===----------------------------------------------------------------------===// @@ -1099,6 +1133,11 @@ return ASN ? ASN->getDereferenceableOrNullBytes() : 0; } +Type *AttributeSet::getByValTy(unsigned Index) const { + AttributeSetNode *ASN = getAttributes(Index); + return ASN ? ASN->getByValTy() : nullptr; +} + std::string AttributeSet::getAsString(unsigned Index, bool InAttrGrp) const { AttributeSetNode *ASN = getAttributes(Index); @@ -1175,7 +1214,7 @@ AttrBuilder::AttrBuilder(AttributeSet AS, unsigned Index) : Attrs(0), Alignment(0), StackAlignment(0), DerefBytes(0), - DerefOrNullBytes(0) { + DerefOrNullBytes(0), ByValTy(nullptr) { AttributeSetImpl *pImpl = AS.pImpl; if (!pImpl) return; @@ -1194,6 +1233,7 @@ Attrs.reset(); TargetDepAttrs.clear(); Alignment = StackAlignment = DerefBytes = DerefOrNullBytes = 0; + ByValTy = nullptr; } AttrBuilder &AttrBuilder::addAttribute(Attribute::AttrKind Val) { @@ -1222,6 +1262,8 @@ DerefBytes = Attr.getDereferenceableBytes(); else if (Kind == Attribute::DereferenceableOrNull) DerefOrNullBytes = Attr.getDereferenceableOrNullBytes(); + else if (Kind == Attribute::ByVal) + ByValTy = Attr.getByValTy(); return *this; } @@ -1242,6 +1284,8 @@ DerefBytes = 0; else if (Val == Attribute::DereferenceableOrNull) DerefOrNullBytes = 0; + else if (Val == Attribute::ByVal) + ByValTy = nullptr; return *this; } @@ -1317,6 +1361,12 @@ return *this; } +AttrBuilder &AttrBuilder::addByValAttr(Type *Ty) { + Attrs[Attribute::ByVal] = true; + ByValTy = Ty; + return *this; +} + AttrBuilder &AttrBuilder::merge(const AttrBuilder &B) { // FIXME: What if both have alignments, but they don't match?! 
if (!Alignment) @@ -1331,6 +1381,9 @@ if (!DerefOrNullBytes) DerefOrNullBytes = B.DerefOrNullBytes; + if (!ByValTy) + ByValTy = B.ByValTy; + Attrs |= B.Attrs; for (auto I : B.td_attrs()) @@ -1353,6 +1406,9 @@ if (B.DerefOrNullBytes) DerefOrNullBytes = 0; + if (B.ByValTy) + ByValTy = nullptr; + Attrs &= ~B.Attrs; for (auto I : B.td_attrs()) @@ -1440,6 +1496,8 @@ Alignment = 1ULL << ((A >> 16) - 1); else if (I == Attribute::StackAlignment) StackAlignment = 1ULL << ((A >> 26)-1); + else if (I == Attribute::ByVal) + ByValTy = (Type *) -1; } } @@ -1461,7 +1519,7 @@ if (!Ty->isPointerTy()) // Attribute that only apply to pointers. - Incompatible.addAttribute(Attribute::ByVal) + Incompatible.addByValAttr((Type *) -1) // the pointer here is ignored .addAttribute(Attribute::Nest) .addAttribute(Attribute::NoAlias) .addAttribute(Attribute::NoCapture) Index: test/Analysis/BasicAA/2008-04-15-Byval.ll =================================================================== --- test/Analysis/BasicAA/2008-04-15-Byval.ll +++ test/Analysis/BasicAA/2008-04-15-Byval.ll @@ -4,13 +4,13 @@ target triple = "i386-apple-darwin8" %struct.x = type { [4 x i32] } -define void @foo(%struct.x* byval align 4 %X) nounwind { +define void @foo(%struct.x* align 4 byval(%struct.x) %X) nounwind { ; CHECK: store i32 2, i32* %tmp1 entry: %tmp = getelementptr %struct.x, %struct.x* %X, i32 0, i32 0 ; <[4 x i32]*> [#uses=1] %tmp1 = getelementptr [4 x i32], [4 x i32]* %tmp, i32 0, i32 3 ; [#uses=1] store i32 2, i32* %tmp1, align 4 - %tmp2 = call i32 (...) @bar( %struct.x* byval align 4 %X ) nounwind ; [#uses=0] + %tmp2 = call i32 (...) 
@bar( %struct.x* align 4 byval(%struct.x) %X ) nounwind ; [#uses=0] br label %return return: ; preds = %entry ret void Index: test/Analysis/BasicAA/byval.ll =================================================================== --- test/Analysis/BasicAA/byval.ll +++ test/Analysis/BasicAA/byval.ll @@ -4,7 +4,7 @@ %struct.x = type { i32, i32, i32, i32 } @g = weak global i32 0 ; [#uses=1] -define i32 @foo(%struct.x* byval %a) nounwind { +define i32 @foo(%struct.x* byval(%struct.x) %a) nounwind { ; CHECK: ret i32 1 %tmp1 = tail call i32 (...) @bar( %struct.x* %a ) nounwind ; [#uses=0] %tmp2 = getelementptr %struct.x, %struct.x* %a, i32 0, i32 0 ; [#uses=2] Index: test/Analysis/DivergenceAnalysis/AMDGPU/kernel-args.ll =================================================================== --- test/Analysis/DivergenceAnalysis/AMDGPU/kernel-args.ll +++ test/Analysis/DivergenceAnalysis/AMDGPU/kernel-args.ll @@ -9,7 +9,7 @@ ; CHECK: DIVERGENT: float %arg5 ; CHECK: DIVERGENT: i32 %arg6 -define void @main([4 x <16 x i8>] addrspace(2)* byval %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 { +define void @main([4 x <16 x i8>] addrspace(2)* byval([4 x <16 x i8>]) %arg0, float inreg %arg1, i32 inreg %arg2, <2 x i32> %arg3, <3 x i32> %arg4, float %arg5, i32 %arg6) #0 { ret void } Index: test/Assembler/2008-01-11-VarargAttrs.ll =================================================================== --- test/Assembler/2008-01-11-VarargAttrs.ll +++ test/Assembler/2008-01-11-VarargAttrs.ll @@ -6,6 +6,6 @@ declare void @foo(...) define void @bar() { - call void (...) @foo(%struct* byval null ) + call void (...) 
@foo(%struct* byval(%struct) null ) ret void } Index: test/Bitcode/attributes-3.3.ll =================================================================== --- test/Bitcode/attributes-3.3.ll +++ test/Bitcode/attributes-3.3.ll @@ -47,8 +47,8 @@ ret void; } -define void @f8(i8* byval) -; CHECK: define void @f8(i8* byval) +define void @f8(i8* byval(i8)) +; CHECK: define void @f8(i8* byval(i8)) { ret void; } Index: test/Bitcode/attributes.ll =================================================================== --- test/Bitcode/attributes.ll +++ test/Bitcode/attributes.ll @@ -44,8 +44,8 @@ ret void; } -define void @f8(i8* byval) -; CHECK: define void @f8(i8* byval) +define void @f8(i8* byval(i8)) +; CHECK: define void @f8(i8* byval(i8)) { ret void; } Index: test/Bitcode/compatibility-3.6.ll =================================================================== --- test/Bitcode/compatibility-3.6.ll +++ test/Bitcode/compatibility-3.6.ll @@ -403,8 +403,8 @@ ; CHECK: declare void @f.param.signext(i8 signext) declare void @f.param.inreg(i8 inreg) ; CHECK: declare void @f.param.inreg(i8 inreg) -declare void @f.param.byval({ i8, i8 }* byval) -; CHECK: declare void @f.param.byval({ i8, i8 }* byval) +declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 })) +; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 })) declare void @f.param.inalloca(i8* inalloca) ; CHECK: declare void @f.param.inalloca(i8* inalloca) declare void @f.param.sret(i8* sret) Index: test/Bitcode/compatibility-3.7.ll =================================================================== --- test/Bitcode/compatibility-3.7.ll +++ test/Bitcode/compatibility-3.7.ll @@ -409,8 +409,8 @@ ; CHECK: declare void @f.param.signext(i8 signext) declare void @f.param.inreg(i8 inreg) ; CHECK: declare void @f.param.inreg(i8 inreg) -declare void @f.param.byval({ i8, i8 }* byval) -; CHECK: declare void @f.param.byval({ i8, i8 }* byval) +declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 })) +; CHECK: declare void 
@f.param.byval({ i8, i8 }* byval({ i8, i8 })) declare void @f.param.inalloca(i8* inalloca) ; CHECK: declare void @f.param.inalloca(i8* inalloca) declare void @f.param.sret(i8* sret) Index: test/Bitcode/compatibility.ll =================================================================== --- test/Bitcode/compatibility.ll +++ test/Bitcode/compatibility.ll @@ -437,8 +437,8 @@ ; CHECK: declare void @f.param.signext(i8 signext) declare void @f.param.inreg(i8 inreg) ; CHECK: declare void @f.param.inreg(i8 inreg) -declare void @f.param.byval({ i8, i8 }* byval) -; CHECK: declare void @f.param.byval({ i8, i8 }* byval) +declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 })) +; CHECK: declare void @f.param.byval({ i8, i8 }* byval({ i8, i8 })) declare void @f.param.inalloca(i8* inalloca) ; CHECK: declare void @f.param.inalloca(i8* inalloca) declare void @f.param.sret(i8* sret) Index: test/Bitcode/highLevelStructure.3.2.ll =================================================================== --- test/Bitcode/highLevelStructure.3.2.ll +++ test/Bitcode/highLevelStructure.3.2.ll @@ -41,8 +41,8 @@ declare void @ParamAttr4(i8 signext) ; CHECK: declare void @ParamAttr5(i8* inreg) declare void @ParamAttr5(i8* inreg) -; CHECK: declare void @ParamAttr6(i8* byval) -declare void @ParamAttr6(i8* byval) +; CHECK: declare void @ParamAttr6(i8* byval(i8)) +declare void @ParamAttr6(i8* byval(i8)) ; CHECK: declare void @ParamAttr7(i8* noalias) declare void @ParamAttr7(i8* noalias) ; CHECK: declare void @ParamAttr8(i8* nocapture) @@ -51,8 +51,8 @@ declare void @ParamAttr9(i8* nest noalias nocapture) ; CHECK: declare void @ParamAttr10{{[(i8* sret noalias nocapture) | (i8* noalias nocapture sret)]}} declare void @ParamAttr10(i8* sret noalias nocapture) -;CHECK: declare void @ParamAttr11{{[(i8* byval noalias nocapture) | (i8* noalias nocapture byval)]}} -declare void @ParamAttr11(i8* byval noalias nocapture) +;CHECK: declare void @ParamAttr11{{[(i8* noalias nocapture byval(i8)) | (i8* noalias 
nocapture byval(i8))]}} +declare void @ParamAttr11(i8* noalias nocapture byval(i8)) ;CHECK: declare void @ParamAttr12{{[(i8* inreg noalias nocapture) | (i8* noalias nocapture inreg)]}} declare void @ParamAttr12(i8* inreg noalias nocapture) Index: test/CodeGen/AArch64/func-argpassing.ll =================================================================== --- test/CodeGen/AArch64/func-argpassing.ll +++ test/CodeGen/AArch64/func-argpassing.ll @@ -32,7 +32,7 @@ ; byval pointers should be allocated to the stack and copied as if ; with memcpy. -define void @take_struct(%myStruct* byval %structval) { +define void @take_struct(%myStruct* byval(%myStruct) %structval) { ; CHECK-LABEL: take_struct: %addr0 = getelementptr %myStruct, %myStruct* %structval, i64 0, i32 2 %addr1 = getelementptr %myStruct, %myStruct* %structval, i64 0, i32 0 @@ -52,7 +52,7 @@ } ; %structval should be at sp + 16 -define void @check_byval_align(i32* byval %ignore, %myStruct* byval align 16 %structval) { +define void @check_byval_align(i32* byval(i32) %ignore, %myStruct* align 16 byval(%myStruct) %structval) { ; CHECK-LABEL: check_byval_align: %addr0 = getelementptr %myStruct, %myStruct* %structval, i64 0, i32 2 @@ -126,7 +126,7 @@ ; available, but it needs two). Also make sure that %stacked doesn't ; sneak into x7 behind. 
define i32 @struct_on_stack(i8 %var0, i16 %var1, i32 %var2, i64 %var3, i128 %var45, - i32* %var6, %myStruct* byval %struct, i32* byval %stacked, + i32* %var6, %myStruct* byval(%myStruct) %struct, i32* byval(i32) %stacked, double %notstacked) { ; CHECK-LABEL: struct_on_stack: %addr = getelementptr %myStruct, %myStruct* %struct, i64 0, i32 0 Index: test/CodeGen/AArch64/func-calls.ll =================================================================== --- test/CodeGen/AArch64/func-calls.ll +++ test/CodeGen/AArch64/func-calls.ll @@ -74,7 +74,7 @@ declare i32 @struct_on_stack(i8 %var0, i16 %var1, i32 %var2, i64 %var3, i128 %var45, - i32* %var6, %myStruct* byval %struct, i32 %stacked, + i32* %var6, %myStruct* byval(%myStruct) %struct, i32 %stacked, double %notstacked) declare void @stacked_fpu(float %var0, double %var1, float %var2, float %var3, float %var4, float %var5, float %var6, float %var7, @@ -83,7 +83,7 @@ define void @check_stack_args() { ; CHECK-LABEL: check_stack_args: call i32 @struct_on_stack(i8 0, i16 12, i32 42, i64 99, i128 1, - i32* @var32, %myStruct* byval @varstruct, + i32* @var32, %myStruct* byval(%myStruct) @varstruct, i32 999, double 1.0) ; Want to check that the final double is passed in registers and ; that varstruct is passed on the stack. 
Rather dependent on how a Index: test/CodeGen/AMDGPU/llvm.AMDGPU.kill.ll =================================================================== --- test/CodeGen/AMDGPU/llvm.AMDGPU.kill.ll +++ test/CodeGen/AMDGPU/llvm.AMDGPU.kill.ll @@ -21,7 +21,7 @@ ; SI: v_cmp_gt_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], 0, v{{[0-9]+}} ; SI: v_cmpx_le_f32_e32 vcc, 0, v{{[0-9]+}} ; SI: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1.0, [[CMP]] -define void @kill_vcc_implicit_def([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [17 x <4 x i32>] addrspace(2)* byval, [34 x <8 x i32>] addrspace(2)* byval, float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, i32, float, float) #1 { +define void @kill_vcc_implicit_def([6 x <16 x i8>] addrspace(2)* byval([6 x <16 x i8>]), [17 x <16 x i8>] addrspace(2)* byval([17 x <16 x i8>]), [17 x <4 x i32>] addrspace(2)* byval([17 x <4 x i32>]), [34 x <8 x i32>] addrspace(2)* byval([34 x <8 x i32>]), float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, i32, float, float) #1 { entry: %tmp0 = fcmp olt float %13, 0.0 call void @llvm.AMDGPU.kill(float %14) Index: test/CodeGen/AMDGPU/llvm.SI.fs.interp.ll =================================================================== --- test/CodeGen/AMDGPU/llvm.SI.fs.interp.ll +++ test/CodeGen/AMDGPU/llvm.SI.fs.interp.ll @@ -24,7 +24,7 @@ ; 16BANK-LABEL: {{^}}v_interp_p1_bank16_bug: ; 16BANK-NOT: v_interp_p1_f32 [[DST:v[0-9]+]], [[DST]] -define void @v_interp_p1_bank16_bug([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [17 x <4 x i32>] addrspace(2)* byval, [34 x <8 x i32>] addrspace(2)* byval, float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, i32, float, float) #0 { +define void @v_interp_p1_bank16_bug([6 x <16 x i8>] 
addrspace(2)* byval([6 x <16 x i8>]), [17 x <16 x i8>] addrspace(2)* byval([17 x <16 x i8>]), [17 x <4 x i32>] addrspace(2)* byval([17 x <4 x i32>]), [34 x <8 x i32>] addrspace(2)* byval([34 x <8 x i32>]), float inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, i32, float, float) #0 { main_body: %22 = call float @llvm.SI.fs.interp(i32 0, i32 0, i32 %5, <2 x i32> %7) %23 = call float @llvm.SI.fs.interp(i32 1, i32 0, i32 %5, <2 x i32> %7) Index: test/CodeGen/AMDGPU/llvm.SI.load.dword.ll =================================================================== --- test/CodeGen/AMDGPU/llvm.SI.load.dword.ll +++ test/CodeGen/AMDGPU/llvm.SI.load.dword.ll @@ -14,7 +14,7 @@ ; CHECK: s_movk_i32 [[K:s[0-9]+]], 0x4d2 ; encoding ; CHECK: buffer_load_dword {{v[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, [[K]] idxen offen offset:65535 glc slc -define void @main([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <32 x i8>] addrspace(2)* byval %arg2, [2 x <16 x i8>] addrspace(2)* byval %arg3, [17 x <16 x i8>] addrspace(2)* inreg %arg4, [17 x <16 x i8>] addrspace(2)* inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9) #0 { +define void @main([17 x <16 x i8>] addrspace(2)* byval([17 x <16 x i8>]) %arg, [32 x <16 x i8>] addrspace(2)* byval([32 x <16 x i8>]) %arg1, [16 x <32 x i8>] addrspace(2)* byval([16 x <32 x i8>]) %arg2, [2 x <16 x i8>] addrspace(2)* byval([2 x <16 x i8>]) %arg3, [17 x <16 x i8>] addrspace(2)* inreg %arg4, [17 x <16 x i8>] addrspace(2)* inreg %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9) #0 { main_body: %tmp = getelementptr [2 x <16 x i8>], [2 x <16 x i8>] addrspace(2)* %arg3, i64 0, i32 1 %tmp10 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0 Index: test/CodeGen/AMDGPU/mubuf.ll =================================================================== --- test/CodeGen/AMDGPU/mubuf.ll +++ test/CodeGen/AMDGPU/mubuf.ll 
@@ -55,7 +55,7 @@ ; CHECK-LABEL: {{^}}soffset_max_imm: ; CHECK: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+}}:{{[0-9]+}}], 64 offen glc -define void @soffset_max_imm([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [16 x <4 x i32>] addrspace(2)* byval, [32 x <8 x i32>] addrspace(2)* byval, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) #1 { +define void @soffset_max_imm([6 x <16 x i8>] addrspace(2)* byval([6 x <16 x i8>]), [17 x <16 x i8>] addrspace(2)* byval([17 x <16 x i8>]), [16 x <4 x i32>] addrspace(2)* byval([16 x <4 x i32>]), [32 x <8 x i32>] addrspace(2)* byval([32 x <8 x i32>]), i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) #1 { main_body: %tmp0 = getelementptr [6 x <16 x i8>], [6 x <16 x i8>] addrspace(2)* %0, i32 0, i32 0 %tmp1 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp0 @@ -74,7 +74,7 @@ ; CHECK-LABEL: {{^}}soffset_no_fold: ; CHECK: s_movk_i32 [[SOFFSET:s[0-9]+]], 0x41 ; CHECK: buffer_load_dword v{{[0-9+]}}, v{{[0-9+]}}, s[{{[0-9]+}}:{{[0-9]+}}], [[SOFFSET]] offen glc -define void @soffset_no_fold([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [16 x <4 x i32>] addrspace(2)* byval, [32 x <8 x i32>] addrspace(2)* byval, i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) #1 { +define void @soffset_no_fold([6 x <16 x i8>] addrspace(2)* byval([6 x <16 x i8>]), [17 x <16 x i8>] addrspace(2)* byval([17 x <16 x i8>]), [16 x <4 x i32>] addrspace(2)* byval([16 x <4 x i32>]), [32 x <8 x i32>] addrspace(2)* byval([32 x <8 x i32>]), i32 inreg, i32 inreg, i32, i32, i32, i32, i32, i32, i32, i32) #1 { main_body: %tmp0 = getelementptr [6 x <16 x i8>], [6 x <16 x i8>] addrspace(2)* %0, i32 0, i32 0 %tmp1 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp0 Index: test/CodeGen/AMDGPU/ret.ll =================================================================== --- test/CodeGen/AMDGPU/ret.ll +++ test/CodeGen/AMDGPU/ret.ll @@ -11,7 +11,7 @@ ; GCN-DAG: exp 15, 0, 1, 1, 1, 
v1, v1, v1, v1 ; GCN: s_waitcnt expcnt(0) ; GCN-NOT: s_endpgm -define {float, float} @vgpr([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, float) #0 { +define {float, float} @vgpr([9 x <16 x i8>] addrspace(2)* byval([9 x <16 x i8>]), i32 inreg, i32 inreg, float) #0 { call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %3, float %3, float %3, float %3) %x = fadd float %3, 1.0 %a = insertvalue {float, float} undef, float %x, 0 @@ -28,7 +28,7 @@ ; GCN: exp 15, 0, 1, 1, 1, v4, v4, v4, v4 ; GCN: s_waitcnt expcnt(0) ; GCN-NOT: s_endpgm -define {float, float, float, float} @vgpr_literal([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, float) #0 { +define {float, float, float, float} @vgpr_literal([9 x <16 x i8>] addrspace(2)* byval([9 x <16 x i8>]), i32 inreg, i32 inreg, float) #0 { call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %3, float %3, float %3, float %3) ret {float, float, float, float} {float 1.0, float 2.0, float 4.0, float -1.0} } @@ -46,7 +46,7 @@ ; GCN: v_mov_b32_e32 v4, v6 ; GCN-NOT: s_endpgm attributes #1 = { "ShaderType"="0" "InitialPSInputAddr"="0" } -define {float, float, float, float, float} @vgpr_ps_addr0([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #1 { +define {float, float, float, float, float} @vgpr_ps_addr0([9 x <16 x i8>] addrspace(2)* byval([9 x <16 x i8>]), i32 inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #1 { %i0 = extractelement <2 x i32> %4, i32 0 %i1 = extractelement <2 x i32> %4, i32 1 %i2 = extractelement <2 x i32> %7, i32 0 @@ -71,7 +71,7 @@ ; GCN-LABEL: {{^}}ps_input_ena_no_inputs: ; GCN: v_mov_b32_e32 v0, 1.0 ; GCN-NOT: s_endpgm -define float @ps_input_ena_no_inputs([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 
inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #1 { +define float @ps_input_ena_no_inputs([9 x <16 x i8>] addrspace(2)* byval([9 x <16 x i8>]), i32 inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #1 { ret float 1.0 } @@ -85,7 +85,7 @@ ; GCN-DAG: v_mov_b32_e32 v1, v2 ; GCN: v_mov_b32_e32 v2, v3 ; GCN-NOT: s_endpgm -define {float, <2 x float>} @ps_input_ena_pos_w([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #1 { +define {float, <2 x float>} @ps_input_ena_pos_w([9 x <16 x i8>] addrspace(2)* byval([9 x <16 x i8>]), i32 inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #1 { %f = bitcast <2 x i32> %8 to <2 x float> %s = insertvalue {float, <2 x float>} undef, float %14, 0 %s1 = insertvalue {float, <2 x float>} %s, <2 x float> %f, 1 @@ -105,7 +105,7 @@ ; GCN-DAG: v_mov_b32_e32 v4, v8 ; GCN-NOT: s_endpgm attributes #2 = { "ShaderType"="0" "InitialPSInputAddr"="1" } -define {float, float, float, float, float} @vgpr_ps_addr1([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #2 { +define {float, float, float, float, float} @vgpr_ps_addr1([9 x <16 x i8>] addrspace(2)* byval([9 x <16 x i8>]), i32 inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #2 { %i0 = extractelement <2 x i32> %4, i32 0 %i1 = extractelement <2 x i32> %4, i32 1 %i2 = extractelement <2 x i32> 
%7, i32 0 @@ -135,7 +135,7 @@ ; GCN: v_mov_b32_e32 v4, v12 ; GCN-NOT: s_endpgm attributes #3 = { "ShaderType"="0" "InitialPSInputAddr"="119" } -define {float, float, float, float, float} @vgpr_ps_addr119([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #3 { +define {float, float, float, float, float} @vgpr_ps_addr119([9 x <16 x i8>] addrspace(2)* byval([9 x <16 x i8>]), i32 inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #3 { %i0 = extractelement <2 x i32> %4, i32 0 %i1 = extractelement <2 x i32> %4, i32 1 %i2 = extractelement <2 x i32> %7, i32 0 @@ -165,7 +165,7 @@ ; GCN: v_mov_b32_e32 v4, v8 ; GCN-NOT: s_endpgm attributes #4 = { "ShaderType"="0" "InitialPSInputAddr"="418" } -define {float, float, float, float, float} @vgpr_ps_addr418([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #4 { +define {float, float, float, float, float} @vgpr_ps_addr418([9 x <16 x i8>] addrspace(2)* byval([9 x <16 x i8>]), i32 inreg, i32 inreg, <2 x i32>, <2 x i32>, <2 x i32>, <3 x i32>, <2 x i32>, <2 x i32>, <2 x i32>, float, float, float, float, float, float, float, float, float) #4 { %i0 = extractelement <2 x i32> %4, i32 0 %i1 = extractelement <2 x i32> %4, i32 1 %i2 = extractelement <2 x i32> %7, i32 0 @@ -187,7 +187,7 @@ ; GCN: s_add_i32 s0, s3, 2 ; GCN: s_mov_b32 s2, s3 ; GCN-NOT: s_endpgm -define {i32, i32, i32} @sgpr([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, float) #0 { +define {i32, i32, i32} @sgpr([9 x <16 x i8>] addrspace(2)* byval([9 x <16 x i8>]), i32 inreg, i32 inreg, float) #0 { %x = add i32 %2, 2 %a = insertvalue {i32, i32, i32} undef, i32 %x, 0 %b = 
insertvalue {i32, i32, i32} %a, i32 %1, 1 @@ -203,7 +203,7 @@ ; GCN-DAG: s_mov_b32 s2, 7 ; GCN-DAG: s_mov_b32 s3, 8 ; GCN-NOT: s_endpgm -define {i32, i32, i32, i32} @sgpr_literal([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, float) #0 { +define {i32, i32, i32, i32} @sgpr_literal([9 x <16 x i8>] addrspace(2)* byval([9 x <16 x i8>]), i32 inreg, i32 inreg, float) #0 { %x = add i32 %2, 2 ret {i32, i32, i32, i32} {i32 5, i32 6, i32 7, i32 8} } @@ -218,7 +218,7 @@ ; GCN: s_mov_b32 s2, s3 ; GCN: s_waitcnt expcnt(0) ; GCN-NOT: s_endpgm -define {float, i32, float, i32, i32} @both([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, float) #0 { +define {float, i32, float, i32, i32} @both([9 x <16 x i8>] addrspace(2)* byval([9 x <16 x i8>]), i32 inreg, i32 inreg, float) #0 { call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %3, float %3, float %3, float %3) %v = fadd float %3, 1.0 %s = add i32 %2, 2 @@ -239,7 +239,7 @@ ; GCN-DAG: v_mov_b32_e32 v1, 2.0 ; GCN-DAG: v_mov_b32_e32 v2, 4.0 ; GCN-DAG: exp 15, 0, 1, 1, 1, v3, v3, v3, v3 -define {{float, i32}, {i32, <2 x float>}} @structure_literal([9 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, float) #0 { +define {{float, i32}, {i32, <2 x float>}} @structure_literal([9 x <16 x i8>] addrspace(2)* byval([9 x <16 x i8>]), i32 inreg, i32 inreg, float) #0 { call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %3, float %3, float %3, float %3) ret {{float, i32}, {i32, <2 x float>}} {{float, i32} {float 1.0, i32 2}, {i32, <2 x float>} {i32 3, <2 x float> }} } Index: test/CodeGen/AMDGPU/sgpr-copy.ll =================================================================== --- test/CodeGen/AMDGPU/sgpr-copy.ll +++ test/CodeGen/AMDGPU/sgpr-copy.ll @@ -222,7 +222,7 @@ ; CHECK: image_sample ; CHECK: exp ; CHECK: s_endpgm -define void @sample_v3([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <32 x i8>] addrspace(2)* byval %arg2, float inreg 
%arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 { +define void @sample_v3([17 x <16 x i8>] addrspace(2)* byval([17 x <16 x i8>]) %arg, [32 x <16 x i8>] addrspace(2)* byval([32 x <16 x i8>]) %arg1, [16 x <32 x i8>] addrspace(2)* byval([16 x <32 x i8>]) %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 { entry: %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg, i64 0, i32 0 %tmp21 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0 @@ -285,7 +285,7 @@ ; This test is just checking that we don't crash / assertion fail. 
; CHECK-LABEL: {{^}}copy2: ; CHECK: s_endpgm -define void @copy2([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <32 x i8>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 { +define void @copy2([17 x <16 x i8>] addrspace(2)* byval([17 x <16 x i8>]) %arg, [32 x <16 x i8>] addrspace(2)* byval([32 x <16 x i8>]) %arg1, [16 x <32 x i8>] addrspace(2)* byval([16 x <32 x i8>]) %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 { entry: br label %LOOP68 @@ -315,7 +315,7 @@ ; CHECK: image_sample ; CHECK: image_sample ; CHECK: s_endpgm -define void @sample_rsrc([6 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <4 x i32>] addrspace(2)* byval %arg2, [32 x <8 x i32>] addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, i32 %arg19, float %arg20, float %arg21) #0 { +define void @sample_rsrc([6 x <16 x i8>] addrspace(2)* byval([6 x <16 x i8>]) %arg, [17 x <16 x i8>] addrspace(2)* byval([17 x <16 x i8>]) %arg1, [16 x <4 x i32>] addrspace(2)* byval([16 x <4 x i32>]) %arg2, [32 x <8 x i32>] addrspace(2)* byval([32 x <8 x i32>]) %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, 
float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, i32 %arg19, float %arg20, float %arg21) #0 { bb: %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg1, i32 0, i32 0 %tmp22 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !2 Index: test/CodeGen/AMDGPU/si-scheduler.ll =================================================================== --- test/CodeGen/AMDGPU/si-scheduler.ll +++ test/CodeGen/AMDGPU/si-scheduler.ll @@ -11,7 +11,7 @@ ; CHECK: s_waitcnt vmcnt(0) ; CHECK: exp ; CHECK: s_endpgm -define void @main([6 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [17 x <4 x i32>] addrspace(2)* byval %arg2, [34 x <8 x i32>] addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, i32 %arg19, float %arg20, float %arg21) #0 { +define void @main([6 x <16 x i8>] addrspace(2)* byval([6 x <16 x i8>]) %arg, [17 x <16 x i8>] addrspace(2)* byval([17 x <16 x i8>]) %arg1, [17 x <4 x i32>] addrspace(2)* byval([17 x <4 x i32>]) %arg2, [34 x <8 x i32>] addrspace(2)* byval([34 x <8 x i32>]) %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, i32 %arg19, float %arg20, float %arg21) #0 { main_body: %tmp = bitcast [34 x <8 x i32>] addrspace(2)* %arg3 to <32 x i8> addrspace(2)* %tmp22 = load <32 x i8>, <32 x i8> addrspace(2)* %tmp, align 32, !tbaa !0 Index: test/CodeGen/AMDGPU/si-sgpr-spill.ll =================================================================== --- test/CodeGen/AMDGPU/si-sgpr-spill.ll +++ test/CodeGen/AMDGPU/si-sgpr-spill.ll @@ -22,7 +22,7 @@ ; Writing to M0 from an SMRD 
instruction will hang the GPU. ; CHECK-NOT: s_buffer_load_dword m0 ; CHECK: s_endpgm -define void @main([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <32 x i8>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 { +define void @main([17 x <16 x i8>] addrspace(2)* byval([17 x <16 x i8>]) %arg, [32 x <16 x i8>] addrspace(2)* byval([32 x <16 x i8>]) %arg1, [16 x <32 x i8>] addrspace(2)* byval([16 x <32 x i8>]) %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 { main_body: %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg, i64 0, i32 0 %tmp21 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0 @@ -660,7 +660,7 @@ ; CHECK-LABEL: {{^}}main1: ; CHECK: s_endpgm -define void @main1([17 x <16 x i8>] addrspace(2)* byval %arg, [32 x <16 x i8>] addrspace(2)* byval %arg1, [16 x <32 x i8>] addrspace(2)* byval %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 { +define void @main1([17 x <16 x i8>] addrspace(2)* byval([17 x <16 x i8>]) %arg, [32 x <16 x i8>] addrspace(2)* byval([32 x <16 x i8>]) %arg1, [16 x <32 x i8>] addrspace(2)* byval([16 x <32 x i8>]) %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x 
i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 { main_body: %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg, i64 0, i32 0 %tmp21 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0 Index: test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll =================================================================== --- test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll +++ test/CodeGen/AMDGPU/vgpr-spill-emergency-stack-slot.ll @@ -23,7 +23,7 @@ ; GCN: NumVgprs: 256 ; GCN: ScratchSize: 1024 -define void @main([9 x <16 x i8>] addrspace(2)* byval %arg, [17 x <16 x i8>] addrspace(2)* byval %arg1, [17 x <4 x i32>] addrspace(2)* byval %arg2, [34 x <8 x i32>] addrspace(2)* byval %arg3, [16 x <16 x i8>] addrspace(2)* byval %arg4, i32 inreg %arg5, i32 inreg %arg6, i32 %arg7, i32 %arg8, i32 %arg9, i32 %arg10) #0 { +define void @main([9 x <16 x i8>] addrspace(2)* byval([9 x <16 x i8>]) %arg, [17 x <16 x i8>] addrspace(2)* byval([17 x <16 x i8>]) %arg1, [17 x <4 x i32>] addrspace(2)* byval([17 x <4 x i32>]) %arg2, [34 x <8 x i32>] addrspace(2)* byval([34 x <8 x i32>]) %arg3, [16 x <16 x i8>] addrspace(2)* byval([16 x <16 x i8>]) %arg4, i32 inreg %arg5, i32 inreg %arg6, i32 %arg7, i32 %arg8, i32 %arg9, i32 %arg10) #0 { bb: %tmp = getelementptr [17 x <16 x i8>], [17 x <16 x i8>] addrspace(2)* %arg1, i64 0, i64 0 %tmp11 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, align 16, !tbaa !0 Index: test/CodeGen/AMDGPU/wait.ll =================================================================== --- test/CodeGen/AMDGPU/wait.ll +++ test/CodeGen/AMDGPU/wait.ll @@ -45,7 +45,7 @@ ; ILPMAX: s_waitcnt vmcnt(0) ; ILPMAX: s_endpgm -define void @main2([6 x <16 x i8>] addrspace(2)* byval, [17 x <16 x i8>] addrspace(2)* byval, [17 x <4 x i32>] addrspace(2)* byval, [34 x <8 x i32>] addrspace(2)* byval, [16 x <16 x i8>] addrspace(2)* +define 
void @main2([6 x <16 x i8>] addrspace(2)* byval([6 x <16 x i8>]), [17 x <16 x i8>] addrspace(2)* byval([17 x <16 x i8>]), [17 x <4 x i32>] addrspace(2)* byval([17 x <4 x i32>]), [34 x <8 x i32>] addrspace(2)* byval([34 x <8 x i32>]), [16 x <16 x i8>] addrspace(2)* byval, i32 inreg, i32 inreg, i32, i32, i32, i32) #0 { main_body: %11 = getelementptr [16 x <16 x i8>], [16 x <16 x i8>] addrspace(2)* %4, i64 0, i64 0 Index: test/CodeGen/ARM/2009-05-05-DAGCombineBug.ll =================================================================== --- test/CodeGen/ARM/2009-05-05-DAGCombineBug.ll +++ test/CodeGen/ARM/2009-05-05-DAGCombineBug.ll @@ -6,6 +6,6 @@ define fastcc void @t() { entry: - %tmp28 = call fastcc i1 null(i32* null, %"byte[]" undef, %"byte[]" undef, %tango.time.Time.Time* byval null) ; [#uses=0] + %tmp28 = call fastcc i1 null(i32* null, %"byte[]" undef, %"byte[]" undef, %tango.time.Time.Time* byval(%tango.time.Time.Time) null) ; [#uses=0] ret void } Index: test/CodeGen/ARM/2011-06-09-TailCallByVal.ll =================================================================== --- test/CodeGen/ARM/2011-06-09-TailCallByVal.ll +++ test/CodeGen/ARM/2011-06-09-TailCallByVal.ll @@ -33,8 +33,8 @@ define i32 @"\01_fnmatch"(i8* %pattern, i8* %string, i32 %flags) nounwind optsize { entry: - %call4 = tail call i32 @fnmatch1(i8* %pattern, i8* %string, i8* %string, i32 %flags, %union.__mbstate_t* byval @"\01_fnmatch.initial", %union.__mbstate_t* byval @"\01_fnmatch.initial", %struct._xlocale* undef, i32 64) optsize + %call4 = tail call i32 @fnmatch1(i8* %pattern, i8* %string, i8* %string, i32 %flags, %union.__mbstate_t* byval(%union.__mbstate_t) @"\01_fnmatch.initial", %union.__mbstate_t* byval(%union.__mbstate_t) @"\01_fnmatch.initial", %struct._xlocale* undef, i32 64) optsize ret i32 %call4 } -declare i32 @fnmatch1(i8*, i8*, i8*, i32, %union.__mbstate_t* byval, %union.__mbstate_t* byval, %struct._xlocale*, i32) nounwind optsize +declare i32 @fnmatch1(i8*, i8*, i8*, i32, 
%union.__mbstate_t* byval(%union.__mbstate_t), %union.__mbstate_t* byval(%union.__mbstate_t), %struct._xlocale*, i32) nounwind optsize Index: test/CodeGen/ARM/2011-06-16-TailCallByVal.ll =================================================================== --- test/CodeGen/ARM/2011-06-16-TailCallByVal.ll +++ test/CodeGen/ARM/2011-06-16-TailCallByVal.ll @@ -16,7 +16,7 @@ ; CHECK: add sp, #12 ; CHECK: b.w _puts -define void @f(i8* %s, %struct.A* nocapture byval %a) nounwind optsize { +define void @f(i8* %s, %struct.A* nocapture byval(%struct.A) %a) nounwind optsize { entry: %puts = tail call i32 @puts(i8* %s) ret void Index: test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll =================================================================== --- test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll +++ test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll @@ -33,7 +33,7 @@ ; CHECK: movw r0, #555 define i32 @main() { entry: - call void (i32, ...) @test_byval_8_bytes_alignment(i32 555, %struct_t* byval @static_val) + call void (i32, ...) 
@test_byval_8_bytes_alignment(i32 555, %struct_t* byval(%struct_t) @static_val) ret i32 0 } @@ -44,7 +44,7 @@ ; CHECK: str r3, [sp, #12] ; CHECK: str r2, [sp, #8] ; CHECK-NOT: str r1 -define void @test_byval_8_bytes_alignment_fixed_arg(i32 %n1, %struct_t* byval %val) nounwind { +define void @test_byval_8_bytes_alignment_fixed_arg(i32 %n1, %struct_t* byval(%struct_t) %val) nounwind { entry: %a = getelementptr inbounds %struct_t, %struct_t* %val, i32 0, i32 0 %0 = load double, double* %a @@ -60,6 +60,6 @@ ; CHECK: movw r0, #555 define i32 @main_fixed_arg() { entry: - call void (i32, %struct_t*) @test_byval_8_bytes_alignment_fixed_arg(i32 555, %struct_t* byval @static_val) + call void (i32, %struct_t*) @test_byval_8_bytes_alignment_fixed_arg(i32 555, %struct_t* byval(%struct_t) @static_val) ret i32 0 } Index: test/CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll =================================================================== --- test/CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll +++ test/CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll @@ -10,7 +10,7 @@ ; CHECK: str r3, [sp, #12] ; CHECK: str r2, [sp, #8] ; CHECK: vldr d16, [sp, #8] -define void @test_byval_usage_scheduling(i32 %n1, i32 %n2, %struct_t* byval %val) nounwind { +define void @test_byval_usage_scheduling(i32 %n1, i32 %n2, %struct_t* byval(%struct_t) %val) nounwind { entry: %a = getelementptr inbounds %struct_t, %struct_t* %val, i32 0, i32 0 %0 = load double, double* %a Index: test/CodeGen/ARM/2012-10-04-LDRB_POST_IMM-Crash.ll =================================================================== --- test/CodeGen/ARM/2012-10-04-LDRB_POST_IMM-Crash.ll +++ test/CodeGen/ARM/2012-10-04-LDRB_POST_IMM-Crash.ll @@ -4,13 +4,13 @@ %my_struct_t = type { i8, i8, i8, i8, i8 } @main.val = private unnamed_addr constant %my_struct_t { i8 1, i8 2, i8 3, i8 4, i8 5 } -declare void @f(i32 %n1, i32 %n2, i32 %n3, %my_struct_t* byval %val); +declare void @f(i32 %n1, i32 %n2, i32 %n3, %my_struct_t* byval(%my_struct_t) %val); ; 
CHECK-LABEL: main: define i32 @main() nounwind { entry: ; CHECK: ldrb {{(r[0-9]+)}}, {{(\[r[0-9]+\])}}, #1 - call void @f(i32 555, i32 555, i32 555, %my_struct_t* byval @main.val) + call void @f(i32 555, i32 555, i32 555, %my_struct_t* byval(%my_struct_t) @main.val) ret i32 0 } Index: test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll =================================================================== --- test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll +++ test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll @@ -6,7 +6,7 @@ declare void @f(%struct.s* %p); ; CHECK-LABEL: t: -define void @t(i32 %a, %struct.s* byval %s) nounwind { +define void @t(i32 %a, %struct.s* byval(%struct.s) %s) nounwind { entry: ; Here we need to only check proper start address of restored %s argument. Index: test/CodeGen/ARM/2013-04-05-Small-ByVal-Structs-PR15293.ll =================================================================== --- test/CodeGen/ARM/2013-04-05-Small-ByVal-Structs-PR15293.ll +++ test/CodeGen/ARM/2013-04-05-Small-ByVal-Structs-PR15293.ll @@ -49,12 +49,12 @@ declare void @fooUseParam(%artz* ) -define void @foo(%artz* byval %s) { +define void @foo(%artz* byval(%artz) %s) { call void @fooUseParam(%artz* %s) ret void } -define void @foo2(%artz* byval %s, i32 %p, %artz* byval %s2) { +define void @foo2(%artz* byval(%artz) %s, i32 %p, %artz* byval(%artz) %s2) { call void @fooUseParam(%artz* %s) call void @fooUseParam(%artz* %s2) ret void @@ -62,12 +62,12 @@ define void @doFoo() { - call void @foo(%artz* byval @static_val) + call void @foo(%artz* byval(%artz) @static_val) ret void } define void @doFoo2() { - call void @foo2(%artz* byval @static_val, i32 0, %artz* byval @static_val) + call void @foo2(%artz* byval(%artz) @static_val, i32 0, %artz* byval(%artz) @static_val) ret void } Index: test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP.ll =================================================================== --- 
test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP.ll +++ test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP.ll @@ -42,7 +42,7 @@ double 23.6, double 23.7, double 23.8, - i32 0, %st_t* byval @static_val, i32 1, i32 2) + i32 0, %st_t* byval(%st_t) @static_val, i32 1, i32 2) ret void } Index: test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP2.ll =================================================================== --- test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP2.ll +++ test/CodeGen/ARM/2013-05-02-AAPCS-ByVal-Structs-C4-C5-VFP2.ll @@ -39,7 +39,7 @@ double 23.6, double 23.7, double 23.8, - i32 0, %st_t* byval @static_val, i32 1) + i32 0, %st_t* byval(%st_t) @static_val, i32 1) ret void } Index: test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding2.ll =================================================================== --- test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding2.ll +++ test/CodeGen/ARM/2013-05-13-AAPCS-byval-padding2.ll @@ -4,7 +4,7 @@ %struct4bytes = type { i32 } %struct20bytes = type { i32, i32, i32, i32, i32 } -define void @foo(%struct4bytes* byval %p0, ; --> R0 +define void @foo(%struct4bytes* byval(%struct4bytes) %p0, ; --> R0 %struct20bytes* byval %p1 ; --> R1,R2,R3, [SP+0 .. 
SP+8) ) { ;CHECK: sub sp, sp, #16 Index: test/CodeGen/ARM/2013-06-03-ByVal-2Kbytes.ll =================================================================== --- test/CodeGen/ARM/2013-06-03-ByVal-2Kbytes.ll +++ test/CodeGen/ARM/2013-06-03-ByVal-2Kbytes.ll @@ -12,19 +12,19 @@ ;CHECK: sub sp, sp, #8 ;CHECK: sub sp, sp, #2048 ;CHECK: bl callme0 - call void @callme0(%big_struct0* byval %p0) + call void @callme0(%big_struct0* byval(%big_struct0) %p0) ;CHECK: add sp, sp, #8 ;CHECK: add sp, sp, #2048 ;CHECK: sub sp, sp, #2048 ;CHECK: bl callme1 - call void @callme1(%big_struct1* byval %p1) + call void @callme1(%big_struct1* byval(%big_struct1) %p1) ;CHECK: add sp, sp, #2048 ret void } -declare void @callme0(%big_struct0* byval) -declare void @callme1(%big_struct1* byval) +declare void @callme0(%big_struct0* byval(%big_struct0)) +declare void @callme1(%big_struct1* byval(%big_struct1)) Index: test/CodeGen/ARM/2014-02-21-byval-reg-split-alignment.ll =================================================================== --- test/CodeGen/ARM/2014-02-21-byval-reg-split-alignment.ll +++ test/CodeGen/ARM/2014-02-21-byval-reg-split-alignment.ll @@ -11,7 +11,7 @@ ; a -> r0 ; b -> r1..r3 ; c -> sp+0..sp+7 -define void @foo1(i32 %a, %struct12bytes* byval %b, i64 %c) { +define void @foo1(i32 %a, %struct12bytes* byval(%struct12bytes) %b, i64 %c) { ; CHECK-LABEL: foo1 ; CHECK: sub sp, sp, #12 ; CHECK: push {r11, lr} @@ -30,7 +30,7 @@ ; a -> r0 ; b -> r2..r3 -define void @foo2(i32 %a, %struct8bytes8align* byval %b) { +define void @foo2(i32 %a, %struct8bytes8align* byval(%struct8bytes8align) %b) { ; CHECK-LABEL: foo2 ; CHECK: sub sp, sp, #8 ; CHECK: push {r11, lr} @@ -47,7 +47,7 @@ ; a -> r0..r1 ; b -> r2 -define void @foo3(%struct8bytes8align* byval %a, %struct4bytes* byval %b) { +define void @foo3(%struct8bytes8align* byval(%struct8bytes8align) %a, %struct4bytes* byval(%struct4bytes) %b) { ; CHECK-LABEL: foo3 ; CHECK: sub sp, sp, #16 ; CHECK: push {r11, lr} @@ -64,7 +64,7 @@ ; a -> r0 ; b -> 
r2..r3 -define void @foo4(%struct4bytes* byval %a, %struct8bytes8align* byval %b) { +define void @foo4(%struct4bytes* byval(%struct4bytes) %a, %struct8bytes8align* byval(%struct8bytes8align) %b) { ; CHECK-LABEL: foo4 ; CHECK: sub sp, sp, #16 ; CHECK: push {r11, lr} @@ -84,7 +84,7 @@ ; a -> r0..r1 ; b -> r2 ; c -> r3 -define void @foo5(%struct8bytes8align* byval %a, %struct4bytes* byval %b, %struct4bytes* byval %c) { +define void @foo5(%struct8bytes8align* byval(%struct8bytes8align) %a, %struct4bytes* byval(%struct4bytes) %b, %struct4bytes* byval(%struct4bytes) %c) { ; CHECK-LABEL: foo5 ; CHECK: sub sp, sp, #16 ; CHECK: push {r11, lr} @@ -102,7 +102,7 @@ ; a..c -> r0..r2 ; d -> sp+0..sp+7 -define void @foo6(i32 %a, i32 %b, i32 %c, %struct8bytes8align* byval %d) { +define void @foo6(i32 %a, i32 %b, i32 %c, %struct8bytes8align* byval(%struct8bytes8align) %d) { ; CHECK-LABEL: foo6 ; CHECK: push {r11, lr} ; CHECK: add r0, sp, #8 Index: test/CodeGen/ARM/align-sp-adjustment.ll =================================================================== --- test/CodeGen/ARM/align-sp-adjustment.ll +++ test/CodeGen/ARM/align-sp-adjustment.ll @@ -11,7 +11,7 @@ @.str.3 = private unnamed_addr constant [2 x i8] c"d\00", align 1 declare i32* @_Z4bar3iiPKcS0_i(i32, i32, i8*, i8*, i32) -declare void @_Z4bar1i8struct_2(i32, %struct.struct_2* byval align 4) +declare void @_Z4bar1i8struct_2(i32, %struct.struct_2* align 4 byval(%struct.struct_2)) declare i32 @_Z4bar2PiPKc(i32*, i8*) define void @_Z3fooiiiii(i32 %p1, i32 %p2, i32 %p3, i32 %p4, i32 %p5) #0 { @@ -38,7 +38,7 @@ br i1 %cmp, label %for.body, label %for.end for.end: - call void @_Z4bar1i8struct_2(i32 %p4, %struct.struct_2* byval nonnull align 4 %params) #4 + call void @_Z4bar1i8struct_2(i32 %p4, %struct.struct_2* nonnull align 4 byval(%struct.struct_2) %params) #4 br label %cleanup.8 cleanup.8: Index: test/CodeGen/ARM/byval-align.ll =================================================================== --- test/CodeGen/ARM/byval-align.ll
+++ test/CodeGen/ARM/byval-align.ll @@ -6,7 +6,7 @@ ; users of byval alignments > 4, so no real calls for ABI stability. ; "byval align 16" can't fit in any regs with an i8* taking up r0. -define i32 @test_align16(i8*, [4 x i32]* byval align 16 %b) { +define i32 @test_align16(i8*, [4 x i32]* align 16 byval([4 x i32]) %b) { ; CHECK-LABEL: test_align16: ; CHECK-NOT: sub sp ; CHECK: push {r4, r7, lr} @@ -22,7 +22,7 @@ ; byval align 8 can, but we used to incorrectly set r7 here (miscalculating the ; space taken up by arg regs). -define i32 @test_align8(i8*, [4 x i32]* byval align 8 %b) { +define i32 @test_align8(i8*, [4 x i32]* align 8 byval([4 x i32]) %b) { ; CHECK-LABEL: test_align8: ; CHECK: sub sp, #8 ; CHECK: push {r4, r7, lr} @@ -40,7 +40,7 @@ ; "byval align 32" can't fit in regs no matter what: it would be misaligned ; unless the incoming stack was deliberately misaligned. -define i32 @test_align32(i8*, [4 x i32]* byval align 32 %b) { +define i32 @test_align32(i8*, [4 x i32]* align 32 byval([4 x i32]) %b) { ; CHECK-LABEL: test_align32: ; CHECK-NOT: sub sp ; CHECK: push {r4, r7, lr} @@ -67,7 +67,7 @@ ; While we're here, make sure the caller also puts it at sp ; CHECK: mov r[[BASE:[0-9]+]], sp ; CHECK: vst1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r[[BASE]]] - call i32 @test_align16(i8* null, [4 x i32]* byval align 16 @var) + call i32 @test_align16(i8* null, [4 x i32]* align 16 byval([4 x i32]) @var) ret void } Index: test/CodeGen/ARM/byval_load_align.ll =================================================================== --- test/CodeGen/ARM/byval_load_align.ll +++ test/CodeGen/ARM/byval_load_align.ll @@ -16,11 +16,11 @@ ; Function Attrs: nounwind ssp define void @Client() #0 { entry: - tail call void @Logger(i8 signext 97, %struct.ModuleID* byval @sID) #2 + tail call void @Logger(i8 signext 97, %struct.ModuleID* byval(%struct.ModuleID) @sID) #2 ret void } -declare void @Logger(i8 signext, %struct.ModuleID* byval) #1 +declare void @Logger(i8 signext, %struct.ModuleID* 
byval(%struct.ModuleID)) #1 attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } Index: test/CodeGen/ARM/ssp-data-layout.ll =================================================================== --- test/CodeGen/ARM/ssp-data-layout.ll +++ test/CodeGen/ARM/ssp-data-layout.ll @@ -159,7 +159,7 @@ %coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0 %7 = bitcast [2 x i16]* %coerce.dive26 to i32* %8 = load i32, i32* %7, align 1 - call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 4 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2) + call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* align 4 byval(%struct.struct_large_nonchar) %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2) ret void } @@ -304,7 +304,7 @@ %coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0 %7 = bitcast [2 x i16]* %coerce.dive26 to i32* %8 = load i32, i32* %7, align 1 - call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 4 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2) + call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* align 4 byval(%struct.struct_large_nonchar) %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* 
%arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2) ret void } @@ -437,7 +437,7 @@ %coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0 %7 = bitcast [2 x i16]* %coerce.dive26 to i32* %8 = load i32, i32* %7, align 1 - call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 4 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2) + call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* align 4 byval(%struct.struct_large_nonchar) %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2) ret void } @@ -476,7 +476,7 @@ %coerce.dive5 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d2, i32 0, i32 0 %5 = bitcast [2 x i16]* %coerce.dive5 to i32* %6 = load i32, i32* %5, align 1 - call void @takes_all(i64 %2, i16 %4, %struct.struct_large_nonchar* byval align 4 %d1, i32 %6, i8* null, i8* null, i32* null, i16* null, i32* null, i32 0, i32 0, i32 0) + call void @takes_all(i64 %2, i16 %4, %struct.struct_large_nonchar* align 4 byval(%struct.struct_large_nonchar) %d1, i32 %6, i8* null, i8* null, i32* null, i16* null, i32* null, i32 0, i32 0, i32 0) ret void } @@ -519,4 +519,4 @@ declare signext i16 @get_struct_small_nonchar() declare void @end_struct_small_nonchar() -declare void @takes_all(i64, i16, %struct.struct_large_nonchar* byval align 8, i32, i8*, i8*, i32*, i16*, i32*, i32, i32, i32) +declare void @takes_all(i64, i16, %struct.struct_large_nonchar* align 8 byval(%struct.struct_large_nonchar), i32, i8*, i8*, i32*, i16*, i32*, i32, i32, i32) Index: test/CodeGen/ARM/struct-byval-frame-index.ll =================================================================== --- test/CodeGen/ARM/struct-byval-frame-index.ll +++ test/CodeGen/ARM/struct-byval-frame-index.ll @@ -144,9 +144,9 @@ if.then248: ; preds = %land.lhs.true246 tail call void asm 
sideeffect "", "~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11}"() nounwind - tail call void @RestoreMVBlock8x8(i32 1, i32 0, %structN* byval @tr8x8, i32 0) #0 - tail call void @RestoreMVBlock8x8(i32 1, i32 2, %structN* byval @tr8x8, i32 0) #0 - tail call void @RestoreMVBlock8x8(i32 1, i32 3, %structN* byval @tr8x8, i32 0) #0 + tail call void @RestoreMVBlock8x8(i32 1, i32 0, %structN* byval(%structN) @tr8x8, i32 0) #0 + tail call void @RestoreMVBlock8x8(i32 1, i32 2, %structN* byval(%structN) @tr8x8, i32 0) #0 + tail call void @RestoreMVBlock8x8(i32 1, i32 3, %structN* byval(%structN) @tr8x8, i32 0) #0 br label %if.end249 if.end249: ; preds = %if.then248, %land.lhs.true246, %if.end236 @@ -213,7 +213,7 @@ declare void @update_offset_params(i32, i32) #1 ; Function Attrs: nounwind -declare void @RestoreMVBlock8x8(i32, i32, %structN* byval nocapture, i32) #1 +declare void @RestoreMVBlock8x8(i32, i32, %structN* nocapture byval(%structN), i32) #1 attributes #0 = { nounwind } attributes #1 = { nounwind "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "unsafe-fp-math"="false" "use-soft-float"="false" } Index: test/CodeGen/ARM/struct_byval.ll =================================================================== --- test/CodeGen/ARM/struct_byval.ll +++ test/CodeGen/ARM/struct_byval.ll @@ -20,7 +20,7 @@ ; THUMB: str ; THUMB-NOT:bne %st = alloca %struct.SmallStruct, align 4 - %call = call i32 @e1(%struct.SmallStruct* byval %st) + %call = call i32 @e1(%struct.SmallStruct* byval(%struct.SmallStruct) %st) ret i32 0 } @@ -46,7 +46,7 @@ ; NACL: str ; NACL: bne %st = alloca %struct.LargeStruct, align 4 - %call = call i32 @e2(%struct.LargeStruct* byval %st) + %call = call i32 @e2(%struct.LargeStruct* byval(%struct.LargeStruct) %st) ret i32 0 } @@ -69,18 +69,18 @@ ; NACL: vst1 ; NACL: bne %st = alloca %struct.LargeStruct, align 16 - %call = call i32 @e3(%struct.LargeStruct* byval align 16 %st) + %call = 
call i32 @e3(%struct.LargeStruct* align 16 byval(%struct.LargeStruct) %st) ret i32 0 } -declare i32 @e1(%struct.SmallStruct* nocapture byval %in) nounwind -declare i32 @e2(%struct.LargeStruct* nocapture byval %in) nounwind -declare i32 @e3(%struct.LargeStruct* nocapture byval align 16 %in) nounwind +declare i32 @e1(%struct.SmallStruct* nocapture byval(%struct.SmallStruct) %in) nounwind +declare i32 @e2(%struct.LargeStruct* nocapture byval(%struct.LargeStruct) %in) nounwind +declare i32 @e3(%struct.LargeStruct* nocapture align 16 byval(%struct.LargeStruct) %in) nounwind ; rdar://12442472 ; We can't do tail call since address of s is passed to the callee and part of ; s is in caller's local frame. -define void @f3(%struct.SmallStruct* nocapture byval %s) nounwind optsize { +define void @f3(%struct.SmallStruct* nocapture byval(%struct.SmallStruct) %s) nounwind optsize { ; CHECK-LABEL: f3 ; CHECK: bl _consumestruct ; THUMB-LABEL: f3 @@ -91,7 +91,7 @@ ret void } -define void @f4(%struct.SmallStruct* nocapture byval %s) nounwind optsize { +define void @f4(%struct.SmallStruct* nocapture byval(%struct.SmallStruct) %s) nounwind optsize { ; CHECK-LABEL: f4 ; CHECK: bl _consumestruct ; THUMB-LABEL: f4 @@ -104,7 +104,7 @@ } ; We can do tail call here since s is in the incoming argument area. 
-define void @f5(i32 %a, i32 %b, i32 %c, i32 %d, %struct.SmallStruct* nocapture byval %s) nounwind optsize { +define void @f5(i32 %a, i32 %b, i32 %c, i32 %d, %struct.SmallStruct* nocapture byval(%struct.SmallStruct) %s) nounwind optsize { ; CHECK-LABEL: f5 ; CHECK: b _consumestruct ; THUMB-LABEL: f5 @@ -115,7 +115,7 @@ ret void } -define void @f6(i32 %a, i32 %b, i32 %c, i32 %d, %struct.SmallStruct* nocapture byval %s) nounwind optsize { +define void @f6(i32 %a, i32 %b, i32 %c, i32 %d, %struct.SmallStruct* nocapture byval(%struct.SmallStruct) %s) nounwind optsize { ; CHECK-LABEL: f6 ; CHECK: b _consumestruct ; THUMB-LABEL: f6 @@ -132,7 +132,7 @@ ; PR17309 %struct.I.8 = type { [10 x i32], [3 x i8] } -declare void @use_I(%struct.I.8* byval) +declare void @use_I(%struct.I.8* byval(%struct.I.8)) define void @test_I_16() { ; CHECK-LABEL: test_I_16 ; CHECK: ldrb @@ -141,6 +141,6 @@ ; THUMB: ldrb ; THUMB: strb entry: - call void @use_I(%struct.I.8* byval align 16 undef) + call void @use_I(%struct.I.8* align 16 byval(%struct.I.8) undef) ret void } Index: test/CodeGen/ARM/struct_byval_arm_t1_t2.ll =================================================================== --- test/CodeGen/ARM/struct_byval_arm_t1_t2.ll +++ test/CodeGen/ARM/struct_byval_arm_t1_t2.ll @@ -21,29 +21,29 @@ ;cleanup if the number of bytes does not divide evenly by the store size %struct.A = type <{ [ 10 x i32 ] }> ; 40 bytes -declare void @use_A(%struct.A* byval) +declare void @use_A(%struct.A* byval(%struct.A)) %struct.B = type <{ [ 10 x i32 ], i8 }> ; 41 bytes -declare void @use_B(%struct.B* byval) +declare void @use_B(%struct.B* byval(%struct.B)) %struct.C = type <{ [ 10 x i32 ], [ 3 x i8 ] }> ; 43 bytes -declare void @use_C(%struct.C* byval) +declare void @use_C(%struct.C* byval(%struct.C)) %struct.D = type <{ [ 100 x i32 ] }> ; 400 bytes -declare void @use_D(%struct.D* byval) +declare void @use_D(%struct.D* byval(%struct.D)) %struct.E = type <{ [ 100 x i32 ], i8 }> ; 401 bytes -declare void 
@use_E(%struct.E* byval) +declare void @use_E(%struct.E* byval(%struct.E)) %struct.F = type <{ [ 100 x i32 ], [ 3 x i8 ] }> ; 403 bytes -declare void @use_F(%struct.F* byval) +declare void @use_F(%struct.F* byval(%struct.F)) %struct.G = type { [ 10 x i32 ] } ; 40 bytes -declare void @use_G(%struct.G* byval) +declare void @use_G(%struct.G* byval(%struct.G)) %struct.H = type { [ 10 x i32 ], i8 } ; 41 bytes -declare void @use_H(%struct.H* byval) +declare void @use_H(%struct.H* byval(%struct.H)) %struct.I = type { [ 10 x i32 ], [ 3 x i8 ] } ; 43 bytes -declare void @use_I(%struct.I* byval) +declare void @use_I(%struct.I* byval(%struct.I)) %struct.J = type { [ 100 x i32 ] } ; 400 bytes -declare void @use_J(%struct.J* byval) +declare void @use_J(%struct.J* byval(%struct.J)) %struct.K = type { [ 100 x i32 ], i8 } ; 401 bytes -declare void @use_K(%struct.K* byval) +declare void @use_K(%struct.K* byval(%struct.K)) %struct.L = type { [ 100 x i32 ], [ 3 x i8 ] } ; 403 bytes -declare void @use_L(%struct.L* byval) +declare void @use_L(%struct.L* byval(%struct.L)) ;ARM-LABEL: test_A_1: ;THUMB2-LABEL: test_A_1: @@ -63,7 +63,7 @@ ;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1 entry: %a = alloca %struct.A, align 1 - call void @use_A(%struct.A* byval align 1 %a) + call void @use_A(%struct.A* align 1 byval(%struct.A) %a) ret void } ;ARM-LABEL: test_A_2: @@ -84,7 +84,7 @@ ;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2 entry: %a = alloca %struct.A, align 2 - call void @use_A(%struct.A* byval align 2 %a) + call void @use_A(%struct.A* align 2 byval(%struct.A) %a) ret void } ;ARM-LABEL: test_A_4: @@ -105,7 +105,7 @@ ;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4 entry: %a = alloca %struct.A, align 4 - call void @use_A(%struct.A* byval align 4 %a) + call void @use_A(%struct.A* align 4 byval(%struct.A) %a) ret void } ;ARM-LABEL: test_A_8: @@ -127,7 +127,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]! 
entry: %a = alloca %struct.A, align 8 - call void @use_A(%struct.A* byval align 8 %a) + call void @use_A(%struct.A* align 8 byval(%struct.A) %a) ret void } ;ARM-LABEL: test_A_16: @@ -151,7 +151,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]! entry: %a = alloca %struct.A, align 16 - call void @use_A(%struct.A* byval align 16 %a) + call void @use_A(%struct.A* align 16 byval(%struct.A) %a) ret void } ;ARM-LABEL: test_B_1: @@ -172,7 +172,7 @@ ;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1 entry: %a = alloca %struct.B, align 1 - call void @use_B(%struct.B* byval align 1 %a) + call void @use_B(%struct.B* align 1 byval(%struct.B) %a) ret void } ;ARM-LABEL: test_B_2: @@ -197,7 +197,7 @@ ;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2 entry: %a = alloca %struct.B, align 2 - call void @use_B(%struct.B* byval align 2 %a) + call void @use_B(%struct.B* align 2 byval(%struct.B) %a) ret void } ;ARM-LABEL: test_B_4: @@ -222,7 +222,7 @@ ;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4 entry: %a = alloca %struct.B, align 4 - call void @use_B(%struct.B* byval align 4 %a) + call void @use_B(%struct.B* align 4 byval(%struct.B) %a) ret void } ;ARM-LABEL: test_B_8: @@ -248,7 +248,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]! entry: %a = alloca %struct.B, align 8 - call void @use_B(%struct.B* byval align 8 %a) + call void @use_B(%struct.B* align 8 byval(%struct.B) %a) ret void } ;ARM-LABEL: test_B_16: @@ -274,7 +274,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]! 
entry: %a = alloca %struct.B, align 16 - call void @use_B(%struct.B* byval align 16 %a) + call void @use_B(%struct.B* align 16 byval(%struct.B) %a) ret void } ;ARM-LABEL: test_C_1: @@ -295,7 +295,7 @@ ;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1 entry: %a = alloca %struct.C, align 1 - call void @use_C(%struct.C* byval align 1 %a) + call void @use_C(%struct.C* align 1 byval(%struct.C) %a) ret void } ;ARM-LABEL: test_C_2: @@ -320,7 +320,7 @@ ;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2 entry: %a = alloca %struct.C, align 2 - call void @use_C(%struct.C* byval align 2 %a) + call void @use_C(%struct.C* align 2 byval(%struct.C) %a) ret void } ;ARM-LABEL: test_C_4: @@ -346,7 +346,7 @@ ;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4 entry: %a = alloca %struct.C, align 4 - call void @use_C(%struct.C* byval align 4 %a) + call void @use_C(%struct.C* align 4 byval(%struct.C) %a) ret void } ;ARM-LABEL: test_C_8: @@ -373,7 +373,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]! entry: %a = alloca %struct.C, align 8 - call void @use_C(%struct.C* byval align 8 %a) + call void @use_C(%struct.C* align 8 byval(%struct.C) %a) ret void } ;ARM-LABEL: test_C_16: @@ -400,7 +400,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]! 
entry: %a = alloca %struct.C, align 16 - call void @use_C(%struct.C* byval align 16 %a) + call void @use_C(%struct.C* align 16 byval(%struct.C) %a) ret void } ;ARM-LABEL: test_D_1: @@ -425,7 +425,7 @@ ;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1 entry: %a = alloca %struct.D, align 1 - call void @use_D(%struct.D* byval align 1 %a) + call void @use_D(%struct.D* align 1 byval(%struct.D) %a) ret void } ;ARM-LABEL: test_D_2: @@ -450,7 +450,7 @@ ;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2 entry: %a = alloca %struct.D, align 2 - call void @use_D(%struct.D* byval align 2 %a) + call void @use_D(%struct.D* align 2 byval(%struct.D) %a) ret void } ;ARM-LABEL: test_D_4: @@ -475,7 +475,7 @@ ;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4 entry: %a = alloca %struct.D, align 4 - call void @use_D(%struct.D* byval align 4 %a) + call void @use_D(%struct.D* align 4 byval(%struct.D) %a) ret void } ;ARM-LABEL: test_D_8: @@ -501,7 +501,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]! entry: %a = alloca %struct.D, align 8 - call void @use_D(%struct.D* byval align 8 %a) + call void @use_D(%struct.D* align 8 byval(%struct.D) %a) ret void } ;ARM-LABEL: test_D_16: @@ -527,7 +527,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]! 
entry: %a = alloca %struct.D, align 16 - call void @use_D(%struct.D* byval align 16 %a) + call void @use_D(%struct.D* align 16 byval(%struct.D) %a) ret void } ;ARM-LABEL: test_E_1: @@ -552,7 +552,7 @@ ;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1 entry: %a = alloca %struct.E, align 1 - call void @use_E(%struct.E* byval align 1 %a) + call void @use_E(%struct.E* align 1 byval(%struct.E) %a) ret void } ;ARM-LABEL: test_E_2: @@ -581,7 +581,7 @@ ;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2 entry: %a = alloca %struct.E, align 2 - call void @use_E(%struct.E* byval align 2 %a) + call void @use_E(%struct.E* align 2 byval(%struct.E) %a) ret void } ;ARM-LABEL: test_E_4: @@ -610,7 +610,7 @@ ;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4 entry: %a = alloca %struct.E, align 4 - call void @use_E(%struct.E* byval align 4 %a) + call void @use_E(%struct.E* align 4 byval(%struct.E) %a) ret void } ;ARM-LABEL: test_E_8: @@ -640,7 +640,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]! entry: %a = alloca %struct.E, align 8 - call void @use_E(%struct.E* byval align 8 %a) + call void @use_E(%struct.E* align 8 byval(%struct.E) %a) ret void } ;ARM-LABEL: test_E_16: @@ -670,7 +670,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]! 
entry: %a = alloca %struct.E, align 16 - call void @use_E(%struct.E* byval align 16 %a) + call void @use_E(%struct.E* align 16 byval(%struct.E) %a) ret void } ;ARM-LABEL: test_F_1: @@ -695,7 +695,7 @@ ;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1 entry: %a = alloca %struct.F, align 1 - call void @use_F(%struct.F* byval align 1 %a) + call void @use_F(%struct.F* align 1 byval(%struct.F) %a) ret void } ;ARM-LABEL: test_F_2: @@ -724,7 +724,7 @@ ;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2 entry: %a = alloca %struct.F, align 2 - call void @use_F(%struct.F* byval align 2 %a) + call void @use_F(%struct.F* align 2 byval(%struct.F) %a) ret void } ;ARM-LABEL: test_F_4: @@ -754,7 +754,7 @@ ;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4 entry: %a = alloca %struct.F, align 4 - call void @use_F(%struct.F* byval align 4 %a) + call void @use_F(%struct.F* align 4 byval(%struct.F) %a) ret void } ;ARM-LABEL: test_F_8: @@ -785,7 +785,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]! entry: %a = alloca %struct.F, align 8 - call void @use_F(%struct.F* byval align 8 %a) + call void @use_F(%struct.F* align 8 byval(%struct.F) %a) ret void } ;ARM-LABEL: test_F_16: @@ -816,7 +816,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]! 
entry: %a = alloca %struct.F, align 16 - call void @use_F(%struct.F* byval align 16 %a) + call void @use_F(%struct.F* align 16 byval(%struct.F) %a) ret void } ;ARM-LABEL: test_G_1: @@ -837,7 +837,7 @@ ;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1 entry: %a = alloca %struct.G, align 1 - call void @use_G(%struct.G* byval align 1 %a) + call void @use_G(%struct.G* align 1 byval(%struct.G) %a) ret void } ;ARM-LABEL: test_G_2: @@ -858,7 +858,7 @@ ;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2 entry: %a = alloca %struct.G, align 2 - call void @use_G(%struct.G* byval align 2 %a) + call void @use_G(%struct.G* align 2 byval(%struct.G) %a) ret void } ;ARM-LABEL: test_G_4: @@ -879,7 +879,7 @@ ;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4 entry: %a = alloca %struct.G, align 4 - call void @use_G(%struct.G* byval align 4 %a) + call void @use_G(%struct.G* align 4 byval(%struct.G) %a) ret void } ;ARM-LABEL: test_G_8: @@ -901,7 +901,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]! entry: %a = alloca %struct.G, align 8 - call void @use_G(%struct.G* byval align 8 %a) + call void @use_G(%struct.G* align 8 byval(%struct.G) %a) ret void } ;ARM-LABEL: test_G_16: @@ -923,7 +923,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]! 
entry: %a = alloca %struct.G, align 16 - call void @use_G(%struct.G* byval align 16 %a) + call void @use_G(%struct.G* align 16 byval(%struct.G) %a) ret void } ;ARM-LABEL: test_H_1: @@ -944,7 +944,7 @@ ;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1 entry: %a = alloca %struct.H, align 1 - call void @use_H(%struct.H* byval align 1 %a) + call void @use_H(%struct.H* align 1 byval(%struct.H) %a) ret void } ;ARM-LABEL: test_H_2: @@ -965,7 +965,7 @@ ;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2 entry: %a = alloca %struct.H, align 2 - call void @use_H(%struct.H* byval align 2 %a) + call void @use_H(%struct.H* align 2 byval(%struct.H) %a) ret void } ;ARM-LABEL: test_H_4: @@ -986,7 +986,7 @@ ;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4 entry: %a = alloca %struct.H, align 4 - call void @use_H(%struct.H* byval align 4 %a) + call void @use_H(%struct.H* align 4 byval(%struct.H) %a) ret void } ;ARM-LABEL: test_H_8: @@ -1008,7 +1008,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]! entry: %a = alloca %struct.H, align 8 - call void @use_H(%struct.H* byval align 8 %a) + call void @use_H(%struct.H* align 8 byval(%struct.H) %a) ret void } ;ARM-LABEL: test_H_16: @@ -1030,7 +1030,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]! 
entry: %a = alloca %struct.H, align 16 - call void @use_H(%struct.H* byval align 16 %a) + call void @use_H(%struct.H* align 16 byval(%struct.H) %a) ret void } ;ARM-LABEL: test_I_1: @@ -1051,7 +1051,7 @@ ;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1 entry: %a = alloca %struct.I, align 1 - call void @use_I(%struct.I* byval align 1 %a) + call void @use_I(%struct.I* align 1 byval(%struct.I) %a) ret void } ;ARM-LABEL: test_I_2: @@ -1072,7 +1072,7 @@ ;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2 entry: %a = alloca %struct.I, align 2 - call void @use_I(%struct.I* byval align 2 %a) + call void @use_I(%struct.I* align 2 byval(%struct.I) %a) ret void } ;ARM-LABEL: test_I_4: @@ -1093,7 +1093,7 @@ ;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4 entry: %a = alloca %struct.I, align 4 - call void @use_I(%struct.I* byval align 4 %a) + call void @use_I(%struct.I* align 4 byval(%struct.I) %a) ret void } ;ARM-LABEL: test_I_8: @@ -1115,7 +1115,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]! entry: %a = alloca %struct.I, align 8 - call void @use_I(%struct.I* byval align 8 %a) + call void @use_I(%struct.I* align 8 byval(%struct.I) %a) ret void } ;ARM-LABEL: test_I_16: @@ -1137,7 +1137,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]! 
entry: %a = alloca %struct.I, align 16 - call void @use_I(%struct.I* byval align 16 %a) + call void @use_I(%struct.I* align 16 byval(%struct.I) %a) ret void } ;ARM-LABEL: test_J_1: @@ -1162,7 +1162,7 @@ ;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1 entry: %a = alloca %struct.J, align 1 - call void @use_J(%struct.J* byval align 1 %a) + call void @use_J(%struct.J* align 1 byval(%struct.J) %a) ret void } ;ARM-LABEL: test_J_2: @@ -1187,7 +1187,7 @@ ;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2 entry: %a = alloca %struct.J, align 2 - call void @use_J(%struct.J* byval align 2 %a) + call void @use_J(%struct.J* align 2 byval(%struct.J) %a) ret void } ;ARM-LABEL: test_J_4: @@ -1212,7 +1212,7 @@ ;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4 entry: %a = alloca %struct.J, align 4 - call void @use_J(%struct.J* byval align 4 %a) + call void @use_J(%struct.J* align 4 byval(%struct.J) %a) ret void } ;ARM-LABEL: test_J_8: @@ -1238,7 +1238,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]! entry: %a = alloca %struct.J, align 8 - call void @use_J(%struct.J* byval align 8 %a) + call void @use_J(%struct.J* align 8 byval(%struct.J) %a) ret void } ;ARM-LABEL: test_J_16: @@ -1264,7 +1264,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]! 
entry: %a = alloca %struct.J, align 16 - call void @use_J(%struct.J* byval align 16 %a) + call void @use_J(%struct.J* align 16 byval(%struct.J) %a) ret void } ;ARM-LABEL: test_K_1: @@ -1289,7 +1289,7 @@ ;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1 entry: %a = alloca %struct.K, align 1 - call void @use_K(%struct.K* byval align 1 %a) + call void @use_K(%struct.K* align 1 byval(%struct.K) %a) ret void } ;ARM-LABEL: test_K_2: @@ -1314,7 +1314,7 @@ ;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2 entry: %a = alloca %struct.K, align 2 - call void @use_K(%struct.K* byval align 2 %a) + call void @use_K(%struct.K* align 2 byval(%struct.K) %a) ret void } ;ARM-LABEL: test_K_4: @@ -1339,7 +1339,7 @@ ;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4 entry: %a = alloca %struct.K, align 4 - call void @use_K(%struct.K* byval align 4 %a) + call void @use_K(%struct.K* align 4 byval(%struct.K) %a) ret void } ;ARM-LABEL: test_K_8: @@ -1365,7 +1365,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]! entry: %a = alloca %struct.K, align 8 - call void @use_K(%struct.K* byval align 8 %a) + call void @use_K(%struct.K* align 8 byval(%struct.K) %a) ret void } ;ARM-LABEL: test_K_16: @@ -1391,7 +1391,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]! 
entry: %a = alloca %struct.K, align 16 - call void @use_K(%struct.K* byval align 16 %a) + call void @use_K(%struct.K* align 16 byval(%struct.K) %a) ret void } ;ARM-LABEL: test_L_1: @@ -1416,7 +1416,7 @@ ;T1POST-NOT: ldrb r{{[0-9]+}}, [{{.*}}], #1 entry: %a = alloca %struct.L, align 1 - call void @use_L(%struct.L* byval align 1 %a) + call void @use_L(%struct.L* align 1 byval(%struct.L) %a) ret void } ;ARM-LABEL: test_L_2: @@ -1441,7 +1441,7 @@ ;T1POST-NOT: ldrh r{{[0-9]+}}, [{{.*}}], #2 entry: %a = alloca %struct.L, align 2 - call void @use_L(%struct.L* byval align 2 %a) + call void @use_L(%struct.L* align 2 byval(%struct.L) %a) ret void } ;ARM-LABEL: test_L_4: @@ -1466,7 +1466,7 @@ ;T1POST-NOT: ldr r{{[0-9]+}}, [{{.*}}], #4 entry: %a = alloca %struct.L, align 4 - call void @use_L(%struct.L* byval align 4 %a) + call void @use_L(%struct.L* align 4 byval(%struct.L) %a) ret void } ;ARM-LABEL: test_L_8: @@ -1492,7 +1492,7 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}}, [r{{.*}}]! entry: %a = alloca %struct.L, align 8 - call void @use_L(%struct.L* byval align 8 %a) + call void @use_L(%struct.L* align 8 byval(%struct.L) %a) ret void } ;ARM-LABEL: test_L_16: @@ -1518,6 +1518,6 @@ ;T1POST-NOT: vld1.32 {d{{[0-9]+}}, d{{[0-9]+}}}, [r{{.*}}]! 
entry: %a = alloca %struct.L, align 16 - call void @use_L(%struct.L* byval align 16 %a) + call void @use_L(%struct.L* align 16 byval(%struct.L) %a) ret void } Index: test/CodeGen/BPF/byval.ll =================================================================== --- test/CodeGen/BPF/byval.ll +++ test/CodeGen/BPF/byval.ll @@ -17,11 +17,11 @@ %arrayinit.start = getelementptr inbounds %struct.S, %struct.S* %.compoundliteral, i64 0, i32 0, i64 3 %scevgep4 = bitcast i32* %arrayinit.start to i8* call void @llvm.memset.p0i8.i64(i8* %scevgep4, i8 0, i64 28, i32 4, i1 false) - call void @foo(i32 %a, %struct.S* byval align 8 %.compoundliteral) #3 + call void @foo(i32 %a, %struct.S* align 8 byval(%struct.S) %.compoundliteral) #3 ret void } -declare void @foo(i32, %struct.S* byval align 8) #1 +declare void @foo(i32, %struct.S* align 8 byval(%struct.S)) #1 ; Function Attrs: nounwind declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #3 Index: test/CodeGen/CPP/attributes.ll =================================================================== --- test/CodeGen/CPP/attributes.ll +++ test/CodeGen/CPP/attributes.ll @@ -1,6 +1,6 @@ ; RUN: llc < %s -march=cpp | FileCheck %s -define void @f1(i8* byval, i8* inalloca) { +define void @f1(i8* byval(i8), i8* inalloca) { ; CHECK: ByVal ; CHECK: InAlloca ret void Index: test/CodeGen/Generic/2010-11-04-BigByval.ll =================================================================== --- test/CodeGen/Generic/2010-11-04-BigByval.ll +++ test/CodeGen/Generic/2010-11-04-BigByval.ll @@ -3,9 +3,9 @@ %big = type [131072 x i8] -declare void @foo(%big* byval align 1) +declare void @foo(%big* align 1 byval(%big)) -define void @bar(%big* byval align 1 %x) { - call void @foo(%big* byval align 1 %x) +define void @bar(%big* align 1 byval(%big) %x) { + call void @foo(%big* align 1 byval(%big) %x) ret void } Index: test/CodeGen/Hexagon/struct_args_large.ll =================================================================== --- 
test/CodeGen/Hexagon/struct_args_large.ll +++ test/CodeGen/Hexagon/struct_args_large.ll @@ -10,8 +10,8 @@ define void @foo() nounwind { entry: - call void @bar(%struct.large* byval @s2) + call void @bar(%struct.large* byval(%struct.large) @s2) ret void } -declare void @bar(%struct.large* byval) +declare void @bar(%struct.large* byval(%struct.large)) Index: test/CodeGen/MSP430/byval.ll =================================================================== --- test/CodeGen/MSP430/byval.ll +++ test/CodeGen/MSP430/byval.ll @@ -6,7 +6,7 @@ %struct.Foo = type { i16, i16, i16 } @foo = global %struct.Foo { i16 1, i16 2, i16 3 }, align 2 -define i16 @callee(%struct.Foo* byval %f) nounwind { +define i16 @callee(%struct.Foo* byval(%struct.Foo) %f) nounwind { entry: ; CHECK-LABEL: callee: ; CHECK: mov.w 2(r1), r15 @@ -21,6 +21,6 @@ ; CHECK: mov.w &foo+4, 4(r1) ; CHECK-NEXT: mov.w &foo+2, 2(r1) ; CHECK-NEXT: mov.w &foo, 0(r1) - %call = call i16 @callee(%struct.Foo* byval @foo) + %call = call i16 @callee(%struct.Foo* byval(%struct.Foo) @foo) ret void } Index: test/CodeGen/Mips/cprestore.ll =================================================================== --- test/CodeGen/Mips/cprestore.ll +++ test/CodeGen/Mips/cprestore.ll @@ -13,8 +13,8 @@ define void @foo2() nounwind { entry: %s = alloca %struct.S, align 4 - call void @foo1(%struct.S* byval %s) + call void @foo1(%struct.S* byval(%struct.S) %s) ret void } -declare void @foo1(%struct.S* byval) +declare void @foo1(%struct.S* byval(%struct.S)) Index: test/CodeGen/Mips/largeimmprinting.ll =================================================================== --- test/CodeGen/Mips/largeimmprinting.ll +++ test/CodeGen/Mips/largeimmprinting.ll @@ -29,10 +29,10 @@ %agg.tmp = alloca %struct.S1, align 1 %tmp = getelementptr inbounds %struct.S1, %struct.S1* %agg.tmp, i32 0, i32 0, i32 0 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* getelementptr inbounds (%struct.S1, %struct.S1* @s1, i32 0, i32 0, i32 0), i32 65536, i32 1, i1 false) - 
call void @f2(%struct.S1* byval %agg.tmp) nounwind + call void @f2(%struct.S1* byval(%struct.S1) %agg.tmp) nounwind ret void } -declare void @f2(%struct.S1* byval) +declare void @f2(%struct.S1* byval(%struct.S1)) declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind Index: test/CodeGen/Mips/load-store-left-right.ll =================================================================== --- test/CodeGen/Mips/load-store-left-right.ll +++ test/CodeGen/Mips/load-store-left-right.ll @@ -432,8 +432,8 @@ ; MIPS64R6: ld $[[SPTR:[0-9]+]], %got_disp(arr)( - tail call void @extern_func([7 x i8]* byval @arr) nounwind + tail call void @extern_func([7 x i8]* byval([7 x i8]) @arr) nounwind ret void } -declare void @extern_func([7 x i8]* byval) +declare void @extern_func([7 x i8]* byval([7 x i8])) Index: test/CodeGen/Mips/o32_cc_byval.ll =================================================================== --- test/CodeGen/Mips/o32_cc_byval.ll +++ test/CodeGen/Mips/o32_cc_byval.ll @@ -28,21 +28,21 @@ ; CHECK-DAG: lw $6, %lo(f1.s1)($[[R1]]) ; CHECK-DAG: lw $7, 4($[[R0]]) %agg.tmp10 = alloca %struct.S3, align 4 - call void @callee1(float 2.000000e+01, %struct.S1* byval bitcast (%0* @f1.s1 to %struct.S1*)) nounwind - call void @callee2(%struct.S2* byval @f1.s2) nounwind + call void @callee1(float 2.000000e+01, %struct.S1* byval(%struct.S1) bitcast (%0* @f1.s1 to %struct.S1*)) nounwind + call void @callee2(%struct.S2* byval(%struct.S2) @f1.s2) nounwind %tmp11 = getelementptr inbounds %struct.S3, %struct.S3* %agg.tmp10, i32 0, i32 0 store i8 11, i8* %tmp11, align 4 - call void @callee3(float 2.100000e+01, %struct.S3* byval %agg.tmp10, %struct.S1* byval bitcast (%0* @f1.s1 to %struct.S1*)) nounwind + call void @callee3(float 2.100000e+01, %struct.S3* byval(%struct.S3) %agg.tmp10, %struct.S1* byval(%struct.S1) bitcast (%0* @f1.s1 to %struct.S1*)) nounwind ret void } -declare void @callee1(float, %struct.S1* byval) +declare void @callee1(float, %struct.S1* 
byval(%struct.S1)) -declare void @callee2(%struct.S2* byval) +declare void @callee2(%struct.S2* byval(%struct.S2)) -declare void @callee3(float, %struct.S3* byval, %struct.S1* byval) +declare void @callee3(float, %struct.S3* byval(%struct.S3), %struct.S1* byval(%struct.S1)) -define void @f2(float %f, %struct.S1* nocapture byval %s1) nounwind { +define void @f2(float %f, %struct.S1* nocapture byval(%struct.S1) %s1) nounwind { entry: ; CHECK: addiu $sp, $sp, -48 ; CHECK: sw $7, 60($sp) @@ -79,7 +79,7 @@ declare void @callee4(i32, double, i64, i32, i16 signext, i8 signext, float) -define void @f3(%struct.S2* nocapture byval %s2) nounwind { +define void @f3(%struct.S2* nocapture byval(%struct.S2) %s2) nounwind { entry: ; CHECK: addiu $sp, $sp, -48 ; CHECK: sw $7, 60($sp) @@ -98,7 +98,7 @@ ret void } -define void @f4(float %f, %struct.S3* nocapture byval %s3, %struct.S1* nocapture byval %s1) nounwind { +define void @f4(float %f, %struct.S3* nocapture byval(%struct.S3) %s3, %struct.S1* nocapture byval(%struct.S1) %s1) nounwind { entry: ; CHECK: addiu $sp, $sp, -48 ; CHECK: sw $7, 60($sp) @@ -122,10 +122,10 @@ %struct.S4 = type { [4 x i32] } -define void @f5(i64 %a0, %struct.S4* nocapture byval %a1) nounwind { +define void @f5(i64 %a0, %struct.S4* nocapture byval(%struct.S4) %a1) nounwind { entry: - tail call void @f6(%struct.S4* byval %a1, i64 %a0) nounwind + tail call void @f6(%struct.S4* byval(%struct.S4) %a1, i64 %a0) nounwind ret void } -declare void @f6(%struct.S4* nocapture byval, i64) +declare void @f6(%struct.S4* nocapture byval(%struct.S4), i64) Index: test/CodeGen/Mips/tailcall.ll =================================================================== --- test/CodeGen/Mips/tailcall.ll +++ test/CodeGen/Mips/tailcall.ll @@ -144,7 +144,7 @@ @gs1 = external global %struct.S -declare i32 @callee9(%struct.S* byval) +declare i32 @callee9(%struct.S* byval(%struct.S)) define i32 @caller9_0() nounwind { entry: @@ -167,7 +167,7 @@ ; PIC16: jalrc ; PIC16: .end caller9_1 - 
%call = tail call i32 @callee9(%struct.S* byval @gs1) nounwind + %call = tail call i32 @callee9(%struct.S* byval(%struct.S) @gs1) nounwind ret i32 %call } @@ -188,7 +188,7 @@ ret i32 %call } -declare i32 @callee11(%struct.S* byval) +declare i32 @callee11(%struct.S* byval(%struct.S)) define i32 @caller11() nounwind noinline { entry: @@ -201,7 +201,7 @@ ; PIC16: .ent caller11 ; PIC16: jalrc - %call = tail call i32 @callee11(%struct.S* byval @gs1) nounwind + %call = tail call i32 @callee11(%struct.S* byval(%struct.S) @gs1) nounwind ret i32 %call } @@ -209,7 +209,7 @@ declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind -define i32 @caller12(%struct.S* nocapture byval %a0) nounwind { +define i32 @caller12(%struct.S* nocapture byval(%struct.S) %a0) nounwind { entry: ; PIC32: .ent caller12 ; PIC32: jalr Index: test/CodeGen/Mips/unalignedload.ll =================================================================== --- test/CodeGen/Mips/unalignedload.ll +++ test/CodeGen/Mips/unalignedload.ll @@ -30,7 +30,7 @@ ; MIPS32R6-DAG: lhu $[[PART1:[0-9]+]], 2($[[R0]]) - tail call void @foo2(%struct.S1* byval getelementptr inbounds (%struct.S2, %struct.S2* @s2, i32 0, i32 1)) nounwind + tail call void @foo2(%struct.S1* byval(%struct.S1) getelementptr inbounds (%struct.S2, %struct.S2* @s2, i32 0, i32 1)) nounwind ret void } @@ -76,10 +76,10 @@ ; MIPS32R6-EB-DAG: sll $[[T3:[0-9]+]], $[[T1]], 8 ; MIPS32R6-EB-DAG: or $5, $[[T2]], $[[T3]] - tail call void @foo4(%struct.S4* byval @s4) nounwind + tail call void @foo4(%struct.S4* byval(%struct.S4) @s4) nounwind ret void } -declare void @foo2(%struct.S1* byval) +declare void @foo2(%struct.S1* byval(%struct.S1)) -declare void @foo4(%struct.S4* byval) +declare void @foo4(%struct.S4* byval(%struct.S4)) Index: test/CodeGen/NVPTX/bug21465.ll =================================================================== --- test/CodeGen/NVPTX/bug21465.ll +++ test/CodeGen/NVPTX/bug21465.ll @@ -7,7 +7,7 @@ %struct.S = type 
{ i32, i32 } ; Function Attrs: nounwind -define void @_Z11TakesStruct1SPi(%struct.S* byval nocapture readonly %input, i32* nocapture %output) #0 { +define void @_Z11TakesStruct1SPi(%struct.S* nocapture readonly byval(%struct.S) %input, i32* nocapture %output) #0 { entry: ; CHECK-LABEL: @_Z11TakesStruct1SPi ; PTX-LABEL: .visible .entry _Z11TakesStruct1SPi( Index: test/CodeGen/NVPTX/lower-kernel-ptr-arg.ll =================================================================== --- test/CodeGen/NVPTX/lower-kernel-ptr-arg.ll +++ test/CodeGen/NVPTX/lower-kernel-ptr-arg.ll @@ -28,7 +28,7 @@ %struct.S = type { i32*, i32* } -define void @ptr_in_byval(%struct.S* byval %input, i32* %output) { +define void @ptr_in_byval(%struct.S* byval(%struct.S) %input, i32* %output) { ; CHECK-LABEL: .visible .entry ptr_in_byval( ; CHECK: cvta.to.global.u64 ; CHECK: cvta.to.global.u64 Index: test/CodeGen/NVPTX/param-align.ll =================================================================== --- test/CodeGen/NVPTX/param-align.ll +++ test/CodeGen/NVPTX/param-align.ll @@ -1,7 +1,7 @@ ; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s ;;; Need 4-byte alignment on float* passed byval -define ptx_device void @t1(float* byval %x) { +define ptx_device void @t1(float* byval(float) %x) { ; CHECK: .func t1 ; CHECK: .param .align 4 .b8 t1_param_0[4] ret void @@ -9,7 +9,7 @@ ;;; Need 8-byte alignment on double* passed byval -define ptx_device void @t2(double* byval %x) { +define ptx_device void @t2(double* byval(double) %x) { ; CHECK: .func t2 ; CHECK: .param .align 8 .b8 t2_param_0[8] ret void @@ -18,7 +18,7 @@ ;;; Need 4-byte alignment on float2* passed byval %struct.float2 = type { float, float } -define ptx_device void @t3(%struct.float2* byval %x) { +define ptx_device void @t3(%struct.float2* byval(%struct.float2) %x) { ; CHECK: .func t3 ; CHECK: .param .align 4 .b8 t3_param_0[8] ret void Index: test/CodeGen/PowerPC/a2-fp-basic.ll 
=================================================================== --- test/CodeGen/PowerPC/a2-fp-basic.ll +++ test/CodeGen/PowerPC/a2-fp-basic.ll @@ -2,7 +2,7 @@ %0 = type { double, double } -define void @maybe_an_fma(%0* sret %agg.result, %0* byval %a, %0* byval %b, %0* byval %c) nounwind { +define void @maybe_an_fma(%0* sret %agg.result, %0* byval(%0) %a, %0* byval(%0) %b, %0* byval(%0) %c) nounwind { entry: %a.realp = getelementptr inbounds %0, %0* %a, i32 0, i32 0 %a.real = load double, double* %a.realp Index: test/CodeGen/PowerPC/anon_aggr.ll =================================================================== --- test/CodeGen/PowerPC/anon_aggr.ll +++ test/CodeGen/PowerPC/anon_aggr.ll @@ -48,7 +48,7 @@ ; DARWIN64: ld r3, -[[OFFSET2]] -define i8* @func2({ i64, i8* } %array1, %tarray* byval %array2) { +define i8* @func2({ i64, i8* } %array1, %tarray* byval(%tarray) %array2) { entry: %array1_ptr = extractvalue {i64, i8* } %array1, 1 %tmp = getelementptr inbounds %tarray, %tarray* %array2, i32 0, i32 1 @@ -91,7 +91,7 @@ ; DARWIN64: ld r3, -[[OFFSET2]] -define i8* @func3({ i64, i8* }* byval %array1, %tarray* byval %array2) { +define i8* @func3({ i64, i8* }* byval({ i64, i8* }) %array1, %tarray* byval(%tarray) %array2) { entry: %tmp1 = getelementptr inbounds { i64, i8* }, { i64, i8* }* %array1, i32 0, i32 1 %array1_ptr = load i8*, i8** %tmp1 @@ -137,7 +137,7 @@ define i8* @func4(i64 %p1, i64 %p2, i64 %p3, i64 %p4, i64 %p5, i64 %p6, i64 %p7, i64 %p8, - { i64, i8* } %array1, %tarray* byval %array2) { + { i64, i8* } %array1, %tarray* byval(%tarray) %array2) { entry: %array1_ptr = extractvalue {i64, i8* } %array1, 1 %tmp = getelementptr inbounds %tarray, %tarray* %array2, i32 0, i32 1 Index: test/CodeGen/PowerPC/byval-agg-info.ll =================================================================== --- test/CodeGen/PowerPC/byval-agg-info.ll +++ test/CodeGen/PowerPC/byval-agg-info.ll @@ -5,7 +5,7 @@ %struct.anon = type { i32, i32 } declare void @foo(%struct.anon* %v) 
-define void @test(i32 %a, i32 %b, %struct.anon* byval nocapture %v) { +define void @test(i32 %a, i32 %b, %struct.anon* nocapture byval(%struct.anon) %v) { entry: call void @foo(%struct.anon* %v) ret void Index: test/CodeGen/PowerPC/byval-aliased.ll =================================================================== --- test/CodeGen/PowerPC/byval-aliased.ll +++ test/CodeGen/PowerPC/byval-aliased.ll @@ -5,7 +5,7 @@ %struct.sm = type { i8, i8 } ; Function Attrs: nounwind ssp -define void @foo(%struct.sm* byval %s) #0 { +define void @foo(%struct.sm* byval(%struct.sm) %s) #0 { entry: %a = getelementptr inbounds %struct.sm, %struct.sm* %s, i32 0, i32 0 %0 = load i8, i8* %a, align 1 @@ -13,7 +13,7 @@ %add = add nuw nsw i32 %conv2, 3 %conv1 = trunc i32 %add to i8 store i8 %conv1, i8* %a, align 1 - call void @bar(%struct.sm* byval %s, %struct.sm* byval %s) #1 + call void @bar(%struct.sm* byval(%struct.sm) %s, %struct.sm* byval(%struct.sm) %s) #1 ret void } @@ -23,7 +23,7 @@ ; CHECK: bl _bar ; CHECK: blr -declare void @bar(%struct.sm* byval, %struct.sm* byval) +declare void @bar(%struct.sm* byval(%struct.sm), %struct.sm* byval(%struct.sm)) attributes #0 = { nounwind ssp } attributes #1 = { nounwind } Index: test/CodeGen/PowerPC/dyn-alloca-aligned.ll =================================================================== --- test/CodeGen/PowerPC/dyn-alloca-aligned.ll +++ test/CodeGen/PowerPC/dyn-alloca-aligned.ll @@ -6,7 +6,7 @@ declare void @bar(i32*, i32*) #0 -define void @goo(%struct.s* byval nocapture readonly %a, i32 signext %n) #0 { +define void @goo(%struct.s* nocapture readonly byval(%struct.s) %a, i32 signext %n) #0 { entry: %0 = zext i32 %n to i64 %vla = alloca i32, i64 %0, align 128 Index: test/CodeGen/PowerPC/emptystruct.ll =================================================================== --- test/CodeGen/PowerPC/emptystruct.ll +++ test/CodeGen/PowerPC/emptystruct.ll @@ -14,7 +14,7 @@ %struct.empty = type {} -define void @callee(%struct.empty* noalias sret 
%agg.result, %struct.empty* byval %a1, %struct.empty* %a2, %struct.empty* byval %a3) nounwind { +define void @callee(%struct.empty* noalias sret %agg.result, %struct.empty* byval(%struct.empty) %a1, %struct.empty* %a2, %struct.empty* byval(%struct.empty) %a3) nounwind { entry: %a2.addr = alloca %struct.empty*, align 8 store %struct.empty* %a2, %struct.empty** %a2.addr, align 8 @@ -38,7 +38,7 @@ %e1 = alloca %struct.empty, align 1 %e2 = alloca %struct.empty, align 1 %e3 = alloca %struct.empty, align 1 - call void @callee(%struct.empty* sret %agg.result, %struct.empty* byval %e1, %struct.empty* %e2, %struct.empty* byval %e3) + call void @callee(%struct.empty* sret %agg.result, %struct.empty* byval(%struct.empty) %e1, %struct.empty* %e2, %struct.empty* byval(%struct.empty) %e3) ret void } Index: test/CodeGen/PowerPC/glob-comp-aa-crash.ll =================================================================== --- test/CodeGen/PowerPC/glob-comp-aa-crash.ll +++ test/CodeGen/PowerPC/glob-comp-aa-crash.ll @@ -36,7 +36,7 @@ %__exception_ = getelementptr inbounds %"class.std::__1::__assoc_sub_state", %"class.std::__1::__assoc_sub_state"* %this, i64 0, i32 1 %0 = bitcast { i64, i64 }* %tmp to i8* call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 16, i32 8, i1 false) - call void @_ZNSt15__exception_ptr13exception_ptrC1EMS0_FvvE(%"class.std::__exception_ptr::exception_ptr"* %ref.tmp, { i64, i64 }* byval %tmp) #5 + call void @_ZNSt15__exception_ptr13exception_ptrC1EMS0_FvvE(%"class.std::__exception_ptr::exception_ptr"* %ref.tmp, { i64, i64 }* byval({ i64, i64 }) %tmp) #5 %call = call zeroext i1 @_ZNSt15__exception_ptrneERKNS_13exception_ptrES2_(%"class.std::__exception_ptr::exception_ptr"* %__exception_, %"class.std::__exception_ptr::exception_ptr"* %ref.tmp) #5 call void @_ZNSt15__exception_ptr13exception_ptrD1Ev(%"class.std::__exception_ptr::exception_ptr"* %ref.tmp) #5 br i1 %call, label %if.then, label %if.end @@ -102,7 +102,7 @@ declare zeroext i1 
@_ZNSt15__exception_ptrneERKNS_13exception_ptrES2_(%"class.std::__exception_ptr::exception_ptr"*, %"class.std::__exception_ptr::exception_ptr"*) #1 ; Function Attrs: nounwind optsize -declare void @_ZNSt15__exception_ptr13exception_ptrC1EMS0_FvvE(%"class.std::__exception_ptr::exception_ptr"*, { i64, i64 }* byval) #1 +declare void @_ZNSt15__exception_ptr13exception_ptrC1EMS0_FvvE(%"class.std::__exception_ptr::exception_ptr"*, { i64, i64 }* byval({ i64, i64 })) #1 ; Function Attrs: nounwind optsize declare void @_ZNSt15__exception_ptr13exception_ptrD1Ev(%"class.std::__exception_ptr::exception_ptr"*) #1 Index: test/CodeGen/PowerPC/jaggedstructs.ll =================================================================== --- test/CodeGen/PowerPC/jaggedstructs.ll +++ test/CodeGen/PowerPC/jaggedstructs.ll @@ -12,9 +12,9 @@ %struct.S6 = type { [6 x i8] } %struct.S7 = type { [7 x i8] } -define void @test(%struct.S3* byval %s3, %struct.S5* byval %s5, %struct.S6* byval %s6, %struct.S7* byval %s7) nounwind { +define void @test(%struct.S3* byval(%struct.S3) %s3, %struct.S5* byval(%struct.S5) %s5, %struct.S6* byval(%struct.S6) %s6, %struct.S7* byval(%struct.S7) %s7) nounwind { entry: - call void @check(%struct.S3* byval %s3, %struct.S5* byval %s5, %struct.S6* byval %s6, %struct.S7* byval %s7) + call void @check(%struct.S3* byval(%struct.S3) %s3, %struct.S5* byval(%struct.S5) %s5, %struct.S6* byval(%struct.S6) %s6, %struct.S7* byval(%struct.S7) %s7) ret void } @@ -45,4 +45,4 @@ ; CHECK: ld 4, 56(1) ; CHECK: ld 3, 48(1) -declare void @check(%struct.S3* byval, %struct.S5* byval, %struct.S6* byval, %struct.S7* byval) +declare void @check(%struct.S3* byval(%struct.S3), %struct.S5* byval(%struct.S5), %struct.S6* byval(%struct.S6), %struct.S7* byval(%struct.S7)) Index: test/CodeGen/PowerPC/ppc440-fp-basic.ll =================================================================== --- test/CodeGen/PowerPC/ppc440-fp-basic.ll +++ test/CodeGen/PowerPC/ppc440-fp-basic.ll @@ -2,7 +2,7 @@ %0 = type { 
double, double } -define void @maybe_an_fma(%0* sret %agg.result, %0* byval %a, %0* byval %b, %0* byval %c) nounwind { +define void @maybe_an_fma(%0* sret %agg.result, %0* byval(%0) %a, %0* byval(%0) %b, %0* byval(%0) %c) nounwind { entry: %a.realp = getelementptr inbounds %0, %0* %a, i32 0, i32 0 %a.real = load double, double* %a.realp Index: test/CodeGen/PowerPC/ppc64-align-long-double.ll =================================================================== --- test/CodeGen/PowerPC/ppc64-align-long-double.ll +++ test/CodeGen/PowerPC/ppc64-align-long-double.ll @@ -11,7 +11,7 @@ %struct.S = type { double, ppc_fp128 } -define ppc_fp128 @test(%struct.S* byval %x) nounwind { +define ppc_fp128 @test(%struct.S* byval(%struct.S) %x) nounwind { entry: %b = getelementptr inbounds %struct.S, %struct.S* %x, i32 0, i32 1 %0 = load ppc_fp128, ppc_fp128* %b, align 16 Index: test/CodeGen/PowerPC/ppc64-byval-align.ll =================================================================== --- test/CodeGen/PowerPC/ppc64-byval-align.ll +++ test/CodeGen/PowerPC/ppc64-byval-align.ll @@ -9,7 +9,7 @@ @gt = common global %struct.test zeroinitializer, align 16 @gp = common global %struct.pad zeroinitializer, align 8 -define signext i32 @callee1(i32 signext %x, %struct.test* byval align 16 nocapture readnone %y, i32 signext %z) { +define signext i32 @callee1(i32 signext %x, %struct.test* align 16 nocapture readnone byval(%struct.test) %y, i32 signext %z) { entry: ret i32 %z } @@ -17,10 +17,10 @@ ; CHECK: mr 3, 7 ; CHECK: blr -declare signext i32 @test1(i32 signext, %struct.test* byval align 16, i32 signext) +declare signext i32 @test1(i32 signext, %struct.test* align 16 byval(%struct.test), i32 signext) define void @caller1(i32 signext %z) { entry: - %call = tail call signext i32 @test1(i32 signext 0, %struct.test* byval align 16 @gt, i32 signext %z) + %call = tail call signext i32 @test1(i32 signext 0, %struct.test* align 16 byval(%struct.test) @gt, i32 signext %z) ret void } ; CHECK-LABEL: 
@caller1 @@ -28,7 +28,7 @@ ; CHECK: mr 7, [[REG]] ; CHECK: bl test1 -define i64 @callee2(%struct.pad* byval nocapture readnone %x, i32 signext %y, %struct.test* byval align 16 nocapture readonly %z) { +define i64 @callee2(%struct.pad* nocapture readnone byval(%struct.pad) %x, i32 signext %y, %struct.test* align 16 nocapture readonly byval(%struct.test) %z) { entry: %x1 = getelementptr inbounds %struct.test, %struct.test* %z, i64 0, i32 0 %0 = load i64, i64* %x1, align 16 @@ -39,13 +39,13 @@ ; CHECK: mr 3, [[REG]] ; CHECK: blr -declare i64 @test2(%struct.pad* byval, i32 signext, %struct.test* byval align 16) +declare i64 @test2(%struct.pad* byval(%struct.pad), i32 signext, %struct.test* align 16 byval(%struct.test)) define void @caller2(i64 %z) { entry: %tmp = alloca %struct.test, align 16 %.compoundliteral.sroa.0.0..sroa_idx = getelementptr inbounds %struct.test, %struct.test* %tmp, i64 0, i32 0 store i64 %z, i64* %.compoundliteral.sroa.0.0..sroa_idx, align 16 - %call = call i64 @test2(%struct.pad* byval @gp, i32 signext 0, %struct.test* byval align 16 %tmp) + %call = call i64 @test2(%struct.pad* byval(%struct.pad) @gp, i32 signext 0, %struct.test* align 16 byval(%struct.test) %tmp) ret void } ; CHECK-LABEL: @caller2 Index: test/CodeGen/PowerPC/ppc64-crash.ll =================================================================== --- test/CodeGen/PowerPC/ppc64-crash.ll +++ test/CodeGen/PowerPC/ppc64-crash.ll @@ -8,7 +8,7 @@ %struct.pos_T = type { i64 } ; check that we're not copying stuff between R and X registers -define internal void @serialize_pos(%struct.pos_T* byval %pos, %struct.__sFILE* %fp) nounwind { +define internal void @serialize_pos(%struct.pos_T* byval(%struct.pos_T) %pos, %struct.__sFILE* %fp) nounwind { entry: ret void } Index: test/CodeGen/PowerPC/ppc64-smallarg.ll =================================================================== --- test/CodeGen/PowerPC/ppc64-smallarg.ll +++ test/CodeGen/PowerPC/ppc64-smallarg.ll @@ -13,7 +13,7 @@ @gs = common 
global %struct.small_arg zeroinitializer, align 2 @gf = common global float 0.000000e+00, align 4 -define void @callee1(%struct.small_arg* noalias nocapture sret %agg.result, %struct.large_arg* byval nocapture readnone %pad, %struct.small_arg* byval nocapture readonly %x) { +define void @callee1(%struct.small_arg* noalias nocapture sret %agg.result, %struct.large_arg* nocapture readnone byval(%struct.large_arg) %pad, %struct.small_arg* nocapture readonly byval(%struct.small_arg) %x) { entry: %0 = bitcast %struct.small_arg* %x to i32* %1 = bitcast %struct.small_arg* %agg.result to i32* @@ -28,14 +28,14 @@ define void @caller1() { entry: %tmp = alloca %struct.small_arg, align 2 - call void @test1(%struct.small_arg* sret %tmp, %struct.large_arg* byval @gl, %struct.small_arg* byval @gs) + call void @test1(%struct.small_arg* sret %tmp, %struct.large_arg* byval(%struct.large_arg) @gl, %struct.small_arg* byval(%struct.small_arg) @gs) ret void } ; CHECK: @caller1 ; CHECK: stw {{[0-9]+}}, 124(1) ; CHECK: bl test1 -declare void @test1(%struct.small_arg* sret, %struct.large_arg* byval, %struct.small_arg* byval) +declare void @test1(%struct.small_arg* sret, %struct.large_arg* byval(%struct.large_arg), %struct.small_arg* byval(%struct.small_arg)) define float @callee2(float %pad1, float %pad2, float %pad3, float %pad4, float %pad5, float %pad6, float %pad7, float %pad8, float %pad9, float %pad10, float %pad11, float %pad12, float %pad13, float %x) { entry: Index: test/CodeGen/PowerPC/ppc64le-smallarg.ll =================================================================== --- test/CodeGen/PowerPC/ppc64le-smallarg.ll +++ test/CodeGen/PowerPC/ppc64le-smallarg.ll @@ -13,7 +13,7 @@ @gs = common global %struct.small_arg zeroinitializer, align 2 @gf = common global float 0.000000e+00, align 4 -define void @callee1(%struct.small_arg* noalias nocapture sret %agg.result, %struct.large_arg* byval nocapture readnone %pad, %struct.small_arg* byval nocapture readonly %x) { +define void 
@callee1(%struct.small_arg* noalias nocapture sret %agg.result, %struct.large_arg* nocapture readnone byval(%struct.large_arg) %pad, %struct.small_arg* nocapture readonly byval(%struct.small_arg) %x) { entry: %0 = bitcast %struct.small_arg* %x to i32* %1 = bitcast %struct.small_arg* %agg.result to i32* @@ -28,14 +28,14 @@ define void @caller1() { entry: %tmp = alloca %struct.small_arg, align 2 - call void @test1(%struct.small_arg* sret %tmp, %struct.large_arg* byval @gl, %struct.small_arg* byval @gs) + call void @test1(%struct.small_arg* sret %tmp, %struct.large_arg* byval(%struct.large_arg) @gl, %struct.small_arg* byval(%struct.small_arg) @gs) ret void } ; CHECK: @caller1 ; CHECK: stw {{[0-9]+}}, 104(1) ; CHECK: bl test1 -declare void @test1(%struct.small_arg* sret, %struct.large_arg* byval, %struct.small_arg* byval) +declare void @test1(%struct.small_arg* sret, %struct.large_arg* byval(%struct.large_arg), %struct.small_arg* byval(%struct.small_arg)) define float @callee2(float %pad1, float %pad2, float %pad3, float %pad4, float %pad5, float %pad6, float %pad7, float %pad8, float %pad9, float %pad10, float %pad11, float %pad12, float %pad13, float %x) { entry: Index: test/CodeGen/PowerPC/pr13891.ll =================================================================== --- test/CodeGen/PowerPC/pr13891.ll +++ test/CodeGen/PowerPC/pr13891.ll @@ -4,7 +4,7 @@ %struct.foo = type { i8, i8 } -define void @_Z5check3foos(%struct.foo* nocapture byval %f, i16 signext %i) noinline { +define void @_Z5check3foos(%struct.foo* nocapture byval(%struct.foo) %f, i16 signext %i) noinline { ; CHECK-LABEL: _Z5check3foos: ; CHECK: sth 3, {{[0-9]+}}(1) ; CHECK: lha {{[0-9]+}}, {{[0-9]+}}(1) Index: test/CodeGen/PowerPC/reloc-align.ll =================================================================== --- test/CodeGen/PowerPC/reloc-align.ll +++ test/CodeGen/PowerPC/reloc-align.ll @@ -15,13 +15,13 @@ ; Function Attrs: nounwind readonly define signext i32 @main() #0 { entry: - %call = tail call 
fastcc signext i32 @func_90(%struct.S1* byval bitcast ({ i8, i8, i8, i8, i8, i8, i8, i8 }* @main.l_1554 to %struct.S1*)) + %call = tail call fastcc signext i32 @func_90(%struct.S1* byval(%struct.S1) bitcast ({ i8, i8, i8, i8, i8, i8, i8, i8 }* @main.l_1554 to %struct.S1*)) ; CHECK-NOT: ld {{[0-9]+}}, main.l_1554@toc@l ret i32 %call } ; Function Attrs: nounwind readonly -define internal fastcc signext i32 @func_90(%struct.S1* byval nocapture %p_91) #0 { +define internal fastcc signext i32 @func_90(%struct.S1* nocapture byval(%struct.S1) %p_91) #0 { entry: %0 = bitcast %struct.S1* %p_91 to i64* %bf.load = load i64, i64* %0, align 1 Index: test/CodeGen/PowerPC/resolvefi-basereg.ll =================================================================== --- test/CodeGen/PowerPC/resolvefi-basereg.ll +++ test/CodeGen/PowerPC/resolvefi-basereg.ll @@ -332,15 +332,15 @@ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %62, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false) %63 = bitcast %struct.S1998* %agg.tmp112 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %63, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false) - call void @check1998(%struct.S1998* sret %agg.tmp, %struct.S1998* byval align 16 %agg.tmp111, %struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 1), %struct.S1998* byval align 16 %agg.tmp112) - call void @checkx1998(%struct.S1998* byval align 16 %agg.tmp) + call void @check1998(%struct.S1998* sret %agg.tmp, %struct.S1998* align 16 byval(%struct.S1998) %agg.tmp111, %struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 1), %struct.S1998* align 16 byval(%struct.S1998) %agg.tmp112) + call void @checkx1998(%struct.S1998* align 16 byval(%struct.S1998) %agg.tmp) %64 = bitcast %struct.S1998* %agg.tmp113 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %64, i8* bitcast 
(%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false) %65 = bitcast %struct.S1998* %agg.tmp114 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %65, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false) %66 = bitcast %struct.S1998* %agg.tmp115 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %66, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false) - call void (i32, ...) @check1998va(i32 signext 1, double 1.000000e+00, %struct.S1998* byval align 16 %agg.tmp113, i64 2, %struct.S1998* byval align 16 %agg.tmp114, %struct.S1998* byval align 16 %agg.tmp115) + call void (i32, ...) @check1998va(i32 signext 1, double 1.000000e+00, %struct.S1998* align 16 byval(%struct.S1998) %agg.tmp113, i64 2, %struct.S1998* align 16 byval(%struct.S1998) %agg.tmp114, %struct.S1998* align 16 byval(%struct.S1998) %agg.tmp115) %67 = bitcast %struct.S1998* %agg.tmp116 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %67, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false) %68 = bitcast %struct.S1998* %agg.tmp117 to i8* @@ -349,14 +349,14 @@ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %69, i8* bitcast (%struct.S1998* getelementptr inbounds ([5 x %struct.S1998], [5 x %struct.S1998]* @a1998, i32 0, i64 2) to i8*), i64 5168, i32 16, i1 false) %70 = bitcast %struct.S1998* %agg.tmp119 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %70, i8* bitcast (%struct.S1998* @s1998 to i8*), i64 5168, i32 16, i1 false) - call void (i32, ...) @check1998va(i32 signext 2, %struct.S1998* byval align 16 %agg.tmp116, %struct.S1998* byval align 16 %agg.tmp117, ppc_fp128 0xM40000000000000000000000000000000, %struct.S1998* byval align 16 %agg.tmp118, %struct.S1998* byval align 16 %agg.tmp119) + call void (i32, ...) 
@check1998va(i32 signext 2, %struct.S1998* align 16 byval(%struct.S1998) %agg.tmp116, %struct.S1998* align 16 byval(%struct.S1998) %agg.tmp117, ppc_fp128 0xM40000000000000000000000000000000, %struct.S1998* align 16 byval(%struct.S1998) %agg.tmp118, %struct.S1998* align 16 byval(%struct.S1998) %agg.tmp119) ret void } declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) -declare void @check1998(%struct.S1998* sret, %struct.S1998* byval align 16, %struct.S1998*, %struct.S1998* byval align 16) +declare void @check1998(%struct.S1998* sret, %struct.S1998* align 16 byval(%struct.S1998), %struct.S1998*, %struct.S1998* align 16 byval(%struct.S1998)) declare void @check1998va(i32 signext, ...) -declare void @checkx1998(%struct.S1998* byval align 16 %arg) +declare void @checkx1998(%struct.S1998* align 16 byval(%struct.S1998) %arg) Index: test/CodeGen/PowerPC/resolvefi-disp.ll =================================================================== --- test/CodeGen/PowerPC/resolvefi-disp.ll +++ test/CodeGen/PowerPC/resolvefi-disp.ll @@ -20,7 +20,7 @@ @s2760 = external global %struct.S2760 @fails = external global i32 -define void @check2760(%struct.S2760* noalias sret %agg.result, %struct.S2760* byval align 16, %struct.S2760* %arg1, %struct.S2760* byval align 16) { +define void @check2760(%struct.S2760* noalias sret %agg.result, %struct.S2760* align 16 byval(%struct.S2760), %struct.S2760* %arg1, %struct.S2760* align 16 byval(%struct.S2760)) { entry: %arg0 = alloca %struct.S2760, align 32 %arg2 = alloca %struct.S2760, align 32 Index: test/CodeGen/PowerPC/stack-realign.ll =================================================================== --- test/CodeGen/PowerPC/stack-realign.ll +++ test/CodeGen/PowerPC/stack-realign.ll @@ -11,7 +11,7 @@ @barbaz = external global i32 -define void @goo(%struct.s* byval nocapture readonly %a) { +define void @goo(%struct.s* nocapture readonly 
byval(%struct.s) %a) { entry: %x = alloca [2 x i32], align 32 %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0 @@ -95,7 +95,7 @@ ; CHECK-32-PIC: stwux 1, 1, 0 ; The large-frame-size case. -define void @hoo(%struct.s* byval nocapture readonly %a) { +define void @hoo(%struct.s* nocapture readonly byval(%struct.s) %a) { entry: %x = alloca [200000 x i32], align 32 %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0 @@ -156,7 +156,7 @@ ; Make sure that the FP save area is still allocated correctly relative to ; where r30 is saved. -define void @loo(%struct.s* byval nocapture readonly %a) { +define void @loo(%struct.s* nocapture readonly byval(%struct.s) %a) { entry: %x = alloca [2 x i32], align 32 %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0 Index: test/CodeGen/PowerPC/structsinmem.ll =================================================================== --- test/CodeGen/PowerPC/structsinmem.ll +++ test/CodeGen/PowerPC/structsinmem.ll @@ -56,7 +56,7 @@ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast ({ i32, i16, [2 x i8] }* @caller1.p6 to i8*), i64 8, i32 4, i1 false) %6 = bitcast %struct.s7* %p7 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast ({ i32, i16, i8, i8 }* @caller1.p7 to i8*), i64 8, i32 4, i1 false) - %call = call i32 @callee1(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %struct.s1* byval %p1, %struct.s2* byval %p2, %struct.s3* byval %p3, %struct.s4* byval %p4, %struct.s5* byval %p5, %struct.s6* byval %p6, %struct.s7* byval %p7) + %call = call i32 @callee1(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %struct.s1* byval(%struct.s1) %p1, %struct.s2* byval(%struct.s2) %p2, %struct.s3* byval(%struct.s3) %p3, %struct.s4* byval(%struct.s4) %p4, %struct.s5* byval(%struct.s5) %p5, %struct.s6* byval(%struct.s6) %p6, %struct.s7* byval(%struct.s7) %p7) ret i32 %call ; CHECK: stb {{[0-9]+}}, 119(1) @@ -70,7 +70,7 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, 
i8* nocapture, i64, i32, i1) nounwind -define internal i32 @callee1(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, %struct.s1* byval %v1, %struct.s2* byval %v2, %struct.s3* byval %v3, %struct.s4* byval %v4, %struct.s5* byval %v5, %struct.s6* byval %v6, %struct.s7* byval %v7) nounwind { +define internal i32 @callee1(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, %struct.s1* byval(%struct.s1) %v1, %struct.s2* byval(%struct.s2) %v2, %struct.s3* byval(%struct.s3) %v3, %struct.s4* byval(%struct.s4) %v4, %struct.s5* byval(%struct.s5) %v5, %struct.s6* byval(%struct.s6) %v6, %struct.s7* byval(%struct.s7) %v7) nounwind { entry: %z1.addr = alloca i32, align 4 %z2.addr = alloca i32, align 4 @@ -145,7 +145,7 @@ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast (%struct.t6* @caller2.p6 to i8*), i64 6, i32 1, i1 false) %6 = bitcast %struct.t7* %p7 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast (%struct.t7* @caller2.p7 to i8*), i64 7, i32 1, i1 false) - %call = call i32 @callee2(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %struct.t1* byval %p1, %struct.t2* byval %p2, %struct.t3* byval %p3, %struct.t4* byval %p4, %struct.t5* byval %p5, %struct.t6* byval %p6, %struct.t7* byval %p7) + %call = call i32 @callee2(i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, %struct.t1* byval(%struct.t1) %p1, %struct.t2* byval(%struct.t2) %p2, %struct.t3* byval(%struct.t3) %p3, %struct.t4* byval(%struct.t4) %p4, %struct.t5* byval(%struct.t5) %p5, %struct.t6* byval(%struct.t6) %p6, %struct.t7* byval(%struct.t7) %p7) ret i32 %call ; CHECK: stb {{[0-9]+}}, 119(1) @@ -162,7 +162,7 @@ ; CHECK: stw {{[0-9]+}}, 161(1) } -define internal i32 @callee2(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, %struct.t1* byval %v1, %struct.t2* byval %v2, %struct.t3* byval %v3, %struct.t4* byval %v4, %struct.t5* byval %v5, %struct.t6* byval %v6, %struct.t7* byval %v7) nounwind { +define internal i32 
@callee2(i32 %z1, i32 %z2, i32 %z3, i32 %z4, i32 %z5, i32 %z6, i32 %z7, i32 %z8, %struct.t1* byval(%struct.t1) %v1, %struct.t2* byval(%struct.t2) %v2, %struct.t3* byval(%struct.t3) %v3, %struct.t4* byval(%struct.t4) %v4, %struct.t5* byval(%struct.t5) %v5, %struct.t6* byval(%struct.t6) %v6, %struct.t7* byval(%struct.t7) %v7) nounwind { entry: %z1.addr = alloca i32, align 4 %z2.addr = alloca i32, align 4 Index: test/CodeGen/PowerPC/structsinregs.ll =================================================================== --- test/CodeGen/PowerPC/structsinregs.ll +++ test/CodeGen/PowerPC/structsinregs.ll @@ -56,7 +56,7 @@ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast ({ i32, i16, [2 x i8] }* @caller1.p6 to i8*), i64 8, i32 4, i1 false) %6 = bitcast %struct.s7* %p7 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast ({ i32, i16, i8, i8 }* @caller1.p7 to i8*), i64 8, i32 4, i1 false) - %call = call i32 @callee1(%struct.s1* byval %p1, %struct.s2* byval %p2, %struct.s3* byval %p3, %struct.s4* byval %p4, %struct.s5* byval %p5, %struct.s6* byval %p6, %struct.s7* byval %p7) + %call = call i32 @callee1(%struct.s1* byval(%struct.s1) %p1, %struct.s2* byval(%struct.s2) %p2, %struct.s3* byval(%struct.s3) %p3, %struct.s4* byval(%struct.s4) %p4, %struct.s5* byval(%struct.s5) %p5, %struct.s6* byval(%struct.s6) %p6, %struct.s7* byval(%struct.s7) %p7) ret i32 %call ; CHECK: ld 9, 112(31) @@ -70,7 +70,7 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind -define internal i32 @callee1(%struct.s1* byval %v1, %struct.s2* byval %v2, %struct.s3* byval %v3, %struct.s4* byval %v4, %struct.s5* byval %v5, %struct.s6* byval %v6, %struct.s7* byval %v7) nounwind { +define internal i32 @callee1(%struct.s1* byval(%struct.s1) %v1, %struct.s2* byval(%struct.s2) %v2, %struct.s3* byval(%struct.s3) %v3, %struct.s4* byval(%struct.s4) %v4, %struct.s5* byval(%struct.s5) %v5, %struct.s6* byval(%struct.s6) %v6, %struct.s7* byval(%struct.s7) %v7) 
nounwind { entry: %a = getelementptr inbounds %struct.s1, %struct.s1* %v1, i32 0, i32 0 %0 = load i8, i8* %a, align 1 @@ -136,7 +136,7 @@ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %5, i8* bitcast (%struct.t6* @caller2.p6 to i8*), i64 6, i32 1, i1 false) %6 = bitcast %struct.t7* %p7 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* bitcast (%struct.t7* @caller2.p7 to i8*), i64 7, i32 1, i1 false) - %call = call i32 @callee2(%struct.t1* byval %p1, %struct.t2* byval %p2, %struct.t3* byval %p3, %struct.t4* byval %p4, %struct.t5* byval %p5, %struct.t6* byval %p6, %struct.t7* byval %p7) + %call = call i32 @callee2(%struct.t1* byval(%struct.t1) %p1, %struct.t2* byval(%struct.t2) %p2, %struct.t3* byval(%struct.t3) %p3, %struct.t4* byval(%struct.t4) %p4, %struct.t5* byval(%struct.t5) %p5, %struct.t6* byval(%struct.t6) %p6, %struct.t7* byval(%struct.t7) %p7) ret i32 %call ; CHECK: stb {{[0-9]+}}, 71(1) @@ -157,7 +157,7 @@ ; CHECK: lbz 3, 160(31) } -define internal i32 @callee2(%struct.t1* byval %v1, %struct.t2* byval %v2, %struct.t3* byval %v3, %struct.t4* byval %v4, %struct.t5* byval %v5, %struct.t6* byval %v6, %struct.t7* byval %v7) nounwind { +define internal i32 @callee2(%struct.t1* byval(%struct.t1) %v1, %struct.t2* byval(%struct.t2) %v2, %struct.t3* byval(%struct.t3) %v3, %struct.t4* byval(%struct.t4) %v4, %struct.t5* byval(%struct.t5) %v5, %struct.t6* byval(%struct.t6) %v6, %struct.t7* byval(%struct.t7) %v7) nounwind { entry: %a = getelementptr inbounds %struct.t1, %struct.t1* %v1, i32 0, i32 0 %0 = load i8, i8* %a, align 1 Index: test/CodeGen/PowerPC/vec-abi-align.ll =================================================================== --- test/CodeGen/PowerPC/vec-abi-align.ll +++ test/CodeGen/PowerPC/vec-abi-align.ll @@ -24,7 +24,7 @@ } ; Function Attrs: nounwind -define void @test2(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, %struct.s2* byval nocapture readonly %vs) #0 { +define void @test2(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, 
i64 %d6, i64 %d7, i64 %d8, %struct.s2* nocapture readonly byval(%struct.s2) %vs) #0 { entry: %m = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 0 %0 = load i64, i64* %m, align 8 @@ -50,7 +50,7 @@ } ; Function Attrs: nounwind -define void @test3(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, i64 %d9, %struct.s2* byval nocapture readonly %vs) #0 { +define void @test3(i64 %d1, i64 %d2, i64 %d3, i64 %d4, i64 %d5, i64 %d6, i64 %d7, i64 %d8, i64 %d9, %struct.s2* nocapture readonly byval(%struct.s2) %vs) #0 { entry: %m = getelementptr inbounds %struct.s2, %struct.s2* %vs, i64 0, i32 0 %0 = load i64, i64* %m, align 8 Index: test/CodeGen/SPARC/2011-01-21-ByValArgs.ll =================================================================== --- test/CodeGen/SPARC/2011-01-21-ByValArgs.ll +++ test/CodeGen/SPARC/2011-01-21-ByValArgs.ll @@ -11,8 +11,8 @@ ;CHECK: st ;CHECK: st ;CHECK: bar - %0 = tail call i32 @bar(%struct.foo_t* byval @s) nounwind + %0 = tail call i32 @bar(%struct.foo_t* byval(%struct.foo_t) @s) nounwind ret i32 %0 } -declare i32 @bar(%struct.foo_t* byval) +declare i32 @bar(%struct.foo_t* byval(%struct.foo_t)) Index: test/CodeGen/SPARC/fp128.ll =================================================================== --- test/CodeGen/SPARC/fp128.ll +++ test/CodeGen/SPARC/fp128.ll @@ -26,7 +26,7 @@ ; SOFT: std ; SOFT: std -define void @f128_ops(fp128* noalias sret %scalar.result, fp128* byval %a, fp128* byval %b, fp128* byval %c, fp128* byval %d) { +define void @f128_ops(fp128* noalias sret %scalar.result, fp128* byval(fp128) %a, fp128* byval(fp128) %b, fp128* byval(fp128) %c, fp128* byval(fp128) %d) { entry: %0 = load fp128, fp128* %a, align 8 %1 = load fp128, fp128* %b, align 8 @@ -54,7 +54,7 @@ ; SOFT-DAG: ldd [%[[S1]]], %f{{.+}} ; SOFT: jmp {{%[oi]7}}+12 -define void @f128_spill(fp128* noalias sret %scalar.result, fp128* byval %a) { +define void @f128_spill(fp128* noalias sret %scalar.result, fp128* byval(fp128) %a) { entry: %0 = 
load fp128, fp128* %a, align 8 call void asm sideeffect "", "~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31}"() @@ -69,7 +69,7 @@ ; SOFT-LABEL: f128_compare ; SOFT: _Q_cmp -define i32 @f128_compare(fp128* byval %f0, fp128* byval %f1, i32 %a, i32 %b) { +define i32 @f128_compare(fp128* byval(fp128) %f0, fp128* byval(fp128) %f1, i32 %a, i32 %b) { entry: %0 = load fp128, fp128* %f0, align 8 %1 = load fp128, fp128* %f1, align 8 @@ -105,7 +105,7 @@ ; SOFT-LABEL: f128_abs ; SOFT: fabss -define void @f128_abs(fp128* noalias sret %scalar.result, fp128* byval %a) { +define void @f128_abs(fp128* noalias sret %scalar.result, fp128* byval(fp128) %a) { entry: %0 = load fp128, fp128* %a, align 8 %1 = tail call fp128 @llvm.fabs.f128(fp128 %0) @@ -240,7 +240,7 @@ ; SOFT-LABEL: f128_neg ; SOFT: fnegs -define void @f128_neg(fp128* noalias sret %scalar.result, fp128* byval %a) { +define void @f128_neg(fp128* noalias sret %scalar.result, fp128* byval(fp128) %a) { entry: %0 = load fp128, fp128* %a, align 8 %1 = fsub fp128 0xL00000000000000008000000000000000, %0 Index: test/CodeGen/SPARC/setjmp.ll =================================================================== --- test/CodeGen/SPARC/setjmp.ll +++ test/CodeGen/SPARC/setjmp.ll @@ -24,7 +24,7 @@ ; V9: st %o0, [%[[R]]+{{.+}}] ; Function Attrs: nounwind -define i32 @foo(%struct.jmpbuf_env* byval %inbuf) #0 { +define i32 @foo(%struct.jmpbuf_env* byval(%struct.jmpbuf_env) %inbuf) #0 { entry: %0 = getelementptr inbounds %struct.jmpbuf_env, %struct.jmpbuf_env* %inbuf, i32 0, i32 0 store i32 0, i32* %0, align 4, !tbaa !4 Index: test/CodeGen/Thumb/2011-05-11-DAGLegalizer.ll =================================================================== --- test/CodeGen/Thumb/2011-05-11-DAGLegalizer.ll +++ test/CodeGen/Thumb/2011-05-11-DAGLegalizer.ll @@ -48,7 +48,7 @@ %tmp20 = bitcast 
%struct.RRRRRRRR* %agg.tmp16 to i8* %tmp21 = bitcast %struct.RRRRRRRR* %arrayidx19 to i8* call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp20, i8* %tmp21, i32 312, i32 4, i1 false) - call void (i8*, i32, i8*, i8*, ...) @CLLoggingLog(i8* %tmp, i32 2, i8* getelementptr inbounds ([62 x i8], [62 x i8]* @__PRETTY_FUNCTION__._ZN12CLGll, i32 0, i32 0), i8* getelementptr inbounds ([75 x i8], [75 x i8]* @.str, i32 0, i32 0), %struct.RRRRRRRR* byval %agg.tmp, %struct.RRRRRRRR* byval %agg.tmp4, %struct.RRRRRRRR* byval %agg.tmp10, %struct.RRRRRRRR* byval %agg.tmp16) + call void (i8*, i32, i8*, i8*, ...) @CLLoggingLog(i8* %tmp, i32 2, i8* getelementptr inbounds ([62 x i8], [62 x i8]* @__PRETTY_FUNCTION__._ZN12CLGll, i32 0, i32 0), i8* getelementptr inbounds ([75 x i8], [75 x i8]* @.str, i32 0, i32 0), %struct.RRRRRRRR* byval(%struct.RRRRRRRR) %agg.tmp, %struct.RRRRRRRR* byval(%struct.RRRRRRRR) %agg.tmp4, %struct.RRRRRRRR* byval(%struct.RRRRRRRR) %agg.tmp10, %struct.RRRRRRRR* byval(%struct.RRRRRRRR) %agg.tmp16) br label %do.end do.end: ; preds = %do.body Index: test/CodeGen/Thumb/PR17309.ll =================================================================== --- test/CodeGen/Thumb/PR17309.ll +++ test/CodeGen/Thumb/PR17309.ll @@ -12,7 +12,7 @@ %c = alloca %struct.C, align 1 %0 = getelementptr inbounds %struct.C, %struct.C* %c, i32 0, i32 0, i32 0 call void @llvm.lifetime.start(i64 1000, i8* %0) #1 - call void @use_C(%struct.C* byval %c) #3 + call void @use_C(%struct.C* byval(%struct.C) %c) #3 call void @llvm.lifetime.end(i64 1000, i8* %0) #1 ret void } @@ -25,7 +25,7 @@ %s = alloca %struct.S, align 2 %0 = bitcast %struct.S* %s to i8* call void @llvm.lifetime.start(i64 2000, i8* %0) #1 - call void @use_S(%struct.S* byval %s) #3 + call void @use_S(%struct.S* byval(%struct.S) %s) #3 call void @llvm.lifetime.end(i64 2000, i8* %0) #1 ret void } @@ -38,14 +38,14 @@ %i = alloca %struct.I, align 4 %0 = bitcast %struct.I* %i to i8* call void @llvm.lifetime.start(i64 4000, i8* %0) #1 - call 
void @use_I(%struct.I* byval %i) #3 + call void @use_I(%struct.I* byval(%struct.I) %i) #3 call void @llvm.lifetime.end(i64 4000, i8* %0) #1 ret void } -declare void @use_C(%struct.C* byval) #2 -declare void @use_S(%struct.S* byval) #2 -declare void @use_I(%struct.I* byval) #2 +declare void @use_C(%struct.C* byval(%struct.C)) #2 +declare void @use_S(%struct.S* byval(%struct.S)) #2 +declare void @use_I(%struct.I* byval(%struct.I)) #2 declare void @llvm.lifetime.start(i64, i8* nocapture) #1 declare void @llvm.lifetime.end(i64, i8* nocapture) #1 Index: test/CodeGen/X86/2008-04-24-MemCpyBug.ll =================================================================== --- test/CodeGen/X86/2008-04-24-MemCpyBug.ll +++ test/CodeGen/X86/2008-04-24-MemCpyBug.ll @@ -4,9 +4,9 @@ %struct.S63 = type { [63 x i8] } @g1s63 = external global %struct.S63 ; <%struct.S63*> [#uses=1] -declare void @test63(%struct.S63* byval align 4 ) nounwind +declare void @test63(%struct.S63* align 4 byval(%struct.S63) ) nounwind define void @testit63_entry_2E_ce() nounwind { - tail call void @test63( %struct.S63* byval align 4 @g1s63 ) nounwind + tail call void @test63( %struct.S63* align 4 byval(%struct.S63) @g1s63 ) nounwind ret void } Index: test/CodeGen/X86/2009-04-14-IllegalRegs.ll =================================================================== --- test/CodeGen/X86/2009-04-14-IllegalRegs.ll +++ test/CodeGen/X86/2009-04-14-IllegalRegs.ll @@ -21,7 +21,7 @@ store i8 %5, i8* %7, align 1 %8 = getelementptr %struct.X, %struct.X* %xxx, i32 0, i32 0 ; [#uses=1] store i8 15, i8* %8, align 1 - %9 = call i32 (...) bitcast (i32 (%struct.X*, %struct.X*)* @f to i32 (...)*)(%struct.X* byval align 4 %xxx, %struct.X* byval align 4 %xxx) nounwind ; [#uses=1] + %9 = call i32 (...) 
bitcast (i32 (%struct.X*, %struct.X*)* @f to i32 (...)*)(%struct.X* align 4 byval(%struct.X) %xxx, %struct.X* align 4 byval(%struct.X) %xxx) nounwind ; [#uses=1] store i32 %9, i32* %0, align 4 %10 = load i32, i32* %0, align 4 ; [#uses=1] store i32 %10, i32* %retval, align 4 @@ -32,4 +32,4 @@ ret i32 %retval1 } -declare i32 @f(%struct.X* byval align 4, %struct.X* byval align 4) nounwind ssp +declare i32 @f(%struct.X* align 4 byval(%struct.X), %struct.X* align 4 byval(%struct.X)) nounwind ssp Index: test/CodeGen/X86/2009-11-13-VirtRegRewriterBug.ll =================================================================== --- test/CodeGen/X86/2009-11-13-VirtRegRewriterBug.ll +++ test/CodeGen/X86/2009-11-13-VirtRegRewriterBug.ll @@ -3,7 +3,7 @@ %struct.JVTLib_100487 = type <{ i8 }> -define i32 @_Z13JVTLib_10335613JVTLib_10266513JVTLib_100579S_S_S_jPhj(i16* nocapture %ResidualX_Array.0, %struct.JVTLib_100487* nocapture byval align 4 %xqp, i16* nocapture %ResidualL_Array.0, i16* %ResidualDCZ_Array.0, i16* nocapture %ResidualACZ_FOArray.0, i32 %useFRextDequant, i8* nocapture %JVTLib_103357, i32 %use_field_scan) ssp { +define i32 @_Z13JVTLib_10335613JVTLib_10266513JVTLib_100579S_S_S_jPhj(i16* nocapture %ResidualX_Array.0, %struct.JVTLib_100487* nocapture align 4 byval(%struct.JVTLib_100487) %xqp, i16* nocapture %ResidualL_Array.0, i16* %ResidualDCZ_Array.0, i16* nocapture %ResidualACZ_FOArray.0, i32 %useFRextDequant, i8* nocapture %JVTLib_103357, i32 %use_field_scan) ssp { bb.nph: %0 = shl i32 undef, 1 ; [#uses=2] %mask133.masked.masked.masked.masked.masked.masked = or i640 undef, undef ; [#uses=1] Index: test/CodeGen/X86/2010-01-18-DbgValue.ll =================================================================== --- test/CodeGen/X86/2010-01-18-DbgValue.ll +++ test/CodeGen/X86/2010-01-18-DbgValue.ll @@ -6,7 +6,7 @@ %struct.Pt = type { double, double } %struct.Rect = type { %struct.Pt, %struct.Pt } -define double @foo(%struct.Rect* byval %my_r0) nounwind ssp !dbg !1 { +define 
double @foo(%struct.Rect* byval(%struct.Rect) %my_r0) nounwind ssp !dbg !1 { entry: ;CHECK: DEBUG_VALUE %retval = alloca double ; [#uses=2] Index: test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll =================================================================== --- test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll +++ test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll @@ -13,7 +13,7 @@ ; CHECK: movl %esi,{{.*}}(%ebp) ; CHECK: calll __Z6throwsv -define i8* @_Z4test1SiS_(%struct.S* byval %s1, i32 %n, %struct.S* byval %s2) ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +define i8* @_Z4test1SiS_(%struct.S* byval(%struct.S) %s1, i32 %n, %struct.S* byval(%struct.S) %s2) ssp personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { entry: %retval = alloca i8*, align 4 ; [#uses=2] %n.addr = alloca i32, align 4 ; [#uses=1] Index: test/CodeGen/X86/2011-03-30-CreateFixedObjCrash.ll =================================================================== --- test/CodeGen/X86/2011-03-30-CreateFixedObjCrash.ll +++ test/CodeGen/X86/2011-03-30-CreateFixedObjCrash.ll @@ -4,7 +4,7 @@ %struct.T0 = type {} -define void @fn4(%struct.T0* byval %arg0) nounwind ssp { +define void @fn4(%struct.T0* byval(%struct.T0) %arg0) nounwind ssp { entry: ret void } Index: test/CodeGen/X86/2012-11-30-handlemove-dbg.ll =================================================================== --- test/CodeGen/X86/2012-11-30-handlemove-dbg.ll +++ test/CodeGen/X86/2012-11-30-handlemove-dbg.ll @@ -14,7 +14,7 @@ declare void @llvm.dbg.declare(metadata, metadata, metadata) nounwind readnone -define signext i16 @subdivp(%struct.node.0.27* nocapture %p, double %dsq, double %tolsq, %struct.hgstruct.2.29* nocapture byval align 8 %hg) nounwind uwtable readonly ssp !dbg !14 { +define signext i16 @subdivp(%struct.node.0.27* nocapture %p, double %dsq, double %tolsq, %struct.hgstruct.2.29* nocapture align 8 byval(%struct.hgstruct.2.29) %hg) nounwind uwtable readonly ssp !dbg !14 { 
entry: call void @llvm.dbg.declare(metadata %struct.hgstruct.2.29* %hg, metadata !4, metadata !DIExpression()), !dbg !DILocation(scope: !14) %type = getelementptr inbounds %struct.node.0.27, %struct.node.0.27* %p, i64 0, i32 0 Index: test/CodeGen/X86/aligned-variadic.ll =================================================================== --- test/CodeGen/X86/aligned-variadic.ll +++ test/CodeGen/X86/aligned-variadic.ll @@ -5,7 +5,7 @@ %struct.__va_list_tag = type { i32, i32, i8*, i8* } ; Function Attrs: nounwind uwtable -define void @bar(%struct.Baz* byval nocapture readnone align 8 %x, ...) { +define void @bar(%struct.Baz* nocapture readnone align 8 byval(%struct.Baz) %x, ...) { entry: %va = alloca [1 x %struct.__va_list_tag], align 16 %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %va, i64 0, i64 0 Index: test/CodeGen/X86/byval-align.ll =================================================================== --- test/CodeGen/X86/byval-align.ll +++ test/CodeGen/X86/byval-align.ll @@ -7,7 +7,7 @@ @.str3 = private constant [7 x i8] c"test.c\00", align 1 ; <[7 x i8]*> [#uses=1] @__PRETTY_FUNCTION__.2067 = internal constant [13 x i8] c"aligned_func\00" ; <[13 x i8]*> [#uses=1] -define void @aligned_func(%struct.S* byval align 64 %obj) nounwind { +define void @aligned_func(%struct.S* align 64 byval(%struct.S) %obj) nounwind { entry: %ptr = alloca i8* ; [#uses=3] %p = alloca i64 ; [#uses=3] @@ -51,7 +51,7 @@ %"alloca point" = bitcast i32 0 to i32 ; [#uses=0] %0 = getelementptr inbounds %struct.S, %struct.S* %s1, i32 0, i32 0 ; [#uses=1] store i32 1, i32* %0, align 4 - call void @aligned_func(%struct.S* byval align 64 %s1) nounwind + call void @aligned_func(%struct.S* align 64 byval(%struct.S) %s1) nounwind br label %return return: ; preds = %entry Index: test/CodeGen/X86/byval-callee-cleanup.ll =================================================================== --- test/CodeGen/X86/byval-callee-cleanup.ll +++ 
test/CodeGen/X86/byval-callee-cleanup.ll @@ -8,19 +8,19 @@ %struct.Six = type { [6 x i8] } -define x86_stdcallcc void @f(%struct.Six* byval %a) { +define x86_stdcallcc void @f(%struct.Six* byval(%struct.Six) %a) { ret void } ; CHECK-LABEL: _f@8: ; CHECK: retl $8 -define x86_thiscallcc void @g(i8* %this, %struct.Six* byval %a) { +define x86_thiscallcc void @g(i8* %this, %struct.Six* byval(%struct.Six) %a) { ret void } ; CHECK-LABEL: _g: ; CHECK: retl $8 -define x86_fastcallcc void @h(i32 inreg %x, i32 inreg %y, %struct.Six* byval %a) { +define x86_fastcallcc void @h(i32 inreg %x, i32 inreg %y, %struct.Six* byval(%struct.Six) %a) { ret void } ; CHECK-LABEL: @h@16: Index: test/CodeGen/X86/byval.ll =================================================================== --- test/CodeGen/X86/byval.ll +++ test/CodeGen/X86/byval.ll @@ -9,7 +9,7 @@ %struct.s = type { i64, i64, i64 } -define i64 @f(%struct.s* byval %a) { +define i64 @f(%struct.s* byval(%struct.s) %a) { entry: %tmp2 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 0 %tmp3 = load i64, i64* %tmp2, align 8 Index: test/CodeGen/X86/byval2.ll =================================================================== --- test/CodeGen/X86/byval2.ll +++ test/CodeGen/X86/byval2.ll @@ -37,9 +37,9 @@ store i64 %b, i64* %tmp2, align 16 %tmp4 = getelementptr %struct.s, %struct.s* %d, i32 0, i32 2 store i64 %c, i64* %tmp4, align 16 - call void @f( %struct.s* byval %d ) - call void @f( %struct.s* byval %d ) + call void @f( %struct.s* byval(%struct.s) %d ) + call void @f( %struct.s* byval(%struct.s) %d ) ret void } -declare void @f(%struct.s* byval) +declare void @f(%struct.s* byval(%struct.s)) Index: test/CodeGen/X86/byval3.ll =================================================================== --- test/CodeGen/X86/byval3.ll +++ test/CodeGen/X86/byval3.ll @@ -45,9 +45,9 @@ store i32 %a5, i32* %tmp8, align 16 %tmp10 = getelementptr %struct.s, %struct.s* %d, i32 0, i32 5 store i32 %a6, i32* %tmp10, align 16 - call void @f( %struct.s* 
byval %d) - call void @f( %struct.s* byval %d) + call void @f( %struct.s* byval(%struct.s) %d) + call void @f( %struct.s* byval(%struct.s) %d) ret void } -declare void @f(%struct.s* byval) +declare void @f(%struct.s* byval(%struct.s)) Index: test/CodeGen/X86/byval4.ll =================================================================== --- test/CodeGen/X86/byval4.ll +++ test/CodeGen/X86/byval4.ll @@ -51,9 +51,9 @@ store i16 %a5, i16* %tmp8, align 16 %tmp10 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 5 store i16 %a6, i16* %tmp10, align 16 - call void @f( %struct.s* byval %a ) - call void @f( %struct.s* byval %a ) + call void @f( %struct.s* byval(%struct.s) %a ) + call void @f( %struct.s* byval(%struct.s) %a ) ret void } -declare void @f(%struct.s* byval) +declare void @f(%struct.s* byval(%struct.s)) Index: test/CodeGen/X86/byval5.ll =================================================================== --- test/CodeGen/X86/byval5.ll +++ test/CodeGen/X86/byval5.ll @@ -59,9 +59,9 @@ store i8 %a5, i8* %tmp8, align 8 %tmp10 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 5 store i8 %a6, i8* %tmp10, align 8 - call void @f( %struct.s* byval %a ) - call void @f( %struct.s* byval %a ) + call void @f( %struct.s* byval(%struct.s) %a ) + call void @f( %struct.s* byval(%struct.s) %a ) ret void } -declare void @f(%struct.s* byval) +declare void @f(%struct.s* byval(%struct.s)) Index: test/CodeGen/X86/byval6.ll =================================================================== --- test/CodeGen/X86/byval6.ll +++ test/CodeGen/X86/byval6.ll @@ -6,8 +6,8 @@ define i32 @main() nounwind { entry: - tail call void (i32, ...) @bar( i32 3, %struct.W* byval @.cpx ) nounwind - tail call void (i32, ...) @baz( i32 3, %struct.W* byval @B ) nounwind + tail call void (i32, ...) @bar( i32 3, %struct.W* byval(%struct.W) @.cpx ) nounwind + tail call void (i32, ...) 
@baz( i32 3, %struct.W* byval(%struct.W) @B ) nounwind ret i32 undef } Index: test/CodeGen/X86/byval7.ll =================================================================== --- test/CodeGen/X86/byval7.ll +++ test/CodeGen/X86/byval7.ll @@ -14,8 +14,8 @@ %s = alloca %struct.S ; <%struct.S*> [#uses=2] %tmp15 = getelementptr %struct.S, %struct.S* %s, i32 0, i32 0 ; <<2 x i64>*> [#uses=1] store <2 x i64> < i64 8589934595, i64 1 >, <2 x i64>* %tmp15, align 16 - call void @t( i32 1, %struct.S* byval %s ) nounwind + call void @t( i32 1, %struct.S* byval(%struct.S) %s ) nounwind ret i32 0 } -declare void @t(i32, %struct.S* byval ) +declare void @t(i32, %struct.S* byval(%struct.S) ) Index: test/CodeGen/X86/crash.ll =================================================================== --- test/CodeGen/X86/crash.ll +++ test/CodeGen/X86/crash.ll @@ -276,7 +276,7 @@ br label %bb29 bb28: ; preds = %bb7 - call void @_ZNSt6vectorIN4llvm11MachineMoveESaIS1_EE13_M_insert_auxEN9__gnu_cxx17__normal_iteratorIPS1_S3_EERKS1_(%t10* %tmp2, %t21* byval align 4 undef, %t13* undef) + call void @_ZNSt6vectorIN4llvm11MachineMoveESaIS1_EE13_M_insert_auxEN9__gnu_cxx17__normal_iteratorIPS1_S3_EERKS1_(%t10* %tmp2, %t21* align 4 byval(%t21) undef, %t13* undef) br label %bb29 bb29: ; preds = %bb28, %bb27 @@ -300,7 +300,7 @@ br label %bb37 bb36: ; preds = %bb34 - call void @_ZNSt6vectorIN4llvm11MachineMoveESaIS1_EE13_M_insert_auxEN9__gnu_cxx17__normal_iteratorIPS1_S3_EERKS1_(%t10* %tmp2, %t21* byval align 4 undef, %t13* undef) + call void @_ZNSt6vectorIN4llvm11MachineMoveESaIS1_EE13_M_insert_auxEN9__gnu_cxx17__normal_iteratorIPS1_S3_EERKS1_(%t10* %tmp2, %t21* align 4 byval(%t21) undef, %t13* undef) br label %bb37 bb37: ; preds = %bb36, %bb35, %bb31 @@ -312,7 +312,7 @@ declare %t14* @_ZN4llvm9MCContext16CreateTempSymbolEv(%t2*) -declare void @_ZNSt6vectorIN4llvm11MachineMoveESaIS1_EE13_M_insert_auxEN9__gnu_cxx17__normal_iteratorIPS1_S3_EERKS1_(%t10*, %t21* byval align 4, %t13*) +declare void 
@_ZNSt6vectorIN4llvm11MachineMoveESaIS1_EE13_M_insert_auxEN9__gnu_cxx17__normal_iteratorIPS1_S3_EERKS1_(%t10*, %t21* align 4 byval(%t21), %t13*) declare void @llvm.lifetime.start(i64, i8* nocapture) nounwind Index: test/CodeGen/X86/dynamic-allocas-VLAs.ll =================================================================== --- test/CodeGen/X86/dynamic-allocas-VLAs.ll +++ test/CodeGen/X86/dynamic-allocas-VLAs.ll @@ -156,14 +156,14 @@ ; the base pointer we use the original adjustment. %struct.struct_t = type { [5 x i32] } -define void @t7(i32 %size, %struct.struct_t* byval align 8 %arg1) nounwind uwtable { +define void @t7(i32 %size, %struct.struct_t* align 8 byval(%struct.struct_t) %arg1) nounwind uwtable { entry: %x = alloca i32, align 32 store i32 0, i32* %x, align 32 %0 = zext i32 %size to i64 %vla = alloca i32, i64 %0, align 16 %1 = load i32, i32* %x, align 32 - call void @bar(i32 %1, i32* %vla, %struct.struct_t* byval align 8 %arg1) + call void @bar(i32 %1, i32* %vla, %struct.struct_t* align 8 byval(%struct.struct_t) %arg1) ret void ; CHECK: _t7 @@ -185,7 +185,7 @@ declare i8* @llvm.stacksave() nounwind -declare void @bar(i32, i32*, %struct.struct_t* byval align 8) +declare void @bar(i32, i32*, %struct.struct_t* align 8 byval(%struct.struct_t)) declare void @llvm.stackrestore(i8*) nounwind Index: test/CodeGen/X86/extract-extract.ll =================================================================== --- test/CodeGen/X86/extract-extract.ll +++ test/CodeGen/X86/extract-extract.ll @@ -9,7 +9,7 @@ %crd = type { i64, %cr* } %pp = type { %cc } -define fastcc void @foo(%pp* nocapture byval %p_arg) { +define fastcc void @foo(%pp* nocapture byval(%pp) %p_arg) { entry: %tmp2 = getelementptr %pp, %pp* %p_arg, i64 0, i32 0 ; <%cc*> [#uses= %tmp3 = load %cc, %cc* %tmp2 ; <%cc> [#uses=1] Index: test/CodeGen/X86/fast-isel-args-fail2.ll =================================================================== --- test/CodeGen/X86/fast-isel-args-fail2.ll +++ 
test/CodeGen/X86/fast-isel-args-fail2.ll @@ -4,7 +4,7 @@ %struct.s0 = type { x86_fp80, x86_fp80 } ; FastISel cannot handle this case yet. Make sure that we abort. -define i8* @args_fail(%struct.s0* byval nocapture readonly align 16 %y) { +define i8* @args_fail(%struct.s0* nocapture readonly align 16 byval(%struct.s0) %y) { %1 = bitcast %struct.s0* %y to i8* ret i8* %1 } Index: test/CodeGen/X86/fast-isel-call.ll =================================================================== --- test/CodeGen/X86/fast-isel-call.ll +++ test/CodeGen/X86/fast-isel-call.ll @@ -16,10 +16,10 @@ } declare zeroext i1 @foo() nounwind -declare void @foo2(%struct.s* byval) +declare void @foo2(%struct.s* byval(%struct.s)) define void @test2(%struct.s* %d) nounwind { - call void @foo2(%struct.s* byval %d ) + call void @foo2(%struct.s* byval(%struct.s) %d ) ret void ; CHECK-LABEL: test2: ; CHECK: movl (%eax) Index: test/CodeGen/X86/fastcc-byval.ll =================================================================== --- test/CodeGen/X86/fastcc-byval.ll +++ test/CodeGen/X86/fastcc-byval.ll @@ -16,9 +16,9 @@ %V = alloca %struct.MVT %a = getelementptr %struct.MVT, %struct.MVT* %V, i32 0, i32 0 store i32 1, i32* %a - call fastcc void @foo(%struct.MVT* byval %V) nounwind + call fastcc void @foo(%struct.MVT* byval(%struct.MVT) %V) nounwind %t = load i32, i32* %a ret i32 %t } -declare fastcc void @foo(%struct.MVT* byval) +declare fastcc void @foo(%struct.MVT* byval(%struct.MVT)) Index: test/CodeGen/X86/fp-stack-retcopy.ll =================================================================== --- test/CodeGen/X86/fp-stack-retcopy.ll +++ test/CodeGen/X86/fp-stack-retcopy.ll @@ -4,7 +4,7 @@ declare double @foo() -define double @carg({ double, double }* byval %z) nounwind { +define double @carg({ double, double }* byval({ double, double }) %z) nounwind { entry: %tmp5 = tail call double @foo() nounwind ; [#uses=1] ret double %tmp5 Index: test/CodeGen/X86/fp128-i128.ll 
=================================================================== --- test/CodeGen/X86/fp128-i128.ll +++ test/CodeGen/X86/fp128-i128.ll @@ -281,7 +281,7 @@ declare fp128 @copysignl(fp128, fp128) #1 ; Test more complicated logical operations generated from copysignl. -define void @TestCopySign({ fp128, fp128 }* noalias nocapture sret %agg.result, { fp128, fp128 }* byval nocapture readonly align 16 %z) #0 { +define void @TestCopySign({ fp128, fp128 }* noalias nocapture sret %agg.result, { fp128, fp128 }* nocapture readonly align 16 byval({ fp128, fp128 }) %z) #0 { entry: %z.realp = getelementptr inbounds { fp128, fp128 }, { fp128, fp128 }* %z, i64 0, i32 0 %z.real = load fp128, fp128* %z.realp, align 16 Index: test/CodeGen/X86/inline-asm-sp-clobber-memcpy.ll =================================================================== --- test/CodeGen/X86/inline-asm-sp-clobber-memcpy.ll +++ test/CodeGen/X86/inline-asm-sp-clobber-memcpy.ll @@ -2,12 +2,12 @@ %struct.foo = type { [88 x i8] } -declare void @bar(i8* nocapture, %struct.foo* align 4 byval) nounwind +declare void @bar(i8* nocapture, %struct.foo* align 4 byval(%struct.foo)) nounwind ; PR19012 ; Don't clobber %esi if we have inline asm that clobbers %esp. 
define void @test1(%struct.foo* nocapture %x, i32 %y, i8* %z) nounwind { - call void @bar(i8* %z, %struct.foo* align 4 byval %x) + call void @bar(i8* %z, %struct.foo* align 4 byval(%struct.foo) %x) call void asm sideeffect inteldialect "xor esp, esp", "=*m,~{flags},~{esp},~{esp},~{dirflag},~{fpsr},~{flags}"(i8* %z) ret void Index: test/CodeGen/X86/mcu-abi.ll =================================================================== --- test/CodeGen/X86/mcu-abi.ll +++ test/CodeGen/X86/mcu-abi.ll @@ -66,7 +66,7 @@ ; CHECK-NEXT: popl %esi ; CHECK-NOT: retl $4 ; CHECK-NEXT: retl -define void @ret_large_struct(%struct.st12_t* noalias nocapture sret %agg.result, %struct.st12_t* byval nocapture readonly align 4 %r) #0 { +define void @ret_large_struct(%struct.st12_t* noalias nocapture sret %agg.result, %struct.st12_t* nocapture readonly align 4 byval(%struct.st12_t) %r) #0 { entry: %0 = bitcast %struct.st12_t* %agg.result to i8* %1 = bitcast %struct.st12_t* %r to i8* Index: test/CodeGen/X86/misched-aa-colored.ll =================================================================== --- test/CodeGen/X86/misched-aa-colored.ll +++ test/CodeGen/X86/misched-aa-colored.ll @@ -137,7 +137,7 @@ @.str100 = external hidden unnamed_addr constant [50 x i8], align 1 @__PRETTY_FUNCTION__._ZNK4llvm6SDNode10getOperandEj = external hidden unnamed_addr constant [66 x i8], align 1 -declare { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, i32 } @_ZN4llvm12SelectionDAG7getNodeEjNS_5SDLocENS_3EVTENS_7SDValueES3_(%"class.llvm::SelectionDAG.104.704.1064.2024.2144.2384.4184"*, i32, i8*, i32, i32, %"class.llvm::Type.7.607.967.1927.2047.2287.4087"*, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* byval align 8, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* byval align 8) +declare { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, i32 } @_ZN4llvm12SelectionDAG7getNodeEjNS_5SDLocENS_3EVTENS_7SDValueES3_(%"class.llvm::SelectionDAG.104.704.1064.2024.2144.2384.4184"*, i32, i8*, i32, i32, 
%"class.llvm::Type.7.607.967.1927.2047.2287.4087"*, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* align 8 byval(%"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"), %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* align 8 byval(%"class.llvm::SDValue.3.603.963.1923.2043.2283.4083")) ; Function Attrs: noreturn nounwind declare void @__assert_fail(i8*, i8*, i32, i8*) #0 @@ -171,7 +171,7 @@ ; CHECK: movl $-1, %ecx ; CHECK: callq _ZN4llvm12SelectionDAG7getNodeEjNS_5SDLocENS_3EVTENS_7SDValueES3_ - %call18 = call { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, i32 } @_ZN4llvm12SelectionDAG7getNodeEjNS_5SDLocENS_3EVTENS_7SDValueES3_(%"class.llvm::SelectionDAG.104.704.1064.2024.2144.2384.4184"* undef, i32 undef, i8* undef, i32 -1, i32 %retval.sroa.0.0.copyload.i37, %"class.llvm::Type.7.607.967.1927.2047.2287.4087"* undef, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* byval align 8 undef, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* byval align 8 undef) #1 + %call18 = call { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, i32 } @_ZN4llvm12SelectionDAG7getNodeEjNS_5SDLocENS_3EVTENS_7SDValueES3_(%"class.llvm::SelectionDAG.104.704.1064.2024.2144.2384.4184"* undef, i32 undef, i8* undef, i32 -1, i32 %retval.sroa.0.0.copyload.i37, %"class.llvm::Type.7.607.967.1927.2047.2287.4087"* undef, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* align 8 byval(%"class.llvm::SDValue.3.603.963.1923.2043.2283.4083") undef, %"class.llvm::SDValue.3.603.963.1923.2043.2283.4083"* align 8 byval(%"class.llvm::SDValue.3.603.963.1923.2043.2283.4083") undef) #1 ret { %"class.llvm::SDNode.10.610.970.1930.2050.2290.4090"*, i32 } %call18 } Index: test/CodeGen/X86/movtopush.ll =================================================================== --- test/CodeGen/X86/movtopush.ll +++ test/CodeGen/X86/movtopush.ll @@ -10,7 +10,7 @@ declare x86_thiscallcc void @thiscall(%class.Class* %class, i32 %a, i32 %b, i32 %c, i32 %d) declare void @oneparam(i32 %a) 
declare void @eightparams(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h) -declare void @struct(%struct.s* byval %a, i32 %b, i32 %c, i32 %d) +declare void @struct(%struct.s* byval(%struct.s) %a, i32 %b, i32 %c, i32 %d) ; Here, we should have a reserved frame, so we don't expect pushes ; NORMAL-LABEL: test1: @@ -266,7 +266,7 @@ call void @good(i32 1, i32 2, i32 3, i32 4) %pv = ptrtoint i32* %p to i32 %qv = ptrtoint i32* %q to i32 - call void @struct(%struct.s* byval %s, i32 6, i32 %qv, i32 %pv) + call void @struct(%struct.s* byval(%struct.s) %s, i32 6, i32 %qv, i32 %pv) ret void } Index: test/CodeGen/X86/negate-add-zero.ll =================================================================== --- test/CodeGen/X86/negate-add-zero.ll +++ test/CodeGen/X86/negate-add-zero.ll @@ -937,7 +937,7 @@ declare %"struct.FixedMatrixBase"* @_ZN15FixedMatrixBaseIdLi6ELi6EEmIERKS0_(%"struct.FixedMatrixBase"*, %"struct.FixedMatrixBase"*) -declare void @_ZN13CDSVectorBaseI4Vec3N3CDS12DefaultAllocEEC2EiS2_(%"struct.CDSVectorBase"*, i32, %"struct.CDS::DefaultAlloc"* byval align 4) +declare void @_ZN13CDSVectorBaseI4Vec3N3CDS12DefaultAllocEEC2EiS2_(%"struct.CDSVectorBase"*, i32, %"struct.CDS::DefaultAlloc"* align 4 byval(%"struct.CDS::DefaultAlloc")) declare void @_ZN13CDSVectorBaseI4Vec3N3CDS12DefaultAllocEED2Ev(%"struct.CDSVectorBase"*) Index: test/CodeGen/X86/pr2656.ll =================================================================== --- test/CodeGen/X86/pr2656.ll +++ test/CodeGen/X86/pr2656.ll @@ -11,7 +11,7 @@ ; We can fold the 16-byte constant load into either 'xor' instruction, ; but we do not. It has more than one use, so it gets loaded into a register. 
-define void @foo(%struct.anon* byval %p) nounwind { +define void @foo(%struct.anon* byval(%struct.anon) %p) nounwind { ; CHECK-LABEL: foo: ; CHECK: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero ; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero Index: test/CodeGen/X86/sibcall-byval.ll =================================================================== --- test/CodeGen/X86/sibcall-byval.ll +++ test/CodeGen/X86/sibcall-byval.ll @@ -3,20 +3,20 @@ %struct.p = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } -define i32 @f(%struct.p* byval align 4 %q) nounwind ssp { +define i32 @f(%struct.p* align 4 byval(%struct.p) %q) nounwind ssp { entry: ; 32: _f: ; 32: jmp L_g$stub ; 64: _f: ; 64: jmp _g - %call = tail call i32 @g(%struct.p* byval align 4 %q) nounwind + %call = tail call i32 @g(%struct.p* align 4 byval(%struct.p) %q) nounwind ret i32 %call } -declare i32 @g(%struct.p* byval align 4) +declare i32 @g(%struct.p* align 4 byval(%struct.p)) -define i32 @h(%struct.p* byval align 4 %q, i32 %r) nounwind ssp { +define i32 @h(%struct.p* align 4 byval(%struct.p) %q, i32 %r) nounwind ssp { entry: ; 32: _h: ; 32: jmp L_i$stub @@ -24,8 +24,8 @@ ; 64: _h: ; 64: jmp _i - %call = tail call i32 @i(%struct.p* byval align 4 %q, i32 %r) nounwind + %call = tail call i32 @i(%struct.p* align 4 byval(%struct.p) %q, i32 %r) nounwind ret i32 %call } -declare i32 @i(%struct.p* byval align 4, i32) +declare i32 @i(%struct.p* align 4 byval(%struct.p), i32) Index: test/CodeGen/X86/sibcall.ll =================================================================== --- test/CodeGen/X86/sibcall.ll +++ test/CodeGen/X86/sibcall.ll @@ -214,7 +214,7 @@ %struct.t = type { i32, i32, i32, i32, i32 } -define i32 @t12(i32 %x, i32 %y, %struct.t* byval align 4 %z) nounwind ssp { +define i32 @t12(i32 %x, i32 %y, %struct.t* align 4 byval(%struct.t) %z) nounwind ssp { ; 32-LABEL: t12: ; 32-NOT: subl ${{[0-9]+}}, %esp ; 32-NOT: addl ${{[0-9]+}}, %esp @@ -234,14 +234,14 @@ br i1 %0, label %bb2, 
label %bb bb: - %1 = tail call i32 @foo6(i32 %x, i32 %y, %struct.t* byval align 4 %z) nounwind + %1 = tail call i32 @foo6(i32 %x, i32 %y, %struct.t* align 4 byval(%struct.t) %z) nounwind ret i32 %1 bb2: ret i32 0 } -declare i32 @foo6(i32, i32, %struct.t* byval align 4) +declare i32 @foo6(i32, i32, %struct.t* align 4 byval(%struct.t)) ; rdar://r7717598 %struct.ns = type { i32, i32 } @@ -263,13 +263,13 @@ ; X32ABI: callq ; X32ABI: ret entry: - %0 = tail call fastcc %struct.ns* @foo7(%struct.cp* byval align 4 %yy, i8 signext 0) nounwind + %0 = tail call fastcc %struct.ns* @foo7(%struct.cp* align 4 byval(%struct.cp) %yy, i8 signext 0) nounwind ret %struct.ns* %0 } ; rdar://6195379 ; llvm can't do sibcall for this in 32-bit mode (yet). -declare fastcc %struct.ns* @foo7(%struct.cp* byval align 4, i8 signext) nounwind ssp +declare fastcc %struct.ns* @foo7(%struct.cp* align 4 byval(%struct.cp), i8 signext) nounwind ssp %struct.__block_descriptor = type { i64, i64 } %struct.__block_descriptor_withcopydispose = type { i64, i64, i8*, i8* } Index: test/CodeGen/X86/sjlj-baseptr.ll =================================================================== --- test/CodeGen/X86/sjlj-baseptr.ll +++ test/CodeGen/X86/sjlj-baseptr.ll @@ -11,7 +11,7 @@ attributes #0 = { nounwind uwtable "no-frame-pointer-elim"="true" } -define i32 @test1(i64 %n, %Foo* byval nocapture readnone align 8 %f) #0 { +define i32 @test1(i64 %n, %Foo* nocapture readnone align 8 byval(%Foo) %f) #0 { entry: %buf = alloca [5 x i8*], align 16 %p = alloca i8*, align 8 Index: test/CodeGen/X86/ssp-data-layout.ll =================================================================== --- test/CodeGen/X86/ssp-data-layout.ll +++ test/CodeGen/X86/ssp-data-layout.ll @@ -165,7 +165,7 @@ %coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0 %7 = bitcast [2 x i16]* %coerce.dive26 to i32* %8 = load i32, i32* %7, align 1 - call void @takes_all(i64 %4, i16 %6, 
%struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2) + call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* align 8 byval(%struct.struct_large_nonchar) %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2) ret void } @@ -309,7 +309,7 @@ %coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0 %7 = bitcast [2 x i16]* %coerce.dive26 to i32* %8 = load i32, i32* %7, align 1 - call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2) + call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* align 8 byval(%struct.struct_large_nonchar) %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2) ret void } @@ -441,7 +441,7 @@ %coerce.dive26 = getelementptr %struct.struct_small_nonchar, %struct.struct_small_nonchar* %d, i32 0, i32 0 %7 = bitcast [2 x i16]* %coerce.dive26 to i32* %8 = load i32, i32* %7, align 1 - call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2) + call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* align 8 byval(%struct.struct_large_nonchar) %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2) ret void } @@ -506,5 +506,5 @@ declare signext i16 @get_struct_small_nonchar() declare void @end_struct_small_nonchar() -declare void @takes_all(i64, i16, %struct.struct_large_nonchar* byval align 8, i32, i8*, i8*, i32*, i16*, i32*, i32, i32, i32) +declare void 
@takes_all(i64, i16, %struct.struct_large_nonchar* align 8 byval(%struct.struct_large_nonchar), i32, i8*, i8*, i32*, i16*, i32*, i32, i32, i32) declare void @takes_two(i32, i8*) Index: test/CodeGen/X86/stack-align-memcpy.ll =================================================================== --- test/CodeGen/X86/stack-align-memcpy.ll +++ test/CodeGen/X86/stack-align-memcpy.ll @@ -2,14 +2,14 @@ %struct.foo = type { [88 x i8] } -declare void @bar(i8* nocapture, %struct.foo* align 4 byval) nounwind +declare void @bar(i8* nocapture, %struct.foo* align 4 byval(%struct.foo)) nounwind declare void @baz(i8*) nounwind ; PR15249 ; We can't use rep;movsl here because it clobbers the base pointer in %esi. define void @test1(%struct.foo* nocapture %x, i32 %y) nounwind { %dynalloc = alloca i8, i32 %y, align 1 - call void @bar(i8* %dynalloc, %struct.foo* align 4 byval %x) + call void @bar(i8* %dynalloc, %struct.foo* align 4 byval(%struct.foo) %x) ret void ; CHECK-LABEL: test1: @@ -21,7 +21,7 @@ ; PR19012 ; Also don't clobber %esi if the dynamic alloca comes after the memcpy. define void @test2(%struct.foo* nocapture %x, i32 %y, i8* %z) nounwind { - call void @bar(i8* %z, %struct.foo* align 4 byval %x) + call void @bar(i8* %z, %struct.foo* align 4 byval(%struct.foo) %x) %dynalloc = alloca i8, i32 %y, align 1 call void @baz(i8* %dynalloc) ret void @@ -33,7 +33,7 @@ ; Check that we do use rep movs if we make the alloca static. 
define void @test3(%struct.foo* nocapture %x, i32 %y, i8* %z) nounwind { - call void @bar(i8* %z, %struct.foo* align 4 byval %x) + call void @bar(i8* %z, %struct.foo* align 4 byval(%struct.foo) %x) %statalloc = alloca i8, i32 8, align 1 call void @baz(i8* %statalloc) ret void Index: test/CodeGen/X86/stack-align.ll =================================================================== --- test/CodeGen/X86/stack-align.ll +++ test/CodeGen/X86/stack-align.ll @@ -10,7 +10,7 @@ target triple = "i686-apple-darwin8" @G = external global double -define void @test({ double, double }* byval %z, double* %P) nounwind { +define void @test({ double, double }* byval({ double, double }) %z, double* %P) nounwind { entry: %tmp3 = load double, double* @G, align 16 ; [#uses=1] %tmp4 = tail call double @fabs( double %tmp3 ) readnone ; [#uses=1] Index: test/CodeGen/X86/tailcallbyval.ll =================================================================== --- test/CodeGen/X86/tailcallbyval.ll +++ test/CodeGen/X86/tailcallbyval.ll @@ -3,7 +3,7 @@ i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } -define fastcc i32 @tailcallee(%struct.s* byval %a) nounwind { +define fastcc i32 @tailcallee(%struct.s* byval(%struct.s) %a) nounwind { entry: %tmp2 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 0 %tmp3 = load i32, i32* %tmp2 @@ -12,9 +12,9 @@ ; CHECK: movl 4(%esp), %eax } -define fastcc i32 @tailcaller(%struct.s* byval %a) nounwind { +define fastcc i32 @tailcaller(%struct.s* byval(%struct.s) %a) nounwind { entry: - %tmp4 = tail call fastcc i32 @tailcallee(%struct.s* byval %a ) + %tmp4 = tail call fastcc i32 @tailcallee(%struct.s* byval(%struct.s) %a ) ret i32 %tmp4 ; CHECK: tailcaller ; CHECK: jmp tailcallee Index: test/CodeGen/X86/tailcallbyval64.ll =================================================================== --- test/CodeGen/X86/tailcallbyval64.ll +++ test/CodeGen/X86/tailcallbyval64.ll @@ -30,13 +30,13 @@ i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, 
i64, i64, i64, i64, i64, i64 } -declare fastcc i64 @tailcallee(%struct.s* byval %a, i64 %val, i64 %val2, i64 %val3, i64 %val4, i64 %val5) +declare fastcc i64 @tailcallee(%struct.s* byval(%struct.s) %a, i64 %val, i64 %val2, i64 %val3, i64 %val4, i64 %val5) -define fastcc i64 @tailcaller(i64 %b, %struct.s* byval %a) { +define fastcc i64 @tailcaller(i64 %b, %struct.s* byval(%struct.s) %a) { entry: %tmp2 = getelementptr %struct.s, %struct.s* %a, i32 0, i32 1 %tmp3 = load i64, i64* %tmp2, align 8 - %tmp4 = tail call fastcc i64 @tailcallee(%struct.s* byval %a , i64 %tmp3, i64 %b, i64 7, i64 13, i64 17) + %tmp4 = tail call fastcc i64 @tailcallee(%struct.s* byval(%struct.s) %a , i64 %tmp3, i64 %b, i64 7, i64 13, i64 17) ret i64 %tmp4 } Index: test/CodeGen/XCore/byVal.ll =================================================================== --- test/CodeGen/XCore/byVal.ll +++ test/CodeGen/XCore/byVal.ll @@ -6,7 +6,7 @@ ; CHECK: retsp 1 %struct.st0 = type { [0 x i32] } declare void @f0(%struct.st0*) nounwind -define void @f0Test(%struct.st0* byval %s0) nounwind { +define void @f0Test(%struct.st0* byval(%struct.st0) %s0) nounwind { entry: call void @f0(%struct.st0* %s0) nounwind ret void @@ -29,7 +29,7 @@ ; CHECK: retsp 13 %struct.st1 = type { [10 x i32] } declare void @f1(%struct.st1*) nounwind -define i32 @f1Test(i32 %i, %struct.st1* byval %s1) nounwind { +define i32 @f1Test(i32 %i, %struct.st1* byval(%struct.st1) %s1) nounwind { entry: call void @f1(%struct.st1* %s1) nounwind ret i32 %i @@ -51,7 +51,7 @@ ; CHECK: retsp 0 %struct.st2 = type { i32 } declare void @f2(i32, %struct.st2*) nounwind -define void @f2Test(%struct.st2* byval %s2, i32 %i, ...) nounwind { +define void @f2Test(%struct.st2* byval(%struct.st2) %s2, i32 %i, ...) 
nounwind { entry: call void @f2(i32 %i, %struct.st2* %s2) ret void @@ -66,7 +66,7 @@ ; CHECK: bl f ; CHECK: retsp 2 declare void @f3(i8*) nounwind -define void @f3Test(i8* byval %v) nounwind { +define void @f3Test(i8* byval(i8) %v) nounwind { entry: call void @f3(i8* %v) nounwind ret void Index: test/DebugInfo/Generic/2010-10-01-crash.ll =================================================================== --- test/DebugInfo/Generic/2010-10-01-crash.ll +++ test/DebugInfo/Generic/2010-10-01-crash.ll @@ -1,6 +1,6 @@ ; RUN: llc -O0 %s -o /dev/null -define void @CGRectStandardize(i32* sret %agg.result, i32* byval %rect) nounwind ssp !dbg !0 { +define void @CGRectStandardize(i32* sret %agg.result, i32* byval(i32) %rect) nounwind ssp !dbg !0 { entry: call void @llvm.dbg.declare(metadata i32* %rect, metadata !23, metadata !DIExpression()), !dbg !24 ret void Index: test/DebugInfo/X86/byvalstruct.ll =================================================================== --- test/DebugInfo/X86/byvalstruct.ll +++ test/DebugInfo/X86/byvalstruct.ll @@ -58,7 +58,7 @@ @llvm.used = appending global [5 x i8*] [i8* getelementptr inbounds ([7 x i8], [7 x i8]* @"\01L_OBJC_CLASS_NAME_", i32 0, i32 0), i8* getelementptr inbounds ([32 x i8], [32 x i8]* @"\01L_OBJC_METH_VAR_NAME_", i32 0, i32 0), i8* getelementptr inbounds ([23 x i8], [23 x i8]* @"\01L_OBJC_METH_VAR_TYPE_", i32 0, i32 0), i8* bitcast ({ i32, i32, [1 x %struct._objc_method] }* @"\01l_OBJC_$_INSTANCE_METHODS_Bitmap" to i8*), i8* bitcast ([1 x i8*]* @"\01L_OBJC_LABEL_CLASS_$" to i8*)], section "llvm.metadata" ; Function Attrs: ssp uwtable -define internal i8* @"\01-[Bitmap initWithCopy:andInfo:andLength:]"(%0* %self, i8* %_cmd, %0* %otherBitmap, %struct.ImageInfo* byval align 8 %info, i64 %length) #0 !dbg !7 { +define internal i8* @"\01-[Bitmap initWithCopy:andInfo:andLength:]"(%0* %self, i8* %_cmd, %0* %otherBitmap, %struct.ImageInfo* align 8 byval(%struct.ImageInfo) %info, i64 %length) #0 !dbg !7 { entry: %retval = alloca i8*, 
align 8 %self.addr = alloca %0*, align 8 Index: test/DebugInfo/X86/dbg-byval-parameter.ll =================================================================== --- test/DebugInfo/X86/dbg-byval-parameter.ll +++ test/DebugInfo/X86/dbg-byval-parameter.ll @@ -4,7 +4,7 @@ %struct.Pt = type { double, double } %struct.Rect = type { %struct.Pt, %struct.Pt } -define double @foo(%struct.Rect* byval %my_r0) nounwind ssp !dbg !1 { +define double @foo(%struct.Rect* byval(%struct.Rect) %my_r0) nounwind ssp !dbg !1 { entry: %retval = alloca double ; [#uses=2] %0 = alloca double ; [#uses=2] Index: test/DebugInfo/X86/pieces-2.ll =================================================================== --- test/DebugInfo/X86/pieces-2.ll +++ test/DebugInfo/X86/pieces-2.ll @@ -29,7 +29,7 @@ %struct.Inner = type { i32, i64 } ; Function Attrs: nounwind ssp uwtable -define i32 @foo(%struct.Outer* byval align 8 %outer) #0 !dbg !4 { +define i32 @foo(%struct.Outer* align 8 byval(%struct.Outer) %outer) #0 !dbg !4 { entry: call void @llvm.dbg.declare(metadata %struct.Outer* %outer, metadata !25, metadata !DIExpression()), !dbg !26 %i1.sroa.0.0..sroa_idx = getelementptr inbounds %struct.Outer, %struct.Outer* %outer, i64 0, i32 0, i64 1, i32 0, !dbg !27 Index: test/DebugInfo/X86/safestack-byval.ll =================================================================== --- test/DebugInfo/X86/safestack-byval.ll +++ test/DebugInfo/X86/safestack-byval.ll @@ -22,7 +22,7 @@ @__safestack_unsafe_stack_ptr = external thread_local(initialexec) global i8* ; Function Attrs: norecurse nounwind readonly safestack uwtable -define i32 @_Z1f1Sm(%struct.S* byval nocapture readonly align 8 %zzz, i64 %len) #0 !dbg !12 { +define i32 @_Z1f1Sm(%struct.S* nocapture readonly align 8 byval(%struct.S) %zzz, i64 %len) #0 !dbg !12 { entry: %unsafe_stack_ptr = load i8*, i8** @__safestack_unsafe_stack_ptr, !dbg !22 %unsafe_stack_static_top = getelementptr i8, i8* %unsafe_stack_ptr, i32 -400, !dbg !22 Index: 
test/DebugInfo/X86/sroasplit-1.ll =================================================================== --- test/DebugInfo/X86/sroasplit-1.ll +++ test/DebugInfo/X86/sroasplit-1.ll @@ -36,7 +36,7 @@ %struct.Inner = type { i32, i64 } ; Function Attrs: nounwind ssp uwtable -define i32 @foo(%struct.Outer* byval align 8 %outer) #0 !dbg !4 { +define i32 @foo(%struct.Outer* align 8 byval(%struct.Outer) %outer) #0 !dbg !4 { entry: %i1 = alloca %struct.Inner, align 8 call void @llvm.dbg.declare(metadata %struct.Outer* %outer, metadata !25, metadata !2), !dbg !26 Index: test/DebugInfo/X86/sroasplit-4.ll =================================================================== --- test/DebugInfo/X86/sroasplit-4.ll +++ test/DebugInfo/X86/sroasplit-4.ll @@ -82,7 +82,7 @@ %4 = bitcast %struct.r* %agg.tmp to i8*, !dbg !33 %5 = bitcast %struct.r* %r to i8*, !dbg !33 call void @llvm.memcpy.p0i8.p0i8.i64(i8* %4, i8* %5, i64 40, i32 8, i1 false), !dbg !33 - %call4 = call i32 @_Z7call_me1r(%struct.r* byval align 8 %agg.tmp), !dbg !33 + %call4 = call i32 @_Z7call_me1r(%struct.r* align 8 byval(%struct.r) %agg.tmp), !dbg !33 store i32 %call4, i32* %retval, !dbg !33 br label %return, !dbg !33 @@ -99,7 +99,7 @@ ; Function Attrs: nounwind declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #3 -declare i32 @_Z7call_me1r(%struct.r* byval align 8) +declare i32 @_Z7call_me1r(%struct.r* align 8 byval(%struct.r)) attributes #0 = { nounwind } attributes #2 = { nounwind readnone } Index: test/Instrumentation/BoundsChecking/simple.ll =================================================================== --- test/Instrumentation/BoundsChecking/simple.ll +++ test/Instrumentation/BoundsChecking/simple.ll @@ -122,7 +122,7 @@ } ; CHECK: @f11 -define void @f11(i128* byval %x) nounwind { +define void @f11(i128* byval(i128) %x) nounwind { %1 = bitcast i128* %x to i8* %2 = getelementptr inbounds i8, i8* %1, i64 16 ; CHECK: br label @@ -131,7 +131,7 @@ } ; CHECK: @f11_as1 -define 
void @f11_as1(i128 addrspace(1)* byval %x) nounwind { +define void @f11_as1(i128 addrspace(1)* byval(i128) %x) nounwind { %1 = bitcast i128 addrspace(1)* %x to i8 addrspace(1)* %2 = getelementptr inbounds i8, i8 addrspace(1)* %1, i16 16 ; CHECK: br label Index: test/Instrumentation/MemorySanitizer/byval-alignment.ll =================================================================== --- test/Instrumentation/MemorySanitizer/byval-alignment.ll +++ test/Instrumentation/MemorySanitizer/byval-alignment.ll @@ -13,8 +13,8 @@ define void @Caller() sanitize_memory { entry: %agg.tmp = alloca %struct.S, align 16 - call void @Callee(i32 1, %struct.S* byval align 16 %agg.tmp) + call void @Callee(i32 1, %struct.S* align 16 byval(%struct.S) %agg.tmp) ret void } -declare void @Callee(i32, %struct.S* byval align 16) +declare void @Callee(i32, %struct.S* align 16 byval(%struct.S)) Index: test/Instrumentation/MemorySanitizer/check_access_address.ll =================================================================== --- test/Instrumentation/MemorySanitizer/check_access_address.ll +++ test/Instrumentation/MemorySanitizer/check_access_address.ll @@ -6,7 +6,7 @@ ; Test byval argument shadow alignment -define <2 x i64> @ByValArgumentShadowLargeAlignment(<2 x i64>* byval %p) sanitize_memory { +define <2 x i64> @ByValArgumentShadowLargeAlignment(<2 x i64>* byval(<2 x i64>) %p) sanitize_memory { entry: %x = load <2 x i64>, <2 x i64>* %p ret <2 x i64> %x @@ -17,7 +17,7 @@ ; CHECK: ret <2 x i64> -define i16 @ByValArgumentShadowSmallAlignment(i16* byval %p) sanitize_memory { +define i16 @ByValArgumentShadowSmallAlignment(i16* byval(i16) %p) sanitize_memory { entry: %x = load i16, i16* %p ret i16 %x Index: test/Instrumentation/MemorySanitizer/msan_basic.ll =================================================================== --- test/Instrumentation/MemorySanitizer/msan_basic.ll +++ test/Instrumentation/MemorySanitizer/msan_basic.ll @@ -906,7 +906,7 @@ %agg.tmp.sroa.2.0.copyload = load i64, i64* 
%agg.tmp.sroa.2.0..sroa_cast, align 4 %1 = bitcast %struct.StructByVal* %agg.tmp2 to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %1, i8* %0, i64 16, i32 4, i1 false) - call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* byval align 8 %agg.tmp2) + call void (i32, ...) @VAArgStructFn(i32 undef, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, i64 %agg.tmp.sroa.0.0.copyload, i64 %agg.tmp.sroa.2.0.copyload, %struct.StructByVal* align 8 byval(%struct.StructByVal) %agg.tmp2) ret void } Index: test/Linker/func-attrs-a.ll =================================================================== --- test/Linker/func-attrs-a.ll +++ test/Linker/func-attrs-a.ll @@ -1,13 +1,13 @@ ; RUN: llvm-link %s %p/func-attrs-b.ll -S -o - | FileCheck %s ; PR2382 -; CHECK: call void @check0(%struct.S0* sret null, %struct.S0* byval align 4 null, %struct.S0* align 4 null, %struct.S0* byval align 4 null) -; CHECK: define void @check0(%struct.S0* sret %agg.result, %struct.S0* byval %arg0, %struct.S0* %arg1, %struct.S0* byval %arg2) +; CHECK: call void @check0(%struct.S0* sret null, %struct.S0* align 4 byval(%struct.S0) null, %struct.S0* align 4 null, %struct.S0* align 4 byval(%struct.S0) null) +; CHECK: define void @check0(%struct.S0* sret %agg.result, %struct.S0* byval(%struct.S0) %arg0, %struct.S0* %arg1, %struct.S0* byval(%struct.S0) %arg2) %struct.S0 = type <{ i8, i8, i8, i8 }> define void @a() { - call void @check0(%struct.S0* sret null, %struct.S0* byval align 4 null, %struct.S0* align 4 null, %struct.S0* byval align 4 null) + call void @check0(%struct.S0* sret null, %struct.S0* align 4 byval(%struct.S0) null, %struct.S0* align 4 null, %struct.S0* align 4 byval(%struct.S0) null) ret void } Index: test/Linker/func-attrs-b.ll =================================================================== --- test/Linker/func-attrs-b.ll +++ 
test/Linker/func-attrs-b.ll @@ -3,6 +3,6 @@ %struct.S0 = type <{ i8, i8, i8, i8 }> -define void @check0(%struct.S0* sret %agg.result, %struct.S0* byval %arg0, %struct.S0* %arg1, %struct.S0* byval %arg2) { +define void @check0(%struct.S0* sret %agg.result, %struct.S0* byval(%struct.S0) %arg0, %struct.S0* %arg1, %struct.S0* byval(%struct.S0) %arg2) { ret void } Index: test/Transforms/ArgumentPromotion/attrs.ll =================================================================== --- test/Transforms/ArgumentPromotion/attrs.ll +++ test/Transforms/ArgumentPromotion/attrs.ll @@ -2,7 +2,7 @@ %struct.ss = type { i32, i64 } -define internal void @f(%struct.ss* byval %b, i32* byval %X, i32 %i) nounwind { +define internal void @f(%struct.ss* byval(%struct.ss) %b, i32* byval(i32) %X, i32 %i) nounwind { entry: %tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0 %tmp1 = load i32, i32* %tmp, align 4 @@ -20,6 +20,6 @@ store i32 1, i32* %tmp1, align 8 %tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1 ; [#uses=1] store i64 2, i64* %tmp4, align 4 - call void @f( %struct.ss* byval %S, i32* byval %X, i32 zeroext 0) + call void @f( %struct.ss* byval(%struct.ss) %S, i32* byval(i32) %X, i32 zeroext 0) ret i32 0 } Index: test/Transforms/ArgumentPromotion/byval-2.ll =================================================================== --- test/Transforms/ArgumentPromotion/byval-2.ll +++ test/Transforms/ArgumentPromotion/byval-2.ll @@ -5,8 +5,8 @@ %struct.ss = type { i32, i64 } -define internal void @f(%struct.ss* byval %b, i32* byval %X) nounwind { -; CHECK-LABEL: define internal void @f(i32 %b.0, i64 %b.1, i32* byval %X) +define internal void @f(%struct.ss* byval(%struct.ss) %b, i32* byval(i32) %X) nounwind { +; CHECK-LABEL: define internal void @f(i32 %b.0, i64 %b.1, i32* byval(i32) %X) entry: %tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0 %tmp1 = load i32, i32* %tmp, align 4 @@ -25,7 +25,7 @@ store i32 1, i32* %tmp1, align 8 %tmp4 = getelementptr 
%struct.ss, %struct.ss* %S, i32 0, i32 1 store i64 2, i64* %tmp4, align 4 - call void @f( %struct.ss* byval %S, i32* byval %X) -; CHECK: call void @f(i32 %{{.*}}, i64 %{{.*}}, i32* byval %{{.*}}) + call void @f( %struct.ss* byval(%struct.ss) %S, i32* byval(i32) %X) +; CHECK: call void @f(i32 %{{.*}}, i64 %{{.*}}, i32* byval(i32) %{{.*}}) ret i32 0 } Index: test/Transforms/ArgumentPromotion/byval.ll =================================================================== --- test/Transforms/ArgumentPromotion/byval.ll +++ test/Transforms/ArgumentPromotion/byval.ll @@ -4,7 +4,7 @@ %struct.ss = type { i32, i64 } -define internal void @f(%struct.ss* byval %b) nounwind { +define internal void @f(%struct.ss* byval(%struct.ss) %b) nounwind { ; CHECK-LABEL: define internal void @f(i32 %b.0, i64 %b.1) entry: %tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0 ; [#uses=2] @@ -22,7 +22,7 @@ store i32 1, i32* %tmp1, align 8 %tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1 ; [#uses=1] store i64 2, i64* %tmp4, align 4 - call void @f( %struct.ss* byval %S ) nounwind + call void @f( %struct.ss* byval(%struct.ss) %S ) nounwind ; CHECK: call void @f(i32 %{{.*}}, i64 %{{.*}}) ret i32 0 } Index: test/Transforms/ArgumentPromotion/fp80.ll =================================================================== --- test/Transforms/ArgumentPromotion/fp80.ll +++ test/Transforms/ArgumentPromotion/fp80.ll @@ -13,15 +13,15 @@ define void @run() { entry: - tail call i8 @UseLongDoubleUnsafely(%union.u* byval align 16 bitcast (%struct.s* @b to %union.u*)) - tail call x86_fp80 @UseLongDoubleSafely(%union.u* byval align 16 bitcast (%struct.s* @b to %union.u*)) + tail call i8 @UseLongDoubleUnsafely(%union.u* align 16 byval(%union.u) bitcast (%struct.s* @b to %union.u*)) + tail call x86_fp80 @UseLongDoubleSafely(%union.u* align 16 byval(%union.u) bitcast (%struct.s* @b to %union.u*)) call i64 @AccessPaddingOfStruct(%struct.Foo* @a) call i64 @CaptureAStruct(%struct.Foo* @a) ret void } -; 
CHECK: internal i8 @UseLongDoubleUnsafely(%union.u* byval align 16 %arg) { -define internal i8 @UseLongDoubleUnsafely(%union.u* byval align 16 %arg) { +; CHECK: internal i8 @UseLongDoubleUnsafely(%union.u* align 16 byval(%union.u) %arg) { +define internal i8 @UseLongDoubleUnsafely(%union.u* align 16 byval(%union.u) %arg) { entry: %bitcast = bitcast %union.u* %arg to %struct.s* %gep = getelementptr inbounds %struct.s, %struct.s* %bitcast, i64 0, i32 2 @@ -30,21 +30,21 @@ } ; CHECK: internal x86_fp80 @UseLongDoubleSafely(x86_fp80 {{%.*}}) { -define internal x86_fp80 @UseLongDoubleSafely(%union.u* byval align 16 %arg) { +define internal x86_fp80 @UseLongDoubleSafely(%union.u* align 16 byval(%union.u) %arg) { %gep = getelementptr inbounds %union.u, %union.u* %arg, i64 0, i32 0 %fp80 = load x86_fp80, x86_fp80* %gep ret x86_fp80 %fp80 } -; CHECK: define internal i64 @AccessPaddingOfStruct(%struct.Foo* byval %a) { -define internal i64 @AccessPaddingOfStruct(%struct.Foo* byval %a) { +; CHECK: define internal i64 @AccessPaddingOfStruct(%struct.Foo* byval(%struct.Foo) %a) { +define internal i64 @AccessPaddingOfStruct(%struct.Foo* byval(%struct.Foo) %a) { %p = bitcast %struct.Foo* %a to i64* %v = load i64, i64* %p ret i64 %v } -; CHECK: define internal i64 @CaptureAStruct(%struct.Foo* byval %a) { -define internal i64 @CaptureAStruct(%struct.Foo* byval %a) { +; CHECK: define internal i64 @CaptureAStruct(%struct.Foo* byval(%struct.Foo) %a) { +define internal i64 @CaptureAStruct(%struct.Foo* byval(%struct.Foo) %a) { entry: %a_ptr = alloca %struct.Foo* br label %loop Index: test/Transforms/ArgumentPromotion/tail.ll =================================================================== --- test/Transforms/ArgumentPromotion/tail.ll +++ test/Transforms/ArgumentPromotion/tail.ll @@ -7,7 +7,7 @@ declare i8* @foo(%pair*) -define internal void @bar(%pair* byval %Data) { +define internal void @bar(%pair* byval(%pair) %Data) { ; CHECK: define internal void @bar(i32 %Data.0, i32 %Data.1) ; 
CHECK: %Data = alloca %pair ; CHECK-NOT: tail @@ -16,7 +16,7 @@ ret void } -define void @zed(%pair* byval %Data) { - call void @bar(%pair* byval %Data) +define void @zed(%pair* byval(%pair) %Data) { + call void @bar(%pair* byval(%pair) %Data) ret void } Index: test/Transforms/ArgumentPromotion/variadic.ll =================================================================== --- test/Transforms/ArgumentPromotion/variadic.ll +++ test/Transforms/ArgumentPromotion/variadic.ll @@ -15,7 +15,7 @@ ; Function Attrs: nounwind uwtable define i32 @main(i32 %argc, i8** nocapture readnone %argv) #0 { entry: - tail call void (i8*, i8*, i8*, i8*, i8*, ...) @callee_t0f(i8* undef, i8* undef, i8* undef, i8* undef, i8* undef, %struct.tt0* byval align 8 @t45) + tail call void (i8*, i8*, i8*, i8*, i8*, ...) @callee_t0f(i8* undef, i8* undef, i8* undef, i8* undef, i8* undef, %struct.tt0* align 8 byval(%struct.tt0) @t45) ret i32 0 } Index: test/Transforms/BBVectorize/X86/wr-aliases.ll =================================================================== --- test/Transforms/BBVectorize/X86/wr-aliases.ll +++ test/Transforms/BBVectorize/X86/wr-aliases.ll @@ -8,7 +8,7 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #0 ; Function Attrs: uwtable -declare fastcc void @_ZL12printQBezier7QBezier(%class.QBezier.15* byval nocapture readonly align 8) #1 +declare fastcc void @_ZL12printQBezier7QBezier(%class.QBezier.15* nocapture readonly align 8 byval(%class.QBezier.15)) #1 ; Function Attrs: nounwind declare void @llvm.lifetime.start(i64, i8* nocapture) #0 @@ -56,7 +56,7 @@ call void @llvm.lifetime.start(i64 64, i8* %v2) %v3 = bitcast [10 x %class.QBezier.15]* %beziers to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %v0, i8* %v3, i64 64, i32 8, i1 false) - call fastcc void @_ZL12printQBezier7QBezier(%class.QBezier.15* byval align 8 %agg.tmp.i) + call fastcc void @_ZL12printQBezier7QBezier(%class.QBezier.15* align 8 byval(%class.QBezier.15) %agg.tmp.i) %x2.i 
= getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 2 %v4 = load double, double* %x2.i, align 16 %x3.i = getelementptr inbounds [10 x %class.QBezier.15], [10 x %class.QBezier.15]* %beziers, i64 0, i64 0, i32 4 @@ -131,9 +131,9 @@ store double %mul52.i, double* %y454.i, align 8 %v22 = bitcast %class.QBezier.15* %add.ptr to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %v1, i8* %v22, i64 64, i32 8, i1 false) - call fastcc void @_ZL12printQBezier7QBezier(%class.QBezier.15* byval align 8 %agg.tmp55.i) + call fastcc void @_ZL12printQBezier7QBezier(%class.QBezier.15* align 8 byval(%class.QBezier.15) %agg.tmp55.i) call void @llvm.memcpy.p0i8.p0i8.i64(i8* %v2, i8* %v3, i64 64, i32 8, i1 false) - call fastcc void @_ZL12printQBezier7QBezier(%class.QBezier.15* byval align 8 %agg.tmp56.i) + call fastcc void @_ZL12printQBezier7QBezier(%class.QBezier.15* align 8 byval(%class.QBezier.15) %agg.tmp56.i) call void @llvm.lifetime.end(i64 64, i8* %v0) call void @llvm.lifetime.end(i64 64, i8* %v1) call void @llvm.lifetime.end(i64 64, i8* %v2) Index: test/Transforms/DeadArgElim/2007-12-20-ParamAttrs.ll =================================================================== --- test/Transforms/DeadArgElim/2007-12-20-ParamAttrs.ll +++ test/Transforms/DeadArgElim/2007-12-20-ParamAttrs.ll @@ -13,7 +13,7 @@ define i32 @bar() { ; CHECK: call void @foo(i8 signext 1) [[NUW]] - %A = call zeroext i8(i8*, i8, ...) @foo(i8* inreg null, i8 signext 1, %struct* byval null ) nounwind + %A = call zeroext i8(i8*, i8, ...) 
@foo(i8* inreg null, i8 signext 1, %struct* byval(%struct) null ) nounwind ret i32 0 } Index: test/Transforms/DeadArgElim/2008-01-16-VarargsParamAttrs.ll =================================================================== --- test/Transforms/DeadArgElim/2008-01-16-VarargsParamAttrs.ll +++ test/Transforms/DeadArgElim/2008-01-16-VarargsParamAttrs.ll @@ -23,7 +23,7 @@ entry: %"alloca point" = bitcast i32 0 to i32 ; [#uses=0] %tmp = getelementptr [4 x %struct.point], [4 x %struct.point]* @pts, i32 0, i32 0 ; <%struct.point*> [#uses=1] - %tmp1 = call i32 (i32, ...) @va1( i32 1, %struct.point* byval %tmp ) nounwind ; [#uses=0] + %tmp1 = call i32 (i32, ...) @va1( i32 1, %struct.point* byval(%struct.point) %tmp ) nounwind ; [#uses=0] call void @exit( i32 0 ) noreturn nounwind unreachable } Index: test/Transforms/DeadArgElim/variadic_safety.ll =================================================================== --- test/Transforms/DeadArgElim/variadic_safety.ll +++ test/Transforms/DeadArgElim/variadic_safety.ll @@ -17,9 +17,9 @@ define i32 @call_va(i32 %in) { %stacked = alloca i32 store i32 42, i32* %stacked - %res = call i32(i32, i32, ...) @va_func(i32 %in, i32 %in, [6 x i32] undef, i32* byval %stacked) + %res = call i32(i32, i32, ...) @va_func(i32 %in, i32 %in, [6 x i32] undef, i32* byval(i32) %stacked) ret i32 %res -; CHECK: call i32 (i32, i32, ...) @va_func(i32 undef, i32 %in, [6 x i32] undef, i32* byval %stacked) +; CHECK: call i32 (i32, i32, ...) @va_func(i32 undef, i32 %in, [6 x i32] undef, i32* byval(i32) %stacked) } define internal i32 @va_deadret_func(i32 %a, i32 %b, ...) { @@ -32,7 +32,7 @@ define void @call_deadret(i32 %in) { %stacked = alloca i32 store i32 42, i32* %stacked - call i32 (i32, i32, ...) @va_deadret_func(i32 undef, i32 %in, [6 x i32] undef, i32* byval %stacked) + call i32 (i32, i32, ...) @va_deadret_func(i32 undef, i32 %in, [6 x i32] undef, i32* byval(i32) %stacked) ret void -; CHECK: call void (i32, i32, ...) 
@va_deadret_func(i32 undef, i32 undef, [6 x i32] undef, i32* byval %stacked) +; CHECK: call void (i32, i32, ...) @va_deadret_func(i32 undef, i32 undef, [6 x i32] undef, i32* byval(i32) %stacked) } Index: test/Transforms/DeadStoreElimination/simple.ll =================================================================== --- test/Transforms/DeadStoreElimination/simple.ll +++ test/Transforms/DeadStoreElimination/simple.ll @@ -97,7 +97,7 @@ ; Test for byval handling. %struct.x = type { i32, i32, i32, i32 } -define void @test9(%struct.x* byval %a) nounwind { +define void @test9(%struct.x* byval(%struct.x) %a) nounwind { %tmp2 = getelementptr %struct.x, %struct.x* %a, i32 0, i32 0 store i32 1, i32* %tmp2, align 4 ret void @@ -263,13 +263,13 @@ ; The store here is not dead because the byval call reads it. -declare void @test19f({i32}* byval align 4 %P) +declare void @test19f({i32}* align 4 byval({i32}) %P) -define void @test19({i32} * nocapture byval align 4 %arg5) nounwind ssp { +define void @test19({i32} * nocapture align 4 byval({i32}) %arg5) nounwind ssp { bb: %tmp7 = getelementptr inbounds {i32}, {i32}* %arg5, i32 0, i32 0 store i32 912, i32* %tmp7 - call void @test19f({i32}* byval align 4 %arg5) + call void @test19f({i32}* align 4 byval({i32}) %arg5) ret void ; CHECK-LABEL: @test19( Index: test/Transforms/GVN/pr17852.ll =================================================================== --- test/Transforms/GVN/pr17852.ll +++ test/Transforms/GVN/pr17852.ll @@ -1,7 +1,7 @@ ; RUN: opt < %s -basicaa -gvn target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" %struct.S0 = type { [2 x i8], [2 x i8], [4 x i8], [2 x i8], i32, i32, i32, i32 } -define void @fn1(%struct.S0* byval align 8 %p1) { +define void @fn1(%struct.S0* align 8 byval(%struct.S0) %p1) { br label %for.cond for.cond: ; preds = %1, %0 br label %for.end Index: 
test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll =================================================================== --- test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll +++ test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll @@ -3,7 +3,7 @@ ; PR5038 %struct.MYstr = type { i8, i32 } @mystr = internal global %struct.MYstr zeroinitializer ; <%struct.MYstr*> [#uses=3] -define internal void @vfu1(%struct.MYstr* byval align 4 %u) nounwind { +define internal void @vfu1(%struct.MYstr* align 4 byval(%struct.MYstr) %u) nounwind { entry: %0 = getelementptr %struct.MYstr, %struct.MYstr* %u, i32 0, i32 1 ; [#uses=1] store i32 99, i32* %0, align 4 @@ -17,7 +17,7 @@ ret void } -define internal i32 @vfu2(%struct.MYstr* byval align 4 %u) nounwind readonly { +define internal i32 @vfu2(%struct.MYstr* align 4 byval(%struct.MYstr) %u) nounwind readonly { entry: %0 = getelementptr %struct.MYstr, %struct.MYstr* %u, i32 0, i32 1 ; [#uses=1] %1 = load i32, i32* %0 @@ -32,8 +32,8 @@ define i32 @unions() nounwind { entry: - call void @vfu1(%struct.MYstr* byval align 4 @mystr) nounwind - %result = call i32 @vfu2(%struct.MYstr* byval align 4 @mystr) nounwind + call void @vfu1(%struct.MYstr* align 4 byval(%struct.MYstr) @mystr) nounwind + %result = call i32 @vfu2(%struct.MYstr* align 4 byval(%struct.MYstr) @mystr) nounwind ret i32 %result } Index: test/Transforms/IndVarSimplify/loop_evaluate9.ll =================================================================== --- test/Transforms/IndVarSimplify/loop_evaluate9.ll +++ test/Transforms/IndVarSimplify/loop_evaluate9.ll @@ -72,11 +72,11 @@ declare fastcc void @cc70a02__complex_integers__complex.164(%struct.cc70a02__complex_integers__complex_type* noalias nocapture sret, i8 signext, i8 signext) nounwind -declare fastcc void @cc70a02__complex_integers__Osubtract.149(%struct.cc70a02__complex_integers__complex_type* noalias sret, %struct.cc70a02__complex_integers__complex_type* byval align 4) +declare fastcc void 
@cc70a02__complex_integers__Osubtract.149(%struct.cc70a02__complex_integers__complex_type* noalias sret, %struct.cc70a02__complex_integers__complex_type* align 4 byval(%struct.cc70a02__complex_integers__complex_type)) -declare fastcc void @cc70a02__complex_integers__Oadd.153(%struct.cc70a02__complex_integers__complex_type* noalias sret, %struct.cc70a02__complex_integers__complex_type* byval align 4, %struct.cc70a02__complex_integers__complex_type* byval align 4) +declare fastcc void @cc70a02__complex_integers__Oadd.153(%struct.cc70a02__complex_integers__complex_type* noalias sret, %struct.cc70a02__complex_integers__complex_type* align 4 byval(%struct.cc70a02__complex_integers__complex_type), %struct.cc70a02__complex_integers__complex_type* align 4 byval(%struct.cc70a02__complex_integers__complex_type)) -declare fastcc void @cc70a02__complex_multiplication.170(%struct.cc70a02__complex_integers__complex_type* noalias sret, %struct.cc70a02__complex_integers__complex_type* byval align 4) +declare fastcc void @cc70a02__complex_multiplication.170(%struct.cc70a02__complex_integers__complex_type* noalias sret, %struct.cc70a02__complex_integers__complex_type* align 4 byval(%struct.cc70a02__complex_integers__complex_type)) declare void @__gnat_rcheck_12(i8*, i32) noreturn Index: test/Transforms/Inline/alloca-merge-align.ll =================================================================== --- test/Transforms/Inline/alloca-merge-align.ll +++ test/Transforms/Inline/alloca-merge-align.ll @@ -5,7 +5,7 @@ %struct.s = type { i32, i32 } -define void @foo(%struct.s* byval nocapture readonly %a) { +define void @foo(%struct.s* nocapture readonly byval(%struct.s) %a) { entry: %x = alloca [2 x i32], align 4 %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0 @@ -20,7 +20,7 @@ ret void } -define void @foo0(%struct.s* byval nocapture readonly %a) { +define void @foo0(%struct.s* nocapture readonly byval(%struct.s) %a) { entry: %x = alloca [2 x i32] %a1 = getelementptr 
inbounds %struct.s, %struct.s* %a, i64 0, i32 0 @@ -35,7 +35,7 @@ ret void } -define void @foo1(%struct.s* byval nocapture readonly %a) { +define void @foo1(%struct.s* nocapture readonly byval(%struct.s) %a) { entry: %x = alloca [2 x i32], align 1 %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0 @@ -52,7 +52,7 @@ declare void @bar(i32*) #1 -define void @goo(%struct.s* byval nocapture readonly %a) { +define void @goo(%struct.s* nocapture readonly byval(%struct.s) %a) { entry: %x = alloca [2 x i32], align 32 %a1 = getelementptr inbounds %struct.s, %struct.s* %a, i64 0, i32 0 @@ -79,9 +79,9 @@ store i64 0, i64* %a, align 8 %a1 = bitcast i64* %a to i32* store i32 1, i32* %a1, align 8 - call void @foo(%struct.s* byval %tmpcast) + call void @foo(%struct.s* byval(%struct.s) %tmpcast) store i32 2, i32* %a1, align 8 - call void @goo(%struct.s* byval %tmpcast) + call void @goo(%struct.s* byval(%struct.s) %tmpcast) ret i32 0 } @@ -97,8 +97,8 @@ store i64 0, i64* %a, align 8 %a1 = bitcast i64* %a to i32* store i32 1, i32* %a1, align 8 - call void @foo0(%struct.s* byval %tmpcast) + call void @foo0(%struct.s* byval(%struct.s) %tmpcast) store i32 2, i32* %a1, align 8 - call void @goo(%struct.s* byval %tmpcast) + call void @goo(%struct.s* byval(%struct.s) %tmpcast) ret i32 0 } Index: test/Transforms/Inline/byval-tail-call.ll =================================================================== --- test/Transforms/Inline/byval-tail-call.ll +++ test/Transforms/Inline/byval-tail-call.ll @@ -9,7 +9,7 @@ declare void @ext(i32*) -define void @bar(i32* byval %x) { +define void @bar(i32* byval(i32) %x) { call void @ext(i32* %x) ret void } @@ -18,11 +18,11 @@ ; CHECK-LABEL: define void @foo( ; CHECK: llvm.lifetime.start ; CHECK: store i32 %2, i32* %x - call void @bar(i32* byval %x) + call void @bar(i32* byval(i32) %x) ret void } -define internal void @qux(i32* byval %x) { +define internal void @qux(i32* byval(i32) %x) { call void @ext(i32* %x) tail call void @ext(i32* 
null) ret void @@ -36,6 +36,6 @@ ; CHECK: {{^ *}}call void @ext(i32* nonnull %[[POS]] ; CHECK: tail call void @ext(i32* null) ; CHECK: ret void - tail call void @qux(i32* byval %x) + tail call void @qux(i32* byval(i32) %x) ret void } Index: test/Transforms/Inline/byval.ll =================================================================== --- test/Transforms/Inline/byval.ll +++ test/Transforms/Inline/byval.ll @@ -5,7 +5,7 @@ %struct.ss = type { i32, i64 } @.str = internal constant [10 x i8] c"%d, %lld\0A\00" ; <[10 x i8]*> [#uses=1] -define internal void @f(%struct.ss* byval %b) nounwind { +define internal void @f(%struct.ss* byval(%struct.ss) %b) nounwind { entry: %tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0 ; [#uses=2] %tmp1 = load i32, i32* %tmp, align 4 ; [#uses=1] @@ -23,7 +23,7 @@ store i32 1, i32* %tmp1, align 8 %tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1 ; [#uses=1] store i64 2, i64* %tmp4, align 4 - call void @f( %struct.ss* byval %S ) nounwind + call void @f( %struct.ss* byval(%struct.ss) %S ) nounwind ret i32 0 ; CHECK: @test1() ; CHECK: %S1 = alloca %struct.ss @@ -35,7 +35,7 @@ ; Inlining a byval struct should NOT cause an explicit copy ; into an alloca if the function is readonly -define internal i32 @f2(%struct.ss* byval %b) nounwind readonly { +define internal i32 @f2(%struct.ss* byval(%struct.ss) %b) nounwind readonly { entry: %tmp = getelementptr %struct.ss, %struct.ss* %b, i32 0, i32 0 ; [#uses=2] %tmp1 = load i32, i32* %tmp, align 4 ; [#uses=1] @@ -50,7 +50,7 @@ store i32 1, i32* %tmp1, align 8 %tmp4 = getelementptr %struct.ss, %struct.ss* %S, i32 0, i32 1 ; [#uses=1] store i64 2, i64* %tmp4, align 4 - %X = call i32 @f2( %struct.ss* byval %S ) nounwind + %X = call i32 @f2( %struct.ss* byval(%struct.ss) %S ) nounwind ret i32 %X ; CHECK: @test2() ; CHECK: %S = alloca %struct.ss @@ -64,7 +64,7 @@ ; PR8769 declare void @g3(%struct.ss* %p) -define internal void @f3(%struct.ss* byval align 64 %b) nounwind { +define 
internal void @f3(%struct.ss* align 64 byval(%struct.ss) %b) nounwind { call void @g3(%struct.ss* %b) ;; Could make alignment assumptions! ret void } @@ -72,7 +72,7 @@ define void @test3() nounwind { entry: %S = alloca %struct.ss, align 1 ;; May not be aligned. - call void @f3( %struct.ss* byval align 64 %S) nounwind + call void @f3( %struct.ss* align 64 byval(%struct.ss) %S) nounwind ret void ; CHECK: @test3() ; CHECK: %S1 = alloca %struct.ss, align 64 @@ -87,7 +87,7 @@ ; into an alloca if the function is readonly, but should increase an alloca's ; alignment to satisfy an explicit alignment request. -define internal i32 @f4(%struct.ss* byval align 64 %b) nounwind readonly { +define internal i32 @f4(%struct.ss* align 64 byval(%struct.ss) %b) nounwind readonly { call void @g3(%struct.ss* %b) ret i32 4 } @@ -95,7 +95,7 @@ define i32 @test4() nounwind { entry: %S = alloca %struct.ss, align 2 ; <%struct.ss*> [#uses=4] - %X = call i32 @f4( %struct.ss* byval align 64 %S ) nounwind + %X = call i32 @f4( %struct.ss* align 64 byval(%struct.ss) %S ) nounwind ret i32 %X ; CHECK: @test4() ; CHECK: %S = alloca %struct.ss, align 64 @@ -109,7 +109,7 @@ @b = global %struct.S0 { i32 1 }, align 4 @a = common global i32 0, align 4 -define internal void @f5(%struct.S0* byval nocapture readonly align 4 %p) { +define internal void @f5(%struct.S0* nocapture readonly align 4 byval(%struct.S0) %p) { entry: store i32 0, i32* getelementptr inbounds (%struct.S0, %struct.S0* @b, i64 0, i32 0), align 4 %f2 = getelementptr inbounds %struct.S0, %struct.S0* %p, i64 0, i32 0 @@ -120,7 +120,7 @@ define i32 @test5() { entry: - tail call void @f5(%struct.S0* byval align 4 @b) + tail call void @f5(%struct.S0* align 4 byval(%struct.S0) @b) %0 = load i32, i32* @a, align 4 ret i32 %0 ; CHECK: @test5() Index: test/Transforms/Inline/byval_lifetime.ll =================================================================== --- test/Transforms/Inline/byval_lifetime.ll +++ test/Transforms/Inline/byval_lifetime.ll @@ 
-8,7 +8,7 @@ @gFoo = global %struct.foo zeroinitializer, align 8 -define i32 @foo(%struct.foo* byval align 8 %f, i32 %a) { +define i32 @foo(%struct.foo* align 8 byval(%struct.foo) %f, i32 %a) { entry: %a1 = getelementptr inbounds %struct.foo, %struct.foo* %f, i32 0, i32 1 %arrayidx = getelementptr inbounds [16 x i32], [16 x i32]* %a1, i32 0, i32 %a @@ -21,6 +21,6 @@ ; CHECK: llvm.lifetime.start ; CHECK: memcpy entry: - %call = call i32 @foo(%struct.foo* byval align 8 @gFoo, i32 %argc) + %call = call i32 @foo(%struct.foo* align 8 byval(%struct.foo) @gFoo, i32 %argc) ret i32 %call } Index: test/Transforms/Inline/inline-byval-bonus.ll =================================================================== --- test/Transforms/Inline/inline-byval-bonus.ll +++ test/Transforms/Inline/inline-byval-bonus.ll @@ -18,7 +18,7 @@ %shadow_ray = alloca %struct.ray, align 8 call void @fix(%struct.ray* %shadow_ray) - %call = call i32 @ray_sphere(%struct.sphere* %i, %struct.ray* byval align 8 %shadow_ray, %struct.spoint* null) + %call = call i32 @ray_sphere(%struct.sphere* %i, %struct.ray* align 8 byval(%struct.ray) %shadow_ray, %struct.spoint* null) ret i32 %call ; CHECK-LABEL: @caller( @@ -28,7 +28,7 @@ declare void @fix(%struct.ray*) -define i32 @ray_sphere(%struct.sphere* nocapture %sph, %struct.ray* nocapture byval align 8 %ray, %struct.spoint* %sp) nounwind uwtable ssp { +define i32 @ray_sphere(%struct.sphere* nocapture %sph, %struct.ray* nocapture align 8 byval(%struct.ray) %ray, %struct.spoint* %sp) nounwind uwtable ssp { %1 = getelementptr inbounds %struct.ray, %struct.ray* %ray, i64 0, i32 1, i32 0 %2 = load double, double* %1, align 8 %3 = fmul double %2, %2 Index: test/Transforms/Inline/inline-tail.ll =================================================================== --- test/Transforms/Inline/inline-tail.ll +++ test/Transforms/Inline/inline-tail.ll @@ -55,13 +55,13 @@ ; CHECK: musttail call void @test_byval_c( ; CHECK-NEXT: ret void -declare void @test_byval_c(i32* byval 
%p) -define internal void @test_byval_b(i32* byval %p) { - musttail call void @test_byval_c(i32* byval %p) +declare void @test_byval_c(i32* byval(i32) %p) +define internal void @test_byval_b(i32* byval(i32) %p) { + musttail call void @test_byval_c(i32* byval(i32) %p) ret void } -define void @test_byval_a(i32* byval %p) { - musttail call void @test_byval_b(i32* byval %p) +define void @test_byval_a(i32* byval(i32) %p) { + musttail call void @test_byval_b(i32* byval(i32) %p) ret void } @@ -73,15 +73,15 @@ ; CHECK-NEXT: ret void declare void @escape(i8* %buf) -declare void @test_dynalloca_c(i32* byval %p, i32 %n) -define internal void @test_dynalloca_b(i32* byval %p, i32 %n) alwaysinline { +declare void @test_dynalloca_c(i32* byval(i32) %p, i32 %n) +define internal void @test_dynalloca_b(i32* byval(i32) %p, i32 %n) alwaysinline { %buf = alloca i8, i32 %n ; dynamic alloca call void @escape(i8* %buf) ; escape it - musttail call void @test_dynalloca_c(i32* byval %p, i32 %n) + musttail call void @test_dynalloca_c(i32* byval(i32) %p, i32 %n) ret void } -define void @test_dynalloca_a(i32* byval %p, i32 %n) { - musttail call void @test_dynalloca_b(i32* byval %p, i32 %n) +define void @test_dynalloca_a(i32* byval(i32) %p, i32 %n) { + musttail call void @test_dynalloca_b(i32* byval(i32) %p, i32 %n) ret void } Index: test/Transforms/InstCombine/2008-04-22-ByValBitcast.ll =================================================================== --- test/Transforms/InstCombine/2008-04-22-ByValBitcast.ll +++ test/Transforms/InstCombine/2008-04-22-ByValBitcast.ll @@ -8,7 +8,7 @@ define void @foo(i8* %context) nounwind { entry: %tmp1 = bitcast i8* %context to %struct.NSRect* ; <%struct.NSRect*> [#uses=1] - call void (i32, ...) @bar( i32 3, %struct.NSRect* byval align 4 %tmp1 ) nounwind + call void (i32, ...) 
@bar( i32 3, %struct.NSRect* align 4 byval(%struct.NSRect) %tmp1 ) nounwind ret void } Index: test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll =================================================================== --- test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll +++ test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll @@ -20,7 +20,7 @@ %3 = getelementptr %struct.Key, %struct.Key* %iospec, i32 0, i32 0 ; <{ i32, i32 }*> [#uses=1] %4 = bitcast { i32, i32 }* %3 to i64* ; [#uses=1] store i64 %key_token2, i64* %4, align 4 - %5 = call i32 (...) @foo(%struct.Key* byval align 4 %iospec, i32* %ret) nounwind ; [#uses=0] + %5 = call i32 (...) @foo(%struct.Key* align 4 byval(%struct.Key) %iospec, i32* %ret) nounwind ; [#uses=0] %6 = load i32, i32* %ret, align 4 ; [#uses=1] ret i32 %6 } Index: test/Transforms/InstCombine/call-cast-target.ll =================================================================== --- test/Transforms/InstCombine/call-cast-target.ll +++ test/Transforms/InstCombine/call-cast-target.ll @@ -73,7 +73,7 @@ ret i32 %call } -declare i1 @fn5({ i32, i32 }* byval align 4 %r) +declare i1 @fn5({ i32, i32 }* align 4 byval({ i32, i32 }) %r) define i1 @test5() { ; CHECK-LABEL: @test5 Index: test/Transforms/InstCombine/crash.ll =================================================================== --- test/Transforms/InstCombine/crash.ll +++ test/Transforms/InstCombine/crash.ll @@ -294,7 +294,7 @@ define void @test14() nounwind readnone { entry: %tmp = bitcast i32 (i8* (i8*)*)* @test14f to i32 (i32*)* - %call10 = call i32 %tmp(i32* byval undef) + %call10 = call i32 %tmp(i32* byval(i32) undef) ret void } Index: test/Transforms/InstCombine/memcpy-from-global.ll =================================================================== --- test/Transforms/InstCombine/memcpy-from-global.ll +++ test/Transforms/InstCombine/memcpy-from-global.ll @@ -120,9 +120,9 @@ %A = alloca %T %a = bitcast %T* %A to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast (%T* @G to 
i8*), i64 124, i32 4, i1 false) - call void @baz(i8* byval %a) + call void @baz(i8* byval(i8) %a) ; CHECK-LABEL: @test4( -; CHECK-NEXT: call void @baz(i8* byval getelementptr inbounds (%T, %T* @G, i64 0, i32 0)) +; CHECK-NEXT: call void @baz(i8* byval(i8) getelementptr inbounds (%T, %T* @G, i64 0, i32 0)) ret void } @@ -132,14 +132,14 @@ %a = bitcast %T* %A to i8* call void @llvm.lifetime.start(i64 -1, i8* %a) call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast (%T* @G to i8*), i64 124, i32 4, i1 false) - call void @baz(i8* byval %a) + call void @baz(i8* byval(i8) %a) ; CHECK-LABEL: @test5( -; CHECK-NEXT: call void @baz(i8* byval getelementptr inbounds (%T, %T* @G, i64 0, i32 0)) +; CHECK-NEXT: call void @baz(i8* byval(i8) getelementptr inbounds (%T, %T* @G, i64 0, i32 0)) ret void } -declare void @baz(i8* byval) +declare void @baz(i8* byval(i8)) define void @test6() { Index: test/Transforms/MemCpyOpt/memcpy.ll =================================================================== --- test/Transforms/MemCpyOpt/memcpy.ll +++ test/Transforms/MemCpyOpt/memcpy.ll @@ -70,13 +70,13 @@ %A = alloca %1 %a = bitcast %1* %A to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %P, i64 8, i32 4, i1 false) - call void @test4a(i8* align 1 byval %a) + call void @test4a(i8* align 1 byval(i8) %a) ret void ; CHECK-LABEL: @test4( ; CHECK-NEXT: call void @test4a( } -declare void @test4a(i8* align 1 byval) +declare void @test4a(i8* align 1 byval(i8)) declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture, i64, i32, i1) nounwind @@ -84,7 +84,7 @@ @sS = external global %struct.S, align 16 -declare void @test5a(%struct.S* align 16 byval) nounwind ssp +declare void @test5a(%struct.S* align 16 byval(%struct.S)) nounwind ssp ; rdar://8713376 - This memcpy can't be eliminated. 
@@ -95,11 +95,11 @@ call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp, i8* bitcast (%struct.S* @sS to i8*), i64 32, i32 16, i1 false) %a = getelementptr %struct.S, %struct.S* %y, i64 0, i32 1, i64 0 store i8 4, i8* %a - call void @test5a(%struct.S* align 16 byval %y) + call void @test5a(%struct.S* align 16 byval(%struct.S) %y) ret i32 0 ; CHECK-LABEL: @test5( ; CHECK: store i8 4 - ; CHECK: call void @test5a(%struct.S* byval align 16 %y) + ; CHECK: call void @test5a(%struct.S* align 16 byval(%struct.S) %y) } ;; Noop memcpy should be zapped. @@ -115,19 +115,19 @@ ; isn't itself 8 byte aligned. %struct.p = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } -define i32 @test7(%struct.p* nocapture align 8 byval %q) nounwind ssp { +define i32 @test7(%struct.p* nocapture align 8 byval(%struct.p) %q) nounwind ssp { entry: %agg.tmp = alloca %struct.p, align 4 %tmp = bitcast %struct.p* %agg.tmp to i8* %tmp1 = bitcast %struct.p* %q to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp, i8* %tmp1, i64 48, i32 4, i1 false) - %call = call i32 @g(%struct.p* align 8 byval %agg.tmp) nounwind + %call = call i32 @g(%struct.p* align 8 byval(%struct.p) %agg.tmp) nounwind ret i32 %call ; CHECK-LABEL: @test7( -; CHECK: call i32 @g(%struct.p* byval align 8 %q) [[NUW:#[0-9]+]] +; CHECK: call i32 @g(%struct.p* align 8 byval(%struct.p) %q) [[NUW:#[0-9]+]] } -declare i32 @g(%struct.p* align 8 byval) +declare i32 @g(%struct.p* align 8 byval(%struct.p)) declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind Index: test/Transforms/MemCpyOpt/smaller.ll =================================================================== --- test/Transforms/MemCpyOpt/smaller.ll +++ test/Transforms/MemCpyOpt/smaller.ll @@ -13,7 +13,7 @@ @.str = private constant [11 x i8] c"0123456789\00" @cell = external global %struct.s -declare void @check(%struct.s* byval %p) nounwind +declare void @check(%struct.s* byval(%struct.s) %p) nounwind declare void 
@llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind define void @foo() nounwind { @@ -23,6 +23,6 @@ call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds (%struct.s, %struct.s* @cell, i32 0, i32 0, i32 0), i8* getelementptr inbounds ([11 x i8], [11 x i8]* @.str, i32 0, i32 0), i32 11, i32 1, i1 false) %tmp = getelementptr inbounds %struct.s, %struct.s* %agg.tmp, i32 0, i32 0, i32 0 call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp, i8* getelementptr inbounds (%struct.s, %struct.s* @cell, i32 0, i32 0, i32 0), i32 16, i32 4, i1 false) - call void @check(%struct.s* byval %agg.tmp) + call void @check(%struct.s* byval(%struct.s) %agg.tmp) ret void } Index: test/Transforms/MemCpyOpt/sret.ll =================================================================== --- test/Transforms/MemCpyOpt/sret.ll +++ test/Transforms/MemCpyOpt/sret.ll @@ -5,7 +5,7 @@ %0 = type { x86_fp80, x86_fp80 } -define void @ccosl(%0* noalias sret %agg.result, %0* byval align 8 %z) nounwind { +define void @ccosl(%0* noalias sret %agg.result, %0* align 8 byval(%0) %z) nounwind { entry: %iz = alloca %0 %memtmp = alloca %0, align 16 @@ -18,13 +18,13 @@ %tmp8 = load x86_fp80, x86_fp80* %tmp7, align 16 store x86_fp80 %tmp3, x86_fp80* %real, align 16 store x86_fp80 %tmp8, x86_fp80* %tmp4, align 16 - call void @ccoshl(%0* noalias sret %memtmp, %0* byval align 8 %iz) nounwind + call void @ccoshl(%0* noalias sret %memtmp, %0* align 8 byval(%0) %iz) nounwind %memtmp14 = bitcast %0* %memtmp to i8* %agg.result15 = bitcast %0* %agg.result to i8* call void @llvm.memcpy.p0i8.p0i8.i32(i8* %agg.result15, i8* %memtmp14, i32 32, i32 16, i1 false) ret void } -declare void @ccoshl(%0* noalias nocapture sret, %0* byval) nounwind +declare void @ccoshl(%0* noalias nocapture sret, %0* byval(%0)) nounwind declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind Index: test/Transforms/SafeStack/byval.ll 
=================================================================== --- test/Transforms/SafeStack/byval.ll +++ test/Transforms/SafeStack/byval.ll @@ -7,7 +7,7 @@ %struct.S = type { [100 x i32] } ; Safe access to a byval argument. -define i32 @ByValSafe(%struct.S* byval nocapture readonly align 8 %zzz) norecurse nounwind readonly safestack uwtable { +define i32 @ByValSafe(%struct.S* nocapture readonly align 8 byval(%struct.S) %zzz) norecurse nounwind readonly safestack uwtable { entry: ; CHECK-LABEL: @ByValSafe ; CHECK-NOT: __safestack_unsafe_stack_ptr @@ -19,7 +19,7 @@ ; Unsafe access to a byval argument. ; Argument is copied to the unsafe stack. -define i32 @ByValUnsafe(%struct.S* byval nocapture readonly align 8 %zzz, i64 %idx) norecurse nounwind readonly safestack uwtable { +define i32 @ByValUnsafe(%struct.S* nocapture readonly align 8 byval(%struct.S) %zzz, i64 %idx) norecurse nounwind readonly safestack uwtable { entry: ; CHECK-LABEL: @ByValUnsafe ; CHECK: %[[A:.*]] = load {{.*}} @__safestack_unsafe_stack_ptr @@ -34,7 +34,7 @@ } ; Highly aligned byval argument. 
-define i32 @ByValUnsafeAligned(%struct.S* byval nocapture readonly align 64 %zzz, i64 %idx) norecurse nounwind readonly safestack uwtable { +define i32 @ByValUnsafeAligned(%struct.S* nocapture readonly align 64 byval(%struct.S) %zzz, i64 %idx) norecurse nounwind readonly safestack uwtable { entry: ; CHECK-LABEL: @ByValUnsafeAligned ; CHECK: %[[A:.*]] = load {{.*}} @__safestack_unsafe_stack_ptr Index: test/Transforms/SafeStack/debug-loc.ll =================================================================== --- test/Transforms/SafeStack/debug-loc.ll +++ test/Transforms/SafeStack/debug-loc.ll @@ -8,7 +8,7 @@ %struct.S = type { [100 x i8] } ; Function Attrs: safestack uwtable -define void @f(%struct.S* byval align 8 %zzz) #0 !dbg !12 { +define void @f(%struct.S* align 8 byval(%struct.S) %zzz) #0 !dbg !12 { ; CHECK: define void @f entry: Index: test/Transforms/ScalarRepl/only-memcpy-uses.ll =================================================================== --- test/Transforms/ScalarRepl/only-memcpy-uses.ll +++ test/Transforms/ScalarRepl/only-memcpy-uses.ll @@ -5,7 +5,7 @@ %struct.S = type { [12 x i32] } ; CHECK-LABEL: @bar4( -define void @bar4(%struct.S* byval %s) nounwind ssp { +define void @bar4(%struct.S* byval(%struct.S) %s) nounwind ssp { entry: ; CHECK: alloca ; CHECK-NOT: load @@ -18,7 +18,7 @@ %tmp2 = bitcast %struct.S* %agg.tmp to i8* %tmp3 = bitcast %struct.S* %t to i8* call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp2, i8* %tmp3, i64 48, i32 4, i1 false) - %call = call i32 (...) @bazz(%struct.S* byval %agg.tmp) + %call = call i32 (...) @bazz(%struct.S* byval(%struct.S) %agg.tmp) ret void } Index: test/Transforms/TailCallElim/basic.ll =================================================================== --- test/Transforms/TailCallElim/basic.ll +++ test/Transforms/TailCallElim/basic.ll @@ -145,7 +145,7 @@ } ; Don't tail call if a byval arg is captured. 
-define void @test9(i32* byval %a) { +define void @test9(i32* byval(i32) %a) { ; CHECK-LABEL: define void @test9( ; CHECK: {{^ *}}call void @use( call void @use(i32* %a) Index: test/Verifier/byval-4.ll =================================================================== --- test/Verifier/byval-4.ll +++ test/Verifier/byval-4.ll @@ -1,4 +1,4 @@ ; RUN: llvm-as %s -o /dev/null %struct.foo = type { i64 } -declare void @h(%struct.foo* byval %num) +declare void @h(%struct.foo* byval(%struct.foo) %num) Index: test/Verifier/inalloca1.ll =================================================================== --- test/Verifier/inalloca1.ll +++ test/Verifier/inalloca1.ll @@ -1,6 +1,6 @@ ; RUN: not llvm-as %s -o /dev/null 2>&1 | FileCheck %s -declare void @a(i64* byval inalloca %p) +declare void @a(i64* inalloca byval(i64) %p) ; CHECK: Attributes {{.*}} are incompatible declare void @b(i64* inreg inalloca %p) Index: test/Verifier/musttail-invalid.ll =================================================================== --- test/Verifier/musttail-invalid.ll +++ test/Verifier/musttail-invalid.ll @@ -40,7 +40,7 @@ } declare void @mismatched_byval_callee({ i32 }*) -define void @mismatched_byval({ i32 }* byval %a) { +define void @mismatched_byval({ i32 }* byval({ i32 }) %a) { ; CHECK: mismatched ABI impacting function attributes musttail call void @mismatched_byval_callee({ i32 }* %a) ret void @@ -60,10 +60,10 @@ ret void } -declare void @mismatched_alignment_callee(i32* byval align 8) -define void @mismatched_alignment(i32* byval align 4 %a) { +declare void @mismatched_alignment_callee(i32* align 8 byval(i32)) +define void @mismatched_alignment(i32* align 4 byval(i32) %a) { ; CHECK: mismatched ABI impacting function attributes - musttail call void @mismatched_alignment_callee(i32* byval align 8 %a) + musttail call void @mismatched_alignment_callee(i32* align 8 byval(i32) %a) ret void }