Index: llvm/lib/CodeGen/MachineOutliner.cpp
===================================================================
--- llvm/lib/CodeGen/MachineOutliner.cpp
+++ llvm/lib/CodeGen/MachineOutliner.cpp
@@ -56,6 +56,7 @@
 //===----------------------------------------------------------------------===//
 #include "llvm/CodeGen/MachineOutliner.h"
 #include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/Twine.h"
 #include "llvm/CodeGen/MachineFunction.h"
@@ -1245,31 +1246,48 @@
     // make sure that the ranges we yank things out of aren't wrong.
     if (MBB.getParent()->getProperties().hasProperty(
             MachineFunctionProperties::Property::TracksLiveness)) {
-      // Helper lambda for adding implicit def operands to the call
+      // The following code adds implicit def operands to the call
       // instruction. It also updates call site information for moved
       // code.
-      auto CopyDefsAndUpdateCalls = [&CallInst](MachineInstr &MI) {
-        for (MachineOperand &MOP : MI.operands()) {
+      SmallSet<Register, 2> UseRegs;
+      // Copy over the defs in the outlined range: anything that's
+      // defined between the first and the last instruction of the
+      // outlined range has to be added as an implicit def to the call
+      // instruction. Also remove call site information for the outlined
+      // block of code. Uses that are live into the outlined range (the
+      // exposed uses) have to be added as implicit uses of the call.
+      for (MachineBasicBlock::reverse_iterator Iter = EndIt.getReverse(),
+                                               Last = std::next(CallInst.getReverse());
+           Iter != Last; Iter++) {
+        MachineInstr *MI = &*Iter;
+        for (MachineOperand &MOP : MI->operands()) {
           // Skip over anything that isn't a register.
           if (!MOP.isReg())
             continue;
 
           // If it's a def, add it to the call instruction.
-          if (MOP.isDef())
+          if (MOP.isDef()) {
             CallInst->addOperand(MachineOperand::CreateReg(
                 MOP.getReg(), true, /* isDef = true */
                 true /* isImp = true */));
+            if (UseRegs.count(MOP.getReg()))
+              // Since the register is modeled as defined, it no
+              // longer needs to stay in the use register set.
+              UseRegs.erase(MOP.getReg());
+          } else if (!MOP.isUndef()) {
+            // Any register that is not undefined should be added
+            // to the use register set.
+            UseRegs.insert(MOP.getReg());
+          }
         }
-        if (MI.isCandidateForCallSiteEntry())
-          MI.getMF()->eraseCallSiteInfo(&MI);
-      };
-      // Copy over the defs in the outlined range.
-      // First inst in outlined range <-- Anything that's defined in this
-      // ...                           .. range has to be added as an
-      // implicit Last inst in outlined range <-- def to the call
-      // instruction. Also remove call site information for outlined block
-      // of code.
-      std::for_each(CallInst, std::next(EndIt), CopyDefsAndUpdateCalls);
+        if (MI->isCandidateForCallSiteEntry())
+          MI->getMF()->eraseCallSiteInfo(MI);
+      }
+      for (const Register &I : UseRegs)
+        // If it's an exposed use, add it to the call instruction.
+        CallInst->addOperand(
+            MachineOperand::CreateReg(I, false, /* isDef = false */
+                                      true /* isImp = true */));
     }
 
     // Erase from the point after where the call was inserted up to, and
Index: llvm/test/CodeGen/AArch64/machine-outliner-side-effect.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AArch64/machine-outliner-side-effect.ll
@@ -0,0 +1,374 @@
+; RUN: llc -verify-machineinstrs -enable-machine-outliner -mtriple=aarch64-apple-darwin -stop-after=machine-outliner < %s | FileCheck %s
+
+; The test checks that the compiler updates the side effects of the call to @OUTLINED_FUNCTION_3 by adding an implicit use of register x27.
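+;
+; A sketch of why this is the expected behavior (illustrative, not checked
+; literally): registers defined inside the outlined range have to become
+; implicit defs of the call to the outlined function, and registers that are
+; live into the range (its exposed uses, x27 here) have to become implicit
+; uses, giving a call of roughly this shape:
+;   BL @OUTLINED_FUNCTION_3, <implicit defs of the range>, implicit $x27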
+
+%0 = type { double, double, double, double }
+%1 = type { i64 }
+%2 = type { %1*, i64 }
+%3 = type opaque
+%4 = type <{ [41 x i8] }>
+%5 = type opaque
+%6 = type <{ %7 }>
+%7 = type <{ %8 }>
+%8 = type <{ %3* }>
+%9 = type <{ [40 x i8] }>
+%10 = type <{ %2, %9, %11, %13, %14, %15, %16, %16, %16, %16, %16, %16, %16, %16, %16 }>
+%11 = type <{ %12 }>
+%12 = type <{ double }>
+%13 = type <{ [8 x i8] }>
+%14 = type { %5* }
+%15 = type { %2* }
+%16 = type <{ [1 x i8] }>
+%17 = type <{ %2, %6 }>
+%18 = type <{ %2, %19, %6 }>
+%19 = type <{ %11, %11, %11, %11 }>
+%20 = type <{ %2, %21 }>
+%21 = type <{ %22 }>
+%22 = type <{ %23, %23 }>
+%23 = type <{ i64 }>
+%24 = type <{ i8 }>
+
+@global = external local_unnamed_addr constant %0, align 8
+@global.3 = external hidden local_unnamed_addr global %1*, align 8
+@global.4 = external global %1
+
+; Function Attrs: nounwind
+declare void @blam(%2*) local_unnamed_addr #0
+
+; Function Attrs: nounwind
+declare %2* @widget(%2* returned) local_unnamed_addr #0
+
+; Function Attrs: nounwind
+declare %3* @blam.5(%3* returned) local_unnamed_addr #0
+
+; Function Attrs: argmemonly nounwind willreturn
+declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #1
+
+; Function Attrs: nounwind
+declare void @foo(i8*, [24 x i8]*, i64, i8*) local_unnamed_addr #0
+
+; Function Attrs: nounwind
+declare %2* @widget.6(%1*, i64, i64) local_unnamed_addr #0
+
+; Function Attrs: nounwind
+declare void @widget.7(%3*) local_unnamed_addr #0
+
+; Function Attrs: argmemonly nounwind willreturn
+declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #1
+
+; Function Attrs: noinline nounwind
+declare hidden %4* @hoge(%4*) local_unnamed_addr #2
+
+; Function Attrs: nounwind
+declare void @blam.8([24 x i8]*) local_unnamed_addr #0
+
+; Function Attrs: nounwind
+declare %2* @blam.9(%1*, %2*) local_unnamed_addr #0
+
+; Function Attrs: cold noreturn nounwind
+declare void @llvm.trap() #3
+
+; Function Attrs: minsize
+declare swiftcc %2 @barney(i64, %1*) local_unnamed_addr #4
+
+; Function Attrs: minsize noinline nounwind readnone
+declare hidden swiftcc %2 @foo.10(i64, %1**, %1*, %2 (i64, %1*)*) local_unnamed_addr #5
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare { i64, i1 } @llvm.sadd.with.overflow.i64(i64, i64) #6
+
+; Function Attrs: minsize
+declare swiftcc i64 @spam(%5*) local_unnamed_addr #4
+
+; Function Attrs: minsize
+declare hidden swiftcc void @blam.11(i64, %6* nocapture swiftself dereferenceable(8), void (i64, %6*)*) local_unnamed_addr #4
+
+; Function Attrs: noinline nounwind
+declare hidden %9* @foo.12(%9*, %9*) local_unnamed_addr #2
+
+; Function Attrs: minsize noinline nounwind readnone
+declare hidden swiftcc %2 @zot(i64) #5
+
+; Function Attrs: minsize noinline nounwind readnone
+declare hidden swiftcc %2 @foo.13(i64) #5
+
+; Function Attrs: minsize
+declare hidden swiftcc i1 @baz(%10* swiftself) #4
+
+; Function Attrs: minsize
+define hidden swiftcc void @baz.14(%10* swiftself %arg) #4 {
+bb:
+  %tmp = alloca %9, align 8
+  %tmp1 = alloca [24 x i8], align 8
+  %tmp2 = alloca [33 x i8], align 8
+  %tmp3 = alloca [24 x i8], align 8
+  %tmp4 = alloca [24 x i8], align 8
+  %tmp5 = alloca [36 x i8], align 8
+  %tmp6 = alloca [24 x i8], align 8
+  %tmp7 = alloca [34 x i8], align 8
+  %tmp8 = alloca [24 x i8], align 8
+  %tmp9 = alloca [24 x i8], align 8
+  %tmp10 = alloca [24 x i8], align 8
+  %tmp11 = tail call swiftcc %17* @bar(%1* swiftself undef)
+  %tmp12 = tail call swiftcc %2 @wibble(i64 0) #10
+  %tmp13 = extractvalue %2 %tmp12, 0
+  %tmp14 = getelementptr inbounds %9, %9* %tmp, i64 0, i32 0, i64 0
+  call void @llvm.lifetime.start.p0i8(i64 40, i8* nonnull %tmp14)
+  %tmp15 = getelementptr inbounds %10, %10* %arg, i64 0, i32 1
+  %tmp16 = getelementptr inbounds [24 x i8], [24 x i8]* %tmp1, i64 0, i64 0
+  call void @llvm.lifetime.start.p0i8(i64 -1, i8* nonnull %tmp16)
+  %tmp17 = getelementptr inbounds %9, %9* %tmp15, i64 0, i32 0, i64 0
+  call void @foo(i8* nonnull %tmp17, [24 x i8]* nonnull %tmp1, i64 0, i8* null) #0
+  %tmp18 = call %9* @foo.12(%9* %tmp15, %9* %tmp)
+  %tmp19 = call swiftcc i1 @widget.20(%9* noalias nocapture nonnull dereferenceable(40) %tmp, %1* swiftself %tmp13)
+  %tmp20 = call %9* bitcast (%4* (%4*)* @hoge to %9* (%9*)*)(%9* %tmp)
+  call void @llvm.lifetime.end.p0i8(i64 40, i8* nonnull %tmp14)
+  br label %bb28
+
+bb28:                                             ; preds = %bb21, %bb
+  %tmp29 = call swiftcc double @spam.23(double 2.500000e+00)
+  %tmp30 = call swiftcc double @spam.21()
+  %tmp31 = call swiftcc double @spam.23(double 2.500000e+00)
+  %tmp32 = call swiftcc double @spam.21()
+  %tmp33 = call swiftcc %2 @foo.13(i64 undef) #10
+  %tmp34 = extractvalue %2 %tmp33, 0
+  %tmp35 = call noalias %2* @widget.6(%1* %tmp34, i64 56, i64 7) #0
+  %tmp36 = bitcast %2* %tmp35 to %18*
+  %tmp37 = call swiftcc %18* @barney.16(double %tmp29, double %tmp30, double %tmp31, double %tmp32, %18* returned swiftself %tmp36)
+  %tmp38 = load double, double* getelementptr inbounds (%0, %0* @global, i64 0, i32 0), align 8
+  %tmp39 = load double, double* getelementptr inbounds (%0, %0* @global, i64 0, i32 1), align 8
+  %tmp40 = load double, double* getelementptr inbounds (%0, %0* @global, i64 0, i32 2), align 8
+  %tmp41 = load double, double* getelementptr inbounds (%0, %0* @global, i64 0, i32 3), align 8
+  %tmp42 = call swiftcc %2 @zot(i64 undef) #10
+  %tmp43 = extractvalue %2 %tmp42, 0
+  %tmp44 = call noalias %2* @widget.6(%1* %tmp43, i64 56, i64 7) #0
+  %tmp45 = bitcast %2* %tmp44 to %18*
+  %tmp46 = call swiftcc %18* @barney.16(double %tmp38, double %tmp39, double %tmp40, double %tmp41, %18* swiftself %tmp45)
+  %tmp47 = call swiftcc %2 @foo.10(i64 0, %1** @global.3, %1* @global.4, %2 (i64, %1*)* @barney)
+  %tmp48 = extractvalue %2 %tmp47, 0
+  %tmp49 = bitcast [33 x i8]* %tmp2 to %2*
+  %tmp50 = call %2* @blam.9(%1* %tmp48, %2* nonnull %tmp49) #0
+  %tmp51 = getelementptr inbounds %2, %2* %tmp50, i64 1
+  %tmp52 = bitcast %2* %tmp51 to <2 x i64>*
+  store <2 x i64> , <2 x i64>* %tmp52, align 8
+  %tmp53 = bitcast %2* %tmp50 to %3*
+  %tmp54 = getelementptr inbounds %2, %2* %tmp50, i64 2
+  %tmp55 = bitcast %2* %tmp54 to i8*
+  store i8 6, i8* %tmp55, align 8
+  call swiftcc void @barney.17(%3* %tmp53, %18* swiftself %tmp46)
+  call void @eggs(%2* %tmp50) #0
+  %tmp56 = bitcast %2* %tmp50 to i8*
+  call void @llvm.lifetime.end.p0i8(i64 -1, i8* %tmp56)
+  %tmp57 = getelementptr inbounds %18, %18* %tmp37, i64 0, i32 2
+  %tmp58 = getelementptr inbounds [24 x i8], [24 x i8]* %tmp3, i64 0, i64 0
+  call void @llvm.lifetime.start.p0i8(i64 -1, i8* nonnull %tmp58)
+  %tmp59 = bitcast %6* %tmp57 to i8*
+  call void @foo(i8* nonnull %tmp59, [24 x i8]* nonnull %tmp3, i64 33, i8* null) #0
+  %tmp60 = getelementptr inbounds %18, %18* %tmp46, i64 0, i32 0
+  %tmp61 = call %2* @widget(%2* returned %tmp60) #0
+  call swiftcc void @quux(%6* nocapture swiftself dereferenceable(8) %tmp57, void (i64, %6*)* @wobble)
+  %tmp62 = getelementptr inbounds %6, %6* %tmp57, i64 0, i32 0, i32 0, i32 0
+  %tmp63 = load %3*, %3** %tmp62, align 8
+  %tmp64 = ptrtoint %3* %tmp63 to i64
+  %tmp65 = icmp ugt %3* %tmp63, inttoptr (i64 4611686018427387903 to %3*)
+  br label %bb66
+
+bb66:                                             ; preds = %bb28
+  %tmp67 = icmp sgt %3* %tmp63, inttoptr (i64 -1 to %3*)
+  %tmp68 = bitcast %3* %tmp63 to %5*
+  %tmp69 = and i64 %tmp64, 1152921504606846968
+  %tmp70 = inttoptr i64 %tmp69 to %5*
+  %tmp71 = select i1 %tmp67, %5* %tmp70, %5* %tmp68
+  %tmp72 = call %3* @blam.5(%3* returned %tmp63) #0
+  %tmp73 = call swiftcc i64 @spam(%5* %tmp71)
+  %tmp74 = call swiftcc i64 @spam(%5* %tmp71)
+  %tmp75 = call swiftcc i64 @spam(%5* %tmp71)
+  %tmp76 = icmp slt i64 %tmp75, %tmp73
+  %tmp77 = or i64 %tmp74, %tmp73
+  %tmp78 = icmp slt i64 %tmp77, 0
+  %tmp79 = or i1 %tmp78, %tmp76
+  br label %bb80
+
+bb80:                                             ; preds = %bb66
+  call void @widget.7(%3* %tmp63) #0
+  br label %bb86
+
+bb86:                                             ; preds = %bb81, %bb80
+  call swiftcc void @blam.11(i64 %tmp73, %6* nocapture swiftself dereferenceable(8) %tmp57, void (i64, %6*)* @wobble)
+  %tmp88 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %tmp73, i64 1)
+  %tmp89 = extractvalue { i64, i1 } %tmp88, 1
+  br label %bb90
+
+bb90:                                             ; preds = %bb86
+  %tmp91 = extractvalue { i64, i1 } %tmp88, 0
+  %tmp92 = bitcast %6* %tmp57 to i64*
+  %tmp93 = load i64, i64* %tmp92, align 8
+  %tmp94 = and i64 %tmp93, 1152921504606846968
+  %tmp95 = inttoptr i64 %tmp94 to %20*
+  %tmp96 = getelementptr inbounds %20, %20* %tmp95, i64 0, i32 1, i32 0, i32 0, i32 0
+  store i64 %tmp91, i64* %tmp96, align 8
+  %tmp97 = inttoptr i64 %tmp94 to i8*
+  %tmp98 = getelementptr inbounds i8, i8* %tmp97, i64 32
+  %tmp99 = bitcast i8* %tmp98 to %18**
+  %tmp100 = getelementptr inbounds %18*, %18** %tmp99, i64 %tmp73
+  store %18* %tmp46, %18** %tmp100, align 8
+  call void @blam.8([24 x i8]* nonnull %tmp3) #0
+  call void @llvm.lifetime.end.p0i8(i64 -1, i8* nonnull %tmp58)
+  %tmp101 = getelementptr inbounds %17, %17* %tmp11, i64 0, i32 1
+  %tmp102 = getelementptr inbounds [24 x i8], [24 x i8]* %tmp4, i64 0, i64 0
+  call void @llvm.lifetime.start.p0i8(i64 -1, i8* nonnull %tmp102)
+  %tmp103 = bitcast %6* %tmp101 to i8*
+  call void @foo(i8* nonnull %tmp103, [24 x i8]* nonnull %tmp4, i64 33, i8* null) #0
+  %tmp104 = getelementptr inbounds %18, %18* %tmp37, i64 0, i32 0
+  %tmp105 = call %2* @widget(%2* returned %tmp104) #0
+  call swiftcc void @quux(%6* nocapture swiftself dereferenceable(8) %tmp101, void (i64, %6*)* @baz.18)
+  %tmp106 = getelementptr inbounds %6, %6* %tmp101, i64 0, i32 0, i32 0, i32 0
+  %tmp107 = load %3*, %3** %tmp106, align 8
+  %tmp108 = ptrtoint %3* %tmp107 to i64
+  %tmp109 = icmp ugt %3* %tmp107, inttoptr (i64 4611686018427387903 to %3*)
+  br label %bb110
+
+bb110:                                            ; preds = %bb90
+  %tmp111 = icmp sgt %3* %tmp107, inttoptr (i64 -1 to %3*)
+  %tmp112 = bitcast %3* %tmp107 to %5*
+  %tmp113 = and i64 %tmp108, 1152921504606846968
+  %tmp114 = inttoptr i64 %tmp113 to %5*
+  %tmp115 = select i1 %tmp111, %5* %tmp114, %5* %tmp112
+  %tmp116 = call %3* @blam.5(%3* returned %tmp107) #0
+  %tmp117 = call swiftcc i64 @spam(%5* %tmp115)
+  %tmp118 = call swiftcc i64 @spam(%5* %tmp115)
+  %tmp119 = call swiftcc i64 @spam(%5* %tmp115)
+  %tmp120 = icmp slt i64 %tmp119, %tmp117
+  %tmp121 = or i64 %tmp118, %tmp117
+  %tmp122 = icmp slt i64 %tmp121, 0
+  %tmp123 = or i1 %tmp122, %tmp120
+  br label %bb124
+
+bb124:                                            ; preds = %bb110
+  call void @widget.7(%3* %tmp107) #0
+  br label %bb130
+
+bb130:                                            ; preds = %bb125, %bb124
+  call swiftcc void @blam.11(i64 %tmp117, %6* nocapture swiftself dereferenceable(8) %tmp101, void (i64, %6*)* @baz.18)
+  %tmp132 = call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %tmp117, i64 1)
+  %tmp133 = extractvalue { i64, i1 } %tmp132, 1
+  br label %bb134
+
+bb134:                                            ; preds = %bb130
+  %tmp135 = extractvalue { i64, i1 } %tmp132, 0
+  %tmp136 = bitcast %6* %tmp101 to i64*
+  %tmp137 = load i64, i64* %tmp136, align 8
+  %tmp138 = and i64 %tmp137, 1152921504606846968
+  %tmp139 = inttoptr i64 %tmp138 to %20*
+  %tmp140 = getelementptr inbounds %20, %20* %tmp139, i64 0, i32 1, i32 0, i32 0, i32 0
+  store i64 %tmp135, i64* %tmp140, align 8
+  %tmp141 = inttoptr i64 %tmp138 to i8*
+  %tmp142 = getelementptr inbounds i8, i8* %tmp141, i64 32
+  %tmp143 = bitcast i8* %tmp142 to %18**
+  %tmp144 = getelementptr inbounds %18*, %18** %tmp143, i64 %tmp117
+  store %18* %tmp37, %18** %tmp144, align 8
+  call void @blam.8([24 x i8]* nonnull %tmp4) #0
+  call void @llvm.lifetime.end.p0i8(i64 -1, i8* nonnull %tmp102)
+  call void @blam(%2* %tmp104) #0
+  call void @blam(%2* %tmp60) #0
+  br label %bb145
+
+bb145:                                            ; preds = %bb134, %bb23
+  %tmp150 = call swiftcc double @spam.23(double 2.500000e+00)
+  %tmp151 = call swiftcc double @spam.21()
+  %tmp152 = call swiftcc double @spam.23(double 2.500000e+00)
+  %tmp153 = call swiftcc double @spam.21()
+  %tmp154 = call swiftcc %2 @foo.13(i64 undef) #10
+  %tmp155 = extractvalue %2 %tmp154, 0
+  %tmp156 = call noalias %2* @widget.6(%1* %tmp155, i64 56, i64 7) #0
+  %tmp157 = bitcast %2* %tmp156 to %18*
+  %tmp158 = call swiftcc %18* @barney.16(double %tmp150, double %tmp151, double %tmp152, double %tmp153, %18* returned swiftself %tmp157)
+  %tmp159 = call swiftcc %2 @zot(i64 undef) #10
+  %tmp160 = extractvalue %2 %tmp159, 0
+  %tmp161 = call noalias %2* @widget.6(%1* %tmp160, i64 56, i64 7) #0
+  %tmp162 = bitcast %2* %tmp161 to %18*
+  %tmp163 = call swiftcc %18* @barney.16(double %tmp38, double %tmp39, double %tmp40, double %tmp41, %18* swiftself %tmp162)
+  %tmp164 = call swiftcc %2 @foo.10(i64 0, %1** @global.3, %1* @global.4, %2 (i64, %1*)* @barney)
+  %tmp165 = extractvalue %2 %tmp164, 0
+  %tmp166 = bitcast [36 x i8]* %tmp5 to %2*
+  %tmp167 = call %2* @blam.9(%1* %tmp165, %2* nonnull %tmp166) #0
+  %tmp168 = getelementptr inbounds %2, %2* %tmp167, i64 1
+  %tmp169 = bitcast %2* %tmp168 to <2 x i64>*
+  store <2 x i64> , <2 x i64>* %tmp169, align 8
+  %tmp170 = bitcast %2* %tmp167 to %3*
+  %tmp171 = getelementptr inbounds %2, %2* %tmp167, i64 2
+  %tmp172 = bitcast %2* %tmp171 to %24*
+  %tmp173 = bitcast %2* %tmp171 to i8*
+  store i8 0, i8* %tmp173, align 8
+  %tmp174 = getelementptr inbounds %24, %24* %tmp172, i64 1, i32 0
+  store i8 3, i8* %tmp174, align 1
+  %tmp175 = getelementptr inbounds %24, %24* %tmp172, i64 2, i32 0
+  store i8 7, i8* %tmp175, align 2
+  %tmp176 = getelementptr inbounds %24, %24* %tmp172, i64 3, i32 0
+  store i8 2, i8* %tmp176, align 1
+  call swiftcc void @barney.17(%3* %tmp170, %18* swiftself %tmp163)
+  call void @eggs(%2* %tmp167) #0
+  %tmp177 = bitcast %2* %tmp167 to i8*
+  call void @llvm.lifetime.end.p0i8(i64 -1, i8* %tmp177)
+  %tmp178 = getelementptr inbounds %18, %18* %tmp158, i64 0, i32 2
+  %tmp179 = getelementptr inbounds [24 x i8], [24 x i8]* %tmp6, i64 0, i64 0
+  call void @llvm.lifetime.start.p0i8(i64 -1, i8* nonnull %tmp179)
+  %tmp180 = bitcast %6* %tmp178 to i8*
+  call void @foo(i8* nonnull %tmp180, [24 x i8]* nonnull %tmp6, i64 33, i8* null) #0
+  %tmp181 = getelementptr inbounds %18, %18* %tmp163, i64 0, i32 0
+  %tmp182 = call %2* @widget(%2* returned %tmp181) #0
+  call swiftcc void @quux(%6* nocapture swiftself dereferenceable(8) %tmp178, void (i64, %6*)* @wobble)
+  %tmp183 = getelementptr inbounds %6, %6* %tmp178, i64 0, i32 0, i32 0, i32 0
+  %tmp184 = load %3*, %3** %tmp183, align 8
+  %tmp185 = ptrtoint %3* %tmp184 to i64
+  %tmp186 = icmp ugt %3* %tmp184, inttoptr (i64 4611686018427387903 to %3*)
+  ret void
+}
+
+; Function Attrs: minsize nounwind
+declare hidden swiftcc %17* @bar(%1* nocapture readnone swiftself) #7
+
+; Function Attrs: minsize nounwind
+declare hidden swiftcc %18* @barney.16(double, double, double, double, %18* returned swiftself) local_unnamed_addr #7
+
+; Function Attrs: minsize
+declare hidden swiftcc void @barney.17(%3* nocapture readonly, %18* swiftself) local_unnamed_addr #4
+
+; Function Attrs: nounwind
+declare void @eggs(%2*) local_unnamed_addr #0
+
+; Function Attrs: minsize noinline
+declare hidden swiftcc void @wobble(i64, %6* nocapture swiftself dereferenceable(8)) local_unnamed_addr #8
+
+; Function Attrs: minsize
+declare hidden swiftcc void @quux(%6* nocapture swiftself dereferenceable(8), void (i64, %6*)*) local_unnamed_addr #4
+
+; Function Attrs: minsize noinline
+declare hidden swiftcc void @baz.18(i64, %6* nocapture swiftself dereferenceable(8)) local_unnamed_addr #8
+
+; Function Attrs: minsize noinline nounwind readnone
+declare swiftcc %2 @wibble(i64) #5
+
+; Function Attrs: minsize
+declare swiftcc i1 @widget.20(%9* noalias nocapture dereferenceable(40), %1* nocapture readnone swiftself) #4
+
+; Function Attrs: minsize
+declare swiftcc double @spam.21() #4
+
+; Function Attrs: minsize norecurse nounwind readnone
+declare swiftcc double @spam.23(double) #9
+
+attributes #0 = { nounwind }
+attributes #1 = { argmemonly nounwind willreturn }
+attributes #2 = { noinline nounwind }
+attributes #3 = { cold noreturn nounwind }
+attributes #4 = { minsize "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "target-cpu"="cyclone" "target-features"="+crypto,+fp-armv8,+neon,+zcm,+zcz" }
+attributes #5 = { minsize noinline nounwind readnone "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "target-cpu"="cyclone" "target-features"="+crypto,+fp-armv8,+neon,+zcm,+zcz" }
+attributes #6 = { nounwind readnone speculatable willreturn }
+attributes #7 = { minsize nounwind "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "target-cpu"="cyclone" "target-features"="+crypto,+fp-armv8,+neon,+zcm,+zcz" }
+attributes #8 = { minsize noinline "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "target-cpu"="cyclone" "target-features"="+crypto,+fp-armv8,+neon,+zcm,+zcz" }
+attributes #9 = { minsize norecurse nounwind readnone "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "target-cpu"="cyclone" "target-features"="+crypto,+fp-armv8,+neon,+zcm,+zcz" }
+attributes #10 = { nounwind readnone }
+
+; CHECK: BL @OUTLINED_FUNCTION_3, {{.*}}, implicit $x27