Index: lib/CodeGen/RegAllocFast.cpp =================================================================== --- lib/CodeGen/RegAllocFast.cpp +++ lib/CodeGen/RegAllocFast.cpp @@ -582,17 +582,9 @@ } } - // First try to find a completely free register. - ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC); - for (MCPhysReg PhysReg : AllocationOrder) { - if (PhysRegState[PhysReg] == regFree && !isRegUsedInInstr(PhysReg)) { - assignVirtToPhysReg(LR, PhysReg); - return; - } - } - MCPhysReg BestReg = 0; unsigned BestCost = spillImpossible; + ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC); for (MCPhysReg PhysReg : AllocationOrder) { LLVM_DEBUG(dbgs() << "\tRegister: " << printReg(PhysReg, TRI) << ' '); unsigned Cost = calcSpillCost(PhysReg); Index: test/CodeGen/AArch64/arm64-fast-isel-br.ll =================================================================== --- test/CodeGen/AArch64/arm64-fast-isel-br.ll +++ test/CodeGen/AArch64/arm64-fast-isel-br.ll @@ -94,7 +94,7 @@ store i32 %c, i32* %c.addr, align 4 store i64 %d, i64* %d.addr, align 8 %0 = load i16, i16* %b.addr, align 2 -; CHECK: tbz w0, #0, LBB4_2 +; CHECK: tbz w8, #0, LBB4_2 %conv = trunc i16 %0 to i1 br i1 %conv, label %if.then, label %if.end Index: test/CodeGen/AArch64/arm64-fast-isel-conversion-fallback.ll =================================================================== --- test/CodeGen/AArch64/arm64-fast-isel-conversion-fallback.ll +++ test/CodeGen/AArch64/arm64-fast-isel-conversion-fallback.ll @@ -26,8 +26,8 @@ define half @sitofp_hw_i1(i1 %a) nounwind ssp { entry: ; CHECK-LABEL: sitofp_hw_i1 -; CHECK: sbfx w0, w0, #0, #1 -; CHECK: scvtf s0, w0 +; CHECK: sbfx w8, w0, #0, #1 +; CHECK: scvtf s0, w8 ; CHECK: fcvt h0, s0 %conv = sitofp i1 %a to half ret half %conv @@ -37,8 +37,8 @@ define half @sitofp_hw_i8(i8 %a) nounwind ssp { entry: ; CHECK-LABEL: sitofp_hw_i8 -; CHECK: sxtb w0, w0 -; CHECK: scvtf s0, w0 +; CHECK: sxtb w8, w0 +; CHECK: scvtf s0, w8 ; CHECK: fcvt h0, s0 %conv = sitofp i8 %a to half ret half %conv @@ -48,8 +48,8 @@ define half @sitofp_hw_i16(i16 %a) nounwind ssp { entry: ; CHECK-LABEL: sitofp_hw_i16 -; CHECK: sxth w0, w0 -; CHECK: scvtf s0, w0 +; CHECK: sxth w8, w0 +; CHECK: scvtf s0, w8 ; CHECK: fcvt h0, s0 %conv = sitofp i16 %a to half ret half %conv @@ -79,8 +79,8 @@ define half @uitofp_hw_i1(i1 %a) nounwind ssp { entry: ; CHECK-LABEL: uitofp_hw_i1 -; CHECK: and w0, w0, #0x1 -; CHECK: ucvtf s0, w0 +; CHECK: and w8, w0, #0x1 +; CHECK: ucvtf s0, w8 ; CHECK: fcvt h0, s0 %conv = uitofp i1 %a to half ret half %conv @@ -90,8 +90,8 @@ define half @uitofp_hw_i8(i8 %a) nounwind ssp { entry: ; CHECK-LABEL: uitofp_hw_i8 -; CHECK: and w0, w0, #0xff -; CHECK: ucvtf s0, w0 +; CHECK: and w8, w0, #0xff +; CHECK: ucvtf s0, w8 ; CHECK: fcvt h0, s0 %conv = uitofp i8 %a to half ret half %conv @@ -101,8 +101,8 @@ define half @uitofp_hw_i16(i16 %a) nounwind ssp { entry: ; CHECK-LABEL: uitofp_hw_i16 -; CHECK: and w0, w0, #0xffff -; CHECK: ucvtf s0, w0 +; CHECK: and w8, w0, #0xffff +; CHECK: ucvtf s0, w8 ; CHECK: fcvt h0, s0 %conv = uitofp i16 %a to half ret half %conv Index: test/CodeGen/AArch64/arm64-fast-isel-conversion.ll =================================================================== --- test/CodeGen/AArch64/arm64-fast-isel-conversion.ll +++ test/CodeGen/AArch64/arm64-fast-isel-conversion.ll @@ -9,13 +9,13 @@ ; CHECK: strh w1, [sp, #12] ; CHECK: str w2, [sp, #8] ; CHECK: str x3, [sp] -; CHECK: ldr x3, [sp] -; CHECK: mov x0, x3 -; CHECK: str w0, [sp, #8] -; CHECK: ldr w0, [sp, #8] -; CHECK: strh w0, [sp, #12] -; CHECK: ldrh w0,
[sp, #12] -; CHECK: strb w0, [sp, #15] +; CHECK: ldr x8, [sp] +; CHECK: mov x9, x8 +; CHECK: str w9, [sp, #8] +; CHECK: ldr w9, [sp, #8] +; CHECK: strh w9, [sp, #12] +; CHECK: ldrh w9, [sp, #12] +; CHECK: strb w9, [sp, #15] ; CHECK: ldrb w0, [sp, #15] ; CHECK: add sp, sp, #16 ; CHECK: ret @@ -49,13 +49,13 @@ ; CHECK: strh w1, [sp, #12] ; CHECK: str w2, [sp, #8] ; CHECK: str x3, [sp] -; CHECK: ldrb w0, [sp, #15] -; CHECK: strh w0, [sp, #12] -; CHECK: ldrh w0, [sp, #12] -; CHECK: str w0, [sp, #8] -; CHECK: ldr w0, [sp, #8] -; CHECK: mov x3, x0 -; CHECK: str x3, [sp] +; CHECK: ldrb w8, [sp, #15] +; CHECK: strh w8, [sp, #12] +; CHECK: ldrh w8, [sp, #12] +; CHECK: str w8, [sp, #8] +; CHECK: ldr w8, [sp, #8] +; CHECK: mov x9, x8 +; CHECK: str x9, [sp] ; CHECK: ldr x0, [sp] ; CHECK: ret %a.addr = alloca i8, align 1 @@ -105,12 +105,12 @@ ; CHECK: strh w1, [sp, #12] ; CHECK: str w2, [sp, #8] ; CHECK: str x3, [sp] -; CHECK: ldrsb w0, [sp, #15] -; CHECK: strh w0, [sp, #12] -; CHECK: ldrsh w0, [sp, #12] -; CHECK: str w0, [sp, #8] -; CHECK: ldrsw x3, [sp, #8] -; CHECK: str x3, [sp] +; CHECK: ldrsb w8, [sp, #15] +; CHECK: strh w8, [sp, #12] +; CHECK: ldrsh w8, [sp, #12] +; CHECK: str w8, [sp, #8] +; CHECK: ldrsw x9, [sp, #8] +; CHECK: str x9, [sp] ; CHECK: ldr x0, [sp] ; CHECK: ret %a.addr = alloca i8, align 1 @@ -166,7 +166,8 @@ define signext i16 @sext_i1_i16(i1 %a) nounwind ssp { entry: ; CHECK-LABEL: sext_i1_i16 -; CHECK: sbfx w0, w0, #0, #1 +; CHECK: sbfx w8, w0, #0, #1 +; CHECK-NEXT: sxth w0, w8 %conv = sext i1 %a to i16 ret i16 %conv } @@ -175,7 +176,8 @@ define signext i8 @sext_i1_i8(i1 %a) nounwind ssp { entry: ; CHECK-LABEL: sext_i1_i8 -; CHECK: sbfx w0, w0, #0, #1 +; CHECK: sbfx w8, w0, #0, #1 +; CHECK-NEXT: sxtb w0, w8 %conv = sext i1 %a to i8 ret i8 %conv } @@ -238,8 +240,8 @@ define float @sitofp_sw_i1(i1 %a) nounwind ssp { entry: ; CHECK-LABEL: sitofp_sw_i1 -; CHECK: sbfx w0, w0, #0, #1 -; CHECK: scvtf s0, w0 +; CHECK: sbfx w8, w0, #0, #1 +; CHECK: scvtf s0, w8 %conv = sitofp i1 %a to float ret float %conv } @@ -248,8 +250,8 @@ define float @sitofp_sw_i8(i8 %a) nounwind ssp { entry: ; CHECK-LABEL: sitofp_sw_i8 -; CHECK: sxtb w0, w0 -; CHECK: scvtf s0, w0 +; CHECK: sxtb w8, w0 +; CHECK: scvtf s0, w8 %conv = sitofp i8 %a to float ret float %conv } @@ -302,8 +304,8 @@ define float @uitofp_sw_i1(i1 %a) nounwind ssp { entry: ; CHECK-LABEL: uitofp_sw_i1 -; CHECK: and w0, w0, #0x1 -; CHECK: ucvtf s0, w0 +; CHECK: and w8, w0, #0x1 +; CHECK: ucvtf s0, w8 %conv = uitofp i1 %a to float ret float %conv } Index: test/CodeGen/AArch64/swift-return.ll =================================================================== --- test/CodeGen/AArch64/swift-return.ll +++ test/CodeGen/AArch64/swift-return.ll @@ -8,7 +8,7 @@ ; CHECK-O0-LABEL: test1 ; CHECK-O0: bl _gen ; CHECK-O0: sxth [[TMP:w.*]], w0 -; CHECK-O0: add w0, [[TMP]], w1, sxtb +; CHECK-O0: add w8, [[TMP]], w1, sxtb define i16 @test1(i32) { entry: %call = call swiftcc { i16, i8 } @gen(i32 %0) Index: test/CodeGen/AArch64/swifterror.ll =================================================================== --- test/CodeGen/AArch64/swifterror.ll +++ test/CodeGen/AArch64/swifterror.ll @@ -19,12 +19,11 @@ ; CHECK-O0-LABEL: foo: ; CHECK-O0: orr w{{.*}}, wzr, #0x10 ; CHECK-O0: malloc -; CHECK-O0: mov x21, x0 -; CHECK-O0-NOT: x21 +; CHECK-O0: mov x1, x0 +; CHECK-O0-NOT: x1 ; CHECK-O0: orr [[ID:w[0-9]+]], wzr, #0x1 -; CHECK-O0-NOT: x21 ; CHECK-O0: strb [[ID]], [x0, #8] -; CHECK-O0-NOT: x21 +; CHECK-O0: mov x21, x1 entry: %call = call i8* @malloc(i64 16) %call.0 = 
bitcast i8* %call to %swift_error* Index: test/CodeGen/ARM/Windows/alloca.ll =================================================================== --- test/CodeGen/ARM/Windows/alloca.ll +++ test/CodeGen/ARM/Windows/alloca.ll @@ -17,7 +17,7 @@ ; CHECK: bl num_entries -; Any register is actually valid here, but turns out we use lr, -; because we do not have the kill flag on R0. -; CHECK: mov.w [[R1:lr]], #7 +; Any register is actually valid here, but turns out we use r1, +; because we do not have the kill flag on R0. +; CHECK: movs [[R1:r1]], #7 ; CHECK: add.w [[R0:r[0-9]+]], [[R1]], [[R0]], lsl #2 ; CHECK: bic [[R0]], [[R0]], #4 ; CHECK: lsrs r4, [[R0]], #2 Index: test/CodeGen/ARM/thumb-big-stack.ll =================================================================== --- test/CodeGen/ARM/thumb-big-stack.ll +++ test/CodeGen/ARM/thumb-big-stack.ll @@ -12,7 +12,7 @@ ; CHECK: movw [[ADDR:(r[0-9]+|lr)]], # ; CHECK-NEXT: add [[ADDR]], sp ; CHECK-NEXT: vst1.64 {d{{[0-9]+}}, d{{[0-9]+}}}, {{\[}}[[ADDR]]:128] -define <4 x float> @f(<4 x float> %x) { +define <4 x float> @f(<4 x float> %x, float %val) { entry: %.compoundliteral7837 = alloca <4 x float>, align 16 %.compoundliteral7865 = alloca <4 x float>, align 16 @@ -143,9 +143,9 @@ %.compoundliteral13969 = alloca <4 x float>, align 16 %.compoundliteral13983 = alloca <4 x float>, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -153,17 +153,17 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add68 = fadd <4 x float> %tmp1, %tmp tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add68, <4 x float>* undef, align 16 + store volatile <4 x float> %add68, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp2 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add76 = fadd float undef, 0x4074C999A0000000 + %add76 = fadd float %val, 0x4074C999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp3 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins77 = insertelement <4 x float> %tmp3, float %add76, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins77, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins77, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp4 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -175,15 +175,15 @@ tail
call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins80 = insertelement <4 x float> %tmp5, float %add79, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins80, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins80, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp6 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add82 = fadd <4 x float> undef, %tmp6 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add82, <4 x float>* undef, align 16 + store volatile <4 x float> %add82, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp7 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -195,19 +195,19 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins85 = insertelement <4 x float> %tmp8, float %add84, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins85, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins85, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp9 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext86 = extractelement <4 x float> %tmp9, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add93 = fadd float undef, 0xC076C66660000000 + %add93 = fadd float %val, 0xC076C66660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp10 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins94 = insertelement <4 x float> %tmp10, float %add93, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp11 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -223,17 +223,17 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp14 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins102 = insertelement <4 x float> undef, float 
undef, i32 1 + %vecins102 = insertelement <4 x float> undef, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins102, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins102, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp15 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add104 = fadd float undef, 0x406AB999A0000000 + %add104 = fadd float %val, 0x406AB999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp16 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext579 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -243,7 +243,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins581 = insertelement <4 x float> %tmp17, float %add580, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins581, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins581, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp18 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -251,7 +251,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add583 = fadd float %vecext582, 0x40444CCCC0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp19 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -261,25 +261,25 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins592 = insertelement <4 x float> undef, float %add591, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins592, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins592, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp20 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add594 = fadd float undef, 0xC05B466660000000 + %add594 = fadd float %val, 0xC05B466660000000 tail 
call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add605 = fadd float undef, 0x407164CCC0000000 + %add605 = fadd float %val, 0x407164CCC0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp21 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add616 = fadd float undef, 1.885000e+02 + %add616 = fadd float %val, 1.885000e+02 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp22 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp23 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins620 = insertelement <4 x float> undef, float undef, i32 1 + %vecins620 = insertelement <4 x float> undef, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins620, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins620, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext621 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -287,7 +287,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins623 = insertelement <4 x float> undef, float %add622, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins623, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins623, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp24 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -299,9 +299,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins626 = insertelement <4 x float> %tmp25, float %add625, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins626, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins626, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp26 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -309,7 +309,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add628 = fadd <4 x float> %tmp27, %tmp26 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() 
- store <4 x float> %add628, <4 x float>* undef, align 16 + store volatile <4 x float> %add628, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp28 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -321,7 +321,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins631 = insertelement <4 x float> %tmp29, float %add630, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins631, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins631, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp30 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -333,7 +333,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins634 = insertelement <4 x float> %tmp31, float %add633, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins634, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins634, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp32 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -347,13 +347,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp35 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add658 = fadd float undef, 0xC04A4CCCC0000000 + %add658 = fadd float %val, 0xC04A4CCCC0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext663 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp36 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins665 = insertelement <4 x float> %tmp36, float undef, i32 2 + %vecins665 = insertelement <4 x float> %tmp36, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext694 = extractelement <4 x float> undef, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -363,31 +363,31 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins696 = insertelement <4 x float> %tmp37, float %add695, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins696, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins696, <4 x float>* undef, align 16 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp38 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext699 = extractelement <4 x float> undef, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add703 = fadd float undef, 0x4068F33340000000 + %add703 = fadd float %val, 0x4068F33340000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins704 = insertelement <4 x float> undef, float %add703, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins704, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins704, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp39 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp40 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins710 = insertelement <4 x float> %tmp40, float undef, i32 3 + %vecins710 = insertelement <4 x float> %tmp40, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins710, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins710, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp41 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -395,7 +395,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add712 = fadd <4 x float> %tmp42, %tmp41 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add712, <4 x float>* undef, align 16 + store volatile <4 x float> %add712, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp43 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -403,7 +403,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp44 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins715 = insertelement <4 x float> %tmp44, float undef, i32 0 + %vecins715 = insertelement <4 x float> %tmp44, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp45 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -415,19 +415,19 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins718 = insertelement <4 x float> %tmp46, float %add717, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins718, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins718, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp47 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext719 = extractelement <4 x float> %tmp47, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add723 = fadd float undef, 0xC06A6CCCC0000000 + %add723 = fadd float %val, 0xC06A6CCCC0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins724 = insertelement <4 x float> undef, float %add723, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add726 = fadd <4 x float> undef, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext730 = extractelement <4 x float> undef, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -437,19 +437,19 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins732 = insertelement <4 x float> %tmp48, float %add731, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins732, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins732, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp49 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext733 = extractelement <4 x float> %tmp49, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp50 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins738 = insertelement <4 x float> %tmp50, float undef, i32 3 + %vecins738 = insertelement <4 x float> %tmp50, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp51 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -465,7 +465,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins743 = insertelement <4 x float> %tmp53, float %add742, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins743, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins743, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp54 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -473,7 +473,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add754 = fadd <4 x float> %tmp55, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add754, <4 x float>* undef, align 16 + store volatile <4 x float> %add754, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp56 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -485,7 +485,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins757 = insertelement <4 x float> %tmp57, float %add756, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add765 = fadd float undef, 0x405BA66660000000 + %add765 = fadd float %val, 0x405BA66660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp58 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -501,11 +501,11 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins771 = insertelement <4 x float> %tmp60, float %add770, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins771, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins771, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp61 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add776 = fadd float undef, 0xC055F33340000000 + %add776 = fadd float %val, 
0xC055F33340000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins777 = insertelement <4 x float> undef, float %add776, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -515,7 +515,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add782 = fadd <4 x float> %tmp63, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add782, <4 x float>* undef, align 16 + store volatile <4 x float> %add782, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp64 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -523,25 +523,25 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add784 = fadd float %vecext783, -3.455000e+02 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add796 = fadd <4 x float> undef, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add796, <4 x float>* undef, align 16 + store volatile <4 x float> %add796, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp65 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add801 = fadd float undef, 3.045000e+02 + %add801 = fadd float %val, 3.045000e+02 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp66 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins802 = insertelement <4 x float> %tmp66, float %add801, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins802, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins802, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext803 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp67 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -549,7 +549,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add810 = fadd <4 x float> undef, %tmp68 tail call void 
asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add810, <4 x float>* undef, align 16 + store volatile <4 x float> %add810, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp69 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -557,17 +557,17 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp70 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins813 = insertelement <4 x float> %tmp70, float undef, i32 0 + %vecins813 = insertelement <4 x float> %tmp70, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext817 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add818 = fadd float %vecext817, -4.830000e+02 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins822 = insertelement <4 x float> undef, float undef, i32 3 + %vecins822 = insertelement <4 x float> undef, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins822, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins822, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp71 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -577,17 +577,17 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add838 = fadd <4 x float> undef, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add838, <4 x float>* undef, align 16 + store volatile <4 x float> %add838, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp73 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext839 = extractelement <4 x float> %tmp73, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add849 = fadd float undef, 0xC07C266660000000 + %add849 = fadd float %val, 0xC07C266660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> 
, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp74 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -609,9 +609,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins861 = insertelement <4 x float> %tmp77, float %add860, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins889 = insertelement <4 x float> undef, float undef, i32 2 + %vecins889 = insertelement <4 x float> undef, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins889, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins889, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp78 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -623,9 +623,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins892 = insertelement <4 x float> %tmp79, float %add891, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins892, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins892, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp80 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -633,7 +633,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add894 = fadd <4 x float> %tmp81, %tmp80 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add894, <4 x float>* undef, align 16 + store volatile <4 x float> %add894, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext895 = extractelement <4 x float> undef, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -659,7 +659,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins903 = insertelement <4 x float> %tmp84, float %add902, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins903, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins903, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext904 = extractelement <4 x float> undef, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() 
@@ -669,7 +669,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins906 = insertelement <4 x float> %tmp85, float %add905, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp86 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -677,13 +677,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add908 = fadd <4 x float> %tmp87, %tmp86 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add908, <4 x float>* undef, align 16 + store volatile <4 x float> %add908, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp88 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp89 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp90 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -703,7 +703,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins917 = insertelement <4 x float> %tmp92, float %add916, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins917, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins917, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp93 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -715,17 +715,17 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins920 = insertelement <4 x float> %tmp94, float %add919, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins920, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins920, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp95 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins925 = insertelement <4 x float> %tmp95, float undef, i32 0 + %vecins925 = insertelement <4 x float> %tmp95, float %val, i32 0 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins925, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins925, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp96 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add927 = fadd float undef, 0xC0501999A0000000 + %add927 = fadd float %val, 0xC0501999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp97 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -739,7 +739,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins931 = insertelement <4 x float> %tmp98, float %add930, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp99 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -747,11 +747,11 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext937 = extractelement <4 x float> %tmp100, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add941 = fadd float undef, -4.665000e+02 + %add941 = fadd float %val, -4.665000e+02 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins942 = insertelement <4 x float> undef, float %add941, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins942, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins942, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp101 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -763,29 +763,29 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins945 = insertelement <4 x float> %tmp102, float %add944, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins945, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins945, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp103 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add947 = fadd float undef, 0xC051933340000000 + %add947 = fadd float %val, 0xC051933340000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp104 = load <4 
x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins948 = insertelement <4 x float> %tmp104, float %add947, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins948, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins948, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp105 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add955 = fadd float undef, 0x4077F4CCC0000000 + %add955 = fadd float %val, 0x4077F4CCC0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp106 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins956 = insertelement <4 x float> %tmp106, float %add955, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins956, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins956, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext971 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -795,17 +795,17 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins973 = insertelement <4 x float> %tmp107, float %add972, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins973, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins973, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp108 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext974 = extractelement <4 x float> %tmp108, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins976 = insertelement <4 x float> undef, float undef, i32 3 + %vecins976 = insertelement <4 x float> undef, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins976, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins976, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp109 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -817,7 +817,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp112 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext982 = extractelement <4 x float> undef, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -825,7 +825,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins984 = insertelement <4 x float> undef, float %add983, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins984, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins984, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp113 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -837,25 +837,25 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins987 = insertelement <4 x float> %tmp114, float %add986, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins987, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins987, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp115 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp116 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins995 = insertelement <4 x float> %tmp116, float undef, i32 0 + %vecins995 = insertelement <4 x float> %tmp116, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins995, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins995, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp117 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add997 = fadd float undef, 0xC0798999A0000000 + %add997 = fadd float %val, 0xC0798999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp118 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins998 = insertelement <4 x float> %tmp118, float %add997, i32 1 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins998, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins998, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp119 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -865,7 +865,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp120 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp121 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -879,13 +879,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add1031 = fadd float %vecext1030, 2.010000e+02 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp123 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp124 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1085 = insertelement <4 x float> %tmp124, float undef, i32 2 + %vecins1085 = insertelement <4 x float> %tmp124, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp125 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -897,13 +897,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1088 = insertelement <4 x float> %tmp126, float %add1087, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1088, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1088, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp127 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add1090 = fadd <4 x float> undef, %tmp127 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp128 
= load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -915,7 +915,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1096 = insertelement <4 x float> %tmp129, float %add1095, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1096, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1096, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp130 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -927,7 +927,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1099 = insertelement <4 x float> %tmp131, float %add1098, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1099, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1099, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp132 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -939,9 +939,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1102 = insertelement <4 x float> %tmp133, float %add1101, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1102, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1102, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp134 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -961,9 +961,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp137 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1110 = insertelement <4 x float> %tmp137, float undef, i32 1 + %vecins1110 = insertelement <4 x float> %tmp137, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1110, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1110, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp138 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -975,21 +975,21 @@ tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1113 = insertelement <4 x float> %tmp139, float %add1112, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1113, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1113, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add1115 = fadd float undef, 0x4072B33340000000 + %add1115 = fadd float %val, 0x4072B33340000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1116 = insertelement <4 x float> undef, float %add1115, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1116, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1116, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp140 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add1118 = fadd <4 x float> %tmp140, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add1118, <4 x float>* undef, align 16 + store volatile <4 x float> %add1118, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp141 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -999,7 +999,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1121 = insertelement <4 x float> undef, float %add1120, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1121, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1121, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp142 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1013,9 +1013,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext1125 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1127 = insertelement <4 x float> undef, float undef, i32 2 + %vecins1127 = insertelement <4 x float> undef, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1127, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1127, <4 x float>* undef, align 16 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp144 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1027,7 +1027,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1130 = insertelement <4 x float> %tmp145, float %add1129, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp146 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1045,7 +1045,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1135 = insertelement <4 x float> %tmp149, float %add1134, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1135, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1135, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp150 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1053,13 +1053,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp151 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1138 = insertelement <4 x float> %tmp151, float undef, i32 1 + %vecins1138 = insertelement <4 x float> %tmp151, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1138, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1138, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp152 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add1140 = fadd float undef, 0x407AE999A0000000 + %add1140 = fadd float %val, 0x407AE999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp153 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1073,7 +1073,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1144 = insertelement <4 x float> %tmp154, float %add1143, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1144, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1144, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp155 = load <4 x 
float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1081,27 +1081,27 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add1146 = fadd <4 x float> %tmp156, %tmp155 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add1146, <4 x float>* undef, align 16 + store volatile <4 x float> %add1146, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp157 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add1148 = fadd float undef, 4.145000e+02 + %add1148 = fadd float %val, 4.145000e+02 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp158 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1158 = insertelement <4 x float> undef, float undef, i32 3 + %vecins1158 = insertelement <4 x float> undef, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1158, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1158, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add1218 = fadd float undef, 0xC078733340000000 + %add1218 = fadd float %val, 0xC078733340000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1219 = insertelement <4 x float> undef, float %add1218, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp159 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1113,7 +1113,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1281 = insertelement <4 x float> %tmp160, float %add1280, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1281, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1281, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp161 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1125,7 +1125,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1284 = insertelement <4 x float> %tmp162, float %add1283, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1284, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1284, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp163 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1133,27 +1133,27 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add1286 = fadd <4 x float> %tmp164, %tmp163 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add1286, <4 x float>* undef, align 16 + store volatile <4 x float> %add1286, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp165 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add1288 = fadd float undef, 0xC0731199A0000000 + %add1288 = fadd float %val, 0xC0731199A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp166 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp167 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext1444 = extractelement <4 x float> %tmp167, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1460 = insertelement <4 x float> undef, float undef, i32 1 + %vecins1460 = insertelement <4 x float> undef, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1460, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1460, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp168 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add1462 = fadd float undef, -1.670000e+02 + %add1462 = fadd float %val, -1.670000e+02 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1463 = insertelement <4 x float> undef, float %add1462, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1167,9 +1167,9 @@ tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1466 = insertelement <4 x float> %tmp170, float %add1465, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1466, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1466, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp171 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1177,17 +1177,17 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add1468 = fadd <4 x float> %tmp172, %tmp171 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add1468, <4 x float>* undef, align 16 + store volatile <4 x float> %add1468, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp173 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add1470 = fadd float undef, 0x4033B33340000000 + %add1470 = fadd float %val, 0x4033B33340000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp174 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1471 = insertelement <4 x float> %tmp174, float %add1470, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1471, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1471, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp175 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1205,9 +1205,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp178 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1477 = insertelement <4 x float> %tmp178, float undef, i32 2 + %vecins1477 = insertelement <4 x float> %tmp178, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1477, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1477, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp179 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1219,15 +1219,15 @@ tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1480 = insertelement <4 x float> %tmp180, float %add1479, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1480, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1480, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp181 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp182 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp183 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1241,9 +1241,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext1486 = extractelement <4 x float> %tmp185, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1502 = insertelement <4 x float> undef, float undef, i32 1 + %vecins1502 = insertelement <4 x float> undef, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1502, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1502, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext1503 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1253,7 +1253,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1505 = insertelement <4 x float> %tmp186, float %add1504, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1505, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1505, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp187 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1265,9 +1265,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1508 = insertelement <4 x float> %tmp188, float %add1507, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1508, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1508, <4 x float>* undef, align 16 tail call 
void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp189 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1275,7 +1275,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add1510 = fadd <4 x float> %tmp190, %tmp189 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add1510, <4 x float>* undef, align 16 + store volatile <4 x float> %add1510, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp191 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1289,13 +1289,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1656 = insertelement <4 x float> %tmp193, float %add1655, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1656, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1656, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add1658 = fadd float undef, 0x40709999A0000000 + %add1658 = fadd float %val, 0x40709999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp194 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext1660 = extractelement <4 x float> undef, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1305,19 +1305,19 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1662 = insertelement <4 x float> %tmp195, float %add1661, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1662, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1662, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1676 = insertelement <4 x float> undef, float undef, i32 3 + %vecins1676 = insertelement <4 x float> undef, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile 
<4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp196 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add1692 = fadd <4 x float> %tmp196, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add1692, <4 x float>* undef, align 16 + store volatile <4 x float> %add1692, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp197 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1329,7 +1329,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1695 = insertelement <4 x float> %tmp198, float %add1694, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1695, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1695, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp199 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1341,7 +1341,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1698 = insertelement <4 x float> %tmp200, float %add1697, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1698, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1698, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp201 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1349,15 +1349,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp202 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1701 = insertelement <4 x float> %tmp202, float undef, i32 2 + %vecins1701 = insertelement <4 x float> %tmp202, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1701, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1701, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp203 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1704 = insertelement <4 x float> undef, float undef, i32 3 + %vecins1704 = insertelement <4 x float> undef, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , 
<4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp204 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1365,9 +1365,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp206 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1709 = insertelement <4 x float> %tmp206, float undef, i32 0 + %vecins1709 = insertelement <4 x float> %tmp206, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1709, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1709, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp207 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1375,11 +1375,11 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add1714 = fadd float %vecext1713, 0xC0703199A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1723 = insertelement <4 x float> undef, float undef, i32 0 + %vecins1723 = insertelement <4 x float> undef, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp208 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext1730 = extractelement <4 x float> undef, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1389,9 +1389,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1732 = insertelement <4 x float> %tmp209, float %add1731, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1732, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1732, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp210 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1399,7 +1399,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp211 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add1736 = fadd float undef, 0x407C3999A0000000 + %add1736 = fadd float %val, 0x407C3999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp212 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1415,7 +1415,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1740 = insertelement <4 x float> %tmp214, float %add1739, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1740, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1740, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp215 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1427,25 +1427,25 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1743 = insertelement <4 x float> %tmp216, float %add1742, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1743, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1743, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext1744 = extractelement <4 x float> undef, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp217 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1746 = insertelement <4 x float> %tmp217, float undef, i32 3 + %vecins1746 = insertelement <4 x float> %tmp217, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp218 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add1748 = fadd <4 x float> undef, %tmp218 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add1748, <4 x float>* undef, align 16 + store volatile <4 x float> %add1748, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp219 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add1750 = fadd float undef, 0x407C6B3340000000 + %add1750 = fadd float %val, 0x407C6B3340000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1751 = insertelement <4 x float> undef, float %add1750, i32 0 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1467,21 +1467,21 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp223 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add1759 = fadd float undef, 0x40678999A0000000 + %add1759 = fadd float %val, 0x40678999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp224 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1760 = insertelement <4 x float> %tmp224, float %add1759, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1760, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1760, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp225 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add1762 = fadd <4 x float> undef, %tmp225 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add1762, <4 x float>* undef, align 16 + store volatile <4 x float> %add1762, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp226 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1493,7 +1493,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1765 = insertelement <4 x float> %tmp227, float %add1764, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1765, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1765, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp228 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1505,7 +1505,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1768 = insertelement <4 x float> %tmp229, float %add1767, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1768, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1768, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext1769 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1515,7 +1515,7 
@@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1771 = insertelement <4 x float> %tmp230, float %add1770, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1771, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1771, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp231 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1525,13 +1525,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp234 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1779 = insertelement <4 x float> %tmp234, float undef, i32 0 + %vecins1779 = insertelement <4 x float> %tmp234, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1779, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1779, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp235 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp236 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1541,9 +1541,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1785 = insertelement <4 x float> undef, float %add1784, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1785, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1785, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp237 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1559,25 +1559,25 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1793 = insertelement <4 x float> %tmp239, float %add1792, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1793, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1793, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp240 = load <4 x float>, <4 x 
float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add1795 = fadd float undef, 0x4055266660000000 + %add1795 = fadd float %val, 0x4055266660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp241 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1796 = insertelement <4 x float> %tmp241, float %add1795, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1799 = insertelement <4 x float> undef, float undef, i32 2 + %vecins1799 = insertelement <4 x float> undef, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext1800 = extractelement <4 x float> undef, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp242 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp243 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1587,7 +1587,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp246 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add1865 = fadd float undef, -2.235000e+02 + %add1865 = fadd float %val, -2.235000e+02 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp247 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1597,33 +1597,33 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp249 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1872 = insertelement <4 x float> %tmp249, float undef, i32 3 + %vecins1872 = insertelement <4 x float> %tmp249, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp250 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add1874 = fadd <4 x float> %tmp250, undef tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add1874, <4 x float>* undef, align 16 + store volatile <4 x float> %add1874, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext1875 = extractelement <4 x float> undef, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp251 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1894 = insertelement <4 x float> %tmp251, float undef, i32 1 + %vecins1894 = insertelement <4 x float> %tmp251, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp252 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext1895 = extractelement <4 x float> %tmp252, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1900 = insertelement <4 x float> undef, float undef, i32 3 + %vecins1900 = insertelement <4 x float> undef, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1900, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1900, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1905 = insertelement <4 x float> undef, float undef, i32 0 + %vecins1905 = insertelement <4 x float> undef, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1905, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1905, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp253 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1633,7 +1633,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1908 = insertelement <4 x float> undef, float %add1907, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1908, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1908, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext1909 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1649,23 +1649,23 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add1916 = fadd <4 x float> %tmp256, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add1916, <4 x float>* undef, align 16 + store volatile <4 x float> %add1916, <4 x float>* undef, align 16 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext1923 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp257 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add1927 = fadd float undef, 0x40761999A0000000 + %add1927 = fadd float %val, 0x40761999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp258 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1928 = insertelement <4 x float> %tmp258, float %add1927, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1928, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1928, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp259 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1677,9 +1677,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp262 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1933 = insertelement <4 x float> %tmp262, float undef, i32 0 + %vecins1933 = insertelement <4 x float> %tmp262, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1933, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1933, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp263 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1693,15 +1693,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext1940 = extractelement <4 x float> undef, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1942 = insertelement <4 x float> undef, float undef, i32 3 + %vecins1942 = insertelement <4 x float> undef, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp265 = load <4 x float>, <4 x float>* undef tail call 
void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp266 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp267 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1709,13 +1709,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add1946 = fadd float %vecext1945, 0xC074866660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1953 = insertelement <4 x float> undef, float undef, i32 2 + %vecins1953 = insertelement <4 x float> undef, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1953, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1953, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp268 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp269 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1737,15 +1737,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1964 = insertelement <4 x float> %tmp272, float %add1963, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1964, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1964, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext1965 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp273 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1967 = insertelement <4 x float> %tmp273, float undef, i32 2 + %vecins1967 = insertelement <4 x float> %tmp273, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1967, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1967, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp274 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() 
@@ -1757,9 +1757,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1970 = insertelement <4 x float> %tmp275, float %add1969, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1970, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1970, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp276 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1767,31 +1767,31 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp278 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1975 = insertelement <4 x float> %tmp278, float undef, i32 0 + %vecins1975 = insertelement <4 x float> %tmp278, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1975, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1975, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp279 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext1976 = extractelement <4 x float> %tmp279, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1978 = insertelement <4 x float> undef, float undef, i32 1 + %vecins1978 = insertelement <4 x float> undef, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1978, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1978, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext1979 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1981 = insertelement <4 x float> undef, float undef, i32 2 + %vecins1981 = insertelement <4 x float> undef, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1981, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1981, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins1984 = insertelement <4 x float> undef, float undef, i32 3 + %vecins1984 = insertelement <4 x float> undef, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1984, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1984, <4 x float>* 
undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext1990 = extractelement <4 x float> undef, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1803,11 +1803,11 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins1998 = insertelement <4 x float> %tmp280, float %add1997, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins1998, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins1998, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext2004 = extractelement <4 x float> undef, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1817,7 +1817,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins2006 = insertelement <4 x float> %tmp281, float %add2005, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2006, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2006, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp282 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1825,7 +1825,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp283 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins2009 = insertelement <4 x float> %tmp283, float undef, i32 2 + %vecins2009 = insertelement <4 x float> %tmp283, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp284 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1837,15 +1837,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins2012 = insertelement <4 x float> %tmp285, float %add2011, i32 3 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2012, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2012, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp286 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp287 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp288 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1857,7 +1857,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins2017 = insertelement <4 x float> %tmp289, float %add2016, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add2022 = fadd float undef, 8.350000e+01 + %add2022 = fadd float %val, 8.350000e+01 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp290 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1871,7 +1871,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add2028 = fadd <4 x float> %tmp292, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add2028, <4 x float>* undef, align 16 + store volatile <4 x float> %add2028, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext2029 = extractelement <4 x float> undef, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1879,11 +1879,11 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp293 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp294 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add2036 = fadd float undef, 0x407DE66660000000 + %add2036 = fadd float %val, 0x407DE66660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp295 = load <4 x float>, <4 x float>* 
undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1895,9 +1895,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp299 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins2045 = insertelement <4 x float> %tmp299, float undef, i32 0 + %vecins2045 = insertelement <4 x float> %tmp299, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2045, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2045, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp300 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1905,35 +1905,35 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add2047 = fadd float %vecext2046, 0xC065433340000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext2052 = extractelement <4 x float> undef, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp301 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins2054 = insertelement <4 x float> %tmp301, float undef, i32 3 + %vecins2054 = insertelement <4 x float> %tmp301, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2054, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2054, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp302 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add2056 = fadd <4 x float> undef, %tmp302 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add2056, <4 x float>* undef, align 16 + store volatile <4 x float> %add2056, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp303 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp304 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins2062 = insertelement <4 x 
float> %tmp304, float undef, i32 1 + %vecins2062 = insertelement <4 x float> %tmp304, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2062, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2062, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp305 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp306 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1943,9 +1943,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins2068 = insertelement <4 x float> undef, float %add2067, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2068, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2068, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp307 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1953,7 +1953,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add2070 = fadd <4 x float> %tmp308, %tmp307 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add2070, <4 x float>* undef, align 16 + store volatile <4 x float> %add2070, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp309 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1965,7 +1965,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins2073 = insertelement <4 x float> %tmp310, float %add2072, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2073, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2073, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp311 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1973,7 +1973,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp312 = load <4 x float>, <4 x float>* undef, align 16 tail call void 
asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins2076 = insertelement <4 x float> %tmp312, float undef, i32 1 + %vecins2076 = insertelement <4 x float> %tmp312, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp313 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1985,7 +1985,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins2079 = insertelement <4 x float> %tmp314, float %add2078, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2079, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2079, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp315 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -1997,15 +1997,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins2082 = insertelement <4 x float> %tmp316, float %add2081, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2082, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2082, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp317 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp318 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp319 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2015,7 +2015,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins2087 = insertelement <4 x float> undef, float %add2086, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2087, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2087, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext2480 = extractelement <4 x float> undef, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2029,23 +2029,23 @@ tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins2485 = insertelement <4 x float> %tmp320, float %add2484, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2485, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2485, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp321 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add2487 = fadd float undef, 2.030000e+02 + %add2487 = fadd float %val, 2.030000e+02 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp322 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext2491 = extractelement <4 x float> undef, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp323 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp324 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2055,9 +2055,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp325 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins2499 = insertelement <4 x float> undef, float undef, i32 2 + %vecins2499 = insertelement <4 x float> undef, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2499, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2499, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext2500 = extractelement <4 x float> undef, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2079,7 +2079,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp329 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add2534 = fadd float undef, 0x4072C66660000000 + %add2534 = fadd float %val, 0x4072C66660000000 tail call 
void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext2536 = extractelement <4 x float> undef, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2089,15 +2089,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins2538 = insertelement <4 x float> %tmp330, float %add2537, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2538, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2538, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext2539 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add2540 = fadd float %vecext2539, 0x406F9999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins2580 = insertelement <4 x float> undef, float undef, i32 1 + %vecins2580 = insertelement <4 x float> undef, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2580, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2580, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp331 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2107,7 +2107,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins2583 = insertelement <4 x float> undef, float %add2582, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2583, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2583, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext2584 = extractelement <4 x float> undef, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2115,21 +2115,21 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp332 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add2590 = fadd float undef, 0x407B1999A0000000 + %add2590 = fadd float %val, 0x407B1999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp333 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect 
"", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp334 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add2672 = fadd <4 x float> undef, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add2672, <4 x float>* undef, align 16 + store volatile <4 x float> %add2672, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp335 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2141,37 +2141,37 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins2678 = insertelement <4 x float> %tmp336, float %add2677, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2678, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2678, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp337 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext2679 = extractelement <4 x float> %tmp337, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins2681 = insertelement <4 x float> undef, float undef, i32 2 + %vecins2681 = insertelement <4 x float> undef, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2681, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2681, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp338 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext2682 = extractelement <4 x float> %tmp338, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins2684 = insertelement <4 x float> undef, float undef, i32 3 + %vecins2684 = insertelement <4 x float> undef, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp339 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp340 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp341 = 
load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add2688 = fadd float undef, 0x4063266660000000 + %add2688 = fadd float %val, 0x4063266660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins2692 = insertelement <4 x float> undef, float undef, i32 1 + %vecins2692 = insertelement <4 x float> undef, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2692, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2692, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp342 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2183,9 +2183,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins2698 = insertelement <4 x float> %tmp343, float %add2697, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins2698, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins2698, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp344 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2193,7 +2193,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add2700 = fadd <4 x float> %tmp345, %tmp344 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add2700, <4 x float>* undef, align 16 + store volatile <4 x float> %add2700, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp346 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2207,15 +2207,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp349 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext3121 = extractelement <4 x float> undef, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - 
%add3125 = fadd float undef, 0xC06F266660000000 + %add3125 = fadd float %val, 0xC06F266660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3126 = insertelement <4 x float> undef, float %add3125, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3126, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3126, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp350 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2227,11 +2227,11 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3129 = insertelement <4 x float> %tmp351, float %add3128, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3129, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3129, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp352 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add3131 = fadd float undef, 3.215000e+02 + %add3131 = fadd float %val, 3.215000e+02 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp353 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2239,15 +2239,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add3134 = fadd <4 x float> %tmp354, %tmp353 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add3134, <4 x float>* undef, align 16 + store volatile <4 x float> %add3134, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp355 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add3136 = fadd float undef, 0x4074333340000000 + %add3136 = fadd float %val, 0x4074333340000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins3140 = insertelement <4 x float> undef, float undef, i32 1 + %vecins3140 = insertelement <4 x float> undef, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3140, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3140, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp356 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2259,7 +2259,7 @@ tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3143 = insertelement <4 x float> %tmp357, float %add3142, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3143, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3143, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp358 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2271,15 +2271,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3146 = insertelement <4 x float> %tmp359, float %add3145, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3146, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3146, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp360 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins3272 = insertelement <4 x float> undef, float undef, i32 3 + %vecins3272 = insertelement <4 x float> undef, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3272, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3272, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp361 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2287,7 +2287,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add3274 = fadd <4 x float> %tmp362, %tmp361 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add3274, <4 x float>* undef, align 16 + store volatile <4 x float> %add3274, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp363 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2299,7 +2299,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3277 = insertelement <4 x float> %tmp364, float %add3276, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3277, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3277, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp365 = load <4 x float>, <4 x float>* undef, align 16 tail call 
void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2309,7 +2309,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3280 = insertelement <4 x float> undef, float %add3279, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3280, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3280, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp366 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2321,7 +2321,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3283 = insertelement <4 x float> %tmp367, float %add3282, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3283, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3283, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp368 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2333,7 +2333,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp369 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp370 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2345,7 +2345,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3291 = insertelement <4 x float> %tmp371, float %add3290, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3291, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3291, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext3292 = extractelement <4 x float> undef, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2353,11 +2353,11 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp373 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins3328 = insertelement <4 x float> %tmp373, float undef, i32 3 + %vecins3328 = insertelement <4 x float> %tmp373, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add3330 = fadd <4 x float> undef, undef tail call 
void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add3330, <4 x float>* undef, align 16 + store volatile <4 x float> %add3330, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext3331 = extractelement <4 x float> undef, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2367,7 +2367,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3333 = insertelement <4 x float> %tmp374, float %add3332, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3333, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3333, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext3334 = extractelement <4 x float> undef, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2385,7 +2385,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3339 = insertelement <4 x float> %tmp376, float %add3338, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3339, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3339, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp377 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2393,13 +2393,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp378 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins3342 = insertelement <4 x float> %tmp378, float undef, i32 3 + %vecins3342 = insertelement <4 x float> %tmp378, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp379 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add3344 = fadd <4 x float> %tmp379, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add3344, <4 x float>* undef, align 16 + store volatile <4 x float> %add3344, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp380 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2419,15 +2419,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3350 = insertelement <4 x float> %tmp382, float %add3349, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> 
%vecins3350, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3350, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add3352 = fadd float undef, 0xC06ACCCCC0000000 + %add3352 = fadd float %val, 0xC06ACCCCC0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp383 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins3423 = insertelement <4 x float> undef, float undef, i32 2 + %vecins3423 = insertelement <4 x float> undef, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3423, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3423, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext3424 = extractelement <4 x float> undef, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2437,9 +2437,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3426 = insertelement <4 x float> %tmp384, float %add3425, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3426, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3426, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp385 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2457,7 +2457,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3431 = insertelement <4 x float> %tmp388, float %add3430, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3431, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3431, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp389 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2469,15 +2469,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3434 = insertelement <4 x float> %tmp390, float %add3433, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3434, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3434, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext3435 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp391 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins3437 = insertelement <4 x float> %tmp391, float undef, i32 2 + %vecins3437 = insertelement <4 x float> %tmp391, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3437, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3437, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp392 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2485,7 +2485,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add3439 = fadd float %vecext3438, 0xC071D999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp393 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2493,7 +2493,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add3442 = fadd <4 x float> %tmp394, %tmp393 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add3442, <4 x float>* undef, align 16 + store volatile <4 x float> %add3442, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext3443 = extractelement <4 x float> undef, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2509,7 +2509,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3448 = insertelement <4 x float> %tmp396, float %add3447, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3448, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3448, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp397 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2521,15 +2521,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3451 = insertelement <4 x float> %tmp398, float %add3450, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3451, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3451, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add3453 = fadd float 
undef, 0xC07ADCCCC0000000 + %add3453 = fadd float %val, 0xC07ADCCCC0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp399 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3454 = insertelement <4 x float> %tmp399, float %add3453, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3454, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3454, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp400 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2539,7 +2539,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3459 = insertelement <4 x float> undef, float %add3458, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3459, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3459, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp401 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2547,19 +2547,19 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp402 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins3462 = insertelement <4 x float> %tmp402, float undef, i32 1 + %vecins3462 = insertelement <4 x float> %tmp402, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3462, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3462, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp403 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add3464 = fadd float undef, 0xC057B999A0000000 + %add3464 = fadd float %val, 0xC057B999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp404 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3465 = insertelement <4 x float> %tmp404, float %add3464, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3465, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3465, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp405 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2569,21 +2569,21 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp406 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp407 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp408 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext3477 = extractelement <4 x float> %tmp408, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins3479 = insertelement <4 x float> undef, float undef, i32 2 + %vecins3479 = insertelement <4 x float> undef, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3479, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3479, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext3480 = extractelement <4 x float> undef, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2593,23 +2593,23 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3482 = insertelement <4 x float> %tmp409, float %add3481, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3482, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3482, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp410 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add3484 = fadd <4 x float> %tmp410, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add3484, <4 x float>* undef, align 16 + store volatile <4 x float> %add3484, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp411 = load <4 x float>, <4 x float>* 
undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add3486 = fadd float undef, -1.415000e+02 + %add3486 = fadd float %val, -1.415000e+02 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3487 = insertelement <4 x float> undef, float %add3486, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3487, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3487, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp412 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2621,25 +2621,25 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3490 = insertelement <4 x float> %tmp413, float %add3489, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3490, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3490, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add3492 = fadd float undef, 0x4078066660000000 + %add3492 = fadd float %val, 0x4078066660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp414 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3493 = insertelement <4 x float> %tmp414, float %add3492, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3493, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3493, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp415 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add3495 = fadd float undef, 0xC0798999A0000000 + %add3495 = fadd float %val, 0xC0798999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp416 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3496 = insertelement <4 x float> %tmp416, float %add3495, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3496, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3496, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp417 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2647,7 +2647,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add3498 = fadd <4 x 
float> %tmp418, %tmp417 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add3498, <4 x float>* undef, align 16 + store volatile <4 x float> %add3498, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext3499 = extractelement <4 x float> undef, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2663,25 +2663,25 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp420 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add3506 = fadd float undef, 0xC074DB3340000000 + %add3506 = fadd float %val, 0xC074DB3340000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp421 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins3507 = insertelement <4 x float> %tmp421, float %add3506, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins3507, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins3507, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add3509 = fadd float undef, 0xC066033340000000 + %add3509 = fadd float %val, 0xC066033340000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp422 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp423 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext3513 = extractelement <4 x float> undef, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2693,9 +2693,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext3516 = extractelement <4 x float> %tmp425, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins5414 = insertelement <4 x float> undef, float undef, i32 3 + %vecins5414 = insertelement <4 x float> undef, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x 
float> %vecins5414, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5414, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp426 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2703,33 +2703,33 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add5416 = fadd <4 x float> %tmp427, %tmp426 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add5416, <4 x float>* undef, align 16 + store volatile <4 x float> %add5416, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp428 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add5418 = fadd float undef, 0xC07ED999A0000000 + %add5418 = fadd float %val, 0xC07ED999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp429 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5419 = insertelement <4 x float> %tmp429, float %add5418, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins5624 = insertelement <4 x float> undef, float undef, i32 3 + %vecins5624 = insertelement <4 x float> undef, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5624, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5624, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add5626 = fadd <4 x float> undef, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add5626, <4 x float>* undef, align 16 + store volatile <4 x float> %add5626, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext5627 = extractelement <4 x float> undef, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp430 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins5629 = insertelement <4 x float> %tmp430, float undef, i32 0 + %vecins5629 = insertelement <4 x float> %tmp430, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5629, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5629, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp431 = load <4 
x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2739,13 +2739,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5632 = insertelement <4 x float> undef, float %add5631, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5632, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5632, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp432 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins5688 = insertelement <4 x float> %tmp432, float undef, i32 1 + %vecins5688 = insertelement <4 x float> %tmp432, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5688, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5688, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp433 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2753,35 +2753,35 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp434 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins5691 = insertelement <4 x float> %tmp434, float undef, i32 2 + %vecins5691 = insertelement <4 x float> %tmp434, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5691, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5691, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext5692 = extractelement <4 x float> undef, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp435 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add5696 = fadd <4 x float> undef, %tmp435 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add5696, <4 x float>* undef, align 16 + store volatile <4 x float> %add5696, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add5701 = fadd float undef, 0x4077D4CCC0000000 + %add5701 = fadd float %val, 0x4077D4CCC0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp436 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5702 = insertelement <4 x float> %tmp436, float %add5701, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5702, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5702, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp437 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp438 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins5705 = insertelement <4 x float> %tmp438, float undef, i32 2 + %vecins5705 = insertelement <4 x float> %tmp438, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5705, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5705, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp439 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2793,9 +2793,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5708 = insertelement <4 x float> %tmp440, float %add5707, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5708, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5708, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp441 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2803,7 +2803,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add5710 = fadd <4 x float> %tmp442, %tmp441 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add5710, <4 x float>* undef, align 16 + store volatile <4 x float> %add5710, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp443 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2815,19 +2815,19 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5713 = insertelement <4 x float> %tmp444, float %add5712, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5713, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5713, <4 x float>* undef, align 16 tail 
call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp445 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp446 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins5716 = insertelement <4 x float> %tmp446, float undef, i32 1 + %vecins5716 = insertelement <4 x float> %tmp446, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp447 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add5724 = fadd <4 x float> %tmp447, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add5724, <4 x float>* undef, align 16 + store volatile <4 x float> %add5724, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp448 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2835,21 +2835,21 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp449 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins5750 = insertelement <4 x float> %tmp449, float undef, i32 3 + %vecins5750 = insertelement <4 x float> %tmp449, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp450 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add5752 = fadd <4 x float> undef, %tmp450 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add5754 = fadd float undef, 0xC064033340000000 + %add5754 = fadd float %val, 0xC064033340000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp451 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5755 = insertelement <4 x float> %tmp451, float %add5754, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5755, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5755, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp452 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2861,7 +2861,7 @@ tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5758 = insertelement <4 x float> %tmp453, float %add5757, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5758, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5758, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp454 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2869,9 +2869,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp455 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins5761 = insertelement <4 x float> %tmp455, float undef, i32 2 + %vecins5761 = insertelement <4 x float> %tmp455, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5761, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5761, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp456 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2883,13 +2883,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5764 = insertelement <4 x float> %tmp457, float %add5763, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5764, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5764, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add5766 = fadd <4 x float> undef, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add5766, <4 x float>* undef, align 16 + store volatile <4 x float> %add5766, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp458 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2901,9 +2901,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5769 = insertelement <4 x float> %tmp459, float %add5768, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5769, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5769, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add5771 = fadd float undef, 8.000000e+00 + %add5771 = 
fadd float %val, 8.000000e+00 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp460 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2911,11 +2911,11 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp461 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add5796 = fadd float undef, 0x4058ECCCC0000000 + %add5796 = fadd float %val, 0x4058ECCCC0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5797 = insertelement <4 x float> undef, float %add5796, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5797, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5797, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp462 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2923,7 +2923,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp463 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins5800 = insertelement <4 x float> %tmp463, float undef, i32 1 + %vecins5800 = insertelement <4 x float> %tmp463, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp464 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2935,7 +2935,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5803 = insertelement <4 x float> %tmp465, float %add5802, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5803, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5803, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp466 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2947,11 +2947,11 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5806 = insertelement <4 x float> %tmp467, float %add5805, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5806, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5806, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp468 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp469 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2961,7 +2961,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp470 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp471 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2973,9 +2973,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5820 = insertelement <4 x float> %tmp472, float %add5819, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5820, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5820, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp473 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2983,7 +2983,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add5822 = fadd <4 x float> %tmp474, %tmp473 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add5822, <4 x float>* undef, align 16 + store volatile <4 x float> %add5822, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp475 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -2991,7 +2991,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp476 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins5825 = insertelement <4 x float> %tmp476, float undef, i32 0 + %vecins5825 = insertelement <4 x float> %tmp476, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp477 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3003,7 +3003,7 @@ tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5828 = insertelement <4 x float> %tmp478, float %add5827, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5828, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5828, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp479 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3015,19 +3015,19 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5831 = insertelement <4 x float> %tmp480, float %add5830, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp481 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext5837 = extractelement <4 x float> %tmp481, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins5839 = insertelement <4 x float> undef, float undef, i32 0 + %vecins5839 = insertelement <4 x float> undef, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5839, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5839, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp482 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3035,33 +3035,33 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp483 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins5842 = insertelement <4 x float> %tmp483, float undef, i32 1 + %vecins5842 = insertelement <4 x float> %tmp483, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5842, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5842, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp484 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp485 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins5845 = insertelement <4 x float> %tmp485, float undef, i32 2 + %vecins5845 = insertelement <4 x float> %tmp485, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5845, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5845, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add5850 = fadd <4 x float> undef, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add5850, <4 x float>* undef, align 16 + store volatile <4 x float> %add5850, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp486 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add5852 = fadd float undef, 2.985000e+02 + %add5852 = fadd float %val, 2.985000e+02 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp487 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5853 = insertelement <4 x float> %tmp487, float %add5852, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5853, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5853, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp488 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3073,17 +3073,17 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5856 = insertelement <4 x float> %tmp489, float %add5855, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5856, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5856, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp490 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add5858 = fadd float undef, 0x4071666660000000 + %add5858 = fadd float %val, 0x4071666660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp491 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() 
%vecins5859 = insertelement <4 x float> %tmp491, float %add5858, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5859, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5859, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp492 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3099,19 +3099,19 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5901 = insertelement <4 x float> %tmp494, float %add5900, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5901, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5901, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add5914 = fadd float undef, 0x40786E6660000000 + %add5914 = fadd float %val, 0x40786E6660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins5918 = insertelement <4 x float> undef, float undef, i32 3 + %vecins5918 = insertelement <4 x float> undef, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5918, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5918, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add5920 = fadd <4 x float> undef, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add5920, <4 x float>* undef, align 16 + store volatile <4 x float> %add5920, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add5934 = fadd <4 x float> undef, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3121,7 +3121,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp495 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp496 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3131,13 +3131,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins5996 = insertelement <4 x float> undef, float %add5995, i32 1 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins5996, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins5996, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp497 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext5997 = extractelement <4 x float> %tmp497, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp498 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3149,15 +3149,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6002 = insertelement <4 x float> %tmp499, float %add6001, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6002, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6002, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp500 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6004 = fadd <4 x float> undef, %tmp500 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6004, <4 x float>* undef, align 16 + store volatile <4 x float> %add6004, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp501 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3165,7 +3165,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp502 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6007 = insertelement <4 x float> %tmp502, float undef, i32 0 + %vecins6007 = insertelement <4 x float> %tmp502, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp503 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3173,9 +3173,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp504 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6024 = 
insertelement <4 x float> %tmp504, float undef, i32 1 + %vecins6024 = insertelement <4 x float> %tmp504, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6024, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6024, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp505 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3187,7 +3187,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6027 = insertelement <4 x float> %tmp506, float %add6026, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6027, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6027, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext6028 = extractelement <4 x float> undef, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3197,15 +3197,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6030 = insertelement <4 x float> %tmp507, float %add6029, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6030, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6030, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp508 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp509 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp510 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3213,7 +3213,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp511 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext6036 = extractelement <4 x float> undef, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3221,17 
+3221,17 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6038 = insertelement <4 x float> undef, float %add6037, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6038, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6038, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp512 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add6040 = fadd float undef, 0x4071ECCCC0000000 + %add6040 = fadd float %val, 0x4071ECCCC0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp513 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6041 = insertelement <4 x float> %tmp513, float %add6040, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6041, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6041, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp514 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3243,9 +3243,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6044 = insertelement <4 x float> %tmp515, float %add6043, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6044, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6044, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp516 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3253,15 +3253,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6046 = fadd <4 x float> %tmp517, %tmp516 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6046, <4 x float>* undef, align 16 + store volatile <4 x float> %add6046, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext6047 = extractelement <4 x float> undef, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp518 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6049 = insertelement <4 x float> %tmp518, float undef, i32 0 + %vecins6049 = insertelement <4 x 
float> %tmp518, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6049, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6049, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp519 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3269,19 +3269,19 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6051 = fadd float %vecext6050, 0x407E4E6660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6055 = insertelement <4 x float> undef, float undef, i32 2 + %vecins6055 = insertelement <4 x float> undef, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext6056 = extractelement <4 x float> undef, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp520 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext6061 = extractelement <4 x float> undef, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp521 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp522 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3295,9 +3295,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6072 = insertelement <4 x float> undef, float %add6071, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6072, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6072, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp523 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3305,7 +3305,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6074 = fadd <4 x float> %tmp524, %tmp523 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6074, <4 x float>* undef, align 16 + store volatile <4 x float> %add6074, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp525 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3317,23 +3317,23 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6077 = insertelement <4 x float> %tmp526, float %add6076, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6077, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6077, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp527 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add6079 = fadd float undef, 0xC07E9B3340000000 + %add6079 = fadd float %val, 0xC07E9B3340000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp528 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp529 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add6082 = fadd float undef, 0x407DCE6660000000 + %add6082 = fadd float %val, 0x407DCE6660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6083 = insertelement <4 x float> undef, float %add6082, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6083, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6083, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp530 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3343,9 +3343,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6086 = insertelement <4 x float> undef, float %add6085, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6086, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6086, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp531 = load <4 x 
float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3353,19 +3353,19 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6088 = fadd <4 x float> %tmp532, %tmp531 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6088, <4 x float>* undef, align 16 + store volatile <4 x float> %add6088, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp533 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext6089 = extractelement <4 x float> %tmp533, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add6107 = fadd float undef, 0xC06A166660000000 + %add6107 = fadd float %val, 0xC06A166660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp534 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6108 = insertelement <4 x float> %tmp534, float %add6107, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6108, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6108, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp535 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3375,7 +3375,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp536 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp537 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3395,7 +3395,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6119 = insertelement <4 x float> %tmp540, float %add6118, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6119, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6119, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp541 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3407,7 +3407,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6122 = insertelement <4 x float> 
%tmp542, float %add6121, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6122, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6122, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext6123 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3415,17 +3415,17 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp543 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext6126 = extractelement <4 x float> undef, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp544 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6128 = insertelement <4 x float> %tmp544, float undef, i32 3 + %vecins6128 = insertelement <4 x float> %tmp544, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6128, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6128, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp545 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3441,7 +3441,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6133 = insertelement <4 x float> undef, float %add6132, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6133, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6133, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext6134 = extractelement <4 x float> undef, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3463,9 +3463,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp551 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6178 = insertelement <4 x float> %tmp551, float undef, i32 1 + %vecins6178 = insertelement <4 x float> %tmp551, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> 
%vecins6178, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6178, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp552 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3487,13 +3487,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6184 = insertelement <4 x float> %tmp555, float %add6183, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6184, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6184, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp556 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6189 = insertelement <4 x float> undef, float undef, i32 0 + %vecins6189 = insertelement <4 x float> undef, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6189, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6189, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp557 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3505,7 +3505,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6192 = insertelement <4 x float> %tmp558, float %add6191, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6192, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6192, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp559 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3519,7 +3519,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6198 = insertelement <4 x float> %tmp561, float %add6197, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp562 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3527,7 +3527,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6200 = fadd <4 x float> %tmp563, %tmp562 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6200, <4 x float>* undef, align 16 + store volatile <4 x float> %add6200, <4 x float>* 
undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp564 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3535,7 +3535,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp565 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6203 = insertelement <4 x float> %tmp565, float undef, i32 0 + %vecins6203 = insertelement <4 x float> %tmp565, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp566 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3549,9 +3549,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp568 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6209 = insertelement <4 x float> %tmp568, float undef, i32 2 + %vecins6209 = insertelement <4 x float> %tmp568, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6209, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6209, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp569 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3559,7 +3559,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp570 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add6219 = fadd float undef, 0xC0596CCCC0000000 + %add6219 = fadd float %val, 0xC0596CCCC0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp571 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3573,7 +3573,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6228 = fadd <4 x float> undef, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6228, <4 x float>* undef, align 16 + store volatile <4 x float> %add6228, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext6229 = extractelement <4 x float> undef, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3583,7 +3583,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6231 = insertelement <4 x float> %tmp573, float %add6230, i32 0 tail call void asm 
sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6231, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6231, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp574 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3595,7 +3595,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6234 = insertelement <4 x float> %tmp575, float %add6233, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6234, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6234, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext6235 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3603,13 +3603,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6237 = insertelement <4 x float> undef, float %add6236, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6237, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6237, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp576 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6245 = insertelement <4 x float> undef, float undef, i32 0 + %vecins6245 = insertelement <4 x float> undef, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6245, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6245, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp577 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3619,17 +3619,17 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp578 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6251 = insertelement <4 x float> undef, float undef, i32 2 + %vecins6251 = insertelement <4 x float> undef, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp579 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add6253 = fadd float undef, 0xC0692999A0000000 + %add6253 = fadd float %val, 0xC0692999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6254 = insertelement <4 x 
float> undef, float %add6253, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6254, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6254, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp580 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3637,7 +3637,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6256 = fadd <4 x float> %tmp581, %tmp580 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6256, <4 x float>* undef, align 16 + store volatile <4 x float> %add6256, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp582 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3649,7 +3649,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6259 = insertelement <4 x float> %tmp583, float %add6258, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6259, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6259, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp584 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3661,7 +3661,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6262 = insertelement <4 x float> %tmp585, float %add6261, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6262, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6262, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp586 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3669,9 +3669,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp587 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6265 = insertelement <4 x float> %tmp587, float undef, i32 2 + %vecins6265 = insertelement <4 x float> %tmp587, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6265, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6265, <4 x float>* 
undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp588 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3683,9 +3683,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6268 = insertelement <4 x float> %tmp589, float %add6267, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6268, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6268, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp590 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3693,7 +3693,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6270 = fadd <4 x float> %tmp591, %tmp590 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6270, <4 x float>* undef, align 16 + store volatile <4 x float> %add6270, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp592 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3705,7 +3705,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6273 = insertelement <4 x float> %tmp593, float %add6272, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6273, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6273, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp594 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3717,7 +3717,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6276 = insertelement <4 x float> %tmp595, float %add6275, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6276, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6276, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp596 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3729,7 +3729,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6279 = insertelement <4 x float> %tmp597, float %add6278, i32 2 tail 
call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6279, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6279, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp598 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3739,21 +3739,21 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6282 = insertelement <4 x float> undef, float %add6281, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6282, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6282, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6284 = fadd <4 x float> undef, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext6285 = extractelement <4 x float> undef, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add6289 = fadd float undef, 0xC0738999A0000000 + %add6289 = fadd float %val, 0xC0738999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp599 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6293 = insertelement <4 x float> %tmp599, float undef, i32 2 + %vecins6293 = insertelement <4 x float> %tmp599, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6293, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6293, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp600 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3763,15 +3763,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6296 = insertelement <4 x float> undef, float %add6295, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6296, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6296, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp601 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6298 = fadd <4 
x float> undef, %tmp601 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6298, <4 x float>* undef, align 16 + store volatile <4 x float> %add6298, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp602 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3783,7 +3783,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6301 = insertelement <4 x float> %tmp603, float %add6300, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6301, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6301, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp604 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3795,7 +3795,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6304 = insertelement <4 x float> %tmp605, float %add6303, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6304, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6304, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp606 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3805,7 +3805,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6307 = insertelement <4 x float> undef, float %add6306, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6307, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6307, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp607 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3817,9 +3817,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6310 = insertelement <4 x float> %tmp608, float %add6309, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6310, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6310, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp609 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3827,7 +3827,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6312 = fadd <4 x float> %tmp610, %tmp609 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6312, <4 x float>* undef, align 16 + store volatile <4 x float> %add6312, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp611 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3849,13 +3849,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6657 = insertelement <4 x float> %tmp614, float %add6656, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6657, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6657, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6660 = insertelement <4 x float> undef, float undef, i32 3 + %vecins6660 = insertelement <4 x float> undef, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6660, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6660, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp615 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3867,7 +3867,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6665 = insertelement <4 x float> %tmp616, float %add6664, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp617 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3875,15 +3875,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp618 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp619 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6676 = fadd <4 x float> %tmp619, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6676, <4 x float>* undef, align 16 + store volatile <4 x float> %add6676, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp620 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3901,7 +3901,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp622 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp623 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3913,7 +3913,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6685 = insertelement <4 x float> %tmp624, float %add6684, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6685, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6685, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp625 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3925,15 +3925,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6688 = insertelement <4 x float> %tmp626, float %add6687, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6688, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6688, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp627 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6690 = fadd <4 x float> undef, %tmp627 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6690, <4 x float>* undef, align 16 + store volatile <4 x float> %add6690, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp628 = load <4 x float>, 
<4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3945,7 +3945,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6693 = insertelement <4 x float> %tmp629, float %add6692, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6693, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6693, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp630 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3957,7 +3957,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6696 = insertelement <4 x float> %tmp631, float %add6695, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6696, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6696, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp632 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3969,7 +3969,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6699 = insertelement <4 x float> %tmp633, float %add6698, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6699, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6699, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp634 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3981,17 +3981,17 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6702 = insertelement <4 x float> %tmp635, float %add6701, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6702, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6702, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp636 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp637 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6707 = insertelement <4 x float> undef, float undef, i32 0 + %vecins6707 = insertelement <4 x float> undef, float %val, i32 0 tail call void asm 
sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6707, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6707, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp638 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -3999,7 +3999,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp639 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp640 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4031,21 +4031,21 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp645 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add6726 = fadd float undef, 0x4059B999A0000000 + %add6726 = fadd float %val, 0x4059B999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp646 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6727 = insertelement <4 x float> %tmp646, float %add6726, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6727, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6727, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext6728 = extractelement <4 x float> undef, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6729 = fadd float %vecext6728, 0xC073466660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp647 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4053,7 +4053,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6732 = fadd <4 x float> %tmp648, %tmp647 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6732, <4 x float>* undef, 
align 16 + store volatile <4 x float> %add6732, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp649 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4065,7 +4065,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6735 = insertelement <4 x float> %tmp650, float %add6734, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6735, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6735, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp651 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4077,7 +4077,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6738 = insertelement <4 x float> %tmp652, float %add6737, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6738, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6738, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp653 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4089,7 +4089,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6741 = insertelement <4 x float> %tmp654, float %add6740, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6741, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6741, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp655 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4101,7 +4101,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6744 = insertelement <4 x float> %tmp656, float %add6743, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6744, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6744, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp657 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4109,21 +4109,21 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6746 = fadd <4 x float> %tmp658, %tmp657 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6746, 
<4 x float>* undef, align 16 + store volatile <4 x float> %add6746, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp659 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6749 = insertelement <4 x float> undef, float undef, i32 0 + %vecins6749 = insertelement <4 x float> undef, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6749, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6749, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp660 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add6751 = fadd float undef, 0x4075DE6660000000 + %add6751 = fadd float %val, 0x4075DE6660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6752 = insertelement <4 x float> undef, float %add6751, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6752, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6752, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp661 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4133,7 +4133,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6755 = insertelement <4 x float> undef, float %add6754, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6755, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6755, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp662 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4145,15 +4145,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6758 = insertelement <4 x float> %tmp663, float %add6757, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6758, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6758, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp664 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6760 = fadd <4 x float> undef, %tmp664 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6760, <4 x float>* undef, align 16 + store volatile <4 x float> %add6760, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp665 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4165,9 +4165,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp666 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp667 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4183,7 +4183,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6777 = insertelement <4 x float> %tmp669, float %add6776, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6777, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6777, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp670 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4195,9 +4195,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext6784 = extractelement <4 x float> %tmp671, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6875 = insertelement <4 x float> undef, float undef, i32 0 + %vecins6875 = insertelement <4 x float> undef, float %val, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6875, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6875, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp672 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4207,15 +4207,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6878 = insertelement <4 x float> undef, float %add6877, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6878, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6878, <4 x float>* undef, align 16 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add6888 = fadd float undef, 0x4057CCCCC0000000 + %add6888 = fadd float %val, 0x4057CCCCC0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp673 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6889 = insertelement <4 x float> %tmp673, float %add6888, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6889, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6889, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp674 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4227,7 +4227,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6892 = insertelement <4 x float> %tmp675, float %add6891, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6892, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6892, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp676 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4239,7 +4239,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6895 = insertelement <4 x float> %tmp677, float %add6894, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6895, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6895, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp678 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4249,7 +4249,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6900 = fadd <4 x float> %tmp680, %tmp679 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6900, <4 x float>* undef, align 16 + store volatile <4 x float> %add6900, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp681 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4261,9 +4261,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6903 = insertelement <4 x float> %tmp682, float %add6902, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6903, <4 x float>* undef, align 16 
+ store volatile <4 x float> %vecins6903, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add6905 = fadd float undef, 0x4031B33340000000 + %add6905 = fadd float %val, 0x4031B33340000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp683 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4271,9 +4271,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp684 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6912 = insertelement <4 x float> %tmp684, float undef, i32 3 + %vecins6912 = insertelement <4 x float> %tmp684, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp685 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4281,13 +4281,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6914 = fadd <4 x float> %tmp686, %tmp685 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6914, <4 x float>* undef, align 16 + store volatile <4 x float> %add6914, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext6915 = extractelement <4 x float> undef, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6920 = insertelement <4 x float> undef, float undef, i32 1 + %vecins6920 = insertelement <4 x float> undef, float %val, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6920, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6920, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext6921 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4295,11 +4295,11 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp687 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6926 = insertelement <4 x float> %tmp687, float undef, i32 3 + %vecins6926 = insertelement <4 x float> %tmp687, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6926, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6926, <4 x float>* undef, align 16 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp688 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4307,13 +4307,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6928 = fadd <4 x float> %tmp689, %tmp688 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6928, <4 x float>* undef, align 16 + store volatile <4 x float> %add6928, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add6930 = fadd float undef, -4.590000e+02 + %add6930 = fadd float %val, -4.590000e+02 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6931 = insertelement <4 x float> undef, float %add6930, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6931, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6931, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp690 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4323,7 +4323,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp691 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp692 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4349,15 +4349,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp695 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add6950 = fadd float undef, 0xC078F33340000000 + %add6950 = fadd float %val, 0xC078F33340000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp696 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6951 = insertelement <4 x float> %tmp696, float %add6950, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> 
%vecins6951, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6951, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp697 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4369,7 +4369,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6954 = insertelement <4 x float> %tmp698, float %add6953, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6954, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6954, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp699 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4377,7 +4377,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6956 = fadd <4 x float> %tmp700, %tmp699 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6956, <4 x float>* undef, align 16 + store volatile <4 x float> %add6956, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp701 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4389,7 +4389,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6959 = insertelement <4 x float> %tmp702, float %add6958, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6959, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6959, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp703 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4401,15 +4401,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6965 = insertelement <4 x float> %tmp704, float %add6964, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6965, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6965, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add6975 = fadd float undef, 0x406AF33340000000 + %add6975 = fadd float %val, 0x406AF33340000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp705 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6976 = insertelement <4 x float> %tmp705, float %add6975, i32 1 tail call void asm sideeffect 
"", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6976, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6976, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp706 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4417,7 +4417,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6984 = fadd <4 x float> %tmp707, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6984, <4 x float>* undef, align 16 + store volatile <4 x float> %add6984, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp708 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4429,7 +4429,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins6987 = insertelement <4 x float> %tmp709, float %add6986, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6987, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6987, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp710 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4439,11 +4439,11 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp711 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins6996 = insertelement <4 x float> %tmp711, float undef, i32 3 + %vecins6996 = insertelement <4 x float> %tmp711, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins6996, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins6996, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp712 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4451,7 +4451,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add6998 = fadd <4 x float> %tmp713, %tmp712 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add6998, <4 x float>* undef, align 16 + store volatile <4 x float> %add6998, <4 x float>* undef, align 16 tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp714 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4463,7 +4463,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7001 = insertelement <4 x float> %tmp715, float %add7000, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7001, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7001, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp716 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4475,11 +4475,11 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7004 = insertelement <4 x float> %tmp717, float %add7003, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp718 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add7140 = fadd float undef, 0x403D333340000000 + %add7140 = fadd float %val, 0x403D333340000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7141 = insertelement <4 x float> undef, float %add7140, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4489,7 +4489,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7144 = insertelement <4 x float> undef, float %add7143, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp719 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4501,15 +4501,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7150 = insertelement <4 x float> %tmp720, float %add7149, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7150, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7150, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp721 = load <4 x 
float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add7152 = fadd <4 x float> %tmp721, undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add7152, <4 x float>* undef, align 16 + store volatile <4 x float> %add7152, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext7156 = extractelement <4 x float> undef, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4519,7 +4519,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7158 = insertelement <4 x float> %tmp722, float %add7157, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7158, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7158, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp723 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4531,13 +4531,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7161 = insertelement <4 x float> %tmp724, float %add7160, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7161, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7161, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add7168 = fadd float undef, 0xC072F199A0000000 + %add7168 = fadd float %val, 0xC072F199A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp725 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext7170 = extractelement <4 x float> undef, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4545,11 +4545,11 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7172 = insertelement <4 x float> undef, float %add7171, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7172, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7172, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext7173 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x 
float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp726 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4559,7 +4559,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7421 = insertelement <4 x float> undef, float %add7420, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7421, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7421, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp727 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4571,7 +4571,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7424 = insertelement <4 x float> %tmp728, float %add7423, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7424, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7424, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp729 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4583,11 +4583,11 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7427 = insertelement <4 x float> %tmp730, float %add7426, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7427, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7427, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext7428 = extractelement <4 x float> undef, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp731 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4599,9 +4599,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7570 = insertelement <4 x float> %tmp732, float %add7569, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7570, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7570, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm 
sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp733 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4609,7 +4609,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add7572 = fadd <4 x float> %tmp734, %tmp733 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add7572, <4 x float>* undef, align 16 + store volatile <4 x float> %add7572, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext7573 = extractelement <4 x float> undef, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4619,11 +4619,11 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7575 = insertelement <4 x float> %tmp735, float %add7574, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7575, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7575, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp736 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add7577 = fadd float undef, 0xC051666660000000 + %add7577 = fadd float %val, 0xC051666660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp737 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4635,7 +4635,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7581 = insertelement <4 x float> undef, float %add7580, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7581, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7581, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp739 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4647,7 +4647,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7584 = insertelement <4 x float> %tmp740, float %add7583, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp741 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4655,7 +4655,7 @@ tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add7586 = fadd <4 x float> %tmp742, %tmp741 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add7586, <4 x float>* undef, align 16 + store volatile <4 x float> %add7586, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp743 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4665,7 +4665,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp744 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp745 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4677,15 +4677,15 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7592 = insertelement <4 x float> %tmp746, float %add7591, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7592, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7592, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp747 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext7593 = extractelement <4 x float> %tmp747, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins7595 = insertelement <4 x float> undef, float undef, i32 2 + %vecins7595 = insertelement <4 x float> undef, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7595, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7595, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp748 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4693,17 +4693,17 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add7597 = fadd float %vecext7596, 0x407E666660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp749 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add7616 
= fadd float undef, 0xC04DE66660000000 + %add7616 = fadd float %val, 0xC04DE66660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp750 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7617 = insertelement <4 x float> %tmp750, float %add7616, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7617, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7617, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp751 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4715,17 +4715,17 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7620 = insertelement <4 x float> %tmp752, float %add7619, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7620, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7620, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp753 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add7622 = fadd float undef, 0xC054B999A0000000 + %add7622 = fadd float %val, 0xC054B999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp754 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins7626 = insertelement <4 x float> undef, float undef, i32 3 + %vecins7626 = insertelement <4 x float> undef, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7626, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7626, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp755 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4733,7 +4733,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add7628 = fadd <4 x float> %tmp756, %tmp755 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add7628, <4 x float>* undef, align 16 + store volatile <4 x float> %add7628, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp757 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4745,13 +4745,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7631 = insertelement 
<4 x float> %tmp758, float %add7630, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add7639 = fadd float undef, 0x407C5999A0000000 + %add7639 = fadd float %val, 0x407C5999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp759 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7640 = insertelement <4 x float> %tmp759, float %add7639, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp760 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4759,9 +4759,9 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp761 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add7644 = fadd float undef, 0xC0758999A0000000 + %add7644 = fadd float %val, 0xC0758999A0000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp762 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4773,7 +4773,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7648 = insertelement <4 x float> %tmp763, float %add7647, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7648, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7648, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp764 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4785,7 +4785,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7651 = insertelement <4 x float> %tmp765, float %add7650, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7651, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7651, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp766 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4797,7 +4797,7 @@ tail call void asm sideeffect "", 
"~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7654 = insertelement <4 x float> %tmp767, float %add7653, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7654, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7654, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp768 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4805,7 +4805,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add7656 = fadd <4 x float> %tmp769, %tmp768 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add7656, <4 x float>* undef, align 16 + store volatile <4 x float> %add7656, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp770 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4817,7 +4817,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7659 = insertelement <4 x float> %tmp771, float %add7658, i32 0 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7659, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7659, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp772 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4829,7 +4829,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7662 = insertelement <4 x float> %tmp773, float %add7661, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7662, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7662, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp774 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4841,7 +4841,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7665 = insertelement <4 x float> %tmp775, float %add7664, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7665, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7665, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp776 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4851,7 +4851,7 @@ tail call void 
asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7668 = insertelement <4 x float> undef, float %add7667, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7668, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7668, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp777 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4873,23 +4873,23 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp781 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp782 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %add7731 = fadd float undef, 1.900000e+02 + %add7731 = fadd float %val, 1.900000e+02 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp783 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins7732 = insertelement <4 x float> %tmp783, float %add7731, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7732, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7732, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp784 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins7735 = insertelement <4 x float> %tmp784, float undef, i32 2 + %vecins7735 = insertelement <4 x float> %tmp784, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins7735, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7735, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp785 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4897,11 +4897,11 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add7737 = fadd float %vecext7736, 0xC06AF66660000000 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins7850 = insertelement <4 x float> undef, float undef, i32 3 + %vecins7850 = insertelement <4 x float> undef, float %val, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> 
%vecins7850, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins7850, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> , <4 x float>* undef + store volatile <4 x float> , <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp786 = load <4 x float>, <4 x float>* undef tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4909,7 +4909,7 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %add7852 = fadd <4 x float> %tmp787, %tmp786 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %add7852, <4 x float>* undef, align 16 + store volatile <4 x float> %add7852, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp788 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4921,13 +4921,13 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins9398 = insertelement <4 x float> %tmp789, float %add9397, i32 1 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins9398, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins9398, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecext9399 = extractelement <4 x float> undef, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp790 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - %vecins9401 = insertelement <4 x float> %tmp790, float undef, i32 2 + %vecins9401 = insertelement <4 x float> %tmp790, float %val, i32 2 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp791 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() @@ -4939,11 +4939,11 @@ tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %vecins9404 = insertelement <4 x float> %tmp792, float %add9403, i32 3 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> %vecins9404, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins9404, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp793 = load <4 x float>, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"() %tmp794 = load <4 x 
float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -4959,7 +4959,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp796 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp797 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -4971,7 +4971,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9415 = insertelement <4 x float> %tmp798, float %add9414, i32 2
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9415, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9415, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp799 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -4983,9 +4983,9 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9418 = insertelement <4 x float> %tmp800, float %add9417, i32 3
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9418, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9418, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> , <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp801 = load <4 x float>, <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -4993,7 +4993,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %add9420 = fadd <4 x float> %tmp802, %tmp801
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %add9420, <4 x float>* undef, align 16
+ store volatile <4 x float> %add9420, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp803 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5001,9 +5001,9 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp804 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %vecins9423 = insertelement <4 x float> %tmp804, float undef, i32 0
+ %vecins9423 = insertelement <4 x float> %tmp804, float %val, i32 0
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9423, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9423, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp805 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5015,17 +5015,17 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9426 = insertelement <4 x float> %tmp806, float %add9425, i32 1
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9426, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9426, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp807 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add9428 = fadd float undef, 0xC065466660000000
+ %add9428 = fadd float %val, 0xC065466660000000
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp808 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9429 = insertelement <4 x float> %tmp808, float %add9428, i32 2
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9429, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9429, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp809 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5037,7 +5037,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9432 = insertelement <4 x float> %tmp810, float %add9431, i32 3
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> , <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp811 = load <4 x float>, <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5045,7 +5045,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %add9434 = fadd <4 x float> %tmp812, %tmp811
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add9436 = fadd float undef, -3.185000e+02
+ %add9436 = fadd float %val, -3.185000e+02
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp813 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5053,7 +5053,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp814 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp815 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5065,7 +5065,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9443 = insertelement <4 x float> %tmp816, float %add9442, i32 2
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9443, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9443, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp817 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5077,7 +5077,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9446 = insertelement <4 x float> %tmp818, float %add9445, i32 3
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9446, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9446, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp819 = load <4 x float>, <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5085,23 +5085,23 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %add9448 = fadd <4 x float> %tmp820, %tmp819
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %add9448, <4 x float>* undef, align 16
+ store volatile <4 x float> %add9448, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add9450 = fadd float undef, 0xC0718199A0000000
+ %add9450 = fadd float %val, 0xC0718199A0000000
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp821 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9451 = insertelement <4 x float> %tmp821, float %add9450, i32 0
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9451, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9451, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp822 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp823 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %vecins9454 = insertelement <4 x float> %tmp823, float undef, i32 1
+ %vecins9454 = insertelement <4 x float> %tmp823, float %val, i32 1
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9454, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9454, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp824 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5113,23 +5113,23 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9457 = insertelement <4 x float> %tmp825, float %add9456, i32 2
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9457, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9457, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecext9458 = extractelement <4 x float> undef, i32 3
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp826 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %vecins9460 = insertelement <4 x float> %tmp826, float undef, i32 3
+ %vecins9460 = insertelement <4 x float> %tmp826, float %val, i32 3
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9460, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9460, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> , <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp827 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %add9462 = fadd <4 x float> %tmp827, undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %add9462, <4 x float>* undef, align 16
+ store volatile <4 x float> %add9462, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp828 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5137,23 +5137,23 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp829 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %vecins9465 = insertelement <4 x float> %tmp829, float undef, i32 0
+ %vecins9465 = insertelement <4 x float> %tmp829, float %val, i32 0
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add9467 = fadd float undef, 0x405D666660000000
+ %add9467 = fadd float %val, 0x405D666660000000
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp830 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9468 = insertelement <4 x float> %tmp830, float %add9467, i32 1
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9468, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9468, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp831 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add9470 = fadd float undef, 0x4077033340000000
+ %add9470 = fadd float %val, 0x4077033340000000
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp832 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecext9472 = extractelement <4 x float> undef, i32 3
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5163,9 +5163,9 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9474 = insertelement <4 x float> %tmp833, float %add9473, i32 3
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9474, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9474, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> , <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp834 = load <4 x float>, <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5173,7 +5173,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %add9476 = fadd <4 x float> %tmp835, %tmp834
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %add9476, <4 x float>* undef, align 16
+ store volatile <4 x float> %add9476, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp836 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5185,17 +5185,17 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9479 = insertelement <4 x float> %tmp837, float %add9478, i32 0
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9479, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9479, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp838 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add9481 = fadd float undef, 0x407BE33340000000
+ %add9481 = fadd float %val, 0x407BE33340000000
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp839 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9482 = insertelement <4 x float> %tmp839, float %add9481, i32 1
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9482, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9482, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecext9483 = extractelement <4 x float> undef, i32 2
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5205,7 +5205,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9485 = insertelement <4 x float> %tmp840, float %add9484, i32 2
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9485, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9485, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp841 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5215,13 +5215,13 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp842 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> , <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp843 = load <4 x float>, <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp844 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5229,15 +5229,15 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %add9492 = fadd float %vecext9491, 0x407C166660000000
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add9495 = fadd float undef, 0x407DBB3340000000
+ %add9495 = fadd float %val, 0x407DBB3340000000
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp845 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9496 = insertelement <4 x float> %tmp845, float %add9495, i32 1
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9496, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9496, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp846 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5249,41 +5249,41 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9499 = insertelement <4 x float> %tmp847, float %add9498, i32 2
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9499, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9499, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp848 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add9501 = fadd float undef, 0x407D5CCCC0000000
+ %add9501 = fadd float %val, 0x407D5CCCC0000000
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp849 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9502 = insertelement <4 x float> %tmp849, float %add9501, i32 3
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9502, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9502, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp850 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %add9504 = fadd <4 x float> %tmp850, undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %add9504, <4 x float>* undef, align 16
+ store volatile <4 x float> %add9504, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp851 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add9506 = fadd float undef, 0x4076EE6660000000
+ %add9506 = fadd float %val, 0x4076EE6660000000
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp852 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9507 = insertelement <4 x float> %tmp852, float %add9506, i32 0
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9507, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9507, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp853 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add9509 = fadd float undef, 0xC0535999A0000000
+ %add9509 = fadd float %val, 0xC0535999A0000000
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp854 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp855 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5295,7 +5295,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9513 = insertelement <4 x float> %tmp856, float %add9512, i32 2
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9513, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9513, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp857 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5303,11 +5303,11 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp858 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %vecins9516 = insertelement <4 x float> %tmp858, float undef, i32 3
+ %vecins9516 = insertelement <4 x float> %tmp858, float %val, i32 3
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9516, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9516, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> , <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp859 = load <4 x float>, <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5319,9 +5319,9 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp862 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %vecins9521 = insertelement <4 x float> %tmp862, float undef, i32 0
+ %vecins9521 = insertelement <4 x float> %tmp862, float %val, i32 0
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9521, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9521, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp863 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5333,25 +5333,25 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9524 = insertelement <4 x float> %tmp864, float %add9523, i32 1
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9524, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9524, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp865 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add9526 = fadd float undef, 0x4072833340000000
+ %add9526 = fadd float %val, 0x4072833340000000
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp866 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9527 = insertelement <4 x float> %tmp866, float %add9526, i32 2
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9527, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9527, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp867 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %vecins9530 = insertelement <4 x float> undef, float undef, i32 3
+ %vecins9530 = insertelement <4 x float> undef, float %val, i32 3
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9530, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9530, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> , <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp868 = load <4 x float>, <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5363,9 +5363,9 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp870 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %vecins9535 = insertelement <4 x float> %tmp870, float undef, i32 0
+ %vecins9535 = insertelement <4 x float> %tmp870, float %val, i32 0
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9535, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9535, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp871 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5377,7 +5377,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9538 = insertelement <4 x float> %tmp872, float %add9537, i32 1
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9538, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9538, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp873 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5385,17 +5385,17 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %add9543 = fadd float %vecext9542, 0x4050D999A0000000
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add9576 = fadd float undef, 0x40219999A0000000
+ %add9576 = fadd float %val, 0x40219999A0000000
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9577 = insertelement <4 x float> undef, float %add9576, i32 0
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9577, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9577, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp874 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %vecins9580 = insertelement <4 x float> undef, float undef, i32 1
+ %vecins9580 = insertelement <4 x float> undef, float %val, i32 1
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9580, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9580, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp875 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5407,11 +5407,11 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9583 = insertelement <4 x float> %tmp876, float %add9582, i32 2
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9583, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9583, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp877 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecext9673 = extractelement <4 x float> undef, i32 0
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5421,7 +5421,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9675 = insertelement <4 x float> %tmp878, float %add9674, i32 0
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9675, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9675, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecext9676 = extractelement <4 x float> undef, i32 1
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5441,7 +5441,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9681 = insertelement <4 x float> %tmp881, float %add9680, i32 2
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9681, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9681, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp882 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5451,7 +5451,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %add9686 = fadd <4 x float> %tmp883, undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %add9686, <4 x float>* undef, align 16
+ store volatile <4 x float> %add9686, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp884 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5481,19 +5481,19 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9695 = insertelement <4 x float> %tmp888, float %add9694, i32 2
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9695, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9695, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp889 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add9697 = fadd float undef, 0x4058D33340000000
+ %add9697 = fadd float %val, 0x4058D33340000000
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp890 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9698 = insertelement <4 x float> %tmp890, float %add9697, i32 3
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9698, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9698, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> , <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp891 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5509,7 +5509,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9703 = insertelement <4 x float> %tmp893, float %add9702, i32 0
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9703, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9703, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp894 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5521,7 +5521,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9706 = insertelement <4 x float> %tmp895, float %add9705, i32 1
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9706, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9706, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecext9707 = extractelement <4 x float> undef, i32 2
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5531,23 +5531,23 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9709 = insertelement <4 x float> %tmp896, float %add9708, i32 2
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9709, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9709, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp897 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecext9710 = extractelement <4 x float> %tmp897, i32 3
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %vecins9712 = insertelement <4 x float> undef, float undef, i32 3
+ %vecins9712 = insertelement <4 x float> undef, float %val, i32 3
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9712, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9712, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> , <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp898 = load <4 x float>, <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %add9714 = fadd <4 x float> undef, %tmp898
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %add9714, <4 x float>* undef, align 16
+ store volatile <4 x float> %add9714, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp899 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5555,9 +5555,9 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp900 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %vecins9717 = insertelement <4 x float> %tmp900, float undef, i32 0
+ %vecins9717 = insertelement <4 x float> %tmp900, float %val, i32 0
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9717, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9717, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp901 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5569,7 +5569,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9720 = insertelement <4 x float> %tmp902, float %add9719, i32 1
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9720, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9720, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp903 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5581,7 +5581,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9723 = insertelement <4 x float> %tmp904, float %add9722, i32 2
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9723, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9723, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp905 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5593,15 +5593,15 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9726 = insertelement <4 x float> %tmp906, float %add9725, i32 3
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9726, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9726, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> , <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp907 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %add9728 = fadd <4 x float> %tmp907, undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %add9728, <4 x float>* undef, align 16
+ store volatile <4 x float> %add9728, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp908 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5613,17 +5613,17 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9731 = insertelement <4 x float> %tmp909, float %add9730, i32 0
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9731, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9731, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp910 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add9733 = fadd float undef, 0xC050F33340000000
+ %add9733 = fadd float %val, 0xC050F33340000000
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp911 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9734 = insertelement <4 x float> %tmp911, float %add9733, i32 1
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9734, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9734, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp912 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5635,23 +5635,23 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9737 = insertelement <4 x float> %tmp913, float %add9736, i32 2
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9737, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9737, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp914 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecext9738 = extractelement <4 x float> %tmp914, i32 3
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %vecins9740 = insertelement <4 x float> undef, float undef, i32 3
+ %vecins9740 = insertelement <4 x float> undef, float %val, i32 3
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9740, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9740, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> , <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp915 = load <4 x float>, <4 x float>* undef
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp916 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp917 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5661,7 +5661,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9745 = insertelement <4 x float> undef, float %add9744, i32 0
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9745, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9745, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp918 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5673,7 +5673,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9748 = insertelement <4 x float> %tmp919, float %add9747, i32 1
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9748, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9748, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp920 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5685,7 +5685,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9751 = insertelement <4 x float> %tmp921, float %add9750, i32 2
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9751, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9751, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp922 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5697,9 +5697,9 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9754 = insertelement <4 x float> %tmp923, float %add9753, i32 3
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9754, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9754, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* %.compoundliteral9755
+ store volatile <4 x float> , <4 x float>* %.compoundliteral9755
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp924 = load <4 x float>, <4 x float>* %.compoundliteral9755
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5717,7 +5717,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9759 = insertelement <4 x float> %tmp927, float %add9758, i32 0
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9759, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9759, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp928 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5729,17 +5729,17 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9762 = insertelement <4 x float> %tmp929, float %add9761, i32 1
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9762, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9762, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp930 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add9764 = fadd float undef, 0xC060E66660000000
+ %add9764 = fadd float %val, 0xC060E66660000000
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp931 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9765 = insertelement <4 x float> %tmp931, float %add9764, i32 2
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9765, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9765, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp932 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5751,9 +5751,9 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9768 = insertelement <4 x float> %tmp933, float %add9767, i32 3
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9768, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9768, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* %.compoundliteral9769
+ store volatile <4 x float> , <4 x float>* %.compoundliteral9769
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp934 = load <4 x float>, <4 x float>* %.compoundliteral9769
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5761,7 +5761,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %add9770 = fadd <4 x float> %tmp935, %tmp934
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %add9770, <4 x float>* undef, align 16
+ store volatile <4 x float> %add9770, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %tmp936 = load <4 x float>, <4 x float>* undef, align 16
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5773,7 +5773,7 @@
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
 %vecins9773 = insertelement <4 x float> %tmp937, float %add9772, i32 0
 tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9773, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9773, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp938 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5785,25 +5785,25 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins9776 = insertelement <4 x float> %tmp939, float %add9775, i32 1
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins9776, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins9776, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecext9816 = extractelement <4 x float> undef, i32 1
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp940 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %vecins9818 = insertelement <4 x float> %tmp940, float undef, i32 1
+ %vecins9818 = insertelement <4 x float> %tmp940, float %val, i32 1
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp941 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add10388 = fadd float undef, 4.755000e+02
+ %add10388 = fadd float %val, 4.755000e+02
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp942 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10389 = insertelement <4 x float> %tmp942, float %add10388, i32 0
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10389, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10389, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp943 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5815,19 +5815,19 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10392 = insertelement <4 x float> %tmp944, float %add10391, i32 1
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10392, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10392, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp945 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp946 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add10405 = fadd float undef, -5.650000e+01
+ %add10405 = fadd float %val, -5.650000e+01
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp947 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10406 = insertelement <4 x float> %tmp947, float %add10405, i32 1
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10406, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10406, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp948 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5839,7 +5839,7 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10409 = insertelement <4 x float> %tmp949, float %add10408, i32 2
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10409, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10409, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp950 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5849,9 +5849,9 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp951 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* %.compoundliteral10413
+ store volatile <4 x float> , <4 x float>* %.compoundliteral10413
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp952 = load <4 x float>, <4 x float>* %.compoundliteral10413
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5859,7 +5859,7 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%add10414 = fadd <4 x float> %tmp953, %tmp952
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %add10414, <4 x float>* undef, align 16
+ store volatile <4 x float> %add10414, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp954 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5871,7 +5871,7 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10417 = insertelement <4 x float> %tmp955, float %add10416, i32 0
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10417, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10417, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp956 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5883,15 +5883,15 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10420 = insertelement <4 x float> %tmp957, float %add10419, i32 1
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10420, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10420, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add10422 = fadd float undef, 0xC0662CCCC0000000
+ %add10422 = fadd float %val, 0xC0662CCCC0000000
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecext10424 = extractelement <4 x float> undef, i32 3
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> , <4 x float>* undef
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp958 = load <4 x float>, <4 x float>* undef
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5899,7 +5899,7 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%add10428 = fadd <4 x float> %tmp959, %tmp958
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %add10428, <4 x float>* undef, align 16
+ store volatile <4 x float> %add10428, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp960 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5909,13 +5909,13 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp961 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add10436 = fadd float undef, 0xC06AF33340000000
+ %add10436 = fadd float %val, 0xC06AF33340000000
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp962 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10437 = insertelement <4 x float> %tmp962, float %add10436, i32 2
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10437, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10437, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecext10438 = extractelement <4 x float> undef, i32 3
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5925,9 +5925,9 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10440 = insertelement <4 x float> %tmp963, float %add10439, i32 3
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10440, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10440, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> , <4 x float>* undef
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp964 = load <4 x float>, <4 x float>* undef
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5941,7 +5941,7 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10445 = insertelement <4 x float> %tmp966, float %add10444, i32 0
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10445, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10445, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp967 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5953,7 +5953,7 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10448 = insertelement <4 x float> %tmp968, float %add10447, i32 1
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10448, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10448, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp969 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5965,7 +5965,7 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10451 = insertelement <4 x float> %tmp970, float %add10450, i32 2
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10451, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10451, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp971 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5975,7 +5975,7 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10454 = insertelement <4 x float> undef, float %add10453, i32 3
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> , <4 x float>* undef
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp972 = load <4 x float>, <4 x float>* undef
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5983,7 +5983,7 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%add10456 = fadd <4 x float> %tmp973, %tmp972
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %add10456, <4 x float>* undef, align 16
+ store volatile <4 x float> %add10456, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp974 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -5993,7 +5993,7 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10459 = insertelement <4 x float> undef, float %add10458, i32 0
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10459, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10459, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp975 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -6015,7 +6015,7 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10465 = insertelement <4 x float> %tmp978, float %add10464, i32 2
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10465, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10465, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp979 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -6027,9 +6027,9 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10468 = insertelement <4 x float> %tmp980, float %add10467, i32 3
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10468, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10468, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> , <4 x float>* undef
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp981 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -6045,7 +6045,7 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10473 = insertelement <4 x float> %tmp983, float %add10472, i32 0
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10473, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10473, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp984 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -6057,15 +6057,15 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10476 = insertelement <4 x float> %tmp985, float %add10475, i32 1
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10476, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10476, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add10489 = fadd float undef, 0x4074666660000000
+ %add10489 = fadd float %val, 0x4074666660000000
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp986 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10490 = insertelement <4 x float> %tmp986, float %add10489, i32 1
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10490, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10490, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp987 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -6079,9 +6079,9 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10510 = insertelement <4 x float> %tmp989, float %add10509, i32 3
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10510, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10510, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> , <4 x float>* undef
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp990 = load <4 x float>, <4 x float>* undef
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -6097,17 +6097,17 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10515 = insertelement <4 x float> %tmp992, float %add10514, i32 0
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10515, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10515, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp993 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- %add10562 = fadd float undef, 2.035000e+02
+ %add10562 = fadd float %val, 2.035000e+02
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp994 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10563 = insertelement <4 x float> %tmp994, float %add10562, i32 2
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10563, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10563, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp995 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -6119,9 +6119,9 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10566 = insertelement <4 x float> %tmp996, float %add10565, i32 3
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10566, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10566, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> , <4 x float>* %.compoundliteral10567
+ store volatile <4 x float> , <4 x float>* %.compoundliteral10567
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp997 = load <4 x float>, <4 x float>* %.compoundliteral10567
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -6139,7 +6139,7 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10571 = insertelement <4 x float> %tmp1000, float %add10570, i32 0
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10571, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10571, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%tmp1001 = load <4 x float>, <4 x float>* undef, align 16
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
@@ -6151,56 +6151,56 @@
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
%vecins10574 = insertelement <4 x float> %tmp1002, float %add10573, i32 1
tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q14}{q15}"()
- store <4 x float> %vecins10574, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10574, <4 x float>* undef, align 16
%tmp1003 = load <4 x float>, <4 x float>* undef, align 16
%vecext10575 = extractelement <4 x float> %tmp1003, i32 2
%tmp1004 = load <4 x float>, <4 x float>* undef, align 16
- %vecins10577 = insertelement <4 x float> %tmp1004, float undef, i32 2
- store <4 x float> %vecins10577, <4 x float>* undef, align 16
+ %vecins10577 = insertelement <4 x float> %tmp1004, float %val, i32 2
+ store volatile <4 x float> %vecins10577, <4 x float>* undef, align 16
%tmp1005 = load <4 x float>, <4 x float>* undef, align 16
%vecext10578 = extractelement <4 x float> %tmp1005, i32 3
%add10579 = fadd float %vecext10578, 0x4076566660000000
%tmp1006 = load <4 x float>, <4 x float>* undef, align 16
%vecins10580 = insertelement <4 x float> %tmp1006, float %add10579, i32 3
- store <4 x float> %vecins10580, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* %.compoundliteral10581
+ store volatile <4 x float> %vecins10580, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* %.compoundliteral10581
%tmp1007 = load <4 x float>, <4 x float>* %.compoundliteral10581
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
%tmp1008 = load <4 x float>, <4 x float>* undef, align 16
%vecext10583 = extractelement <4 x float> %tmp1008, i32 0
%add10584 = fadd float %vecext10583, 0xC060533340000000
%tmp1009 = load <4 x float>, <4 x float>* undef, align 16
%vecins10585 = insertelement <4 x float> %tmp1009, float %add10584, i32 0
- store <4 x float> %vecins10585, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10585, <4 x float>* undef, align 16
%tmp1010 = load <4 x float>, <4 x float>* undef, align 16
%vecext10586 = extractelement <4 x float> %tmp1010, i32 1
%add10587 = fadd float %vecext10586, 0xC0694CCCC0000000
%tmp1011 = load <4 x float>, <4 x float>* undef, align 16
%vecins10588 = insertelement <4 x float> %tmp1011, float %add10587, i32 1
- store <4 x float> %vecins10588, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10588, <4 x float>* undef, align 16
%tmp1012 = load <4 x float>, <4 x float>* undef, align 16
%vecext10589 = extractelement <4 x float> %tmp1012, i32 2
%add10590 = fadd float %vecext10589, 0xC0541999A0000000
%tmp1013 = load <4 x float>, <4 x float>* undef, align 16
%vecins10591 = insertelement <4 x float> %tmp1013, float %add10590, i32 2
- store <4 x float> %vecins10591, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10591, <4 x float>* undef, align 16
%tmp1014 = load <4 x float>, <4 x float>* undef, align 16
%vecext10592 = extractelement <4 x float> %tmp1014, i32 3
%add10593 = fadd float %vecext10592, 0xC06C566660000000
%tmp1015 = load <4 x float>, <4 x float>* undef, align 16
%vecins10594 = insertelement <4 x float> %tmp1015, float %add10593, i32 3
- store <4 x float> %vecins10594, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* %.compoundliteral10595
+ store volatile <4 x float> %vecins10594, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* %.compoundliteral10595
%tmp1016 = load <4 x float>, <4 x float>* %.compoundliteral10595
%tmp1017 = load <4 x float>, <4 x float>* undef, align 16
%add10596 = fadd <4 x float> %tmp1017, %tmp1016
- store <4 x float> %add10596, <4 x float>* undef, align 16
+ store volatile <4 x float> %add10596, <4 x float>* undef, align 16
%tmp1018 = load <4 x float>, <4 x float>* undef, align 16
%vecext10597 = extractelement <4 x float> %tmp1018, i32 0
%add10598 = fadd float %vecext10597, 0x40640999A0000000
%tmp1019 = load <4 x float>, <4 x float>* undef, align 16
%vecins10599 = insertelement <4 x float> %tmp1019, float %add10598, i32 0
- store <4 x float> %vecins10599, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10599, <4 x float>* undef, align 16
%tmp1020 = load <4 x float>, <4 x float>* undef, align 16
%vecext10600 = extractelement <4 x float> %tmp1020, i32 1
%add10601 = fadd float %vecext10600, 0xC073966660000000
@@ -6211,48 +6211,48 @@
%add10604 = fadd float %vecext10603, 1.780000e+02
%tmp1023 = load <4 x float>, <4 x float>* undef, align 16
%vecins10605 = insertelement <4 x float> %tmp1023, float %add10604, i32 2
- store <4 x float> %vecins10605, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10605, <4 x float>* undef, align 16
%tmp1024 = load <4 x float>, <4 x float>* undef, align 16
- %add10607 = fadd float undef, 0x4070A33340000000
+ %add10607 = fadd float %val, 0x4070A33340000000
%tmp1025 = load <4 x float>, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* %.compoundliteral10609
+ store volatile <4 x float> , <4 x float>* %.compoundliteral10609
%tmp1026 = load <4 x float>, <4 x float>* %.compoundliteral10609
%tmp1027 = load <4 x float>, <4 x float>* undef, align 16
%tmp1028 = load <4 x float>, <4 x float>* undef, align 16
%vecext10611 = extractelement <4 x float> %tmp1028, i32 0
%add10612 = fadd float %vecext10611, 0x40757199A0000000
%vecins10613 = insertelement <4 x float> undef, float %add10612, i32 0
- store <4 x float> %vecins10613, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10613, <4 x float>* undef, align 16
%tmp1029 = load <4 x float>, <4 x float>* undef, align 16
%vecext10614 = extractelement <4 x float> %tmp1029, i32 1
%add10615 = fadd float %vecext10614, 0x40740CCCC0000000
%tmp1030 = load <4 x float>, <4 x float>* undef, align 16
%vecins10616 = insertelement <4 x float> %tmp1030, float %add10615, i32 1
- store <4 x float> %vecins10616, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10616, <4 x float>* undef, align 16
%tmp1031 = load <4 x float>, <4 x float>* undef, align 16
%vecext10617 = extractelement <4 x float> %tmp1031, i32 2
%add10618 = fadd float %vecext10617, 0xC012CCCCC0000000
%tmp1032 = load <4 x float>, <4 x float>* undef, align 16
%vecins10619 = insertelement <4 x float> %tmp1032, float %add10618, i32 2
- store <4 x float> %vecins10619, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10619, <4 x float>* undef, align 16
%tmp1033 = load <4 x float>, <4 x float>* undef, align 16
%vecext10620 = extractelement <4 x float> %tmp1033, i32 3
%add10621 = fadd float %vecext10620, 0x406E566660000000
%tmp1034 = load <4 x float>, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* %.compoundliteral10623
+ store volatile <4 x float> , <4 x float>* %.compoundliteral10623
%tmp1035 = load <4 x float>, <4 x float>* %.compoundliteral10623
%add10624 = fadd <4 x float> undef, %tmp1035
%tmp1036 = load <4 x float>, <4 x float>* undef, align 16
%vecext10625 = extractelement <4 x float> %tmp1036, i32 0
%tmp1037 = load <4 x float>, <4 x float>* undef, align 16
- %vecins10627 = insertelement <4 x float> %tmp1037, float undef, i32 0
- store <4 x float> %vecins10627, <4 x float>* undef, align 16
+ %vecins10627 = insertelement <4 x float> %tmp1037, float %val, i32 0
+ store volatile <4 x float> %vecins10627, <4 x float>* undef, align 16
%tmp1038 = load <4 x float>, <4 x float>* undef, align 16
%vecext10628 = extractelement <4 x float> %tmp1038, i32 1
%add10629 = fadd float %vecext10628, 0x407E3CCCC0000000
%tmp1039 = load <4 x float>, <4 x float>* undef, align 16
%vecins10630 = insertelement <4 x float> %tmp1039, float %add10629, i32 1
- store <4 x float> %vecins10630, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10630, <4 x float>* undef, align 16
%tmp1040 = load <4 x float>, <4 x float>* undef, align 16
%vecext10631 = extractelement <4 x float> %tmp1040, i32 2
%tmp1041 = load <4 x float>, <4 x float>* undef, align 16
@@ -6261,8 +6261,8 @@
%add10635 = fadd float %vecext10634, 0xC067533340000000
%tmp1043 = load <4 x float>, <4 x float>* undef, align 16
%vecins10636 = insertelement <4 x float> %tmp1043, float %add10635, i32 3
- store <4 x float> %vecins10636, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* %.compoundliteral10637
+ store volatile <4 x float> %vecins10636, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* %.compoundliteral10637
%tmp1044 = load <4 x float>, <4 x float>* undef, align 16
%add10638 = fadd <4 x float> %tmp1044, undef
%tmp1045 = load <4 x float>, <4 x float>* undef, align 16
@@ -6270,94 +6270,94 @@
%add10640 = fadd float %vecext10639, 0x406CA33340000000
%tmp1046 = load <4 x float>, <4 x float>* undef, align 16
%vecins10641 = insertelement <4 x float> %tmp1046, float %add10640, i32 0
- store <4 x float> %vecins10641, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10641, <4 x float>* undef, align 16
%tmp1047 = load <4 x float>, <4 x float>* undef, align 16
%vecext10642 = extractelement <4 x float> %tmp1047, i32 1
%add10643 = fadd float %vecext10642, 0xC07C8999A0000000
%tmp1048 = load <4 x float>, <4 x float>* undef, align 16
%vecins10644 = insertelement <4 x float> %tmp1048, float %add10643, i32 1
- store <4 x float> %vecins10644, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10644, <4 x float>* undef, align 16
%tmp1049 = load <4 x float>, <4 x float>* undef, align 16
%vecext10645 = extractelement <4 x float> %tmp1049, i32 2
%tmp1050 = load <4 x float>, <4 x float>* undef, align 16
%tmp1051 = load <4 x float>, <4 x float>* undef, align 16
- %vecins10748 = insertelement <4 x float> undef, float undef, i32 3
+ %vecins10748 = insertelement <4 x float> undef, float %val, i32 3
%tmp1052 = load <4 x float>, <4 x float>* %.compoundliteral10749
%add10750 = fadd <4 x float> undef, %tmp1052
- store <4 x float> %add10750, <4 x float>* undef, align 16
+ store volatile <4 x float> %add10750, <4 x float>* undef, align 16
%tmp1053 = load <4 x float>, <4 x float>* undef, align 16
%vecext10751 = extractelement <4 x float> %tmp1053, i32 0
%add10752 = fadd float %vecext10751, 0x4071B33340000000
%tmp1054 = load <4 x float>, <4 x float>* undef, align 16
%vecins10753 = insertelement <4 x float> %tmp1054, float %add10752, i32 0
- store <4 x float> %vecins10753, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10753, <4 x float>* undef, align 16
%tmp1055 = load <4 x float>, <4 x float>* undef, align 16
%vecext10754 = extractelement <4 x float> %tmp1055, i32 1
%add10755 = fadd float %vecext10754, 0xC076A66660000000
%tmp1056 = load <4 x float>, <4 x float>* undef, align 16
%vecins10756 = insertelement <4 x float> %tmp1056, float %add10755, i32 1
- store <4 x float> %vecins10756, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10756, <4 x float>* undef, align 16
%tmp1057 = load <4 x float>, <4 x float>* undef, align 16
%vecext10757 = extractelement <4 x float> %tmp1057, i32 2
%add10758 = fadd float %vecext10757, 3.800000e+01
%tmp1058 = load <4 x float>, <4 x float>* undef, align 16
%vecins10759 = insertelement <4 x float> %tmp1058, float %add10758, i32 2
- store <4 x float> %vecins10759, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10759, <4 x float>* undef, align 16
%tmp1059 = load <4 x float>, <4 x float>* undef, align 16
%vecext10760 = extractelement <4 x float> %tmp1059, i32 3
- store <4 x float> undef, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* %.compoundliteral10763
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* %.compoundliteral10763
%tmp1060 = load <4 x float>, <4 x float>* %.compoundliteral10763
%tmp1061 = load <4 x float>, <4 x float>* undef, align 16
%tmp1062 = load <4 x float>, <4 x float>* undef, align 16
- %add10985 = fadd float undef, 0x405E933340000000
+ %add10985 = fadd float %val, 0x405E933340000000
%tmp1063 = load <4 x float>, <4 x float>* undef, align 16
%vecins10986 = insertelement <4 x float> %tmp1063, float %add10985, i32 3
- store <4 x float> %vecins10986, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* %.compoundliteral10987
+ store volatile <4 x float> %vecins10986, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* %.compoundliteral10987
%tmp1064 = load <4 x float>, <4 x float>* %.compoundliteral10987
%tmp1065 = load <4 x float>, <4 x float>* undef, align 16
- %vecins10994 = insertelement <4 x float> %tmp1065, float undef, i32 1
+ %vecins10994 = insertelement <4 x float> %tmp1065, float %val, i32 1
%tmp1066 = load <4 x float>, <4 x float>* undef, align 16
%vecext10995 = extractelement <4 x float> %tmp1066, i32 2
%add10996 = fadd float %vecext10995, 0x406F9999A0000000
%tmp1067 = load <4 x float>, <4 x float>* undef, align 16
%vecins10997 = insertelement <4 x float> %tmp1067, float %add10996, i32 2
- store <4 x float> %vecins10997, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins10997, <4 x float>* undef, align 16
%tmp1068 = load <4 x float>, <4 x float>* undef, align 16
%vecext10998 = extractelement <4 x float> %tmp1068, i32 3
%add10999 = fadd float %vecext10998, -2.765000e+02
%tmp1069 = load <4 x float>, <4 x float>* undef, align 16
%vecins11000 = insertelement <4 x float> %tmp1069, float %add10999, i32 3
- store <4 x float> %vecins11000, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* %.compoundliteral11001
+ store volatile <4 x float> %vecins11000, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* %.compoundliteral11001
%tmp1070 = load <4 x float>, <4 x float>* undef, align 16
%add11002 = fadd <4 x float> %tmp1070, undef
%vecext11003 = extractelement <4 x float> undef, i32 0
%vecext11009 = extractelement <4 x float> undef, i32 2
%tmp1071 = load <4 x float>, <4 x float>* undef, align 16
- %vecins11033 = insertelement <4 x float> %tmp1071, float undef, i32 0
- store <4 x float> %vecins11033, <4 x float>* undef, align 16
+ %vecins11033 = insertelement <4 x float> %tmp1071, float %val, i32 0
+ store volatile <4 x float> %vecins11033, <4 x float>* undef, align 16
%tmp1072 = load <4 x float>, <4 x float>* undef, align 16
%vecext11034 = extractelement <4 x float> %tmp1072, i32 1
%add11035 = fadd float %vecext11034, 0x4056D33340000000
%tmp1073 = load <4 x float>, <4 x float>* undef, align 16
%vecins11036 = insertelement <4 x float> %tmp1073, float %add11035, i32 1
- store <4 x float> %vecins11036, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins11036, <4 x float>* undef, align 16
%tmp1074 = load <4 x float>, <4 x float>* undef, align 16
%vecext11037 = extractelement <4 x float> %tmp1074, i32 2
%add11038 = fadd float %vecext11037, 0xC06EA33340000000
%tmp1075 = load <4 x float>, <4 x float>* undef, align 16
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
%tmp1076 = load <4 x float>, <4 x float>* undef, align 16
%vecext11040 = extractelement <4 x float> %tmp1076, i32 3
%add11041 = fadd float %vecext11040, 0x40746CCCC0000000
%tmp1077 = load <4 x float>, <4 x float>* undef, align 16
%vecins11042 = insertelement <4 x float> %tmp1077, float %add11041, i32 3
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> , <4 x float>* undef
%tmp1078 = load <4 x float>, <4 x float>* undef, align 16
%add11044 = fadd <4 x float> %tmp1078, undef
- store <4 x float> %add11044, <4 x float>* undef, align 16
+ store volatile <4 x float> %add11044, <4 x float>* undef, align 16
%tmp1079 = load <4 x float>, <4 x float>* undef, align 16
%vecext11045 = extractelement <4 x float> %tmp1079, i32 0
%add11046 = fadd float %vecext11045, 0xC076E66660000000
@@ -6366,58 +6366,58 @@
%tmp1081 = load <4 x float>, <4 x float>* undef, align 16
%vecext11048 = extractelement <4 x float> %tmp1081, i32 1
%add11049 = fadd float %vecext11048, 4.100000e+02
- %vecins11064 = insertelement <4 x float> undef, float undef, i32 1
- %add11074 = fadd float undef, 0xC06FF999A0000000
+ %vecins11064 = insertelement <4 x float> undef, float %val, i32 1
+ %add11074 = fadd float %val, 0xC06FF999A0000000
%tmp1082 = load <4 x float>, <4 x float>* undef, align 16
%vecins11075 = insertelement <4 x float> %tmp1082, float %add11074, i32 0
- store <4 x float> %vecins11075, <4 x float>* undef, align 16
- %add11077 = fadd float undef, 0xC075D33340000000
+ store volatile <4 x float> %vecins11075, <4 x float>* undef, align 16
+ %add11077 = fadd float %val, 0xC075D33340000000
%tmp1083 = load <4 x float>, <4 x float>* undef, align 16
%tmp1084 = load <4 x float>, <4 x float>* undef, align 16
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
%tmp1085 = load <4 x float>, <4 x float>* undef, align 16
%vecext11093 = extractelement <4 x float> %tmp1085, i32 2
%add11094 = fadd float %vecext11093, 0xC07CD66660000000
%tmp1086 = load <4 x float>, <4 x float>* undef, align 16
%vecins11095 = insertelement <4 x float> %tmp1086, float %add11094, i32 2
- store <4 x float> %vecins11095, <4 x float>* undef, align 16
- store <4 x float> undef, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> %vecins11095, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* undef
%tmp1087 = load <4 x float>, <4 x float>* undef
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
%tmp1088 = load <4 x float>, <4 x float>* undef, align 16
%vecext11513 = extractelement <4 x float> %tmp1088, i32 2
%add11514 = fadd float %vecext11513, 0xC07C7199A0000000
%vecins11515 = insertelement <4 x float> undef, float %add11514, i32 2
- store <4 x float> %vecins11515, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins11515, <4 x float>* undef, align 16
%add11520 = fadd <4 x float> undef, undef
- store <4 x float> %add11520, <4 x float>* undef, align 16
+ store volatile <4 x float> %add11520, <4 x float>* undef, align 16
%vecext11521 = extractelement <4 x float> undef, i32 0
%add11522 = fadd float %vecext11521, 0x4041733340000000
%tmp1089 = load <4 x float>, <4 x float>* undef, align 16
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
%tmp1090 = load <4 x float>, <4 x float>* undef
%tmp1091 = load <4 x float>, <4 x float>* undef, align 16
%add11562 = fadd <4 x float> %tmp1091, %tmp1090
%tmp1092 = load <4 x float>, <4 x float>* undef, align 16
- %add11564 = fadd float undef, 0xC0411999A0000000
+ %add11564 = fadd float %val, 0xC0411999A0000000
%tmp1093 = load <4 x float>, <4 x float>* undef, align 16
%vecins11565 = insertelement <4 x float> %tmp1093, float %add11564, i32 0
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
%vecext11586 = extractelement <4 x float> undef, i32 3
%add11587 = fadd float %vecext11586, 3.760000e+02
%tmp1094 = load <4 x float>, <4 x float>* undef, align 16
- store <4 x float> undef, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* undef
%tmp1095 = load <4 x float>, <4 x float>* undef
%tmp1096 = load <4 x float>, <4 x float>* undef, align 16
%tmp1097 = load <4 x float>, <4 x float>* undef, align 16
%tmp1098 = load <4 x float>, <4 x float>* undef, align 16
- %vecins11593 = insertelement <4 x float> %tmp1098, float undef, i32 0
+ %vecins11593 = insertelement <4 x float> %tmp1098, float %val, i32 0
%vecext11594 = extractelement <4 x float> undef, i32 1
%tmp1099 = load <4 x float>, <4 x float>* undef, align 16
- %vecins11596 = insertelement <4 x float> %tmp1099, float undef, i32 1
- store <4 x float> %vecins11596, <4 x float>* undef, align 16
+ %vecins11596 = insertelement <4 x float> %tmp1099, float %val, i32 1
+ store volatile <4 x float> %vecins11596, <4 x float>* undef, align 16
%tmp1100 = load <4 x float>, <4 x float>* undef, align 16
%vecext11597 = extractelement <4 x float> %tmp1100, i32 2
%add11598 = fadd float %vecext11597, 0x40430CCCC0000000
@@ -6426,34 +6426,34 @@
%tmp1102 = load <4 x float>, <4 x float>* undef, align 16
%vecext11600 = extractelement <4 x float> %tmp1102, i32 3
%tmp1103 = load <4 x float>, <4 x float>* undef, align 16
- %vecins11602 = insertelement <4 x float> %tmp1103, float undef, i32 3
- store <4 x float> %vecins11602, <4 x float>* undef, align 16
+ %vecins11602 = insertelement <4 x float> %tmp1103, float %val, i32 3
+ store volatile <4 x float> %vecins11602, <4 x float>* undef, align 16
%tmp1104 = load <4 x float>, <4 x float>* undef
%tmp1105 = load <4 x float>, <4 x float>* undef, align 16
%add11604 = fadd <4 x float> %tmp1105, %tmp1104
%tmp1106 = load <4 x float>, <4 x float>* undef, align 16
%vecext11605 = extractelement <4 x float> %tmp1106, i32 0
%tmp1107 = load <4 x float>, <4 x float>* undef, align 16
- %vecins11607 = insertelement <4 x float> %tmp1107, float undef, i32 0
- %vecins11621 = insertelement <4 x float> undef, float undef, i32 0
- %vecins11630 = insertelement <4 x float> undef, float undef, i32 3
- store <4 x float> %vecins11630, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* %.compoundliteral11631
+ %vecins11607 = insertelement <4 x float> %tmp1107, float %val, i32 0
+ %vecins11621 = insertelement <4 x float> undef, float %val, i32 0
+ %vecins11630 = insertelement <4 x float> undef, float %val, i32 3
+ store volatile <4 x float> %vecins11630, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* %.compoundliteral11631
%tmp1108 = load <4 x float>, <4 x float>* %.compoundliteral11631
%tmp1109 = load <4 x float>, <4 x float>* undef, align 16
- store <4 x float> undef, <4 x float>* undef, align 16
- %add11634 = fadd float undef, -1.075000e+02
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
+ %add11634 = fadd float %val, -1.075000e+02
%vecext11647 = extractelement <4 x float> undef, i32 0
%add11648 = fadd float %vecext11647, 0x40775999A0000000
%tmp1110 = load <4 x float>, <4 x float>* undef, align 16
%vecext11650 = extractelement <4 x float> undef, i32 1
%tmp1111 = load <4 x float>, <4 x float>* undef, align 16
- %vecins11784 = insertelement <4 x float> %tmp1111, float undef, i32 3
- store <4 x float> %vecins11784, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* %.compoundliteral11785
+ %vecins11784 = insertelement <4 x float> %tmp1111, float %val, i32 3
+ store volatile <4 x float> %vecins11784, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* %.compoundliteral11785
%tmp1112 = load <4 x float>, <4 x float>* %.compoundliteral11785
%add11786 = fadd <4 x float> undef, %tmp1112
- store <4 x float> %add11786, <4 x float>* undef, align 16
+ store volatile <4 x float> %add11786, <4 x float>* undef, align 16
%tmp1113 = load <4 x float>, <4 x float>* undef, align 16
%vecext11787 = extractelement <4 x float> %tmp1113, i32 0
%vecext11807 = extractelement <4 x float> undef, i32 2
@@ -6463,60 +6463,60 @@
%add11811 = fadd float %vecext11810, 0x4068F66660000000
%tmp1115 = load <4 x float>, <4 x float>* undef, align 16
%vecins11812 = insertelement <4 x float> %tmp1115, float %add11811, i32 3
- store <4 x float> %vecins11812, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins11812, <4 x float>* undef, align 16
%tmp1116 = load <4 x float>, <4 x float>* undef
%tmp1117 = load <4 x float>, <4 x float>* undef, align 16
%vecext11958 = extractelement <4 x float> undef, i32 1
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
%vecext11961 = extractelement <4 x float> undef, i32 2
%add11962 = fadd float %vecext11961, -3.680000e+02
%tmp1118 = load <4 x float>, <4 x float>* undef, align 16
- store <4 x float> undef, <4 x float>* undef, align 16
- %add11965 = fadd float undef, 0x4061133340000000
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
+ %add11965 = fadd float %val, 0x4061133340000000
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
%tmp1119 = load <4 x float>, <4 x float>* undef, align 16
%vecext11975 = extractelement <4 x float> %tmp1119, i32 2
%tmp1120 = load <4 x float>, <4 x float>* undef, align 16
- %vecins11977 = insertelement <4 x float> %tmp1120, float undef, i32 2
- store <4 x float> %vecins11977, <4 x float>* undef, align 16
+ %vecins11977 = insertelement <4 x float> %tmp1120, float %val, i32 2
+ store volatile <4 x float> %vecins11977, <4 x float>* undef, align 16
%vecext11978 = extractelement <4 x float> undef, i32 3
%add11979 = fadd float %vecext11978, 0xC0688999A0000000
%tmp1121 = load <4 x float>, <4 x float>* undef, align 16
%vecins11980 = insertelement <4 x float> %tmp1121, float %add11979, i32 3
- store <4 x float> %vecins11980, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins11980, <4 x float>* undef, align 16
%add11982 = fadd <4 x float> undef, undef
- store <4 x float> %add11982, <4 x float>* undef, align 16
+ store volatile <4 x float> %add11982, <4 x float>* undef, align 16
%tmp1122 = load <4 x float>, <4 x float>* undef, align 16
%vecext11983 = extractelement <4 x float> %tmp1122, i32 0
%add11984 = fadd float %vecext11983, 0xC075966660000000
%tmp1123 = load <4 x float>, <4 x float>* undef, align 16
- %vecins12005 = insertelement <4 x float> undef, float undef, i32 2
- store <4 x float> %vecins12005, <4 x float>* undef, align 16
+ %vecins12005 = insertelement <4 x float> undef, float %val, i32 2
+ store volatile <4 x float> %vecins12005, <4 x float>* undef, align 16
%tmp1124 = load <4 x float>, <4 x float>* undef, align 16
- %add12007 = fadd float undef, 0xC07124CCC0000000
+ %add12007 = fadd float %val, 0xC07124CCC0000000
%vecins12008 = insertelement <4 x float> undef, float %add12007, i32 3
- store <4 x float> %vecins12008, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins12008, <4 x float>* undef, align 16
%tmp1125 = load <4 x float>, <4 x float>* undef, align 16
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
%tmp1126 = load <4 x float>, <4 x float>* undef, align 16
- %add12012 = fadd float undef, 0xC0750CCCC0000000
+ %add12012 = fadd float %val, 0xC0750CCCC0000000
%tmp1127 = load <4 x float>, <4 x float>* undef, align 16
%vecins12013 = insertelement <4 x float> %tmp1127, float %add12012, i32 0
- store <4 x float> %vecins12013, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins12013, <4 x float>* undef, align 16
%tmp1128 = load <4 x float>, <4 x float>* undef, align 16
- %add12015 = fadd float undef, 0x4079CE6660000000
+ %add12015 = fadd float %val, 0x4079CE6660000000
%tmp1129 = load <4 x float>, <4 x float>* undef, align 16
%vecins12016 = insertelement <4 x float> %tmp1129, float %add12015, i32 1
- store <4 x float> %vecins12016, <4 x float>* undef, align 16
- %add12018 = fadd float undef, 3.555000e+02
+ store volatile <4 x float> %vecins12016, <4 x float>* undef, align 16
+ %add12018 = fadd float %val, 3.555000e+02
%tmp1130 = load <4 x float>, <4 x float>* undef, align 16
%vecins12019 = insertelement <4 x float> %tmp1130, float %add12018, i32 2
%tmp1131 = load <4 x float>, <4 x float>* undef, align 16
%vecext12020 = extractelement <4 x float> %tmp1131, i32 3
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
%vecext12028 = extractelement <4 x float> undef, i32 1
- store <4 x float> undef, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* undef
%tmp1132 = load <4 x float>, <4 x float>* undef, align 16
%add12038 = fadd <4 x float> %tmp1132, undef
%tmp1133 = load <4 x float>, <4 x float>* undef, align 16
@@ -6524,27 +6524,27 @@
%add12043 = fadd float %vecext12042, 0x402F9999A0000000
%tmp1134 = load <4 x float>, <4 x float>* undef, align 16
%vecins12044 = insertelement <4 x float> %tmp1134, float %add12043, i32 1
- store <4 x float> %vecins12044, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins12044, <4 x float>* undef, align 16
%vecext12045 = extractelement <4 x float> undef, i32 2
%add12046 = fadd float %vecext12045, 0xC07EF33340000000
%tmp1135 = load <4 x float>, <4 x float>* undef, align 16
%vecins12047 = insertelement <4 x float> %tmp1135, float %add12046, i32 2
- store <4 x float> %vecins12047, <4 x float>* undef, align 16
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins12047, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
%tmp1136 = load <4 x float>, <4 x float>* undef, align 16
%vecext12112 = extractelement <4 x float> %tmp1136, i32 1
%tmp1137 = load <4 x float>, <4 x float>* undef, align 16
- store <4 x float> undef, <4 x float>* undef, align 16
- %add12116 = fadd float undef, 0xC074F4CCC0000000
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
+ %add12116 = fadd float %val, 0xC074F4CCC0000000
%tmp1138 = load <4 x float>, <4 x float>* undef, align 16
%vecins12117 = insertelement <4 x float> %tmp1138, float %add12116, i32 2
- store <4 x float> %vecins12117, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins12117, <4 x float>* undef, align 16
%tmp1139 = load <4 x float>, <4 x float>* undef, align 16
%vecext12118 = extractelement <4 x float> %tmp1139, i32 3
%add12119 = fadd float %vecext12118, 0xC0638CCCC0000000
%tmp1140 = load <4 x float>, <4 x float>* undef, align 16
%vecins12120 = insertelement <4 x float> %tmp1140, float %add12119, i32 3
- %add12152 = fadd float undef, 0x4039333340000000
+ %add12152 = fadd float %val, 0x4039333340000000
%tmp1141 = load <4 x float>, <4 x float>* undef, align 16
%vecins12153 = insertelement <4 x float> %tmp1141, float %add12152, i32 0
%vecext12154 = extractelement <4 x float> undef, i32 1
@@ -6561,67 +6561,67 @@
%add12161 = fadd float %vecext12160, 0x407B1999A0000000
%tmp1146 = load <4 x float>, <4 x float>* undef, align 16
%vecins12162 = insertelement <4 x float> %tmp1146, float %add12161, i32 3
- store <4 x float> %vecins12162, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins12162, <4 x float>* undef, align 16
%tmp1147 = load <4 x float>, <4 x float>* undef
%tmp1148 = load <4 x float>, <4 x float>* undef, align 16
%tmp1149 = load <4 x float>, <4 x float>* undef, align 16
%vecext12182 = extractelement <4 x float> %tmp1149, i32 1
%tmp1150 = load <4 x float>, <4 x float>* undef, align 16
- store <4 x float> undef, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* undef
- %add12208 = fadd float undef, 0x407854CCC0000000
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* undef
+ %add12208 = fadd float %val, 0x407854CCC0000000
%tmp1151 = load <4 x float>, <4 x float>* undef, align 16
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
%tmp1152 = load <4 x float>, <4 x float>* undef, align 16
%tmp1153 = load <4 x float>, <4 x float>* undef, align 16
- %vecins12218 = insertelement <4 x float> undef, float undef, i32 3
- store <4 x float> %vecins12218, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* undef
+ %vecins12218 = insertelement <4 x float> undef, float %val, i32 3
+ store volatile <4 x float> %vecins12218, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* undef
%tmp1154 = load <4 x float>, <4 x float>* undef
%tmp1155 = load <4 x float>, <4 x float>* undef, align 16
%add12220 = fadd <4 x float> %tmp1155, %tmp1154
%tmp1156 = load <4 x float>, <4 x float>* undef, align 16
%tmp1157 = load <4 x float>, <4 x float>* undef, align 16
- %vecins12223 = insertelement <4 x float> %tmp1157, float undef, i32 0
- store <4 x float> %vecins12223, <4 x float>* undef, align 16
+ %vecins12223 = insertelement <4 x float> %tmp1157, float %val, i32 0
+ store volatile <4 x float> %vecins12223, <4 x float>* undef, align 16
%tmp1158 = load <4 x float>, <4 x float>* undef, align 16
- %add12242 = fadd float undef, 0x4067E33340000000
+ %add12242 = fadd float %val, 0x4067E33340000000
%tmp1159 = load <4 x float>, <4 x float>* undef, align 16
%vecins12243 = insertelement <4 x float> %tmp1159, float %add12242, i32 2
- store <4 x float> %vecins12243, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins12243, <4 x float>* undef, align 16
%tmp1160 = load <4 x float>, <4 x float>* undef, align 16
%vecext12244 = extractelement <4 x float> %tmp1160, i32 3
%add12245 = fadd float %vecext12244, 0x4071AE6660000000
%tmp1161 = load <4 x float>, <4 x float>* undef, align 16
%vecins12246 = insertelement <4 x float> %tmp1161, float %add12245, i32 3
- store <4 x float> %vecins12246, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* %.compoundliteral12247
+ store volatile <4 x float> %vecins12246, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* %.compoundliteral12247
%tmp1162 = load <4 x float>, <4 x float>* %.compoundliteral12247
%tmp1163 = load <4 x float>, <4 x float>* undef, align 16
%add12248 = fadd <4 x float> %tmp1163, %tmp1162
- store <4 x float> %add12248, <4 x float>* undef, align 16
+ store volatile <4 x float> %add12248, <4 x float>* undef, align 16
%tmp1164 = load <4 x float>, <4 x float>* undef, align 16
%vecext12249 = extractelement <4 x float> %tmp1164, i32 0
%add12250 = fadd float %vecext12249, 1.075000e+02
%tmp1165 = load <4 x float>, <4 x float>* undef, align 16
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
%tmp1166 = load <4 x float>, <4 x float>* undef, align 16
%vecext12252 = extractelement <4 x float> %tmp1166, i32 1
%add12253 = fadd float %vecext12252, 0xC0662CCCC0000000
%tmp1167 = load <4 x float>, <4 x float>* undef, align 16
%vecins12254 = insertelement <4 x float> %tmp1167, float %add12253, i32 1
- store <4 x float> %vecins12254, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins12254, <4 x float>* undef, align 16
%tmp1168 = load <4 x float>, <4 x float>* undef, align 16
%vecext12255 = extractelement <4 x float> %tmp1168, i32 2
%add12256 = fadd float %vecext12255, 0x40554CCCC0000000
- store <4 x float> undef, <4 x float>* undef, align 16
- %add13141 = fadd float undef, 0x40768999A0000000
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
+ %add13141 = fadd float %val, 0x40768999A0000000
%tmp1169 = load <4 x float>, <4 x float>* undef, align 16
%vecins13142 = insertelement <4 x float> %tmp1169, float %add13141, i32 3
- store <4 x float> %vecins13142, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins13142, <4 x float>* undef, align 16
%tmp1170 = load <4 x float>, <4 x float>* undef
%add13144 = fadd <4 x float> undef, %tmp1170
- store <4 x float> %add13144, <4 x float>* undef, align 16
+ store volatile <4 x float> %add13144, <4 x float>* undef, align 16
%tmp1171 = load <4 x float>, <4 x float>* undef, align 16
%vecext13145 = extractelement <4 x float> %tmp1171, i32 0
%add13146 = fadd float %vecext13145, 3.975000e+02
@@ -6630,137 +6630,137 @@
%add13379 = fadd float %vecext13378, 0xC053B33340000000
%tmp1173 = load <4 x float>, <4 x float>* undef, align 16
%vecins13380 = insertelement <4 x float> %tmp1173, float %add13379, i32 3
- store <4 x float> %vecins13380, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins13380, <4 x float>* undef, align 16
%tmp1174 = load <4 x float>, <4 x float>* undef, align 16
- %vecins13408 = insertelement <4 x float> %tmp1174, float undef, i32 3
- store <4 x float> %vecins13408, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* undef
+ %vecins13408 = insertelement <4 x float> %tmp1174, float %val, i32 3
+ store volatile <4 x float> %vecins13408, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* undef
%tmp1175 = load <4 x float>, <4 x float>* undef
%tmp1176 = load <4 x float>, <4 x float>* undef, align 16
%add13410 = fadd <4 x float> %tmp1176, %tmp1175
- store <4 x float> %add13410, <4 x float>* undef, align 16
+ store volatile <4 x float> %add13410, <4 x float>* undef, align 16
%tmp1177 = load <4 x float>, <4 x float>* undef, align 16
- %add13412 = fadd float undef, 0xC0708999A0000000
+ %add13412 = fadd float %val, 0xC0708999A0000000
%tmp1178 = load <4 x float>, <4 x float>* undef, align 16
%vecins13413 = insertelement <4 x float> %tmp1178, float %add13412, i32 0
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
%vecext13428 = extractelement <4 x float> undef, i32 1
%add13429 = fadd float %vecext13428, 0xC063BCCCC0000000
%tmp1179 = load <4 x float>, <4 x float>* undef, align 16
%vecins13430 = insertelement <4 x float> %tmp1179, float %add13429, i32 1
- store <4 x float> %vecins13430, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins13430, <4 x float>* undef, align 16
%tmp1180 = load <4 x float>, <4 x float>* undef, align 16
%vecext13431 = extractelement <4 x float> %tmp1180, i32 2
- %vecins13433 = insertelement <4 x float> undef, float undef, i32 2
- store <4 x float> undef, <4 x float>* undef, align 16
- %add13449 = fadd float undef, 4.590000e+02
+ %vecins13433 = insertelement <4 x float> undef, float %val, i32 2
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
+ %add13449 = fadd float %val, 4.590000e+02
%tmp1181 = load <4 x float>, <4 x float>* undef, align 16
%vecins13450 = insertelement <4 x float> %tmp1181, float %add13449, i32 3
- store <4 x float> %vecins13450, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> %vecins13450, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* undef
%tmp1182 = load <4 x float>, <4 x float>* undef
%tmp1183 = load <4 x float>, <4 x float>* undef, align 16
%add13452 = fadd <4 x float> %tmp1183, %tmp1182
- store <4 x float> %add13452, <4 x float>* undef, align 16
+ store volatile <4 x float> %add13452, <4 x float>* undef, align 16
%tmp1184 = load <4 x float>, <4 x float>* undef, align 16
%vecext13453 = extractelement <4 x float> %tmp1184, i32 0
%add13454 = fadd float %vecext13453, 0xC072866660000000
%tmp1185 = load <4 x float>, <4 x float>* undef, align 16
%vecins13455 = insertelement <4 x float> %tmp1185, float %add13454, i32 0
- %add13471 = fadd float undef, 0xC0556CCCC0000000
+ %add13471 = fadd float %val, 0xC0556CCCC0000000
%tmp1186 = load <4 x float>, <4 x float>* undef, align 16
%vecins13472 = insertelement <4 x float> %tmp1186, float %add13471, i32 1
- store <4 x float> %vecins13472, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins13472, <4 x float>* undef, align 16
%tmp1187 = load <4 x float>, <4 x float>* undef, align 16
%vecext13473 = extractelement <4 x float> %tmp1187, i32 2
%add13474 = fadd float %vecext13473, 0xC0786999A0000000
%tmp1188 = load <4 x float>, <4 x float>* undef, align 16
%vecins13475 = insertelement <4 x float> %tmp1188, float %add13474, i32 2
- store <4 x float> %vecins13475, <4 x float>* undef, align 16
- %add13477 = fadd float undef, 0xC07C3E6660000000
+ store volatile <4 x float> %vecins13475, <4 x float>* undef, align 16
+ %add13477 = fadd float %val, 0xC07C3E6660000000
%tmp1189 = load <4 x float>, <4 x float>* undef, align 16
%vecins13478 = insertelement <4 x float> %tmp1189, float %add13477, i32 3
- store <4 x float> %vecins13478, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> %vecins13478, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* undef
%tmp1190 = load <4 x float>, <4 x float>* undef, align 16
%add13480 = fadd <4 x float> %tmp1190, undef
- store <4 x float> %add13480, <4 x float>* undef, align 16
+ store volatile <4 x float> %add13480, <4 x float>* undef, align 16
%tmp1191 = load <4 x float>, <4 x float>* undef, align 16
%vecext13481 = extractelement <4 x float> %tmp1191, i32 0
%add13482 = fadd float %vecext13481, 0xC07BA4CCC0000000
%tmp1192 = load <4 x float>, <4 x float>* undef, align 16
%vecins13483 = insertelement <4 x float> %tmp1192, float %add13482, i32 0
- store <4 x float> %vecins13483, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins13483, <4 x float>* undef, align 16
%tmp1193 = load <4 x float>, <4 x float>* undef, align 16
- %add13485 = fadd float undef, 0x406B1999A0000000
+ %add13485 = fadd float %val, 0x406B1999A0000000
%tmp1194 = load <4 x float>, <4 x float>* undef, align 16
%vecins13486 = insertelement <4 x float> %tmp1194, float %add13485, i32 1
- store <4 x float> %vecins13486, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins13486, <4 x float>* undef, align 16
%tmp1195 = load <4 x float>, <4 x float>* undef, align 16
%vecext13487 = extractelement <4 x float> %tmp1195, i32 2
%add13488 = fadd float %vecext13487, 0x40647999A0000000
%tmp1196 = load <4 x float>, <4 x float>* undef, align 16
%vecins13489 = insertelement <4 x float> %tmp1196, float %add13488, i32 2
- store <4 x float> %vecins13489, <4 x float>* undef, align 16
+ store volatile <4 x float> %vecins13489, <4 x float>* undef, align 16
%tmp1197 = load <4 x float>, <4 x float>* undef, align 16
%vecext13490 = extractelement <4 x float> %tmp1197, i32 3
%tmp1198 = load <4 x float>, <4 x float>* undef, align 16
- %vecins13492 = insertelement <4 x float> %tmp1198, float undef, i32 3
- store <4 x float> %vecins13492, <4 x float>* undef, align 16
+ %vecins13492 = insertelement <4 x float> %tmp1198, float %val, i32 3
+ store volatile <4 x float> %vecins13492, <4 x float>* undef, align 16
%tmp1199 = load <4 x float>, <4 x float>* %.compoundliteral13493
%tmp1200 = load <4 x float>, <4 x float>* undef, align 16
- store <4 x float> undef, <4 x float>* undef, align 16
- %vecins13548 = insertelement <4 x float> undef, float undef, i32 3
- store <4 x float> , <4 x float>* %.compoundliteral13549
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
+ %vecins13548 = insertelement <4 x float> undef, float %val, i32 3
+ store volatile <4 x float> , <4 x float>* %.compoundliteral13549
%tmp1201 = load <4 x float>, <4 x float>* undef, align 16
- %add13552 = fadd float undef, 3.230000e+02
+ %add13552 = fadd float %val, 3.230000e+02
%tmp1202 = load <4 x float>, <4 x float>* undef, align 16
%vecins13553 = insertelement <4 x float> %tmp1202, float %add13552, i32 0
%tmp1203 = load <4 x float>, <4 x float>* undef, align 16
%vecext13554 = extractelement <4 x float> %tmp1203, i32 1
%tmp1204 = load <4 x float>, <4 x float>* undef, align 16
- %vecins13556 = insertelement <4 x float> %tmp1204, float undef, i32 1
- store <4 x float> %vecins13556, <4 x float>* undef, align 16
+ %vecins13556 = insertelement <4 x float> %tmp1204, float %val, i32 1
+ store volatile <4 x float> %vecins13556, <4 x float>* undef, align 16
%tmp1205 = load <4 x float>, <4 x float>* undef, align 16
- %add13558 = fadd float undef, 2.625000e+02
+ %add13558 = fadd float %val, 2.625000e+02
%tmp1206 = load <4 x float>, <4 x float>* undef, align 16
%vecins13559 = insertelement <4 x float> %tmp1206, float %add13558, i32 2
- store <4 x float> %vecins13559, <4 x float>* undef, align 16
- %add13575 = fadd float undef, -4.725000e+02
+ store volatile <4 x float> %vecins13559, <4 x float>* undef, align 16
+ %add13575 = fadd float %val, -4.725000e+02
%tmp1207 = load <4 x float>, <4 x float>* undef, align 16
%vecins13576 = insertelement <4 x float> %tmp1207, float %add13575, i32 3
- store <4 x float> %vecins13576, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* undef
+ store volatile <4 x float> %vecins13576, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* undef
%tmp1208 = load <4 x float>, <4 x float>* undef
%tmp1209 = load <4 x float>, <4 x float>* undef, align 16
%add13578 = fadd <4 x float> %tmp1209, %tmp1208
- store <4 x float> %add13578, <4 x float>* undef, align 16
+ store volatile <4 x float> %add13578, <4 x float>* undef, align 16
%tmp1210 = load <4 x float>, <4 x float>* undef, align 16
%tmp1211 = load <4 x float>, <4 x float>* undef, align 16
%add13592 = fadd <4 x float> %tmp1211, undef
- store <4 x float> %add13592, <4 x float>* undef, align 16
+ store volatile <4 x float> %add13592, <4 x float>* undef, align 16
%tmp1212 = load <4 x float>, <4 x float>* undef, align 16
%vecext13593 = extractelement <4 x float> %tmp1212, i32 0
%add13594 = fadd float %vecext13593, 0xC0708B3340000000
%tmp1213 = load <4 x float>, <4 x float>* undef, align 16
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
%tmp1214 = load <4 x float>, <4 x float>* undef, align 16
%vecext13596 = extractelement <4 x float> %tmp1214, i32 1
%add13597 = fadd float %vecext13596, 0x40660999A0000000
- %vecins13604 = insertelement <4 x float> undef, float undef, i32 3
- store <4 x float> %vecins13604, <4 x float>* undef, align 16
- store <4 x float> , <4 x float>* undef
+ %vecins13604 = insertelement <4 x float> undef, float %val, i32 3
+ store volatile <4 x float> %vecins13604, <4 x float>* undef, align 16
+ store volatile <4 x float> , <4 x float>* undef
%tmp1215 = load <4 x float>, <4 x float>* undef, align 16
%add13606 = fadd <4 x float> %tmp1215, undef
%tmp1216 = load <4 x float>, <4 x float>* undef, align 16
%vecext13607 = extractelement <4 x float> %tmp1216, i32 0
- %vecins13609 = insertelement <4 x float> undef, float undef, i32 0
+ %vecins13609 = insertelement <4 x float> undef, float %val, i32 0
%tmp1217 = load <4 x float>, <4 x float>* undef, align 16
- store <4 x float> undef, <4 x float>* undef, align 16
+ store volatile <4 x float> undef, <4 x float>* undef, align 16
%tmp1218 = load <4 x float>, <4 x float>* undef, align
16 - %add13622 = fadd float undef, -3.390000e+02 + %add13622 = fadd float %val, -3.390000e+02 %vecins13623 = insertelement <4 x float> undef, float %add13622, i32 0 - store <4 x float> %vecins13623, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins13623, <4 x float>* undef, align 16 %tmp1219 = load <4 x float>, <4 x float>* undef, align 16 %vecext13624 = extractelement <4 x float> %tmp1219, i32 1 %add13625 = fadd float %vecext13624, 0x405C3999A0000000 @@ -6772,41 +6772,41 @@ %add13631 = fadd float %vecext13630, 0xC060333340000000 %tmp1222 = load <4 x float>, <4 x float>* undef, align 16 %vecins13632 = insertelement <4 x float> %tmp1222, float %add13631, i32 3 - store <4 x float> %vecins13632, <4 x float>* undef, align 16 - store <4 x float> , <4 x float>* undef + store volatile <4 x float> %vecins13632, <4 x float>* undef, align 16 + store volatile <4 x float> , <4 x float>* undef %tmp1223 = load <4 x float>, <4 x float>* undef %tmp1224 = load <4 x float>, <4 x float>* undef, align 16 %add13634 = fadd <4 x float> %tmp1224, %tmp1223 - store <4 x float> %add13634, <4 x float>* undef, align 16 + store volatile <4 x float> %add13634, <4 x float>* undef, align 16 %vecext13635 = extractelement <4 x float> undef, i32 0 %add13636 = fadd float %vecext13635, 0x406A5999A0000000 %tmp1225 = load <4 x float>, <4 x float>* undef, align 16 %vecins13637 = insertelement <4 x float> %tmp1225, float %add13636, i32 0 - store <4 x float> %vecins13637, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins13637, <4 x float>* undef, align 16 %tmp1226 = load <4 x float>, <4 x float>* undef, align 16 %tmp1227 = load <4 x float>, <4 x float>* undef, align 16 - %vecins13643 = insertelement <4 x float> %tmp1227, float undef, i32 2 - store <4 x float> undef, <4 x float>* undef, align 16 + %vecins13643 = insertelement <4 x float> %tmp1227, float %val, i32 2 + store volatile <4 x float> undef, <4 x float>* undef, align 16 %tmp1228 = load <4 x float>, <4 x float>* undef, align 16 - %add13785 = fadd float undef, 0x4068866660000000 + %add13785 = fadd float %val, 0x4068866660000000 %tmp1229 = load <4 x float>, <4 x float>* undef, align 16 %vecins13786 = insertelement <4 x float> %tmp1229, float %add13785, i32 3 - store <4 x float> %vecins13786, <4 x float>* undef, align 16 - store <4 x float> , <4 x float>* %.compoundliteral13787 + store volatile <4 x float> %vecins13786, <4 x float>* undef, align 16 + store volatile <4 x float> , <4 x float>* %.compoundliteral13787 %tmp1230 = load <4 x float>, <4 x float>* undef, align 16 %add13788 = fadd <4 x float> %tmp1230, undef %tmp1231 = load <4 x float>, <4 x float>* undef %tmp1232 = load <4 x float>, <4 x float>* undef, align 16 %add13802 = fadd <4 x float> %tmp1232, %tmp1231 - store <4 x float> %add13802, <4 x float>* undef, align 16 + store volatile <4 x float> %add13802, <4 x float>* undef, align 16 %tmp1233 = load <4 x float>, <4 x float>* undef, align 16 %vecext13803 = extractelement <4 x float> %tmp1233, i32 0 %add13804 = fadd float %vecext13803, -2.900000e+01 %tmp1234 = load <4 x float>, <4 x float>* undef, align 16 %vecins13805 = insertelement <4 x float> %tmp1234, float %add13804, i32 0 - store <4 x float> %vecins13805, <4 x float>* undef, align 16 + store volatile <4 x float> %vecins13805, <4 x float>* undef, align 16 %tmp1235 = load <4 x float>, <4 x float>* undef, align 16 - %add13807 = fadd float undef, 6.400000e+01 + %add13807 = fadd float %val, 6.400000e+01 %tmp1236 = load <4 x float>, <4 x float>* undef, align 16 %tmp1237 = load <4 x 
float>, <4 x float>* undef, align 16 %vecext13809 = extractelement <4 x float> %tmp1237, i32 2 @@ -6814,28 +6814,28 @@ %vecext13812 = extractelement <4 x float> %tmp1238, i32 3 %add13813 = fadd float %vecext13812, -3.615000e+02 %vecins13814 = insertelement <4 x float> undef, float %add13813, i32 3 - store <4 x float> %vecins13814, <4 x float>* undef, align 16 - store <4 x float> , <4 x float>* undef + store volatile <4 x float> %vecins13814, <4 x float>* undef, align 16 + store volatile <4 x float> , <4 x float>* undef %tmp1239 = load <4 x float>, <4 x float>* undef - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 %tmp1240 = load <4 x float>, <4 x float>* undef, align 16 %vecext13817 = extractelement <4 x float> %tmp1240, i32 0 - %vecins13856 = insertelement <4 x float> undef, float undef, i32 3 - store <4 x float> %vecins13856, <4 x float>* undef, align 16 - store <4 x float> , <4 x float>* undef + %vecins13856 = insertelement <4 x float> undef, float %val, i32 3 + store volatile <4 x float> %vecins13856, <4 x float>* undef, align 16 + store volatile <4 x float> , <4 x float>* undef %tmp1241 = load <4 x float>, <4 x float>* undef %tmp1242 = load <4 x float>, <4 x float>* undef, align 16 - store <4 x float> undef, <4 x float>* undef, align 16 + store volatile <4 x float> undef, <4 x float>* undef, align 16 %tmp1243 = load <4 x float>, <4 x float>* undef, align 16 %vecext13859 = extractelement <4 x float> %tmp1243, i32 0 %tmp1244 = load <4 x float>, <4 x float>* undef, align 16 - %vecins13861 = insertelement <4 x float> %tmp1244, float undef, i32 0 + %vecins13861 = insertelement <4 x float> %tmp1244, float %val, i32 0 %tmp1245 = load <4 x float>, <4 x float>* undef, align 16 %vecext13862 = extractelement <4 x float> %tmp1245, i32 1 %add13863 = fadd float %vecext13862, -1.380000e+02 %vecins13864 = insertelement <4 x float> undef, float %add13863, i32 1 - %vecins13867 = insertelement <4 x float> undef, float undef, i32 2 - store <4 x float> %vecins13867, <4 x float>* undef, align 16 + %vecins13867 = insertelement <4 x float> undef, float %val, i32 2 + store volatile <4 x float> %vecins13867, <4 x float>* undef, align 16 %tmp1246 = load <4 x float>, <4 x float>* undef, align 16 %tmp1247 = load <4 x float>, <4 x float>* undef, align 16 ret <4 x float> undef Index: test/CodeGen/Mips/Fast-ISel/callabi.ll =================================================================== --- test/CodeGen/Mips/Fast-ISel/callabi.ll +++ test/CodeGen/Mips/Fast-ISel/callabi.ll @@ -244,12 +244,12 @@ ; ALL-DAG: lw $[[REG_C1_ADDR:[0-9]+]], %got(c1)($[[REG_GP]]) ; ALL-DAG: lbu $[[REG_C1:[0-9]+]], 0($[[REG_C1_ADDR]]) ; 32R1-DAG: sll $[[REG_C1_1:[0-9]+]], $[[REG_C1]], 24 - ; 32R1-DAG: sra $4, $[[REG_C1_1]], 24 - ; 32R2-DAG: seb $4, $[[REG_C1]] + ; 32R1-DAG: sra $5, $[[REG_C1_1]], 24 + ; 32R2-DAG: seb $5, $[[REG_C1]] ; FIXME: andi is superfulous ; ALL-DAG: lw $[[REG_UC1_ADDR:[0-9]+]], %got(uc1)($[[REG_GP]]) ; ALL-DAG: lbu $[[REG_UC1:[0-9]+]], 0($[[REG_UC1_ADDR]]) - ; ALL-DAG: andi $5, $[[REG_UC1]], 255 + ; ALL-DAG: andi $4, $[[REG_UC1]], 255 ; ALL-DAG: lw $[[REG_S1_ADDR:[0-9]+]], %got(s1)($[[REG_GP]]) ; ALL-DAG: lhu $[[REG_S1:[0-9]+]], 0($[[REG_S1_ADDR]]) ; 32R1-DAG: sll $[[REG_S1_1:[0-9]+]], $[[REG_S1]], 16 Index: test/CodeGen/Mips/Fast-ISel/pr40325.ll =================================================================== --- test/CodeGen/Mips/Fast-ISel/pr40325.ll +++ test/CodeGen/Mips/Fast-ISel/pr40325.ll @@ -5,8 +5,8 @@ ; CHECK-LABEL: test: ; CHECK: # 
%bb.0: ; CHECK-NEXT: move $1, $4 -; CHECK-NEXT: andi $4, $4, 1 -; CHECK-NEXT: sb $4, 0($5) +; CHECK-NEXT: andi $2, $4, 1 +; CHECK-NEXT: sb $2, 0($5) ; CHECK-NEXT: andi $1, $1, 1 ; CHECK-NEXT: bgtz $1, $BB0_1 ; CHECK-NEXT: nop Index: test/CodeGen/Mips/GlobalISel/llvm-ir/add.ll =================================================================== --- test/CodeGen/Mips/GlobalISel/llvm-ir/add.ll +++ test/CodeGen/Mips/GlobalISel/llvm-ir/add.ll @@ -14,9 +14,9 @@ define signext i8 @add_i8_sext(i8 signext %a, i8 signext %b) { ; MIPS32-LABEL: add_i8_sext: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: addu $4, $5, $4 -; MIPS32-NEXT: sll $4, $4, 24 -; MIPS32-NEXT: sra $2, $4, 24 +; MIPS32-NEXT: addu $1, $5, $4 +; MIPS32-NEXT: sll $1, $1, 24 +; MIPS32-NEXT: sra $2, $1, 24 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -27,10 +27,10 @@ define zeroext i8 @add_i8_zext(i8 zeroext %a, i8 zeroext %b) { ; MIPS32-LABEL: add_i8_zext: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: addu $4, $5, $4 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 255 -; MIPS32-NEXT: and $2, $4, $5 +; MIPS32-NEXT: addu $1, $5, $4 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 255 +; MIPS32-NEXT: and $2, $1, $2 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -52,9 +52,9 @@ define signext i16 @add_i16_sext(i16 signext %a, i16 signext %b) { ; MIPS32-LABEL: add_i16_sext: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: addu $4, $5, $4 -; MIPS32-NEXT: sll $4, $4, 16 -; MIPS32-NEXT: sra $2, $4, 16 +; MIPS32-NEXT: addu $1, $5, $4 +; MIPS32-NEXT: sll $1, $1, 16 +; MIPS32-NEXT: sra $2, $1, 16 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -65,10 +65,10 @@ define zeroext i16 @add_i16_zext(i16 zeroext %a, i16 zeroext %b) { ; MIPS32-LABEL: add_i16_zext: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: addu $4, $5, $4 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 65535 -; MIPS32-NEXT: and $2, $4, $5 +; MIPS32-NEXT: addu $1, $5, $4 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 65535 +; MIPS32-NEXT: and $2, $1, $2 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -92,17 +92,17 @@ ; MIPS32: # %bb.0: # %entry ; MIPS32-NEXT: lui $1, 0 ; MIPS32-NEXT: ori $1, $1, 0 -; MIPS32-NEXT: addu $4, $6, $4 -; MIPS32-NEXT: lui $2, 0 -; MIPS32-NEXT: ori $2, $2, 1 -; MIPS32-NEXT: and $1, $1, $2 -; MIPS32-NEXT: addu $1, $4, $1 +; MIPS32-NEXT: addu $2, $6, $4 +; MIPS32-NEXT: lui $3, 0 +; MIPS32-NEXT: ori $3, $3, 1 +; MIPS32-NEXT: and $1, $1, $3 +; MIPS32-NEXT: addu $1, $2, $1 ; MIPS32-NEXT: sltu $2, $1, $6 -; MIPS32-NEXT: addu $4, $7, $5 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 1 -; MIPS32-NEXT: and $2, $2, $5 -; MIPS32-NEXT: addu $3, $4, $2 +; MIPS32-NEXT: addu $3, $7, $5 +; MIPS32-NEXT: lui $4, 0 +; MIPS32-NEXT: ori $4, $4, 1 +; MIPS32-NEXT: and $2, $2, $4 +; MIPS32-NEXT: addu $3, $3, $2 ; MIPS32-NEXT: move $2, $1 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop @@ -165,13 +165,13 @@ define void @uadd_with_overflow(i32 %lhs, i32 %rhs, i32* %padd, i1* %pcarry_flag) { ; MIPS32-LABEL: uadd_with_overflow: ; MIPS32: # %bb.0: -; MIPS32-NEXT: addu $4, $4, $5 -; MIPS32-NEXT: sltu $5, $4, $5 -; MIPS32-NEXT: lui $1, 0 -; MIPS32-NEXT: ori $1, $1, 1 -; MIPS32-NEXT: and $1, $5, $1 -; MIPS32-NEXT: sb $1, 0($7) -; MIPS32-NEXT: sw $4, 0($6) +; MIPS32-NEXT: addu $1, $4, $5 +; MIPS32-NEXT: sltu $2, $1, $5 +; MIPS32-NEXT: lui $3, 0 +; MIPS32-NEXT: ori $3, $3, 1 +; MIPS32-NEXT: and $2, $2, $3 +; MIPS32-NEXT: sb $2, 0($7) +; MIPS32-NEXT: sw $1, 0($6) ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %lhs, i32 
%rhs) Index: test/CodeGen/Mips/GlobalISel/llvm-ir/icmp.ll =================================================================== --- test/CodeGen/Mips/GlobalISel/llvm-ir/icmp.ll +++ test/CodeGen/Mips/GlobalISel/llvm-ir/icmp.ll @@ -4,11 +4,11 @@ define i32 @eq(i32 %a, i32 %b){ ; MIPS32-LABEL: eq: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: xor $4, $4, $5 -; MIPS32-NEXT: sltiu $4, $4, 1 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 1 -; MIPS32-NEXT: and $2, $4, $5 +; MIPS32-NEXT: xor $1, $4, $5 +; MIPS32-NEXT: sltiu $1, $1, 1 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 1 +; MIPS32-NEXT: and $2, $1, $2 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -20,11 +20,11 @@ define i32 @ne(i32 %a, i32 %b) { ; MIPS32-LABEL: ne: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: xor $4, $4, $5 -; MIPS32-NEXT: sltu $4, $zero, $4 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 1 -; MIPS32-NEXT: and $2, $4, $5 +; MIPS32-NEXT: xor $1, $4, $5 +; MIPS32-NEXT: sltu $1, $zero, $1 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 1 +; MIPS32-NEXT: and $2, $1, $2 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -36,10 +36,10 @@ define i32 @sgt(i32 %a, i32 %b) { ; MIPS32-LABEL: sgt: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: slt $4, $5, $4 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 1 -; MIPS32-NEXT: and $2, $4, $5 +; MIPS32-NEXT: slt $1, $5, $4 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 1 +; MIPS32-NEXT: and $2, $1, $2 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -51,11 +51,11 @@ define i32 @sge(i32 %a, i32 %b) { ; MIPS32-LABEL: sge: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: slt $4, $4, $5 -; MIPS32-NEXT: xori $4, $4, 1 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 1 -; MIPS32-NEXT: and $2, $4, $5 +; MIPS32-NEXT: slt $1, $4, $5 +; MIPS32-NEXT: xori $1, $1, 1 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 1 +; MIPS32-NEXT: and $2, $1, $2 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -67,10 +67,10 @@ define i32 @slt(i32 %a, i32 %b) { ; MIPS32-LABEL: slt: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: slt $4, $4, $5 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 1 -; MIPS32-NEXT: and $2, $4, $5 +; MIPS32-NEXT: slt $1, $4, $5 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 1 +; MIPS32-NEXT: and $2, $1, $2 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -82,11 +82,11 @@ define i32 @sle(i32 %a, i32 %b) { ; MIPS32-LABEL: sle: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: slt $4, $5, $4 -; MIPS32-NEXT: xori $4, $4, 1 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 1 -; MIPS32-NEXT: and $2, $4, $5 +; MIPS32-NEXT: slt $1, $5, $4 +; MIPS32-NEXT: xori $1, $1, 1 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 1 +; MIPS32-NEXT: and $2, $1, $2 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -98,10 +98,10 @@ define i32 @ugt(i32 %a, i32 %b) { ; MIPS32-LABEL: ugt: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: sltu $4, $5, $4 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 1 -; MIPS32-NEXT: and $2, $4, $5 +; MIPS32-NEXT: sltu $1, $5, $4 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 1 +; MIPS32-NEXT: and $2, $1, $2 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -113,11 +113,11 @@ define i32 @uge(i32 %a, i32 %b) { ; MIPS32-LABEL: uge: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: sltu $4, $4, $5 -; MIPS32-NEXT: xori $4, $4, 1 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 1 -; MIPS32-NEXT: and $2, $4, $5 +; MIPS32-NEXT: sltu $1, $4, $5 +; MIPS32-NEXT: xori $1, $1, 1 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, 
$2, 1 +; MIPS32-NEXT: and $2, $1, $2 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -129,10 +129,10 @@ define i32 @ult(i32 %a, i32 %b) { ; MIPS32-LABEL: ult: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: sltu $4, $4, $5 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 1 -; MIPS32-NEXT: and $2, $4, $5 +; MIPS32-NEXT: sltu $1, $4, $5 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 1 +; MIPS32-NEXT: and $2, $1, $2 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -144,11 +144,11 @@ define i32 @ule(i32 %a, i32 %b) { ; MIPS32-LABEL: ule: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: sltu $4, $5, $4 -; MIPS32-NEXT: xori $4, $4, 1 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 1 -; MIPS32-NEXT: and $2, $4, $5 +; MIPS32-NEXT: sltu $1, $5, $4 +; MIPS32-NEXT: xori $1, $1, 1 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 1 +; MIPS32-NEXT: and $2, $1, $2 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: Index: test/CodeGen/Mips/GlobalISel/llvm-ir/mul.ll =================================================================== --- test/CodeGen/Mips/GlobalISel/llvm-ir/mul.ll +++ test/CodeGen/Mips/GlobalISel/llvm-ir/mul.ll @@ -14,9 +14,9 @@ define signext i8 @mul_i8_sext(i8 signext %a, i8 signext %b) { ; MIPS32-LABEL: mul_i8_sext: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: mul $4, $5, $4 -; MIPS32-NEXT: sll $4, $4, 24 -; MIPS32-NEXT: sra $2, $4, 24 +; MIPS32-NEXT: mul $1, $5, $4 +; MIPS32-NEXT: sll $1, $1, 24 +; MIPS32-NEXT: sra $2, $1, 24 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -27,10 +27,10 @@ define zeroext i8 @mul_i8_zext(i8 zeroext %a, i8 zeroext %b) { ; MIPS32-LABEL: mul_i8_zext: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: mul $4, $5, $4 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 255 -; MIPS32-NEXT: and $2, $4, $5 +; MIPS32-NEXT: mul $1, $5, $4 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 255 +; MIPS32-NEXT: and $2, $1, $2 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -52,9 +52,9 @@ define signext i16 @mul_i16_sext(i16 signext %a, i16 signext %b) { ; MIPS32-LABEL: mul_i16_sext: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: mul $4, $5, $4 -; MIPS32-NEXT: sll $4, $4, 16 -; MIPS32-NEXT: sra $2, $4, 16 +; MIPS32-NEXT: mul $1, $5, $4 +; MIPS32-NEXT: sll $1, $1, 16 +; MIPS32-NEXT: sra $2, $1, 16 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -65,10 +65,10 @@ define zeroext i16 @mul_i16_zext(i16 zeroext %a, i16 zeroext %b) { ; MIPS32-LABEL: mul_i16_zext: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: mul $4, $5, $4 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 65535 -; MIPS32-NEXT: and $2, $4, $5 +; MIPS32-NEXT: mul $1, $5, $4 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 65535 +; MIPS32-NEXT: and $2, $1, $2 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -91,12 +91,12 @@ ; MIPS32-LABEL: mul_i64: ; MIPS32: # %bb.0: # %entry ; MIPS32-NEXT: mul $2, $6, $4 -; MIPS32-NEXT: mul $7, $7, $4 -; MIPS32-NEXT: mul $5, $6, $5 +; MIPS32-NEXT: mul $1, $7, $4 +; MIPS32-NEXT: mul $3, $6, $5 ; MIPS32-NEXT: multu $6, $4 ; MIPS32-NEXT: mfhi $4 -; MIPS32-NEXT: addu $5, $7, $5 -; MIPS32-NEXT: addu $3, $5, $4 +; MIPS32-NEXT: addu $1, $1, $3 +; MIPS32-NEXT: addu $3, $1, $4 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -200,15 +200,15 @@ ; MIPS32: # %bb.0: ; MIPS32-NEXT: mul $1, $4, $5 ; MIPS32-NEXT: multu $4, $5 -; MIPS32-NEXT: mfhi $4 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 0 -; MIPS32-NEXT: xor $4, $4, $5 -; MIPS32-NEXT: sltu $4, $zero, $4 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 1 -; MIPS32-NEXT: and $4, $4, $5 -; 
MIPS32-NEXT: sb $4, 0($7) +; MIPS32-NEXT: mfhi $2 +; MIPS32-NEXT: lui $3, 0 +; MIPS32-NEXT: ori $3, $3, 0 +; MIPS32-NEXT: xor $2, $2, $3 +; MIPS32-NEXT: sltu $2, $zero, $2 +; MIPS32-NEXT: lui $3, 0 +; MIPS32-NEXT: ori $3, $3, 1 +; MIPS32-NEXT: and $2, $2, $3 +; MIPS32-NEXT: sb $2, 0($7) ; MIPS32-NEXT: sw $1, 0($6) ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop Index: test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div.ll =================================================================== --- test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div.ll +++ test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div.ll @@ -5,15 +5,15 @@ define signext i8 @sdiv_i8(i8 signext %a, i8 signext %b) { ; MIPS32-LABEL: sdiv_i8: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: sll $5, $5, 24 -; MIPS32-NEXT: sra $5, $5, 24 -; MIPS32-NEXT: sll $4, $4, 24 -; MIPS32-NEXT: sra $4, $4, 24 -; MIPS32-NEXT: div $zero, $5, $4 -; MIPS32-NEXT: teq $4, $zero, 7 -; MIPS32-NEXT: mflo $4 -; MIPS32-NEXT: sll $4, $4, 24 -; MIPS32-NEXT: sra $2, $4, 24 +; MIPS32-NEXT: sll $1, $5, 24 +; MIPS32-NEXT: sra $1, $1, 24 +; MIPS32-NEXT: sll $2, $4, 24 +; MIPS32-NEXT: sra $2, $2, 24 +; MIPS32-NEXT: div $zero, $1, $2 +; MIPS32-NEXT: teq $2, $zero, 7 +; MIPS32-NEXT: mflo $1 +; MIPS32-NEXT: sll $1, $1, 24 +; MIPS32-NEXT: sra $2, $1, 24 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -24,15 +24,15 @@ define signext i16 @sdiv_i16(i16 signext %a, i16 signext %b) { ; MIPS32-LABEL: sdiv_i16: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: sll $5, $5, 16 -; MIPS32-NEXT: sra $5, $5, 16 -; MIPS32-NEXT: sll $4, $4, 16 -; MIPS32-NEXT: sra $4, $4, 16 -; MIPS32-NEXT: div $zero, $5, $4 -; MIPS32-NEXT: teq $4, $zero, 7 -; MIPS32-NEXT: mflo $4 -; MIPS32-NEXT: sll $4, $4, 16 -; MIPS32-NEXT: sra $2, $4, 16 +; MIPS32-NEXT: sll $1, $5, 16 +; MIPS32-NEXT: sra $1, $1, 16 +; MIPS32-NEXT: sll $2, $4, 16 +; MIPS32-NEXT: sra $2, $2, 16 +; MIPS32-NEXT: div $zero, $1, $2 +; MIPS32-NEXT: teq $2, $zero, 7 +; MIPS32-NEXT: mflo $1 +; MIPS32-NEXT: sll $1, $1, 16 +; MIPS32-NEXT: sra $2, $1, 16 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -81,15 +81,15 @@ define signext i8 @srem_i8(i8 signext %a, i8 signext %b) { ; MIPS32-LABEL: srem_i8: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: sll $5, $5, 24 -; MIPS32-NEXT: sra $5, $5, 24 -; MIPS32-NEXT: sll $4, $4, 24 -; MIPS32-NEXT: sra $4, $4, 24 -; MIPS32-NEXT: div $zero, $5, $4 -; MIPS32-NEXT: teq $4, $zero, 7 -; MIPS32-NEXT: mflo $4 -; MIPS32-NEXT: sll $4, $4, 24 -; MIPS32-NEXT: sra $2, $4, 24 +; MIPS32-NEXT: sll $1, $5, 24 +; MIPS32-NEXT: sra $1, $1, 24 +; MIPS32-NEXT: sll $2, $4, 24 +; MIPS32-NEXT: sra $2, $2, 24 +; MIPS32-NEXT: div $zero, $1, $2 +; MIPS32-NEXT: teq $2, $zero, 7 +; MIPS32-NEXT: mflo $1 +; MIPS32-NEXT: sll $1, $1, 24 +; MIPS32-NEXT: sra $2, $1, 24 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -100,15 +100,15 @@ define signext i16 @srem_i16(i16 signext %a, i16 signext %b) { ; MIPS32-LABEL: srem_i16: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: sll $5, $5, 16 -; MIPS32-NEXT: sra $5, $5, 16 -; MIPS32-NEXT: sll $4, $4, 16 -; MIPS32-NEXT: sra $4, $4, 16 -; MIPS32-NEXT: div $zero, $5, $4 -; MIPS32-NEXT: teq $4, $zero, 7 -; MIPS32-NEXT: mfhi $4 -; MIPS32-NEXT: sll $4, $4, 16 -; MIPS32-NEXT: sra $2, $4, 16 +; MIPS32-NEXT: sll $1, $5, 16 +; MIPS32-NEXT: sra $1, $1, 16 +; MIPS32-NEXT: sll $2, $4, 16 +; MIPS32-NEXT: sra $2, $2, 16 +; MIPS32-NEXT: div $zero, $1, $2 +; MIPS32-NEXT: teq $2, $zero, 7 +; MIPS32-NEXT: mfhi $1 +; MIPS32-NEXT: sll $1, $1, 16 +; MIPS32-NEXT: sra $2, $1, 16 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -160,11 
+160,11 @@ ; MIPS32-NEXT: lui $1, 0 ; MIPS32-NEXT: ori $1, $1, 255 ; MIPS32-NEXT: and $1, $5, $1 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 255 -; MIPS32-NEXT: and $4, $4, $5 -; MIPS32-NEXT: divu $zero, $1, $4 -; MIPS32-NEXT: teq $4, $zero, 7 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 255 +; MIPS32-NEXT: and $2, $4, $2 +; MIPS32-NEXT: divu $zero, $1, $2 +; MIPS32-NEXT: teq $2, $zero, 7 ; MIPS32-NEXT: mflo $1 ; MIPS32-NEXT: sll $1, $1, 24 ; MIPS32-NEXT: sra $2, $1, 24 @@ -181,11 +181,11 @@ ; MIPS32-NEXT: lui $1, 0 ; MIPS32-NEXT: ori $1, $1, 65535 ; MIPS32-NEXT: and $1, $5, $1 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 65535 -; MIPS32-NEXT: and $4, $4, $5 -; MIPS32-NEXT: divu $zero, $1, $4 -; MIPS32-NEXT: teq $4, $zero, 7 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 65535 +; MIPS32-NEXT: and $2, $4, $2 +; MIPS32-NEXT: divu $zero, $1, $2 +; MIPS32-NEXT: teq $2, $zero, 7 ; MIPS32-NEXT: mflo $1 ; MIPS32-NEXT: sll $1, $1, 16 ; MIPS32-NEXT: sra $2, $1, 16 @@ -240,11 +240,11 @@ ; MIPS32-NEXT: lui $1, 0 ; MIPS32-NEXT: ori $1, $1, 255 ; MIPS32-NEXT: and $1, $5, $1 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 255 -; MIPS32-NEXT: and $4, $4, $5 -; MIPS32-NEXT: divu $zero, $1, $4 -; MIPS32-NEXT: teq $4, $zero, 7 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 255 +; MIPS32-NEXT: and $2, $4, $2 +; MIPS32-NEXT: divu $zero, $1, $2 +; MIPS32-NEXT: teq $2, $zero, 7 ; MIPS32-NEXT: mfhi $1 ; MIPS32-NEXT: sll $1, $1, 24 ; MIPS32-NEXT: sra $2, $1, 24 @@ -261,11 +261,11 @@ ; MIPS32-NEXT: lui $1, 0 ; MIPS32-NEXT: ori $1, $1, 65535 ; MIPS32-NEXT: and $1, $5, $1 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 65535 -; MIPS32-NEXT: and $4, $4, $5 -; MIPS32-NEXT: divu $zero, $1, $4 -; MIPS32-NEXT: teq $4, $zero, 7 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 65535 +; MIPS32-NEXT: and $2, $4, $2 +; MIPS32-NEXT: divu $zero, $1, $2 +; MIPS32-NEXT: teq $2, $zero, 7 ; MIPS32-NEXT: mfhi $1 ; MIPS32-NEXT: sll $1, $1, 16 ; MIPS32-NEXT: sra $2, $1, 16 Index: test/CodeGen/Mips/GlobalISel/llvm-ir/select.ll =================================================================== --- test/CodeGen/Mips/GlobalISel/llvm-ir/select.ll +++ test/CodeGen/Mips/GlobalISel/llvm-ir/select.ll @@ -64,12 +64,12 @@ define i32 @select_with_negation(i32 %a, i32 %b, i32 %x, i32 %y) { ; MIPS32-LABEL: select_with_negation: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: slt $4, $4, $5 -; MIPS32-NEXT: not $4, $4 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 1 -; MIPS32-NEXT: and $4, $4, $5 -; MIPS32-NEXT: movn $7, $6, $4 +; MIPS32-NEXT: slt $1, $4, $5 +; MIPS32-NEXT: not $1, $1 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 1 +; MIPS32-NEXT: and $1, $1, $2 +; MIPS32-NEXT: movn $7, $6, $1 ; MIPS32-NEXT: move $2, $7 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop Index: test/CodeGen/Mips/GlobalISel/llvm-ir/sub.ll =================================================================== --- test/CodeGen/Mips/GlobalISel/llvm-ir/sub.ll +++ test/CodeGen/Mips/GlobalISel/llvm-ir/sub.ll @@ -15,9 +15,9 @@ define signext i8 @sub_i8_sext(i8 signext %a, i8 signext %b) { ; MIPS32-LABEL: sub_i8_sext: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: subu $4, $5, $4 -; MIPS32-NEXT: sll $4, $4, 24 -; MIPS32-NEXT: sra $2, $4, 24 +; MIPS32-NEXT: subu $1, $5, $4 +; MIPS32-NEXT: sll $1, $1, 24 +; MIPS32-NEXT: sra $2, $1, 24 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -28,10 +28,10 @@ define zeroext i8 @sub_i8_zext(i8 zeroext %a, i8 zeroext %b) { ; MIPS32-LABEL: sub_i8_zext: ; MIPS32: # %bb.0: # %entry -; 
MIPS32-NEXT: subu $4, $5, $4 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 255 -; MIPS32-NEXT: and $2, $4, $5 +; MIPS32-NEXT: subu $1, $5, $4 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 255 +; MIPS32-NEXT: and $2, $1, $2 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -53,9 +53,9 @@ define signext i16 @sub_i16_sext(i16 signext %a, i16 signext %b) { ; MIPS32-LABEL: sub_i16_sext: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: subu $4, $5, $4 -; MIPS32-NEXT: sll $4, $4, 16 -; MIPS32-NEXT: sra $2, $4, 16 +; MIPS32-NEXT: subu $1, $5, $4 +; MIPS32-NEXT: sll $1, $1, 16 +; MIPS32-NEXT: sra $2, $1, 16 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -66,10 +66,10 @@ define zeroext i16 @sub_i16_zext(i16 zeroext %a, i16 zeroext %b) { ; MIPS32-LABEL: sub_i16_zext: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: subu $4, $5, $4 -; MIPS32-NEXT: lui $5, 0 -; MIPS32-NEXT: ori $5, $5, 65535 -; MIPS32-NEXT: and $2, $4, $5 +; MIPS32-NEXT: subu $1, $5, $4 +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 65535 +; MIPS32-NEXT: and $2, $1, $2 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -92,12 +92,12 @@ ; MIPS32-LABEL: sub_i64: ; MIPS32: # %bb.0: # %entry ; MIPS32-NEXT: subu $2, $6, $4 -; MIPS32-NEXT: sltu $4, $6, $4 -; MIPS32-NEXT: subu $5, $7, $5 -; MIPS32-NEXT: lui $6, 0 -; MIPS32-NEXT: ori $6, $6, 1 -; MIPS32-NEXT: and $4, $4, $6 -; MIPS32-NEXT: subu $3, $5, $4 +; MIPS32-NEXT: sltu $1, $6, $4 +; MIPS32-NEXT: subu $3, $7, $5 +; MIPS32-NEXT: lui $4, 0 +; MIPS32-NEXT: ori $4, $4, 1 +; MIPS32-NEXT: and $1, $1, $4 +; MIPS32-NEXT: subu $3, $3, $1 ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: Index: test/CodeGen/Mips/GlobalISel/llvm-ir/truncStore_and_aExtLoad.ll =================================================================== --- test/CodeGen/Mips/GlobalISel/llvm-ir/truncStore_and_aExtLoad.ll +++ test/CodeGen/Mips/GlobalISel/llvm-ir/truncStore_and_aExtLoad.ll @@ -26,10 +26,10 @@ define void @load_store_i1(i1* %px, i1* %py) { ; MIPS32-LABEL: load_store_i1: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: lbu $5, 0($5) -; MIPS32-NEXT: lui $1, 0 -; MIPS32-NEXT: ori $1, $1, 1 -; MIPS32-NEXT: and $1, $5, $1 +; MIPS32-NEXT: lbu $1, 0($5) +; MIPS32-NEXT: lui $2, 0 +; MIPS32-NEXT: ori $2, $2, 1 +; MIPS32-NEXT: and $1, $1, $2 ; MIPS32-NEXT: sb $1, 0($4) ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop @@ -42,8 +42,8 @@ define void @load_store_i8(i8* %px, i8* %py) { ; MIPS32-LABEL: load_store_i8: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: lbu $5, 0($5) -; MIPS32-NEXT: sb $5, 0($4) +; MIPS32-NEXT: lbu $1, 0($5) +; MIPS32-NEXT: sb $1, 0($4) ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -55,8 +55,8 @@ define void @load_store_i16(i16* %px, i16* %py) { ; MIPS32-LABEL: load_store_i16: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: lhu $5, 0($5) -; MIPS32-NEXT: sh $5, 0($4) +; MIPS32-NEXT: lhu $1, 0($5) +; MIPS32-NEXT: sh $1, 0($4) ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: @@ -68,8 +68,8 @@ define void @load_store_i32(i32* %px, i32* %py) { ; MIPS32-LABEL: load_store_i32: ; MIPS32: # %bb.0: # %entry -; MIPS32-NEXT: lw $5, 0($5) -; MIPS32-NEXT: sw $5, 0($4) +; MIPS32-NEXT: lw $1, 0($5) +; MIPS32-NEXT: sw $1, 0($4) ; MIPS32-NEXT: jr $ra ; MIPS32-NEXT: nop entry: Index: test/CodeGen/Mips/atomic.ll =================================================================== --- test/CodeGen/Mips/atomic.ll +++ test/CodeGen/Mips/atomic.ll @@ -56,17 +56,16 @@ ; MIPS32O0: # %bb.0: # %entry ; MIPS32O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32O0-NEXT: addiu $2, $2, %lo(_gp_disp) -; MIPS32O0-NEXT: addu $2, $2, $25 -; 
MIPS32O0-NEXT: lw $2, %got(x)($2) +; MIPS32O0-NEXT: addu $1, $2, $25 +; MIPS32O0-NEXT: lw $1, %got(x)($1) ; MIPS32O0-NEXT: $BB0_1: # %entry ; MIPS32O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32O0-NEXT: ll $25, 0($2) -; MIPS32O0-NEXT: addu $1, $25, $4 -; MIPS32O0-NEXT: sc $1, 0($2) -; MIPS32O0-NEXT: beqz $1, $BB0_1 +; MIPS32O0-NEXT: ll $2, 0($1) +; MIPS32O0-NEXT: addu $3, $2, $4 +; MIPS32O0-NEXT: sc $3, 0($1) +; MIPS32O0-NEXT: beqz $3, $BB0_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # %entry -; MIPS32O0-NEXT: move $2, $25 ; MIPS32O0-NEXT: jr $ra ; MIPS32O0-NEXT: nop ; @@ -108,18 +107,18 @@ ; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32R6O0-NEXT: addiu $sp, $sp, -8 -; MIPS32R6O0-NEXT: addu $2, $2, $25 -; MIPS32R6O0-NEXT: move $25, $4 -; MIPS32R6O0-NEXT: lw $2, %got(x)($2) +; MIPS32R6O0-NEXT: addu $1, $2, $25 +; MIPS32R6O0-NEXT: move $2, $4 +; MIPS32R6O0-NEXT: lw $1, %got(x)($1) ; MIPS32R6O0-NEXT: $BB0_1: # %entry ; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32R6O0-NEXT: ll $1, 0($2) -; MIPS32R6O0-NEXT: addu $3, $1, $4 -; MIPS32R6O0-NEXT: sc $3, 0($2) -; MIPS32R6O0-NEXT: beqzc $3, $BB0_1 +; MIPS32R6O0-NEXT: ll $3, 0($1) +; MIPS32R6O0-NEXT: addu $5, $3, $4 +; MIPS32R6O0-NEXT: sc $5, 0($1) +; MIPS32R6O0-NEXT: beqzc $5, $BB0_1 ; MIPS32R6O0-NEXT: # %bb.2: # %entry -; MIPS32R6O0-NEXT: move $2, $1 -; MIPS32R6O0-NEXT: sw $25, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: move $2, $3 ; MIPS32R6O0-NEXT: addiu $sp, $sp, 8 ; MIPS32R6O0-NEXT: jrc $ra ; @@ -317,17 +316,16 @@ ; MIPS32O0: # %bb.0: # %entry ; MIPS32O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32O0-NEXT: addiu $2, $2, %lo(_gp_disp) -; MIPS32O0-NEXT: addu $2, $2, $25 -; MIPS32O0-NEXT: lw $2, %got(x)($2) +; MIPS32O0-NEXT: addu $1, $2, $25 +; MIPS32O0-NEXT: lw $1, %got(x)($1) ; MIPS32O0-NEXT: $BB1_1: # %entry ; MIPS32O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32O0-NEXT: ll $25, 0($2) -; MIPS32O0-NEXT: subu $1, $25, $4 -; MIPS32O0-NEXT: sc $1, 0($2) -; MIPS32O0-NEXT: beqz $1, $BB1_1 +; MIPS32O0-NEXT: ll $2, 0($1) +; MIPS32O0-NEXT: subu $3, $2, $4 +; MIPS32O0-NEXT: sc $3, 0($1) +; MIPS32O0-NEXT: beqz $3, $BB1_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # %entry -; MIPS32O0-NEXT: move $2, $25 ; MIPS32O0-NEXT: jr $ra ; MIPS32O0-NEXT: nop ; @@ -369,18 +367,18 @@ ; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32R6O0-NEXT: addiu $sp, $sp, -8 -; MIPS32R6O0-NEXT: addu $2, $2, $25 -; MIPS32R6O0-NEXT: move $25, $4 -; MIPS32R6O0-NEXT: lw $2, %got(x)($2) +; MIPS32R6O0-NEXT: addu $1, $2, $25 +; MIPS32R6O0-NEXT: move $2, $4 +; MIPS32R6O0-NEXT: lw $1, %got(x)($1) ; MIPS32R6O0-NEXT: $BB1_1: # %entry ; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32R6O0-NEXT: ll $1, 0($2) -; MIPS32R6O0-NEXT: subu $3, $1, $4 -; MIPS32R6O0-NEXT: sc $3, 0($2) -; MIPS32R6O0-NEXT: beqzc $3, $BB1_1 +; MIPS32R6O0-NEXT: ll $3, 0($1) +; MIPS32R6O0-NEXT: subu $5, $3, $4 +; MIPS32R6O0-NEXT: sc $5, 0($1) +; MIPS32R6O0-NEXT: beqzc $5, $BB1_1 ; MIPS32R6O0-NEXT: # %bb.2: # %entry -; MIPS32R6O0-NEXT: move $2, $1 -; MIPS32R6O0-NEXT: sw $25, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: move $2, $3 ; MIPS32R6O0-NEXT: addiu $sp, $sp, 8 ; MIPS32R6O0-NEXT: jrc $ra ; @@ -578,17 +576,16 @@ ; MIPS32O0: # %bb.0: # %entry ; MIPS32O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32O0-NEXT: addiu $2, $2, %lo(_gp_disp) -; MIPS32O0-NEXT: addu $2, $2, $25 -; 
MIPS32O0-NEXT: lw $2, %got(x)($2) +; MIPS32O0-NEXT: addu $1, $2, $25 +; MIPS32O0-NEXT: lw $1, %got(x)($1) ; MIPS32O0-NEXT: $BB2_1: # %entry ; MIPS32O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32O0-NEXT: ll $25, 0($2) -; MIPS32O0-NEXT: xor $1, $25, $4 -; MIPS32O0-NEXT: sc $1, 0($2) -; MIPS32O0-NEXT: beqz $1, $BB2_1 +; MIPS32O0-NEXT: ll $2, 0($1) +; MIPS32O0-NEXT: xor $3, $2, $4 +; MIPS32O0-NEXT: sc $3, 0($1) +; MIPS32O0-NEXT: beqz $3, $BB2_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # %entry -; MIPS32O0-NEXT: move $2, $25 ; MIPS32O0-NEXT: jr $ra ; MIPS32O0-NEXT: nop ; @@ -630,18 +627,18 @@ ; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32R6O0-NEXT: addiu $sp, $sp, -8 -; MIPS32R6O0-NEXT: addu $2, $2, $25 -; MIPS32R6O0-NEXT: move $25, $4 -; MIPS32R6O0-NEXT: lw $2, %got(x)($2) +; MIPS32R6O0-NEXT: addu $1, $2, $25 +; MIPS32R6O0-NEXT: move $2, $4 +; MIPS32R6O0-NEXT: lw $1, %got(x)($1) ; MIPS32R6O0-NEXT: $BB2_1: # %entry ; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32R6O0-NEXT: ll $1, 0($2) -; MIPS32R6O0-NEXT: xor $3, $1, $4 -; MIPS32R6O0-NEXT: sc $3, 0($2) -; MIPS32R6O0-NEXT: beqzc $3, $BB2_1 +; MIPS32R6O0-NEXT: ll $3, 0($1) +; MIPS32R6O0-NEXT: xor $5, $3, $4 +; MIPS32R6O0-NEXT: sc $5, 0($1) +; MIPS32R6O0-NEXT: beqzc $5, $BB2_1 ; MIPS32R6O0-NEXT: # %bb.2: # %entry -; MIPS32R6O0-NEXT: move $2, $1 -; MIPS32R6O0-NEXT: sw $25, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: move $2, $3 ; MIPS32R6O0-NEXT: addiu $sp, $sp, 8 ; MIPS32R6O0-NEXT: jrc $ra ; @@ -838,17 +835,16 @@ ; MIPS32O0: # %bb.0: # %entry ; MIPS32O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32O0-NEXT: addiu $2, $2, %lo(_gp_disp) -; MIPS32O0-NEXT: addu $2, $2, $25 -; MIPS32O0-NEXT: lw $2, %got(x)($2) +; MIPS32O0-NEXT: addu $1, $2, $25 +; MIPS32O0-NEXT: lw $1, %got(x)($1) ; MIPS32O0-NEXT: $BB3_1: # %entry ; MIPS32O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32O0-NEXT: ll $25, 0($2) -; MIPS32O0-NEXT: or $1, $25, $4 -; MIPS32O0-NEXT: sc $1, 0($2) -; MIPS32O0-NEXT: beqz $1, $BB3_1 +; MIPS32O0-NEXT: ll $2, 0($1) +; MIPS32O0-NEXT: or $3, $2, $4 +; MIPS32O0-NEXT: sc $3, 0($1) +; MIPS32O0-NEXT: beqz $3, $BB3_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # %entry -; MIPS32O0-NEXT: move $2, $25 ; MIPS32O0-NEXT: jr $ra ; MIPS32O0-NEXT: nop ; @@ -890,18 +886,18 @@ ; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32R6O0-NEXT: addiu $sp, $sp, -8 -; MIPS32R6O0-NEXT: addu $2, $2, $25 -; MIPS32R6O0-NEXT: move $25, $4 -; MIPS32R6O0-NEXT: lw $2, %got(x)($2) +; MIPS32R6O0-NEXT: addu $1, $2, $25 +; MIPS32R6O0-NEXT: move $2, $4 +; MIPS32R6O0-NEXT: lw $1, %got(x)($1) ; MIPS32R6O0-NEXT: $BB3_1: # %entry ; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32R6O0-NEXT: ll $1, 0($2) -; MIPS32R6O0-NEXT: or $3, $1, $4 -; MIPS32R6O0-NEXT: sc $3, 0($2) -; MIPS32R6O0-NEXT: beqzc $3, $BB3_1 +; MIPS32R6O0-NEXT: ll $3, 0($1) +; MIPS32R6O0-NEXT: or $5, $3, $4 +; MIPS32R6O0-NEXT: sc $5, 0($1) +; MIPS32R6O0-NEXT: beqzc $5, $BB3_1 ; MIPS32R6O0-NEXT: # %bb.2: # %entry -; MIPS32R6O0-NEXT: move $2, $1 -; MIPS32R6O0-NEXT: sw $25, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: move $2, $3 ; MIPS32R6O0-NEXT: addiu $sp, $sp, 8 ; MIPS32R6O0-NEXT: jrc $ra ; @@ -1098,17 +1094,16 @@ ; MIPS32O0: # %bb.0: # %entry ; MIPS32O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32O0-NEXT: addiu $2, $2, %lo(_gp_disp) -; MIPS32O0-NEXT: addu $2, $2, $25 -; 
MIPS32O0-NEXT: lw $2, %got(x)($2) +; MIPS32O0-NEXT: addu $1, $2, $25 +; MIPS32O0-NEXT: lw $1, %got(x)($1) ; MIPS32O0-NEXT: $BB4_1: # %entry ; MIPS32O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32O0-NEXT: ll $25, 0($2) -; MIPS32O0-NEXT: and $1, $25, $4 -; MIPS32O0-NEXT: sc $1, 0($2) -; MIPS32O0-NEXT: beqz $1, $BB4_1 +; MIPS32O0-NEXT: ll $2, 0($1) +; MIPS32O0-NEXT: and $3, $2, $4 +; MIPS32O0-NEXT: sc $3, 0($1) +; MIPS32O0-NEXT: beqz $3, $BB4_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # %entry -; MIPS32O0-NEXT: move $2, $25 ; MIPS32O0-NEXT: jr $ra ; MIPS32O0-NEXT: nop ; @@ -1150,18 +1145,18 @@ ; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32R6O0-NEXT: addiu $sp, $sp, -8 -; MIPS32R6O0-NEXT: addu $2, $2, $25 -; MIPS32R6O0-NEXT: move $25, $4 -; MIPS32R6O0-NEXT: lw $2, %got(x)($2) +; MIPS32R6O0-NEXT: addu $1, $2, $25 +; MIPS32R6O0-NEXT: move $2, $4 +; MIPS32R6O0-NEXT: lw $1, %got(x)($1) ; MIPS32R6O0-NEXT: $BB4_1: # %entry ; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32R6O0-NEXT: ll $1, 0($2) -; MIPS32R6O0-NEXT: and $3, $1, $4 -; MIPS32R6O0-NEXT: sc $3, 0($2) -; MIPS32R6O0-NEXT: beqzc $3, $BB4_1 +; MIPS32R6O0-NEXT: ll $3, 0($1) +; MIPS32R6O0-NEXT: and $5, $3, $4 +; MIPS32R6O0-NEXT: sc $5, 0($1) +; MIPS32R6O0-NEXT: beqzc $5, $BB4_1 ; MIPS32R6O0-NEXT: # %bb.2: # %entry -; MIPS32R6O0-NEXT: move $2, $1 -; MIPS32R6O0-NEXT: sw $25, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: move $2, $3 ; MIPS32R6O0-NEXT: addiu $sp, $sp, 8 ; MIPS32R6O0-NEXT: jrc $ra ; @@ -1359,18 +1354,17 @@ ; MIPS32O0: # %bb.0: # %entry ; MIPS32O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32O0-NEXT: addiu $2, $2, %lo(_gp_disp) -; MIPS32O0-NEXT: addu $2, $2, $25 -; MIPS32O0-NEXT: lw $2, %got(x)($2) +; MIPS32O0-NEXT: addu $1, $2, $25 +; MIPS32O0-NEXT: lw $1, %got(x)($1) ; MIPS32O0-NEXT: $BB5_1: # %entry ; MIPS32O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32O0-NEXT: ll $25, 0($2) -; MIPS32O0-NEXT: and $1, $25, $4 -; MIPS32O0-NEXT: nor $1, $zero, $1 -; MIPS32O0-NEXT: sc $1, 0($2) -; MIPS32O0-NEXT: beqz $1, $BB5_1 +; MIPS32O0-NEXT: ll $2, 0($1) +; MIPS32O0-NEXT: and $3, $2, $4 +; MIPS32O0-NEXT: nor $3, $zero, $3 +; MIPS32O0-NEXT: sc $3, 0($1) +; MIPS32O0-NEXT: beqz $3, $BB5_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # %entry -; MIPS32O0-NEXT: move $2, $25 ; MIPS32O0-NEXT: jr $ra ; MIPS32O0-NEXT: nop ; @@ -1414,19 +1408,19 @@ ; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32R6O0-NEXT: addiu $sp, $sp, -8 -; MIPS32R6O0-NEXT: addu $2, $2, $25 -; MIPS32R6O0-NEXT: move $25, $4 -; MIPS32R6O0-NEXT: lw $2, %got(x)($2) +; MIPS32R6O0-NEXT: addu $1, $2, $25 +; MIPS32R6O0-NEXT: move $2, $4 +; MIPS32R6O0-NEXT: lw $1, %got(x)($1) ; MIPS32R6O0-NEXT: $BB5_1: # %entry ; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32R6O0-NEXT: ll $1, 0($2) -; MIPS32R6O0-NEXT: and $3, $1, $4 -; MIPS32R6O0-NEXT: nor $3, $zero, $3 -; MIPS32R6O0-NEXT: sc $3, 0($2) -; MIPS32R6O0-NEXT: beqzc $3, $BB5_1 +; MIPS32R6O0-NEXT: ll $3, 0($1) +; MIPS32R6O0-NEXT: and $5, $3, $4 +; MIPS32R6O0-NEXT: nor $5, $zero, $5 +; MIPS32R6O0-NEXT: sc $5, 0($1) +; MIPS32R6O0-NEXT: beqzc $5, $BB5_1 ; MIPS32R6O0-NEXT: # %bb.2: # %entry -; MIPS32R6O0-NEXT: move $2, $1 -; MIPS32R6O0-NEXT: sw $25, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: move $2, $3 ; MIPS32R6O0-NEXT: addiu $sp, $sp, 8 ; MIPS32R6O0-NEXT: jrc $ra ; @@ -1637,19 +1631,19 @@ ; 
MIPS32O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32O0-NEXT: addiu $sp, $sp, -8 -; MIPS32O0-NEXT: addu $2, $2, $25 +; MIPS32O0-NEXT: addu $1, $2, $25 ; MIPS32O0-NEXT: sw $4, 4($sp) -; MIPS32O0-NEXT: lw $4, 4($sp) -; MIPS32O0-NEXT: lw $2, %got(x)($2) +; MIPS32O0-NEXT: lw $2, 4($sp) +; MIPS32O0-NEXT: lw $1, %got(x)($1) ; MIPS32O0-NEXT: $BB6_1: # %entry ; MIPS32O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32O0-NEXT: ll $25, 0($2) -; MIPS32O0-NEXT: move $1, $4 -; MIPS32O0-NEXT: sc $1, 0($2) -; MIPS32O0-NEXT: beqz $1, $BB6_1 +; MIPS32O0-NEXT: ll $3, 0($1) +; MIPS32O0-NEXT: move $4, $2 +; MIPS32O0-NEXT: sc $4, 0($1) +; MIPS32O0-NEXT: beqz $4, $BB6_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # %entry -; MIPS32O0-NEXT: move $2, $25 +; MIPS32O0-NEXT: move $2, $3 ; MIPS32O0-NEXT: addiu $sp, $sp, 8 ; MIPS32O0-NEXT: jr $ra ; MIPS32O0-NEXT: nop @@ -1697,20 +1691,20 @@ ; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32R6O0-NEXT: addiu $sp, $sp, -8 -; MIPS32R6O0-NEXT: addu $2, $2, $25 -; MIPS32R6O0-NEXT: move $25, $4 +; MIPS32R6O0-NEXT: addu $1, $2, $25 +; MIPS32R6O0-NEXT: move $2, $4 ; MIPS32R6O0-NEXT: sw $4, 4($sp) -; MIPS32R6O0-NEXT: lw $4, 4($sp) -; MIPS32R6O0-NEXT: lw $2, %got(x)($2) +; MIPS32R6O0-NEXT: lw $3, 4($sp) +; MIPS32R6O0-NEXT: lw $1, %got(x)($1) ; MIPS32R6O0-NEXT: $BB6_1: # %entry ; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32R6O0-NEXT: ll $1, 0($2) -; MIPS32R6O0-NEXT: move $3, $4 -; MIPS32R6O0-NEXT: sc $3, 0($2) -; MIPS32R6O0-NEXT: beqzc $3, $BB6_1 +; MIPS32R6O0-NEXT: ll $4, 0($1) +; MIPS32R6O0-NEXT: move $5, $3 +; MIPS32R6O0-NEXT: sc $5, 0($1) +; MIPS32R6O0-NEXT: beqzc $5, $BB6_1 ; MIPS32R6O0-NEXT: # %bb.2: # %entry -; MIPS32R6O0-NEXT: move $2, $1 -; MIPS32R6O0-NEXT: sw $25, 0($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $2, 0($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: move $2, $4 ; MIPS32R6O0-NEXT: addiu $sp, $sp, 8 ; MIPS32R6O0-NEXT: jrc $ra ; @@ -1942,29 +1936,29 @@ ; MIPS32O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32O0-NEXT: addiu $sp, $sp, -16 -; MIPS32O0-NEXT: addu $2, $2, $25 +; MIPS32O0-NEXT: addu $1, $2, $25 ; MIPS32O0-NEXT: sw $5, 12($sp) -; MIPS32O0-NEXT: lw $5, 12($sp) -; MIPS32O0-NEXT: lw $2, %got(x)($2) -; MIPS32O0-NEXT: lw $25, 8($sp) # 4-byte Folded Reload -; MIPS32O0-NEXT: move $1, $4 +; MIPS32O0-NEXT: lw $2, 12($sp) +; MIPS32O0-NEXT: lw $1, %got(x)($1) +; MIPS32O0-NEXT: lw $3, 8($sp) # 4-byte Folded Reload +; MIPS32O0-NEXT: move $5, $4 ; MIPS32O0-NEXT: $BB7_1: # %entry ; MIPS32O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32O0-NEXT: ll $3, 0($2) -; MIPS32O0-NEXT: bne $3, $1, $BB7_3 +; MIPS32O0-NEXT: ll $6, 0($1) +; MIPS32O0-NEXT: bne $6, $5, $BB7_3 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # %entry ; MIPS32O0-NEXT: # in Loop: Header=BB7_1 Depth=1 -; MIPS32O0-NEXT: move $6, $5 -; MIPS32O0-NEXT: sc $6, 0($2) -; MIPS32O0-NEXT: beqz $6, $BB7_1 +; MIPS32O0-NEXT: move $7, $2 +; MIPS32O0-NEXT: sc $7, 0($1) +; MIPS32O0-NEXT: beqz $7, $BB7_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: $BB7_3: # %entry -; MIPS32O0-NEXT: xor $1, $3, $4 +; MIPS32O0-NEXT: xor $1, $6, $4 ; MIPS32O0-NEXT: sltiu $1, $1, 1 -; MIPS32O0-NEXT: move $2, $3 -; MIPS32O0-NEXT: sw $3, 8($sp) # 4-byte Folded Spill -; MIPS32O0-NEXT: sw $25, 4($sp) # 4-byte Folded Spill +; MIPS32O0-NEXT: move $2, $6 +; MIPS32O0-NEXT: sw $6, 8($sp) # 4-byte Folded Spill +; MIPS32O0-NEXT: sw $3, 4($sp) # 4-byte Folded Spill ; MIPS32O0-NEXT: sw $1, 0($sp) # 4-byte 
Folded Spill ; MIPS32O0-NEXT: addiu $sp, $sp, 16 ; MIPS32O0-NEXT: jr $ra @@ -2020,28 +2014,28 @@ ; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32R6O0-NEXT: addiu $sp, $sp, -24 -; MIPS32R6O0-NEXT: addu $2, $2, $25 -; MIPS32R6O0-NEXT: move $25, $5 -; MIPS32R6O0-NEXT: move $1, $4 +; MIPS32R6O0-NEXT: addu $1, $2, $25 +; MIPS32R6O0-NEXT: move $2, $5 +; MIPS32R6O0-NEXT: move $3, $4 ; MIPS32R6O0-NEXT: sw $5, 20($sp) ; MIPS32R6O0-NEXT: lw $5, 20($sp) -; MIPS32R6O0-NEXT: lw $2, %got(x)($2) -; MIPS32R6O0-NEXT: lw $3, 16($sp) # 4-byte Folded Reload +; MIPS32R6O0-NEXT: lw $1, %got(x)($1) +; MIPS32R6O0-NEXT: lw $6, 16($sp) # 4-byte Folded Reload ; MIPS32R6O0-NEXT: $BB7_1: # %entry ; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32R6O0-NEXT: ll $6, 0($2) -; MIPS32R6O0-NEXT: bnec $6, $4, $BB7_3 +; MIPS32R6O0-NEXT: ll $7, 0($1) +; MIPS32R6O0-NEXT: bnec $7, $4, $BB7_3 ; MIPS32R6O0-NEXT: # %bb.2: # %entry ; MIPS32R6O0-NEXT: # in Loop: Header=BB7_1 Depth=1 -; MIPS32R6O0-NEXT: move $7, $5 -; MIPS32R6O0-NEXT: sc $7, 0($2) -; MIPS32R6O0-NEXT: beqzc $7, $BB7_1 +; MIPS32R6O0-NEXT: move $8, $5 +; MIPS32R6O0-NEXT: sc $8, 0($1) +; MIPS32R6O0-NEXT: beqzc $8, $BB7_1 ; MIPS32R6O0-NEXT: $BB7_3: # %entry -; MIPS32R6O0-NEXT: move $2, $6 -; MIPS32R6O0-NEXT: sw $25, 12($sp) # 4-byte Folded Spill -; MIPS32R6O0-NEXT: sw $1, 8($sp) # 4-byte Folded Spill -; MIPS32R6O0-NEXT: sw $6, 16($sp) # 4-byte Folded Spill -; MIPS32R6O0-NEXT: sw $3, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $2, 12($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: move $2, $7 +; MIPS32R6O0-NEXT: sw $3, 8($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $7, 16($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $6, 4($sp) # 4-byte Folded Spill ; MIPS32R6O0-NEXT: addiu $sp, $sp, 24 ; MIPS32R6O0-NEXT: jrc $ra ; @@ -2329,33 +2323,33 @@ ; MIPS32O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32O0-NEXT: addiu $sp, $sp, -8 -; MIPS32O0-NEXT: addu $2, $2, $25 -; MIPS32O0-NEXT: lw $2, %got(y)($2) -; MIPS32O0-NEXT: addiu $25, $zero, -4 -; MIPS32O0-NEXT: and $25, $2, $25 -; MIPS32O0-NEXT: andi $2, $2, 3 -; MIPS32O0-NEXT: sll $2, $2, 3 -; MIPS32O0-NEXT: ori $1, $zero, 255 -; MIPS32O0-NEXT: sllv $1, $1, $2 -; MIPS32O0-NEXT: nor $3, $zero, $1 -; MIPS32O0-NEXT: sllv $4, $4, $2 +; MIPS32O0-NEXT: addu $1, $2, $25 +; MIPS32O0-NEXT: lw $1, %got(y)($1) +; MIPS32O0-NEXT: addiu $2, $zero, -4 +; MIPS32O0-NEXT: and $2, $1, $2 +; MIPS32O0-NEXT: andi $1, $1, 3 +; MIPS32O0-NEXT: sll $1, $1, 3 +; MIPS32O0-NEXT: ori $3, $zero, 255 +; MIPS32O0-NEXT: sllv $3, $3, $1 +; MIPS32O0-NEXT: nor $5, $zero, $3 +; MIPS32O0-NEXT: sllv $4, $4, $1 ; MIPS32O0-NEXT: $BB8_1: # %entry ; MIPS32O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32O0-NEXT: ll $6, 0($25) -; MIPS32O0-NEXT: addu $7, $6, $4 -; MIPS32O0-NEXT: and $7, $7, $1 -; MIPS32O0-NEXT: and $8, $6, $3 -; MIPS32O0-NEXT: or $8, $8, $7 -; MIPS32O0-NEXT: sc $8, 0($25) -; MIPS32O0-NEXT: beqz $8, $BB8_1 +; MIPS32O0-NEXT: ll $7, 0($2) +; MIPS32O0-NEXT: addu $8, $7, $4 +; MIPS32O0-NEXT: and $8, $8, $3 +; MIPS32O0-NEXT: and $9, $7, $5 +; MIPS32O0-NEXT: or $9, $9, $8 +; MIPS32O0-NEXT: sc $9, 0($2) +; MIPS32O0-NEXT: beqz $9, $BB8_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # %entry -; MIPS32O0-NEXT: and $5, $6, $1 -; MIPS32O0-NEXT: srlv $5, $5, $2 -; MIPS32O0-NEXT: sll $5, $5, 24 -; MIPS32O0-NEXT: sra $5, $5, 24 +; MIPS32O0-NEXT: and $6, $7, $3 +; MIPS32O0-NEXT: srlv $6, $6, $1 +; MIPS32O0-NEXT: sll $6, $6, 24 +; MIPS32O0-NEXT: sra $6, $6, 24 ; 
MIPS32O0-NEXT: # %bb.3: # %entry -; MIPS32O0-NEXT: sw $5, 4($sp) # 4-byte Folded Spill +; MIPS32O0-NEXT: sw $6, 4($sp) # 4-byte Folded Spill ; MIPS32O0-NEXT: # %bb.4: # %entry ; MIPS32O0-NEXT: lw $1, 4($sp) # 4-byte Folded Reload ; MIPS32O0-NEXT: sll $2, $1, 24 @@ -2432,33 +2426,33 @@ ; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32R6O0-NEXT: addiu $sp, $sp, -8 -; MIPS32R6O0-NEXT: addu $2, $2, $25 -; MIPS32R6O0-NEXT: move $25, $4 -; MIPS32R6O0-NEXT: lw $2, %got(y)($2) -; MIPS32R6O0-NEXT: addiu $1, $zero, -4 -; MIPS32R6O0-NEXT: and $1, $2, $1 -; MIPS32R6O0-NEXT: andi $2, $2, 3 -; MIPS32R6O0-NEXT: sll $2, $2, 3 -; MIPS32R6O0-NEXT: ori $3, $zero, 255 -; MIPS32R6O0-NEXT: sllv $3, $3, $2 -; MIPS32R6O0-NEXT: nor $5, $zero, $3 -; MIPS32R6O0-NEXT: sllv $4, $4, $2 +; MIPS32R6O0-NEXT: addu $1, $2, $25 +; MIPS32R6O0-NEXT: move $2, $4 +; MIPS32R6O0-NEXT: lw $1, %got(y)($1) +; MIPS32R6O0-NEXT: addiu $3, $zero, -4 +; MIPS32R6O0-NEXT: and $3, $1, $3 +; MIPS32R6O0-NEXT: andi $1, $1, 3 +; MIPS32R6O0-NEXT: sll $1, $1, 3 +; MIPS32R6O0-NEXT: ori $5, $zero, 255 +; MIPS32R6O0-NEXT: sllv $5, $5, $1 +; MIPS32R6O0-NEXT: nor $6, $zero, $5 +; MIPS32R6O0-NEXT: sllv $4, $4, $1 ; MIPS32R6O0-NEXT: $BB8_1: # %entry ; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32R6O0-NEXT: ll $7, 0($1) -; MIPS32R6O0-NEXT: addu $8, $7, $4 -; MIPS32R6O0-NEXT: and $8, $8, $3 -; MIPS32R6O0-NEXT: and $9, $7, $5 -; MIPS32R6O0-NEXT: or $9, $9, $8 -; MIPS32R6O0-NEXT: sc $9, 0($1) -; MIPS32R6O0-NEXT: beqzc $9, $BB8_1 +; MIPS32R6O0-NEXT: ll $8, 0($3) +; MIPS32R6O0-NEXT: addu $9, $8, $4 +; MIPS32R6O0-NEXT: and $9, $9, $5 +; MIPS32R6O0-NEXT: and $10, $8, $6 +; MIPS32R6O0-NEXT: or $10, $10, $9 +; MIPS32R6O0-NEXT: sc $10, 0($3) +; MIPS32R6O0-NEXT: beqzc $10, $BB8_1 ; MIPS32R6O0-NEXT: # %bb.2: # %entry -; MIPS32R6O0-NEXT: and $6, $7, $3 -; MIPS32R6O0-NEXT: srlv $6, $6, $2 -; MIPS32R6O0-NEXT: seb $6, $6 +; MIPS32R6O0-NEXT: and $7, $8, $5 +; MIPS32R6O0-NEXT: srlv $7, $7, $1 +; MIPS32R6O0-NEXT: seb $7, $7 ; MIPS32R6O0-NEXT: # %bb.3: # %entry -; MIPS32R6O0-NEXT: sw $25, 4($sp) # 4-byte Folded Spill -; MIPS32R6O0-NEXT: sw $6, 0($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $7, 0($sp) # 4-byte Folded Spill ; MIPS32R6O0-NEXT: # %bb.4: # %entry ; MIPS32R6O0-NEXT: lw $1, 0($sp) # 4-byte Folded Reload ; MIPS32R6O0-NEXT: seb $2, $1 @@ -2604,30 +2598,30 @@ ; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAdd8))) ; MIPS64R6O0-NEXT: move $2, $4 ; MIPS64R6O0-NEXT: ld $1, %got_disp(y)($1) -; MIPS64R6O0-NEXT: daddiu $4, $zero, -4 -; MIPS64R6O0-NEXT: and $4, $1, $4 -; MIPS64R6O0-NEXT: andi $3, $1, 3 -; MIPS64R6O0-NEXT: xori $3, $3, 3 -; MIPS64R6O0-NEXT: sll $3, $3, 3 -; MIPS64R6O0-NEXT: ori $5, $zero, 255 -; MIPS64R6O0-NEXT: sllv $5, $5, $3 -; MIPS64R6O0-NEXT: nor $6, $zero, $5 -; MIPS64R6O0-NEXT: sllv $2, $2, $3 +; MIPS64R6O0-NEXT: daddiu $3, $zero, -4 +; MIPS64R6O0-NEXT: and $3, $1, $3 +; MIPS64R6O0-NEXT: andi $5, $1, 3 +; MIPS64R6O0-NEXT: xori $5, $5, 3 +; MIPS64R6O0-NEXT: sll $5, $5, 3 +; MIPS64R6O0-NEXT: ori $6, $zero, 255 +; MIPS64R6O0-NEXT: sllv $6, $6, $5 +; MIPS64R6O0-NEXT: nor $7, $zero, $6 +; MIPS64R6O0-NEXT: sllv $2, $2, $5 ; MIPS64R6O0-NEXT: .LBB8_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: ll $8, 0($4) -; MIPS64R6O0-NEXT: addu $9, $8, $2 -; MIPS64R6O0-NEXT: and $9, $9, $5 -; MIPS64R6O0-NEXT: and $10, $8, $6 -; MIPS64R6O0-NEXT: or $10, $10, $9 -; MIPS64R6O0-NEXT: sc $10, 0($4) -; 
MIPS64R6O0-NEXT: beqzc $10, .LBB8_1 +; MIPS64R6O0-NEXT: ll $9, 0($3) +; MIPS64R6O0-NEXT: addu $10, $9, $2 +; MIPS64R6O0-NEXT: and $10, $10, $6 +; MIPS64R6O0-NEXT: and $11, $9, $7 +; MIPS64R6O0-NEXT: or $11, $11, $10 +; MIPS64R6O0-NEXT: sc $11, 0($3) +; MIPS64R6O0-NEXT: beqzc $11, .LBB8_1 ; MIPS64R6O0-NEXT: # %bb.2: # %entry -; MIPS64R6O0-NEXT: and $7, $8, $5 -; MIPS64R6O0-NEXT: srlv $7, $7, $3 -; MIPS64R6O0-NEXT: seb $7, $7 +; MIPS64R6O0-NEXT: and $8, $9, $6 +; MIPS64R6O0-NEXT: srlv $8, $8, $5 +; MIPS64R6O0-NEXT: seb $8, $8 ; MIPS64R6O0-NEXT: # %bb.3: # %entry -; MIPS64R6O0-NEXT: sw $7, 12($sp) # 4-byte Folded Spill +; MIPS64R6O0-NEXT: sw $8, 12($sp) # 4-byte Folded Spill ; MIPS64R6O0-NEXT: # %bb.4: # %entry ; MIPS64R6O0-NEXT: lw $1, 12($sp) # 4-byte Folded Reload ; MIPS64R6O0-NEXT: seb $2, $1 @@ -2846,33 +2840,33 @@ ; MIPS32O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32O0-NEXT: addiu $sp, $sp, -8 -; MIPS32O0-NEXT: addu $2, $2, $25 -; MIPS32O0-NEXT: lw $2, %got(y)($2) -; MIPS32O0-NEXT: addiu $25, $zero, -4 -; MIPS32O0-NEXT: and $25, $2, $25 -; MIPS32O0-NEXT: andi $2, $2, 3 -; MIPS32O0-NEXT: sll $2, $2, 3 -; MIPS32O0-NEXT: ori $1, $zero, 255 -; MIPS32O0-NEXT: sllv $1, $1, $2 -; MIPS32O0-NEXT: nor $3, $zero, $1 -; MIPS32O0-NEXT: sllv $4, $4, $2 +; MIPS32O0-NEXT: addu $1, $2, $25 +; MIPS32O0-NEXT: lw $1, %got(y)($1) +; MIPS32O0-NEXT: addiu $2, $zero, -4 +; MIPS32O0-NEXT: and $2, $1, $2 +; MIPS32O0-NEXT: andi $1, $1, 3 +; MIPS32O0-NEXT: sll $1, $1, 3 +; MIPS32O0-NEXT: ori $3, $zero, 255 +; MIPS32O0-NEXT: sllv $3, $3, $1 +; MIPS32O0-NEXT: nor $5, $zero, $3 +; MIPS32O0-NEXT: sllv $4, $4, $1 ; MIPS32O0-NEXT: $BB9_1: # %entry ; MIPS32O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32O0-NEXT: ll $6, 0($25) -; MIPS32O0-NEXT: subu $7, $6, $4 -; MIPS32O0-NEXT: and $7, $7, $1 -; MIPS32O0-NEXT: and $8, $6, $3 -; MIPS32O0-NEXT: or $8, $8, $7 -; MIPS32O0-NEXT: sc $8, 0($25) -; MIPS32O0-NEXT: beqz $8, $BB9_1 +; MIPS32O0-NEXT: ll $7, 0($2) +; MIPS32O0-NEXT: subu $8, $7, $4 +; MIPS32O0-NEXT: and $8, $8, $3 +; MIPS32O0-NEXT: and $9, $7, $5 +; MIPS32O0-NEXT: or $9, $9, $8 +; MIPS32O0-NEXT: sc $9, 0($2) +; MIPS32O0-NEXT: beqz $9, $BB9_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # %entry -; MIPS32O0-NEXT: and $5, $6, $1 -; MIPS32O0-NEXT: srlv $5, $5, $2 -; MIPS32O0-NEXT: sll $5, $5, 24 -; MIPS32O0-NEXT: sra $5, $5, 24 +; MIPS32O0-NEXT: and $6, $7, $3 +; MIPS32O0-NEXT: srlv $6, $6, $1 +; MIPS32O0-NEXT: sll $6, $6, 24 +; MIPS32O0-NEXT: sra $6, $6, 24 ; MIPS32O0-NEXT: # %bb.3: # %entry -; MIPS32O0-NEXT: sw $5, 4($sp) # 4-byte Folded Spill +; MIPS32O0-NEXT: sw $6, 4($sp) # 4-byte Folded Spill ; MIPS32O0-NEXT: # %bb.4: # %entry ; MIPS32O0-NEXT: lw $1, 4($sp) # 4-byte Folded Reload ; MIPS32O0-NEXT: sll $2, $1, 24 @@ -2949,33 +2943,33 @@ ; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32R6O0-NEXT: addiu $sp, $sp, -8 -; MIPS32R6O0-NEXT: addu $2, $2, $25 -; MIPS32R6O0-NEXT: move $25, $4 -; MIPS32R6O0-NEXT: lw $2, %got(y)($2) -; MIPS32R6O0-NEXT: addiu $1, $zero, -4 -; MIPS32R6O0-NEXT: and $1, $2, $1 -; MIPS32R6O0-NEXT: andi $2, $2, 3 -; MIPS32R6O0-NEXT: sll $2, $2, 3 -; MIPS32R6O0-NEXT: ori $3, $zero, 255 -; MIPS32R6O0-NEXT: sllv $3, $3, $2 -; MIPS32R6O0-NEXT: nor $5, $zero, $3 -; MIPS32R6O0-NEXT: sllv $4, $4, $2 +; MIPS32R6O0-NEXT: addu $1, $2, $25 +; MIPS32R6O0-NEXT: move $2, $4 +; MIPS32R6O0-NEXT: lw $1, %got(y)($1) +; MIPS32R6O0-NEXT: addiu $3, $zero, -4 +; MIPS32R6O0-NEXT: and $3, $1, $3 +; MIPS32R6O0-NEXT: andi $1, $1, 3 +; 
MIPS32R6O0-NEXT: sll $1, $1, 3 +; MIPS32R6O0-NEXT: ori $5, $zero, 255 +; MIPS32R6O0-NEXT: sllv $5, $5, $1 +; MIPS32R6O0-NEXT: nor $6, $zero, $5 +; MIPS32R6O0-NEXT: sllv $4, $4, $1 ; MIPS32R6O0-NEXT: $BB9_1: # %entry ; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32R6O0-NEXT: ll $7, 0($1) -; MIPS32R6O0-NEXT: subu $8, $7, $4 -; MIPS32R6O0-NEXT: and $8, $8, $3 -; MIPS32R6O0-NEXT: and $9, $7, $5 -; MIPS32R6O0-NEXT: or $9, $9, $8 -; MIPS32R6O0-NEXT: sc $9, 0($1) -; MIPS32R6O0-NEXT: beqzc $9, $BB9_1 +; MIPS32R6O0-NEXT: ll $8, 0($3) +; MIPS32R6O0-NEXT: subu $9, $8, $4 +; MIPS32R6O0-NEXT: and $9, $9, $5 +; MIPS32R6O0-NEXT: and $10, $8, $6 +; MIPS32R6O0-NEXT: or $10, $10, $9 +; MIPS32R6O0-NEXT: sc $10, 0($3) +; MIPS32R6O0-NEXT: beqzc $10, $BB9_1 ; MIPS32R6O0-NEXT: # %bb.2: # %entry -; MIPS32R6O0-NEXT: and $6, $7, $3 -; MIPS32R6O0-NEXT: srlv $6, $6, $2 -; MIPS32R6O0-NEXT: seb $6, $6 +; MIPS32R6O0-NEXT: and $7, $8, $5 +; MIPS32R6O0-NEXT: srlv $7, $7, $1 +; MIPS32R6O0-NEXT: seb $7, $7 ; MIPS32R6O0-NEXT: # %bb.3: # %entry -; MIPS32R6O0-NEXT: sw $25, 4($sp) # 4-byte Folded Spill -; MIPS32R6O0-NEXT: sw $6, 0($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $7, 0($sp) # 4-byte Folded Spill ; MIPS32R6O0-NEXT: # %bb.4: # %entry ; MIPS32R6O0-NEXT: lw $1, 0($sp) # 4-byte Folded Reload ; MIPS32R6O0-NEXT: seb $2, $1 @@ -3121,30 +3115,30 @@ ; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadSub8))) ; MIPS64R6O0-NEXT: move $2, $4 ; MIPS64R6O0-NEXT: ld $1, %got_disp(y)($1) -; MIPS64R6O0-NEXT: daddiu $4, $zero, -4 -; MIPS64R6O0-NEXT: and $4, $1, $4 -; MIPS64R6O0-NEXT: andi $3, $1, 3 -; MIPS64R6O0-NEXT: xori $3, $3, 3 -; MIPS64R6O0-NEXT: sll $3, $3, 3 -; MIPS64R6O0-NEXT: ori $5, $zero, 255 -; MIPS64R6O0-NEXT: sllv $5, $5, $3 -; MIPS64R6O0-NEXT: nor $6, $zero, $5 -; MIPS64R6O0-NEXT: sllv $2, $2, $3 +; MIPS64R6O0-NEXT: daddiu $3, $zero, -4 +; MIPS64R6O0-NEXT: and $3, $1, $3 +; MIPS64R6O0-NEXT: andi $5, $1, 3 +; MIPS64R6O0-NEXT: xori $5, $5, 3 +; MIPS64R6O0-NEXT: sll $5, $5, 3 +; MIPS64R6O0-NEXT: ori $6, $zero, 255 +; MIPS64R6O0-NEXT: sllv $6, $6, $5 +; MIPS64R6O0-NEXT: nor $7, $zero, $6 +; MIPS64R6O0-NEXT: sllv $2, $2, $5 ; MIPS64R6O0-NEXT: .LBB9_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: ll $8, 0($4) -; MIPS64R6O0-NEXT: subu $9, $8, $2 -; MIPS64R6O0-NEXT: and $9, $9, $5 -; MIPS64R6O0-NEXT: and $10, $8, $6 -; MIPS64R6O0-NEXT: or $10, $10, $9 -; MIPS64R6O0-NEXT: sc $10, 0($4) -; MIPS64R6O0-NEXT: beqzc $10, .LBB9_1 +; MIPS64R6O0-NEXT: ll $9, 0($3) +; MIPS64R6O0-NEXT: subu $10, $9, $2 +; MIPS64R6O0-NEXT: and $10, $10, $6 +; MIPS64R6O0-NEXT: and $11, $9, $7 +; MIPS64R6O0-NEXT: or $11, $11, $10 +; MIPS64R6O0-NEXT: sc $11, 0($3) +; MIPS64R6O0-NEXT: beqzc $11, .LBB9_1 ; MIPS64R6O0-NEXT: # %bb.2: # %entry -; MIPS64R6O0-NEXT: and $7, $8, $5 -; MIPS64R6O0-NEXT: srlv $7, $7, $3 -; MIPS64R6O0-NEXT: seb $7, $7 +; MIPS64R6O0-NEXT: and $8, $9, $6 +; MIPS64R6O0-NEXT: srlv $8, $8, $5 +; MIPS64R6O0-NEXT: seb $8, $8 ; MIPS64R6O0-NEXT: # %bb.3: # %entry -; MIPS64R6O0-NEXT: sw $7, 12($sp) # 4-byte Folded Spill +; MIPS64R6O0-NEXT: sw $8, 12($sp) # 4-byte Folded Spill ; MIPS64R6O0-NEXT: # %bb.4: # %entry ; MIPS64R6O0-NEXT: lw $1, 12($sp) # 4-byte Folded Reload ; MIPS64R6O0-NEXT: seb $2, $1 @@ -3365,34 +3359,34 @@ ; MIPS32O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32O0-NEXT: addiu $sp, $sp, -8 -; MIPS32O0-NEXT: addu $2, $2, $25 -; MIPS32O0-NEXT: lw $2, %got(y)($2) -; 
MIPS32O0-NEXT: addiu $25, $zero, -4 -; MIPS32O0-NEXT: and $25, $2, $25 -; MIPS32O0-NEXT: andi $2, $2, 3 -; MIPS32O0-NEXT: sll $2, $2, 3 -; MIPS32O0-NEXT: ori $1, $zero, 255 -; MIPS32O0-NEXT: sllv $1, $1, $2 -; MIPS32O0-NEXT: nor $3, $zero, $1 -; MIPS32O0-NEXT: sllv $4, $4, $2 +; MIPS32O0-NEXT: addu $1, $2, $25 +; MIPS32O0-NEXT: lw $1, %got(y)($1) +; MIPS32O0-NEXT: addiu $2, $zero, -4 +; MIPS32O0-NEXT: and $2, $1, $2 +; MIPS32O0-NEXT: andi $1, $1, 3 +; MIPS32O0-NEXT: sll $1, $1, 3 +; MIPS32O0-NEXT: ori $3, $zero, 255 +; MIPS32O0-NEXT: sllv $3, $3, $1 +; MIPS32O0-NEXT: nor $5, $zero, $3 +; MIPS32O0-NEXT: sllv $4, $4, $1 ; MIPS32O0-NEXT: $BB10_1: # %entry ; MIPS32O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32O0-NEXT: ll $6, 0($25) -; MIPS32O0-NEXT: and $7, $6, $4 -; MIPS32O0-NEXT: nor $7, $zero, $7 -; MIPS32O0-NEXT: and $7, $7, $1 -; MIPS32O0-NEXT: and $8, $6, $3 -; MIPS32O0-NEXT: or $8, $8, $7 -; MIPS32O0-NEXT: sc $8, 0($25) -; MIPS32O0-NEXT: beqz $8, $BB10_1 +; MIPS32O0-NEXT: ll $7, 0($2) +; MIPS32O0-NEXT: and $8, $7, $4 +; MIPS32O0-NEXT: nor $8, $zero, $8 +; MIPS32O0-NEXT: and $8, $8, $3 +; MIPS32O0-NEXT: and $9, $7, $5 +; MIPS32O0-NEXT: or $9, $9, $8 +; MIPS32O0-NEXT: sc $9, 0($2) +; MIPS32O0-NEXT: beqz $9, $BB10_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # %entry -; MIPS32O0-NEXT: and $5, $6, $1 -; MIPS32O0-NEXT: srlv $5, $5, $2 -; MIPS32O0-NEXT: sll $5, $5, 24 -; MIPS32O0-NEXT: sra $5, $5, 24 +; MIPS32O0-NEXT: and $6, $7, $3 +; MIPS32O0-NEXT: srlv $6, $6, $1 +; MIPS32O0-NEXT: sll $6, $6, 24 +; MIPS32O0-NEXT: sra $6, $6, 24 ; MIPS32O0-NEXT: # %bb.3: # %entry -; MIPS32O0-NEXT: sw $5, 4($sp) # 4-byte Folded Spill +; MIPS32O0-NEXT: sw $6, 4($sp) # 4-byte Folded Spill ; MIPS32O0-NEXT: # %bb.4: # %entry ; MIPS32O0-NEXT: lw $1, 4($sp) # 4-byte Folded Reload ; MIPS32O0-NEXT: sll $2, $1, 24 @@ -3471,34 +3465,34 @@ ; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32R6O0-NEXT: addiu $sp, $sp, -8 -; MIPS32R6O0-NEXT: addu $2, $2, $25 -; MIPS32R6O0-NEXT: move $25, $4 -; MIPS32R6O0-NEXT: lw $2, %got(y)($2) -; MIPS32R6O0-NEXT: addiu $1, $zero, -4 -; MIPS32R6O0-NEXT: and $1, $2, $1 -; MIPS32R6O0-NEXT: andi $2, $2, 3 -; MIPS32R6O0-NEXT: sll $2, $2, 3 -; MIPS32R6O0-NEXT: ori $3, $zero, 255 -; MIPS32R6O0-NEXT: sllv $3, $3, $2 -; MIPS32R6O0-NEXT: nor $5, $zero, $3 -; MIPS32R6O0-NEXT: sllv $4, $4, $2 +; MIPS32R6O0-NEXT: addu $1, $2, $25 +; MIPS32R6O0-NEXT: move $2, $4 +; MIPS32R6O0-NEXT: lw $1, %got(y)($1) +; MIPS32R6O0-NEXT: addiu $3, $zero, -4 +; MIPS32R6O0-NEXT: and $3, $1, $3 +; MIPS32R6O0-NEXT: andi $1, $1, 3 +; MIPS32R6O0-NEXT: sll $1, $1, 3 +; MIPS32R6O0-NEXT: ori $5, $zero, 255 +; MIPS32R6O0-NEXT: sllv $5, $5, $1 +; MIPS32R6O0-NEXT: nor $6, $zero, $5 +; MIPS32R6O0-NEXT: sllv $4, $4, $1 ; MIPS32R6O0-NEXT: $BB10_1: # %entry ; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32R6O0-NEXT: ll $7, 0($1) -; MIPS32R6O0-NEXT: and $8, $7, $4 -; MIPS32R6O0-NEXT: nor $8, $zero, $8 -; MIPS32R6O0-NEXT: and $8, $8, $3 -; MIPS32R6O0-NEXT: and $9, $7, $5 -; MIPS32R6O0-NEXT: or $9, $9, $8 -; MIPS32R6O0-NEXT: sc $9, 0($1) -; MIPS32R6O0-NEXT: beqzc $9, $BB10_1 +; MIPS32R6O0-NEXT: ll $8, 0($3) +; MIPS32R6O0-NEXT: and $9, $8, $4 +; MIPS32R6O0-NEXT: nor $9, $zero, $9 +; MIPS32R6O0-NEXT: and $9, $9, $5 +; MIPS32R6O0-NEXT: and $10, $8, $6 +; MIPS32R6O0-NEXT: or $10, $10, $9 +; MIPS32R6O0-NEXT: sc $10, 0($3) +; MIPS32R6O0-NEXT: beqzc $10, $BB10_1 ; MIPS32R6O0-NEXT: # %bb.2: # %entry -; MIPS32R6O0-NEXT: and $6, $7, $3 -; MIPS32R6O0-NEXT: srlv $6, $6, $2 -; 
MIPS32R6O0-NEXT: seb $6, $6 +; MIPS32R6O0-NEXT: and $7, $8, $5 +; MIPS32R6O0-NEXT: srlv $7, $7, $1 +; MIPS32R6O0-NEXT: seb $7, $7 ; MIPS32R6O0-NEXT: # %bb.3: # %entry -; MIPS32R6O0-NEXT: sw $25, 4($sp) # 4-byte Folded Spill -; MIPS32R6O0-NEXT: sw $6, 0($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $7, 0($sp) # 4-byte Folded Spill ; MIPS32R6O0-NEXT: # %bb.4: # %entry ; MIPS32R6O0-NEXT: lw $1, 0($sp) # 4-byte Folded Reload ; MIPS32R6O0-NEXT: seb $2, $1 @@ -3648,31 +3642,31 @@ ; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadNand8))) ; MIPS64R6O0-NEXT: move $2, $4 ; MIPS64R6O0-NEXT: ld $1, %got_disp(y)($1) -; MIPS64R6O0-NEXT: daddiu $4, $zero, -4 -; MIPS64R6O0-NEXT: and $4, $1, $4 -; MIPS64R6O0-NEXT: andi $3, $1, 3 -; MIPS64R6O0-NEXT: xori $3, $3, 3 -; MIPS64R6O0-NEXT: sll $3, $3, 3 -; MIPS64R6O0-NEXT: ori $5, $zero, 255 -; MIPS64R6O0-NEXT: sllv $5, $5, $3 -; MIPS64R6O0-NEXT: nor $6, $zero, $5 -; MIPS64R6O0-NEXT: sllv $2, $2, $3 +; MIPS64R6O0-NEXT: daddiu $3, $zero, -4 +; MIPS64R6O0-NEXT: and $3, $1, $3 +; MIPS64R6O0-NEXT: andi $5, $1, 3 +; MIPS64R6O0-NEXT: xori $5, $5, 3 +; MIPS64R6O0-NEXT: sll $5, $5, 3 +; MIPS64R6O0-NEXT: ori $6, $zero, 255 +; MIPS64R6O0-NEXT: sllv $6, $6, $5 +; MIPS64R6O0-NEXT: nor $7, $zero, $6 +; MIPS64R6O0-NEXT: sllv $2, $2, $5 ; MIPS64R6O0-NEXT: .LBB10_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: ll $8, 0($4) -; MIPS64R6O0-NEXT: and $9, $8, $2 -; MIPS64R6O0-NEXT: nor $9, $zero, $9 -; MIPS64R6O0-NEXT: and $9, $9, $5 -; MIPS64R6O0-NEXT: and $10, $8, $6 -; MIPS64R6O0-NEXT: or $10, $10, $9 -; MIPS64R6O0-NEXT: sc $10, 0($4) -; MIPS64R6O0-NEXT: beqzc $10, .LBB10_1 +; MIPS64R6O0-NEXT: ll $9, 0($3) +; MIPS64R6O0-NEXT: and $10, $9, $2 +; MIPS64R6O0-NEXT: nor $10, $zero, $10 +; MIPS64R6O0-NEXT: and $10, $10, $6 +; MIPS64R6O0-NEXT: and $11, $9, $7 +; MIPS64R6O0-NEXT: or $11, $11, $10 +; MIPS64R6O0-NEXT: sc $11, 0($3) +; MIPS64R6O0-NEXT: beqzc $11, .LBB10_1 ; MIPS64R6O0-NEXT: # %bb.2: # %entry -; MIPS64R6O0-NEXT: and $7, $8, $5 -; MIPS64R6O0-NEXT: srlv $7, $7, $3 -; MIPS64R6O0-NEXT: seb $7, $7 +; MIPS64R6O0-NEXT: and $8, $9, $6 +; MIPS64R6O0-NEXT: srlv $8, $8, $5 +; MIPS64R6O0-NEXT: seb $8, $8 ; MIPS64R6O0-NEXT: # %bb.3: # %entry -; MIPS64R6O0-NEXT: sw $7, 12($sp) # 4-byte Folded Spill +; MIPS64R6O0-NEXT: sw $8, 12($sp) # 4-byte Folded Spill ; MIPS64R6O0-NEXT: # %bb.4: # %entry ; MIPS64R6O0-NEXT: lw $1, 12($sp) # 4-byte Folded Reload ; MIPS64R6O0-NEXT: seb $2, $1 @@ -3896,32 +3890,32 @@ ; MIPS32O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32O0-NEXT: addiu $sp, $sp, -8 -; MIPS32O0-NEXT: addu $2, $2, $25 -; MIPS32O0-NEXT: lw $2, %got(y)($2) -; MIPS32O0-NEXT: addiu $25, $zero, -4 -; MIPS32O0-NEXT: and $25, $2, $25 -; MIPS32O0-NEXT: andi $2, $2, 3 -; MIPS32O0-NEXT: sll $2, $2, 3 -; MIPS32O0-NEXT: ori $1, $zero, 255 -; MIPS32O0-NEXT: sllv $1, $1, $2 -; MIPS32O0-NEXT: nor $3, $zero, $1 -; MIPS32O0-NEXT: sllv $4, $4, $2 +; MIPS32O0-NEXT: addu $1, $2, $25 +; MIPS32O0-NEXT: lw $1, %got(y)($1) +; MIPS32O0-NEXT: addiu $2, $zero, -4 +; MIPS32O0-NEXT: and $2, $1, $2 +; MIPS32O0-NEXT: andi $1, $1, 3 +; MIPS32O0-NEXT: sll $1, $1, 3 +; MIPS32O0-NEXT: ori $3, $zero, 255 +; MIPS32O0-NEXT: sllv $3, $3, $1 +; MIPS32O0-NEXT: nor $5, $zero, $3 +; MIPS32O0-NEXT: sllv $4, $4, $1 ; MIPS32O0-NEXT: $BB11_1: # %entry ; MIPS32O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32O0-NEXT: ll $6, 0($25) -; MIPS32O0-NEXT: and $7, $4, $1 -; MIPS32O0-NEXT: and $8, 
$6, $3 -; MIPS32O0-NEXT: or $8, $8, $7 -; MIPS32O0-NEXT: sc $8, 0($25) -; MIPS32O0-NEXT: beqz $8, $BB11_1 +; MIPS32O0-NEXT: ll $7, 0($2) +; MIPS32O0-NEXT: and $8, $4, $3 +; MIPS32O0-NEXT: and $9, $7, $5 +; MIPS32O0-NEXT: or $9, $9, $8 +; MIPS32O0-NEXT: sc $9, 0($2) +; MIPS32O0-NEXT: beqz $9, $BB11_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # %entry -; MIPS32O0-NEXT: and $5, $6, $1 -; MIPS32O0-NEXT: srlv $5, $5, $2 -; MIPS32O0-NEXT: sll $5, $5, 24 -; MIPS32O0-NEXT: sra $5, $5, 24 +; MIPS32O0-NEXT: and $6, $7, $3 +; MIPS32O0-NEXT: srlv $6, $6, $1 +; MIPS32O0-NEXT: sll $6, $6, 24 +; MIPS32O0-NEXT: sra $6, $6, 24 ; MIPS32O0-NEXT: # %bb.3: # %entry -; MIPS32O0-NEXT: sw $5, 4($sp) # 4-byte Folded Spill +; MIPS32O0-NEXT: sw $6, 4($sp) # 4-byte Folded Spill ; MIPS32O0-NEXT: # %bb.4: # %entry ; MIPS32O0-NEXT: lw $1, 4($sp) # 4-byte Folded Reload ; MIPS32O0-NEXT: sll $2, $1, 24 @@ -3996,32 +3990,32 @@ ; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32R6O0-NEXT: addiu $sp, $sp, -8 -; MIPS32R6O0-NEXT: addu $2, $2, $25 -; MIPS32R6O0-NEXT: move $25, $4 -; MIPS32R6O0-NEXT: lw $2, %got(y)($2) -; MIPS32R6O0-NEXT: addiu $1, $zero, -4 -; MIPS32R6O0-NEXT: and $1, $2, $1 -; MIPS32R6O0-NEXT: andi $2, $2, 3 -; MIPS32R6O0-NEXT: sll $2, $2, 3 -; MIPS32R6O0-NEXT: ori $3, $zero, 255 -; MIPS32R6O0-NEXT: sllv $3, $3, $2 -; MIPS32R6O0-NEXT: nor $5, $zero, $3 -; MIPS32R6O0-NEXT: sllv $4, $4, $2 +; MIPS32R6O0-NEXT: addu $1, $2, $25 +; MIPS32R6O0-NEXT: move $2, $4 +; MIPS32R6O0-NEXT: lw $1, %got(y)($1) +; MIPS32R6O0-NEXT: addiu $3, $zero, -4 +; MIPS32R6O0-NEXT: and $3, $1, $3 +; MIPS32R6O0-NEXT: andi $1, $1, 3 +; MIPS32R6O0-NEXT: sll $1, $1, 3 +; MIPS32R6O0-NEXT: ori $5, $zero, 255 +; MIPS32R6O0-NEXT: sllv $5, $5, $1 +; MIPS32R6O0-NEXT: nor $6, $zero, $5 +; MIPS32R6O0-NEXT: sllv $4, $4, $1 ; MIPS32R6O0-NEXT: $BB11_1: # %entry ; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32R6O0-NEXT: ll $7, 0($1) -; MIPS32R6O0-NEXT: and $8, $4, $3 -; MIPS32R6O0-NEXT: and $9, $7, $5 -; MIPS32R6O0-NEXT: or $9, $9, $8 -; MIPS32R6O0-NEXT: sc $9, 0($1) -; MIPS32R6O0-NEXT: beqzc $9, $BB11_1 +; MIPS32R6O0-NEXT: ll $8, 0($3) +; MIPS32R6O0-NEXT: and $9, $4, $5 +; MIPS32R6O0-NEXT: and $10, $8, $6 +; MIPS32R6O0-NEXT: or $10, $10, $9 +; MIPS32R6O0-NEXT: sc $10, 0($3) +; MIPS32R6O0-NEXT: beqzc $10, $BB11_1 ; MIPS32R6O0-NEXT: # %bb.2: # %entry -; MIPS32R6O0-NEXT: and $6, $7, $3 -; MIPS32R6O0-NEXT: srlv $6, $6, $2 -; MIPS32R6O0-NEXT: seb $6, $6 +; MIPS32R6O0-NEXT: and $7, $8, $5 +; MIPS32R6O0-NEXT: srlv $7, $7, $1 +; MIPS32R6O0-NEXT: seb $7, $7 ; MIPS32R6O0-NEXT: # %bb.3: # %entry -; MIPS32R6O0-NEXT: sw $25, 4($sp) # 4-byte Folded Spill -; MIPS32R6O0-NEXT: sw $6, 0($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $7, 0($sp) # 4-byte Folded Spill ; MIPS32R6O0-NEXT: # %bb.4: # %entry ; MIPS32R6O0-NEXT: lw $1, 0($sp) # 4-byte Folded Reload ; MIPS32R6O0-NEXT: seb $2, $1 @@ -4163,29 +4157,29 @@ ; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicSwap8))) ; MIPS64R6O0-NEXT: move $2, $4 ; MIPS64R6O0-NEXT: ld $1, %got_disp(y)($1) -; MIPS64R6O0-NEXT: daddiu $4, $zero, -4 -; MIPS64R6O0-NEXT: and $4, $1, $4 -; MIPS64R6O0-NEXT: andi $3, $1, 3 -; MIPS64R6O0-NEXT: xori $3, $3, 3 -; MIPS64R6O0-NEXT: sll $3, $3, 3 -; MIPS64R6O0-NEXT: ori $5, $zero, 255 -; MIPS64R6O0-NEXT: sllv $5, $5, $3 -; MIPS64R6O0-NEXT: nor $6, $zero, $5 -; MIPS64R6O0-NEXT: sllv $2, $2, $3 +; MIPS64R6O0-NEXT: daddiu $3, $zero, -4 +; MIPS64R6O0-NEXT: and $3, $1, $3 +; 
MIPS64R6O0-NEXT: andi $5, $1, 3 +; MIPS64R6O0-NEXT: xori $5, $5, 3 +; MIPS64R6O0-NEXT: sll $5, $5, 3 +; MIPS64R6O0-NEXT: ori $6, $zero, 255 +; MIPS64R6O0-NEXT: sllv $6, $6, $5 +; MIPS64R6O0-NEXT: nor $7, $zero, $6 +; MIPS64R6O0-NEXT: sllv $2, $2, $5 ; MIPS64R6O0-NEXT: .LBB11_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: ll $8, 0($4) -; MIPS64R6O0-NEXT: and $9, $2, $5 -; MIPS64R6O0-NEXT: and $10, $8, $6 -; MIPS64R6O0-NEXT: or $10, $10, $9 -; MIPS64R6O0-NEXT: sc $10, 0($4) -; MIPS64R6O0-NEXT: beqzc $10, .LBB11_1 +; MIPS64R6O0-NEXT: ll $9, 0($3) +; MIPS64R6O0-NEXT: and $10, $2, $6 +; MIPS64R6O0-NEXT: and $11, $9, $7 +; MIPS64R6O0-NEXT: or $11, $11, $10 +; MIPS64R6O0-NEXT: sc $11, 0($3) +; MIPS64R6O0-NEXT: beqzc $11, .LBB11_1 ; MIPS64R6O0-NEXT: # %bb.2: # %entry -; MIPS64R6O0-NEXT: and $7, $8, $5 -; MIPS64R6O0-NEXT: srlv $7, $7, $3 -; MIPS64R6O0-NEXT: seb $7, $7 +; MIPS64R6O0-NEXT: and $8, $9, $6 +; MIPS64R6O0-NEXT: srlv $8, $8, $5 +; MIPS64R6O0-NEXT: seb $8, $8 ; MIPS64R6O0-NEXT: # %bb.3: # %entry -; MIPS64R6O0-NEXT: sw $7, 12($sp) # 4-byte Folded Spill +; MIPS64R6O0-NEXT: sw $8, 12($sp) # 4-byte Folded Spill ; MIPS64R6O0-NEXT: # %bb.4: # %entry ; MIPS64R6O0-NEXT: lw $1, 12($sp) # 4-byte Folded Reload ; MIPS64R6O0-NEXT: seb $2, $1 @@ -4404,38 +4398,38 @@ ; MIPS32O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32O0-NEXT: addiu $sp, $sp, -8 -; MIPS32O0-NEXT: addu $2, $2, $25 -; MIPS32O0-NEXT: lw $2, %got(y)($2) -; MIPS32O0-NEXT: addiu $25, $zero, -4 -; MIPS32O0-NEXT: and $25, $2, $25 -; MIPS32O0-NEXT: andi $2, $2, 3 -; MIPS32O0-NEXT: sll $2, $2, 3 -; MIPS32O0-NEXT: ori $1, $zero, 255 -; MIPS32O0-NEXT: sllv $1, $1, $2 -; MIPS32O0-NEXT: nor $3, $zero, $1 +; MIPS32O0-NEXT: addu $1, $2, $25 +; MIPS32O0-NEXT: lw $1, %got(y)($1) +; MIPS32O0-NEXT: addiu $2, $zero, -4 +; MIPS32O0-NEXT: and $2, $1, $2 +; MIPS32O0-NEXT: andi $1, $1, 3 +; MIPS32O0-NEXT: sll $1, $1, 3 +; MIPS32O0-NEXT: ori $3, $zero, 255 +; MIPS32O0-NEXT: sllv $3, $3, $1 +; MIPS32O0-NEXT: nor $6, $zero, $3 ; MIPS32O0-NEXT: andi $4, $4, 255 -; MIPS32O0-NEXT: sllv $4, $4, $2 +; MIPS32O0-NEXT: sllv $4, $4, $1 ; MIPS32O0-NEXT: andi $5, $5, 255 -; MIPS32O0-NEXT: sllv $5, $5, $2 +; MIPS32O0-NEXT: sllv $5, $5, $1 ; MIPS32O0-NEXT: $BB12_1: # %entry ; MIPS32O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32O0-NEXT: ll $7, 0($25) -; MIPS32O0-NEXT: and $8, $7, $1 -; MIPS32O0-NEXT: bne $8, $4, $BB12_3 +; MIPS32O0-NEXT: ll $8, 0($2) +; MIPS32O0-NEXT: and $9, $8, $3 +; MIPS32O0-NEXT: bne $9, $4, $BB12_3 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # %entry ; MIPS32O0-NEXT: # in Loop: Header=BB12_1 Depth=1 -; MIPS32O0-NEXT: and $7, $7, $3 -; MIPS32O0-NEXT: or $7, $7, $5 -; MIPS32O0-NEXT: sc $7, 0($25) -; MIPS32O0-NEXT: beqz $7, $BB12_1 +; MIPS32O0-NEXT: and $8, $8, $6 +; MIPS32O0-NEXT: or $8, $8, $5 +; MIPS32O0-NEXT: sc $8, 0($2) +; MIPS32O0-NEXT: beqz $8, $BB12_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: $BB12_3: # %entry -; MIPS32O0-NEXT: srlv $6, $8, $2 -; MIPS32O0-NEXT: sll $6, $6, 24 -; MIPS32O0-NEXT: sra $6, $6, 24 +; MIPS32O0-NEXT: srlv $7, $9, $1 +; MIPS32O0-NEXT: sll $7, $7, 24 +; MIPS32O0-NEXT: sra $7, $7, 24 ; MIPS32O0-NEXT: # %bb.4: # %entry -; MIPS32O0-NEXT: sw $6, 4($sp) # 4-byte Folded Spill +; MIPS32O0-NEXT: sw $7, 4($sp) # 4-byte Folded Spill ; MIPS32O0-NEXT: # %bb.5: # %entry ; MIPS32O0-NEXT: lw $1, 4($sp) # 4-byte Folded Reload ; MIPS32O0-NEXT: sll $2, $1, 24 @@ -4520,39 +4514,39 @@ ; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32R6O0-NEXT: addiu $2, $2, 
%lo(_gp_disp) ; MIPS32R6O0-NEXT: addiu $sp, $sp, -16 -; MIPS32R6O0-NEXT: addu $2, $2, $25 -; MIPS32R6O0-NEXT: move $25, $5 -; MIPS32R6O0-NEXT: move $1, $4 -; MIPS32R6O0-NEXT: lw $2, %got(y)($2) -; MIPS32R6O0-NEXT: addiu $3, $zero, -4 -; MIPS32R6O0-NEXT: and $3, $2, $3 -; MIPS32R6O0-NEXT: andi $2, $2, 3 -; MIPS32R6O0-NEXT: sll $2, $2, 3 -; MIPS32R6O0-NEXT: ori $6, $zero, 255 -; MIPS32R6O0-NEXT: sllv $6, $6, $2 -; MIPS32R6O0-NEXT: nor $7, $zero, $6 +; MIPS32R6O0-NEXT: addu $1, $2, $25 +; MIPS32R6O0-NEXT: move $2, $5 +; MIPS32R6O0-NEXT: move $3, $4 +; MIPS32R6O0-NEXT: lw $1, %got(y)($1) +; MIPS32R6O0-NEXT: addiu $6, $zero, -4 +; MIPS32R6O0-NEXT: and $6, $1, $6 +; MIPS32R6O0-NEXT: andi $1, $1, 3 +; MIPS32R6O0-NEXT: sll $1, $1, 3 +; MIPS32R6O0-NEXT: ori $7, $zero, 255 +; MIPS32R6O0-NEXT: sllv $7, $7, $1 +; MIPS32R6O0-NEXT: nor $8, $zero, $7 ; MIPS32R6O0-NEXT: andi $4, $4, 255 -; MIPS32R6O0-NEXT: sllv $4, $4, $2 +; MIPS32R6O0-NEXT: sllv $4, $4, $1 ; MIPS32R6O0-NEXT: andi $5, $5, 255 -; MIPS32R6O0-NEXT: sllv $5, $5, $2 +; MIPS32R6O0-NEXT: sllv $5, $5, $1 ; MIPS32R6O0-NEXT: $BB12_1: # %entry ; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32R6O0-NEXT: ll $9, 0($3) -; MIPS32R6O0-NEXT: and $10, $9, $6 -; MIPS32R6O0-NEXT: bnec $10, $4, $BB12_3 +; MIPS32R6O0-NEXT: ll $10, 0($6) +; MIPS32R6O0-NEXT: and $11, $10, $7 +; MIPS32R6O0-NEXT: bnec $11, $4, $BB12_3 ; MIPS32R6O0-NEXT: # %bb.2: # %entry ; MIPS32R6O0-NEXT: # in Loop: Header=BB12_1 Depth=1 -; MIPS32R6O0-NEXT: and $9, $9, $7 -; MIPS32R6O0-NEXT: or $9, $9, $5 -; MIPS32R6O0-NEXT: sc $9, 0($3) -; MIPS32R6O0-NEXT: beqzc $9, $BB12_1 +; MIPS32R6O0-NEXT: and $10, $10, $8 +; MIPS32R6O0-NEXT: or $10, $10, $5 +; MIPS32R6O0-NEXT: sc $10, 0($6) +; MIPS32R6O0-NEXT: beqzc $10, $BB12_1 ; MIPS32R6O0-NEXT: $BB12_3: # %entry -; MIPS32R6O0-NEXT: srlv $8, $10, $2 -; MIPS32R6O0-NEXT: seb $8, $8 +; MIPS32R6O0-NEXT: srlv $9, $11, $1 +; MIPS32R6O0-NEXT: seb $9, $9 ; MIPS32R6O0-NEXT: # %bb.4: # %entry -; MIPS32R6O0-NEXT: sw $25, 12($sp) # 4-byte Folded Spill -; MIPS32R6O0-NEXT: sw $1, 8($sp) # 4-byte Folded Spill -; MIPS32R6O0-NEXT: sw $8, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $2, 12($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $3, 8($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $9, 4($sp) # 4-byte Folded Spill ; MIPS32R6O0-NEXT: # %bb.5: # %entry ; MIPS32R6O0-NEXT: lw $2, 4($sp) # 4-byte Folded Reload ; MIPS32R6O0-NEXT: addiu $sp, $sp, 16 @@ -4989,30 +4983,30 @@ ; MIPS32O0-NEXT: addiu $sp, $sp, -8 ; MIPS32O0-NEXT: addiu $1, $zero, -4 ; MIPS32O0-NEXT: and $1, $4, $1 -; MIPS32O0-NEXT: andi $4, $4, 3 -; MIPS32O0-NEXT: sll $4, $4, 3 -; MIPS32O0-NEXT: ori $2, $zero, 255 -; MIPS32O0-NEXT: sllv $2, $2, $4 -; MIPS32O0-NEXT: nor $3, $zero, $2 +; MIPS32O0-NEXT: andi $2, $4, 3 +; MIPS32O0-NEXT: sll $2, $2, 3 +; MIPS32O0-NEXT: ori $3, $zero, 255 +; MIPS32O0-NEXT: sllv $3, $3, $2 +; MIPS32O0-NEXT: nor $4, $zero, $3 ; MIPS32O0-NEXT: andi $7, $5, 255 -; MIPS32O0-NEXT: sllv $7, $7, $4 +; MIPS32O0-NEXT: sllv $7, $7, $2 ; MIPS32O0-NEXT: andi $6, $6, 255 -; MIPS32O0-NEXT: sllv $6, $6, $4 +; MIPS32O0-NEXT: sllv $6, $6, $2 ; MIPS32O0-NEXT: $BB13_1: # %entry ; MIPS32O0-NEXT: # =>This Inner Loop Header: Depth=1 ; MIPS32O0-NEXT: ll $9, 0($1) -; MIPS32O0-NEXT: and $10, $9, $2 +; MIPS32O0-NEXT: and $10, $9, $3 ; MIPS32O0-NEXT: bne $10, $7, $BB13_3 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # %entry ; MIPS32O0-NEXT: # in Loop: Header=BB13_1 Depth=1 -; MIPS32O0-NEXT: and $9, $9, $3 +; MIPS32O0-NEXT: and $9, $9, $4 ; MIPS32O0-NEXT: or $9, $9, $6 ; 
MIPS32O0-NEXT: sc $9, 0($1) ; MIPS32O0-NEXT: beqz $9, $BB13_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: $BB13_3: # %entry -; MIPS32O0-NEXT: srlv $8, $10, $4 +; MIPS32O0-NEXT: srlv $8, $10, $2 ; MIPS32O0-NEXT: sll $8, $8, 24 ; MIPS32O0-NEXT: sra $8, $8, 24 ; MIPS32O0-NEXT: # %bb.4: # %entry @@ -5285,37 +5279,37 @@ ; MIPS64R6O0-NEXT: daddiu $sp, $sp, -32 ; MIPS64R6O0-NEXT: move $1, $6 ; MIPS64R6O0-NEXT: move $2, $5 -; MIPS64R6O0-NEXT: move $5, $4 -; MIPS64R6O0-NEXT: daddiu $6, $zero, -4 -; MIPS64R6O0-NEXT: and $6, $4, $6 -; MIPS64R6O0-NEXT: andi $3, $4, 3 -; MIPS64R6O0-NEXT: xori $3, $3, 3 -; MIPS64R6O0-NEXT: sll $3, $3, 3 -; MIPS64R6O0-NEXT: ori $7, $zero, 255 -; MIPS64R6O0-NEXT: sllv $7, $7, $3 -; MIPS64R6O0-NEXT: nor $8, $zero, $7 -; MIPS64R6O0-NEXT: andi $9, $2, 255 -; MIPS64R6O0-NEXT: sllv $9, $9, $3 +; MIPS64R6O0-NEXT: move $3, $4 +; MIPS64R6O0-NEXT: daddiu $5, $zero, -4 +; MIPS64R6O0-NEXT: and $5, $4, $5 +; MIPS64R6O0-NEXT: andi $7, $4, 3 +; MIPS64R6O0-NEXT: xori $7, $7, 3 +; MIPS64R6O0-NEXT: sll $7, $7, 3 +; MIPS64R6O0-NEXT: ori $8, $zero, 255 +; MIPS64R6O0-NEXT: sllv $8, $8, $7 +; MIPS64R6O0-NEXT: nor $9, $zero, $8 +; MIPS64R6O0-NEXT: andi $10, $2, 255 +; MIPS64R6O0-NEXT: sllv $10, $10, $7 ; MIPS64R6O0-NEXT: andi $1, $1, 255 -; MIPS64R6O0-NEXT: sllv $1, $1, $3 +; MIPS64R6O0-NEXT: sllv $1, $1, $7 ; MIPS64R6O0-NEXT: .LBB13_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: ll $11, 0($6) -; MIPS64R6O0-NEXT: and $12, $11, $7 -; MIPS64R6O0-NEXT: bnec $12, $9, .LBB13_3 +; MIPS64R6O0-NEXT: ll $12, 0($5) +; MIPS64R6O0-NEXT: and $13, $12, $8 +; MIPS64R6O0-NEXT: bnec $13, $10, .LBB13_3 ; MIPS64R6O0-NEXT: # %bb.2: # %entry ; MIPS64R6O0-NEXT: # in Loop: Header=BB13_1 Depth=1 -; MIPS64R6O0-NEXT: and $11, $11, $8 -; MIPS64R6O0-NEXT: or $11, $11, $1 -; MIPS64R6O0-NEXT: sc $11, 0($6) -; MIPS64R6O0-NEXT: beqzc $11, .LBB13_1 +; MIPS64R6O0-NEXT: and $12, $12, $9 +; MIPS64R6O0-NEXT: or $12, $12, $1 +; MIPS64R6O0-NEXT: sc $12, 0($5) +; MIPS64R6O0-NEXT: beqzc $12, .LBB13_1 ; MIPS64R6O0-NEXT: .LBB13_3: # %entry -; MIPS64R6O0-NEXT: srlv $10, $12, $3 -; MIPS64R6O0-NEXT: seb $10, $10 +; MIPS64R6O0-NEXT: srlv $11, $13, $7 +; MIPS64R6O0-NEXT: seb $11, $11 ; MIPS64R6O0-NEXT: # %bb.4: # %entry ; MIPS64R6O0-NEXT: sw $2, 28($sp) # 4-byte Folded Spill -; MIPS64R6O0-NEXT: sd $5, 16($sp) # 8-byte Folded Spill -; MIPS64R6O0-NEXT: sw $10, 12($sp) # 4-byte Folded Spill +; MIPS64R6O0-NEXT: sd $3, 16($sp) # 8-byte Folded Spill +; MIPS64R6O0-NEXT: sw $11, 12($sp) # 4-byte Folded Spill ; MIPS64R6O0-NEXT: # %bb.5: # %entry ; MIPS64R6O0-NEXT: lw $1, 12($sp) # 4-byte Folded Reload ; MIPS64R6O0-NEXT: lw $2, 28($sp) # 4-byte Folded Reload @@ -5557,33 +5551,33 @@ ; MIPS32O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32O0-NEXT: addiu $sp, $sp, -8 -; MIPS32O0-NEXT: addu $2, $2, $25 -; MIPS32O0-NEXT: lw $2, %got(z)($2) -; MIPS32O0-NEXT: addiu $25, $zero, -4 -; MIPS32O0-NEXT: and $25, $2, $25 -; MIPS32O0-NEXT: andi $2, $2, 3 -; MIPS32O0-NEXT: sll $2, $2, 3 -; MIPS32O0-NEXT: ori $1, $zero, 65535 -; MIPS32O0-NEXT: sllv $1, $1, $2 -; MIPS32O0-NEXT: nor $3, $zero, $1 -; MIPS32O0-NEXT: sllv $4, $4, $2 +; MIPS32O0-NEXT: addu $1, $2, $25 +; MIPS32O0-NEXT: lw $1, %got(z)($1) +; MIPS32O0-NEXT: addiu $2, $zero, -4 +; MIPS32O0-NEXT: and $2, $1, $2 +; MIPS32O0-NEXT: andi $1, $1, 3 +; MIPS32O0-NEXT: sll $1, $1, 3 +; MIPS32O0-NEXT: ori $3, $zero, 65535 +; MIPS32O0-NEXT: sllv $3, $3, $1 +; MIPS32O0-NEXT: nor $5, $zero, $3 +; MIPS32O0-NEXT: sllv $4, $4, $1 ; MIPS32O0-NEXT: $BB14_1: 
# %entry ; MIPS32O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32O0-NEXT: ll $6, 0($25) -; MIPS32O0-NEXT: addu $7, $6, $4 -; MIPS32O0-NEXT: and $7, $7, $1 -; MIPS32O0-NEXT: and $8, $6, $3 -; MIPS32O0-NEXT: or $8, $8, $7 -; MIPS32O0-NEXT: sc $8, 0($25) -; MIPS32O0-NEXT: beqz $8, $BB14_1 +; MIPS32O0-NEXT: ll $7, 0($2) +; MIPS32O0-NEXT: addu $8, $7, $4 +; MIPS32O0-NEXT: and $8, $8, $3 +; MIPS32O0-NEXT: and $9, $7, $5 +; MIPS32O0-NEXT: or $9, $9, $8 +; MIPS32O0-NEXT: sc $9, 0($2) +; MIPS32O0-NEXT: beqz $9, $BB14_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # %entry -; MIPS32O0-NEXT: and $5, $6, $1 -; MIPS32O0-NEXT: srlv $5, $5, $2 -; MIPS32O0-NEXT: sll $5, $5, 16 -; MIPS32O0-NEXT: sra $5, $5, 16 +; MIPS32O0-NEXT: and $6, $7, $3 +; MIPS32O0-NEXT: srlv $6, $6, $1 +; MIPS32O0-NEXT: sll $6, $6, 16 +; MIPS32O0-NEXT: sra $6, $6, 16 ; MIPS32O0-NEXT: # %bb.3: # %entry -; MIPS32O0-NEXT: sw $5, 4($sp) # 4-byte Folded Spill +; MIPS32O0-NEXT: sw $6, 4($sp) # 4-byte Folded Spill ; MIPS32O0-NEXT: # %bb.4: # %entry ; MIPS32O0-NEXT: lw $1, 4($sp) # 4-byte Folded Reload ; MIPS32O0-NEXT: sll $2, $1, 16 @@ -5660,33 +5654,33 @@ ; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32R6O0-NEXT: addiu $sp, $sp, -8 -; MIPS32R6O0-NEXT: addu $2, $2, $25 -; MIPS32R6O0-NEXT: move $25, $4 -; MIPS32R6O0-NEXT: lw $2, %got(z)($2) -; MIPS32R6O0-NEXT: addiu $1, $zero, -4 -; MIPS32R6O0-NEXT: and $1, $2, $1 -; MIPS32R6O0-NEXT: andi $2, $2, 3 -; MIPS32R6O0-NEXT: sll $2, $2, 3 -; MIPS32R6O0-NEXT: ori $3, $zero, 65535 -; MIPS32R6O0-NEXT: sllv $3, $3, $2 -; MIPS32R6O0-NEXT: nor $5, $zero, $3 -; MIPS32R6O0-NEXT: sllv $4, $4, $2 +; MIPS32R6O0-NEXT: addu $1, $2, $25 +; MIPS32R6O0-NEXT: move $2, $4 +; MIPS32R6O0-NEXT: lw $1, %got(z)($1) +; MIPS32R6O0-NEXT: addiu $3, $zero, -4 +; MIPS32R6O0-NEXT: and $3, $1, $3 +; MIPS32R6O0-NEXT: andi $1, $1, 3 +; MIPS32R6O0-NEXT: sll $1, $1, 3 +; MIPS32R6O0-NEXT: ori $5, $zero, 65535 +; MIPS32R6O0-NEXT: sllv $5, $5, $1 +; MIPS32R6O0-NEXT: nor $6, $zero, $5 +; MIPS32R6O0-NEXT: sllv $4, $4, $1 ; MIPS32R6O0-NEXT: $BB14_1: # %entry ; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32R6O0-NEXT: ll $7, 0($1) -; MIPS32R6O0-NEXT: addu $8, $7, $4 -; MIPS32R6O0-NEXT: and $8, $8, $3 -; MIPS32R6O0-NEXT: and $9, $7, $5 -; MIPS32R6O0-NEXT: or $9, $9, $8 -; MIPS32R6O0-NEXT: sc $9, 0($1) -; MIPS32R6O0-NEXT: beqzc $9, $BB14_1 +; MIPS32R6O0-NEXT: ll $8, 0($3) +; MIPS32R6O0-NEXT: addu $9, $8, $4 +; MIPS32R6O0-NEXT: and $9, $9, $5 +; MIPS32R6O0-NEXT: and $10, $8, $6 +; MIPS32R6O0-NEXT: or $10, $10, $9 +; MIPS32R6O0-NEXT: sc $10, 0($3) +; MIPS32R6O0-NEXT: beqzc $10, $BB14_1 ; MIPS32R6O0-NEXT: # %bb.2: # %entry -; MIPS32R6O0-NEXT: and $6, $7, $3 -; MIPS32R6O0-NEXT: srlv $6, $6, $2 -; MIPS32R6O0-NEXT: seh $6, $6 +; MIPS32R6O0-NEXT: and $7, $8, $5 +; MIPS32R6O0-NEXT: srlv $7, $7, $1 +; MIPS32R6O0-NEXT: seh $7, $7 ; MIPS32R6O0-NEXT: # %bb.3: # %entry -; MIPS32R6O0-NEXT: sw $25, 4($sp) # 4-byte Folded Spill -; MIPS32R6O0-NEXT: sw $6, 0($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $7, 0($sp) # 4-byte Folded Spill ; MIPS32R6O0-NEXT: # %bb.4: # %entry ; MIPS32R6O0-NEXT: lw $1, 0($sp) # 4-byte Folded Reload ; MIPS32R6O0-NEXT: seh $2, $1 @@ -5832,30 +5826,30 @@ ; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAdd16))) ; MIPS64R6O0-NEXT: move $2, $4 ; MIPS64R6O0-NEXT: ld $1, %got_disp(z)($1) -; MIPS64R6O0-NEXT: daddiu $4, $zero, -4 -; MIPS64R6O0-NEXT: and $4, $1, $4 -; MIPS64R6O0-NEXT: andi $3, $1, 3 
-; MIPS64R6O0-NEXT: xori $3, $3, 2 -; MIPS64R6O0-NEXT: sll $3, $3, 3 -; MIPS64R6O0-NEXT: ori $5, $zero, 65535 -; MIPS64R6O0-NEXT: sllv $5, $5, $3 -; MIPS64R6O0-NEXT: nor $6, $zero, $5 -; MIPS64R6O0-NEXT: sllv $2, $2, $3 +; MIPS64R6O0-NEXT: daddiu $3, $zero, -4 +; MIPS64R6O0-NEXT: and $3, $1, $3 +; MIPS64R6O0-NEXT: andi $5, $1, 3 +; MIPS64R6O0-NEXT: xori $5, $5, 2 +; MIPS64R6O0-NEXT: sll $5, $5, 3 +; MIPS64R6O0-NEXT: ori $6, $zero, 65535 +; MIPS64R6O0-NEXT: sllv $6, $6, $5 +; MIPS64R6O0-NEXT: nor $7, $zero, $6 +; MIPS64R6O0-NEXT: sllv $2, $2, $5 ; MIPS64R6O0-NEXT: .LBB14_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: ll $8, 0($4) -; MIPS64R6O0-NEXT: addu $9, $8, $2 -; MIPS64R6O0-NEXT: and $9, $9, $5 -; MIPS64R6O0-NEXT: and $10, $8, $6 -; MIPS64R6O0-NEXT: or $10, $10, $9 -; MIPS64R6O0-NEXT: sc $10, 0($4) -; MIPS64R6O0-NEXT: beqzc $10, .LBB14_1 +; MIPS64R6O0-NEXT: ll $9, 0($3) +; MIPS64R6O0-NEXT: addu $10, $9, $2 +; MIPS64R6O0-NEXT: and $10, $10, $6 +; MIPS64R6O0-NEXT: and $11, $9, $7 +; MIPS64R6O0-NEXT: or $11, $11, $10 +; MIPS64R6O0-NEXT: sc $11, 0($3) +; MIPS64R6O0-NEXT: beqzc $11, .LBB14_1 ; MIPS64R6O0-NEXT: # %bb.2: # %entry -; MIPS64R6O0-NEXT: and $7, $8, $5 -; MIPS64R6O0-NEXT: srlv $7, $7, $3 -; MIPS64R6O0-NEXT: seh $7, $7 +; MIPS64R6O0-NEXT: and $8, $9, $6 +; MIPS64R6O0-NEXT: srlv $8, $8, $5 +; MIPS64R6O0-NEXT: seh $8, $8 ; MIPS64R6O0-NEXT: # %bb.3: # %entry -; MIPS64R6O0-NEXT: sw $7, 12($sp) # 4-byte Folded Spill +; MIPS64R6O0-NEXT: sw $8, 12($sp) # 4-byte Folded Spill ; MIPS64R6O0-NEXT: # %bb.4: # %entry ; MIPS64R6O0-NEXT: lw $1, 12($sp) # 4-byte Folded Reload ; MIPS64R6O0-NEXT: seh $2, $1 @@ -6084,36 +6078,36 @@ ; MIPS32O0: # %bb.0: ; MIPS32O0-NEXT: addiu $sp, $sp, -8 ; MIPS32O0-NEXT: .cfi_def_cfa_offset 8 -; MIPS32O0-NEXT: addu $5, $5, $6 +; MIPS32O0-NEXT: addu $1, $5, $6 ; MIPS32O0-NEXT: sync -; MIPS32O0-NEXT: addiu $6, $zero, -4 -; MIPS32O0-NEXT: and $6, $4, $6 -; MIPS32O0-NEXT: andi $4, $4, 3 -; MIPS32O0-NEXT: sll $4, $4, 3 -; MIPS32O0-NEXT: ori $1, $zero, 65535 -; MIPS32O0-NEXT: sllv $1, $1, $4 -; MIPS32O0-NEXT: nor $2, $zero, $1 -; MIPS32O0-NEXT: andi $3, $5, 65535 -; MIPS32O0-NEXT: sllv $3, $3, $4 +; MIPS32O0-NEXT: addiu $2, $zero, -4 +; MIPS32O0-NEXT: and $2, $4, $2 +; MIPS32O0-NEXT: andi $3, $4, 3 +; MIPS32O0-NEXT: sll $3, $3, 3 +; MIPS32O0-NEXT: ori $4, $zero, 65535 +; MIPS32O0-NEXT: sllv $4, $4, $3 +; MIPS32O0-NEXT: nor $5, $zero, $4 +; MIPS32O0-NEXT: andi $6, $1, 65535 +; MIPS32O0-NEXT: sllv $6, $6, $3 ; MIPS32O0-NEXT: andi $7, $7, 65535 -; MIPS32O0-NEXT: sllv $7, $7, $4 +; MIPS32O0-NEXT: sllv $7, $7, $3 ; MIPS32O0-NEXT: $BB15_1: # =>This Inner Loop Header: Depth=1 -; MIPS32O0-NEXT: ll $9, 0($6) -; MIPS32O0-NEXT: and $10, $9, $1 -; MIPS32O0-NEXT: bne $10, $3, $BB15_3 +; MIPS32O0-NEXT: ll $9, 0($2) +; MIPS32O0-NEXT: and $10, $9, $4 +; MIPS32O0-NEXT: bne $10, $6, $BB15_3 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # in Loop: Header=BB15_1 Depth=1 -; MIPS32O0-NEXT: and $9, $9, $2 +; MIPS32O0-NEXT: and $9, $9, $5 ; MIPS32O0-NEXT: or $9, $9, $7 -; MIPS32O0-NEXT: sc $9, 0($6) +; MIPS32O0-NEXT: sc $9, 0($2) ; MIPS32O0-NEXT: beqz $9, $BB15_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: $BB15_3: -; MIPS32O0-NEXT: srlv $8, $10, $4 +; MIPS32O0-NEXT: srlv $8, $10, $3 ; MIPS32O0-NEXT: sll $8, $8, 16 ; MIPS32O0-NEXT: sra $8, $8, 16 ; MIPS32O0-NEXT: # %bb.4: -; MIPS32O0-NEXT: sw $5, 4($sp) # 4-byte Folded Spill +; MIPS32O0-NEXT: sw $1, 4($sp) # 4-byte Folded Spill ; MIPS32O0-NEXT: sw $8, 0($sp) # 4-byte Folded Spill ; MIPS32O0-NEXT: # %bb.5: ; 
MIPS32O0-NEXT: lw $1, 4($sp) # 4-byte Folded Reload @@ -6684,19 +6678,18 @@ ; MIPS32O0: # %bb.0: # %entry ; MIPS32O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32O0-NEXT: addiu $2, $2, %lo(_gp_disp) -; MIPS32O0-NEXT: addu $2, $2, $25 +; MIPS32O0-NEXT: addu $1, $2, $25 ; MIPS32O0-NEXT: sync -; MIPS32O0-NEXT: lw $2, %got(countsint)($2) +; MIPS32O0-NEXT: lw $1, %got(countsint)($1) ; MIPS32O0-NEXT: $BB16_1: # %entry ; MIPS32O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32O0-NEXT: ll $25, 0($2) -; MIPS32O0-NEXT: addu $1, $25, $4 -; MIPS32O0-NEXT: sc $1, 0($2) -; MIPS32O0-NEXT: beqz $1, $BB16_1 +; MIPS32O0-NEXT: ll $2, 0($1) +; MIPS32O0-NEXT: addu $3, $2, $4 +; MIPS32O0-NEXT: sc $3, 0($1) +; MIPS32O0-NEXT: beqz $3, $BB16_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # %entry ; MIPS32O0-NEXT: sync -; MIPS32O0-NEXT: move $2, $25 ; MIPS32O0-NEXT: jr $ra ; MIPS32O0-NEXT: nop ; @@ -6741,20 +6734,20 @@ ; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32R6O0-NEXT: addiu $sp, $sp, -8 -; MIPS32R6O0-NEXT: addu $2, $2, $25 -; MIPS32R6O0-NEXT: move $25, $4 +; MIPS32R6O0-NEXT: addu $1, $2, $25 +; MIPS32R6O0-NEXT: move $2, $4 ; MIPS32R6O0-NEXT: sync -; MIPS32R6O0-NEXT: lw $2, %got(countsint)($2) +; MIPS32R6O0-NEXT: lw $1, %got(countsint)($1) ; MIPS32R6O0-NEXT: $BB16_1: # %entry ; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32R6O0-NEXT: ll $1, 0($2) -; MIPS32R6O0-NEXT: addu $3, $1, $4 -; MIPS32R6O0-NEXT: sc $3, 0($2) -; MIPS32R6O0-NEXT: beqzc $3, $BB16_1 +; MIPS32R6O0-NEXT: ll $3, 0($1) +; MIPS32R6O0-NEXT: addu $5, $3, $4 +; MIPS32R6O0-NEXT: sc $5, 0($1) +; MIPS32R6O0-NEXT: beqzc $5, $BB16_1 ; MIPS32R6O0-NEXT: # %bb.2: # %entry ; MIPS32R6O0-NEXT: sync -; MIPS32R6O0-NEXT: move $2, $1 -; MIPS32R6O0-NEXT: sw $25, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: move $2, $3 ; MIPS32R6O0-NEXT: addiu $sp, $sp, 8 ; MIPS32R6O0-NEXT: jrc $ra ; @@ -6990,34 +6983,34 @@ ; MIPS32O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32O0-NEXT: addiu $sp, $sp, -16 -; MIPS32O0-NEXT: addu $2, $2, $25 +; MIPS32O0-NEXT: addu $1, $2, $25 ; MIPS32O0-NEXT: sync -; MIPS32O0-NEXT: lw $2, %got(a)($2) -; MIPS32O0-NEXT: addiu $25, $zero, 0 -; MIPS32O0-NEXT: addiu $1, $zero, 1 -; MIPS32O0-NEXT: lw $3, 12($sp) # 4-byte Folded Reload -; MIPS32O0-NEXT: move $4, $1 +; MIPS32O0-NEXT: lw $1, %got(a)($1) +; MIPS32O0-NEXT: addiu $2, $zero, 0 +; MIPS32O0-NEXT: addiu $3, $zero, 1 +; MIPS32O0-NEXT: lw $4, 12($sp) # 4-byte Folded Reload +; MIPS32O0-NEXT: move $5, $3 ; MIPS32O0-NEXT: $BB17_1: # %entry ; MIPS32O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32O0-NEXT: ll $5, 0($2) -; MIPS32O0-NEXT: bne $5, $4, $BB17_3 +; MIPS32O0-NEXT: ll $6, 0($1) +; MIPS32O0-NEXT: bne $6, $5, $BB17_3 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # %entry ; MIPS32O0-NEXT: # in Loop: Header=BB17_1 Depth=1 -; MIPS32O0-NEXT: move $6, $25 -; MIPS32O0-NEXT: sc $6, 0($2) -; MIPS32O0-NEXT: beqz $6, $BB17_1 +; MIPS32O0-NEXT: move $7, $2 +; MIPS32O0-NEXT: sc $7, 0($1) +; MIPS32O0-NEXT: beqz $7, $BB17_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: $BB17_3: # %entry -; MIPS32O0-NEXT: xor $1, $5, $1 +; MIPS32O0-NEXT: xor $1, $6, $3 ; MIPS32O0-NEXT: sltiu $1, $1, 1 ; MIPS32O0-NEXT: sync ; MIPS32O0-NEXT: addiu $2, $zero, 1 -; MIPS32O0-NEXT: xor $2, $5, $2 +; MIPS32O0-NEXT: xor $2, $6, $2 ; MIPS32O0-NEXT: sltiu $2, $2, 1 ; MIPS32O0-NEXT: andi $2, $2, 1 -; MIPS32O0-NEXT: sw $5, 12($sp) # 4-byte Folded Spill -; MIPS32O0-NEXT: sw $3, 8($sp) # 
4-byte Folded Spill +; MIPS32O0-NEXT: sw $6, 12($sp) # 4-byte Folded Spill +; MIPS32O0-NEXT: sw $4, 8($sp) # 4-byte Folded Spill ; MIPS32O0-NEXT: sw $1, 4($sp) # 4-byte Folded Spill ; MIPS32O0-NEXT: addiu $sp, $sp, 16 ; MIPS32O0-NEXT: jr $ra @@ -7079,28 +7072,28 @@ ; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32R6O0-NEXT: addiu $sp, $sp, -8 -; MIPS32R6O0-NEXT: addu $2, $2, $25 +; MIPS32R6O0-NEXT: addu $1, $2, $25 ; MIPS32R6O0-NEXT: sync -; MIPS32R6O0-NEXT: lw $2, %got(a)($2) -; MIPS32R6O0-NEXT: addiu $25, $zero, 0 -; MIPS32R6O0-NEXT: addiu $1, $zero, 1 -; MIPS32R6O0-NEXT: lw $3, 4($sp) # 4-byte Folded Reload -; MIPS32R6O0-NEXT: move $4, $1 +; MIPS32R6O0-NEXT: lw $1, %got(a)($1) +; MIPS32R6O0-NEXT: addiu $2, $zero, 0 +; MIPS32R6O0-NEXT: addiu $3, $zero, 1 +; MIPS32R6O0-NEXT: lw $4, 4($sp) # 4-byte Folded Reload +; MIPS32R6O0-NEXT: move $5, $3 ; MIPS32R6O0-NEXT: $BB17_1: # %entry ; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32R6O0-NEXT: ll $5, 0($2) -; MIPS32R6O0-NEXT: bnec $5, $4, $BB17_3 +; MIPS32R6O0-NEXT: ll $6, 0($1) +; MIPS32R6O0-NEXT: bnec $6, $5, $BB17_3 ; MIPS32R6O0-NEXT: # %bb.2: # %entry ; MIPS32R6O0-NEXT: # in Loop: Header=BB17_1 Depth=1 -; MIPS32R6O0-NEXT: move $6, $25 -; MIPS32R6O0-NEXT: sc $6, 0($2) -; MIPS32R6O0-NEXT: beqzc $6, $BB17_1 +; MIPS32R6O0-NEXT: move $7, $2 +; MIPS32R6O0-NEXT: sc $7, 0($1) +; MIPS32R6O0-NEXT: beqzc $7, $BB17_1 ; MIPS32R6O0-NEXT: $BB17_3: # %entry -; MIPS32R6O0-NEXT: xor $1, $5, $1 +; MIPS32R6O0-NEXT: xor $1, $6, $3 ; MIPS32R6O0-NEXT: sltiu $2, $1, 1 ; MIPS32R6O0-NEXT: sync -; MIPS32R6O0-NEXT: sw $5, 4($sp) # 4-byte Folded Spill -; MIPS32R6O0-NEXT: sw $3, 0($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $6, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $4, 0($sp) # 4-byte Folded Spill ; MIPS32R6O0-NEXT: addiu $sp, $sp, 8 ; MIPS32R6O0-NEXT: jrc $ra ; @@ -7403,18 +7396,17 @@ ; MIPS32O0: # %bb.0: # %entry ; MIPS32O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32O0-NEXT: addiu $2, $2, %lo(_gp_disp) -; MIPS32O0-NEXT: addu $2, $2, $25 -; MIPS32O0-NEXT: lw $2, %got(x)($2) -; MIPS32O0-NEXT: addiu $2, $2, 1024 +; MIPS32O0-NEXT: addu $1, $2, $25 +; MIPS32O0-NEXT: lw $1, %got(x)($1) +; MIPS32O0-NEXT: addiu $1, $1, 1024 ; MIPS32O0-NEXT: $BB18_1: # %entry ; MIPS32O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32O0-NEXT: ll $25, 0($2) -; MIPS32O0-NEXT: addu $1, $25, $4 -; MIPS32O0-NEXT: sc $1, 0($2) -; MIPS32O0-NEXT: beqz $1, $BB18_1 +; MIPS32O0-NEXT: ll $2, 0($1) +; MIPS32O0-NEXT: addu $3, $2, $4 +; MIPS32O0-NEXT: sc $3, 0($1) +; MIPS32O0-NEXT: beqz $3, $BB18_1 ; MIPS32O0-NEXT: nop ; MIPS32O0-NEXT: # %bb.2: # %entry -; MIPS32O0-NEXT: move $2, $25 ; MIPS32O0-NEXT: jr $ra ; MIPS32O0-NEXT: nop ; @@ -7458,19 +7450,19 @@ ; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp) ; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp) ; MIPS32R6O0-NEXT: addiu $sp, $sp, -8 -; MIPS32R6O0-NEXT: addu $2, $2, $25 -; MIPS32R6O0-NEXT: move $25, $4 -; MIPS32R6O0-NEXT: lw $2, %got(x)($2) -; MIPS32R6O0-NEXT: addiu $2, $2, 1024 +; MIPS32R6O0-NEXT: addu $1, $2, $25 +; MIPS32R6O0-NEXT: move $2, $4 +; MIPS32R6O0-NEXT: lw $1, %got(x)($1) +; MIPS32R6O0-NEXT: addiu $1, $1, 1024 ; MIPS32R6O0-NEXT: $BB18_1: # %entry ; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS32R6O0-NEXT: ll $1, 0($2) -; MIPS32R6O0-NEXT: addu $3, $1, $4 -; MIPS32R6O0-NEXT: sc $3, 0($2) -; MIPS32R6O0-NEXT: beqzc $3, $BB18_1 +; MIPS32R6O0-NEXT: ll $3, 0($1) +; MIPS32R6O0-NEXT: addu $5, $3, $4 +; MIPS32R6O0-NEXT: sc $5, 0($1) +; MIPS32R6O0-NEXT: beqzc $5, $BB18_1 
; MIPS32R6O0-NEXT: # %bb.2: # %entry -; MIPS32R6O0-NEXT: move $2, $1 -; MIPS32R6O0-NEXT: sw $25, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill +; MIPS32R6O0-NEXT: move $2, $3 ; MIPS32R6O0-NEXT: addiu $sp, $sp, 8 ; MIPS32R6O0-NEXT: jrc $ra ; Index: test/CodeGen/Mips/atomic64.ll =================================================================== --- test/CodeGen/Mips/atomic64.ll +++ test/CodeGen/Mips/atomic64.ll @@ -96,16 +96,17 @@ ; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAdd))) ; MIPS64R6O0-NEXT: daddu $1, $1, $25 ; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAdd))) -; MIPS64R6O0-NEXT: move $25, $4 +; MIPS64R6O0-NEXT: move $2, $4 ; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1) ; MIPS64R6O0-NEXT: .LBB0_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: lld $2, 0($1) -; MIPS64R6O0-NEXT: daddu $3, $2, $4 -; MIPS64R6O0-NEXT: scd $3, 0($1) -; MIPS64R6O0-NEXT: beqzc $3, .LBB0_1 +; MIPS64R6O0-NEXT: lld $3, 0($1) +; MIPS64R6O0-NEXT: daddu $5, $3, $4 +; MIPS64R6O0-NEXT: scd $5, 0($1) +; MIPS64R6O0-NEXT: beqzc $5, .LBB0_1 ; MIPS64R6O0-NEXT: # %bb.2: # %entry -; MIPS64R6O0-NEXT: sd $25, 8($sp) # 8-byte Folded Spill +; MIPS64R6O0-NEXT: sd $2, 8($sp) # 8-byte Folded Spill +; MIPS64R6O0-NEXT: move $2, $3 ; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16 ; MIPS64R6O0-NEXT: jrc $ra ; @@ -256,16 +257,17 @@ ; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadSub))) ; MIPS64R6O0-NEXT: daddu $1, $1, $25 ; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadSub))) -; MIPS64R6O0-NEXT: move $25, $4 +; MIPS64R6O0-NEXT: move $2, $4 ; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1) ; MIPS64R6O0-NEXT: .LBB1_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: lld $2, 0($1) -; MIPS64R6O0-NEXT: dsubu $3, $2, $4 -; MIPS64R6O0-NEXT: scd $3, 0($1) -; MIPS64R6O0-NEXT: beqzc $3, .LBB1_1 +; MIPS64R6O0-NEXT: lld $3, 0($1) +; MIPS64R6O0-NEXT: dsubu $5, $3, $4 +; MIPS64R6O0-NEXT: scd $5, 0($1) +; MIPS64R6O0-NEXT: beqzc $5, .LBB1_1 ; MIPS64R6O0-NEXT: # %bb.2: # %entry -; MIPS64R6O0-NEXT: sd $25, 8($sp) # 8-byte Folded Spill +; MIPS64R6O0-NEXT: sd $2, 8($sp) # 8-byte Folded Spill +; MIPS64R6O0-NEXT: move $2, $3 ; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16 ; MIPS64R6O0-NEXT: jrc $ra ; @@ -416,16 +418,17 @@ ; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAnd))) ; MIPS64R6O0-NEXT: daddu $1, $1, $25 ; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAnd))) -; MIPS64R6O0-NEXT: move $25, $4 +; MIPS64R6O0-NEXT: move $2, $4 ; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1) ; MIPS64R6O0-NEXT: .LBB2_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: lld $2, 0($1) -; MIPS64R6O0-NEXT: and $3, $2, $4 -; MIPS64R6O0-NEXT: scd $3, 0($1) -; MIPS64R6O0-NEXT: beqzc $3, .LBB2_1 +; MIPS64R6O0-NEXT: lld $3, 0($1) +; MIPS64R6O0-NEXT: and $5, $3, $4 +; MIPS64R6O0-NEXT: scd $5, 0($1) +; MIPS64R6O0-NEXT: beqzc $5, .LBB2_1 ; MIPS64R6O0-NEXT: # %bb.2: # %entry -; MIPS64R6O0-NEXT: sd $25, 8($sp) # 8-byte Folded Spill +; MIPS64R6O0-NEXT: sd $2, 8($sp) # 8-byte Folded Spill +; MIPS64R6O0-NEXT: move $2, $3 ; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16 ; MIPS64R6O0-NEXT: jrc $ra ; @@ -576,16 +579,17 @@ ; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadOr))) ; MIPS64R6O0-NEXT: daddu $1, $1, $25 ; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadOr))) -; MIPS64R6O0-NEXT: move $25, $4 +; MIPS64R6O0-NEXT: move $2, $4 ; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1) ; MIPS64R6O0-NEXT: .LBB3_1: # %entry ; 
MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: lld $2, 0($1) -; MIPS64R6O0-NEXT: or $3, $2, $4 -; MIPS64R6O0-NEXT: scd $3, 0($1) -; MIPS64R6O0-NEXT: beqzc $3, .LBB3_1 +; MIPS64R6O0-NEXT: lld $3, 0($1) +; MIPS64R6O0-NEXT: or $5, $3, $4 +; MIPS64R6O0-NEXT: scd $5, 0($1) +; MIPS64R6O0-NEXT: beqzc $5, .LBB3_1 ; MIPS64R6O0-NEXT: # %bb.2: # %entry -; MIPS64R6O0-NEXT: sd $25, 8($sp) # 8-byte Folded Spill +; MIPS64R6O0-NEXT: sd $2, 8($sp) # 8-byte Folded Spill +; MIPS64R6O0-NEXT: move $2, $3 ; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16 ; MIPS64R6O0-NEXT: jrc $ra ; @@ -736,16 +740,17 @@ ; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadXor))) ; MIPS64R6O0-NEXT: daddu $1, $1, $25 ; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadXor))) -; MIPS64R6O0-NEXT: move $25, $4 +; MIPS64R6O0-NEXT: move $2, $4 ; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1) ; MIPS64R6O0-NEXT: .LBB4_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: lld $2, 0($1) -; MIPS64R6O0-NEXT: xor $3, $2, $4 -; MIPS64R6O0-NEXT: scd $3, 0($1) -; MIPS64R6O0-NEXT: beqzc $3, .LBB4_1 +; MIPS64R6O0-NEXT: lld $3, 0($1) +; MIPS64R6O0-NEXT: xor $5, $3, $4 +; MIPS64R6O0-NEXT: scd $5, 0($1) +; MIPS64R6O0-NEXT: beqzc $5, .LBB4_1 ; MIPS64R6O0-NEXT: # %bb.2: # %entry -; MIPS64R6O0-NEXT: sd $25, 8($sp) # 8-byte Folded Spill +; MIPS64R6O0-NEXT: sd $2, 8($sp) # 8-byte Folded Spill +; MIPS64R6O0-NEXT: move $2, $3 ; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16 ; MIPS64R6O0-NEXT: jrc $ra ; @@ -900,17 +905,18 @@ ; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadNand))) ; MIPS64R6O0-NEXT: daddu $1, $1, $25 ; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadNand))) -; MIPS64R6O0-NEXT: move $25, $4 +; MIPS64R6O0-NEXT: move $2, $4 ; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1) ; MIPS64R6O0-NEXT: .LBB5_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: lld $2, 0($1) -; MIPS64R6O0-NEXT: and $3, $2, $4 -; MIPS64R6O0-NEXT: nor $3, $zero, $3 -; MIPS64R6O0-NEXT: scd $3, 0($1) -; MIPS64R6O0-NEXT: beqzc $3, .LBB5_1 +; MIPS64R6O0-NEXT: lld $3, 0($1) +; MIPS64R6O0-NEXT: and $5, $3, $4 +; MIPS64R6O0-NEXT: nor $5, $zero, $5 +; MIPS64R6O0-NEXT: scd $5, 0($1) +; MIPS64R6O0-NEXT: beqzc $5, .LBB5_1 ; MIPS64R6O0-NEXT: # %bb.2: # %entry -; MIPS64R6O0-NEXT: sd $25, 8($sp) # 8-byte Folded Spill +; MIPS64R6O0-NEXT: sd $2, 8($sp) # 8-byte Folded Spill +; MIPS64R6O0-NEXT: move $2, $3 ; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16 ; MIPS64R6O0-NEXT: jrc $ra ; @@ -1074,18 +1080,19 @@ ; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicSwap64))) ; MIPS64R6O0-NEXT: daddu $1, $1, $25 ; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicSwap64))) -; MIPS64R6O0-NEXT: move $25, $4 +; MIPS64R6O0-NEXT: move $2, $4 ; MIPS64R6O0-NEXT: sd $4, 8($sp) -; MIPS64R6O0-NEXT: ld $4, 8($sp) +; MIPS64R6O0-NEXT: ld $3, 8($sp) ; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1) ; MIPS64R6O0-NEXT: .LBB6_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: lld $2, 0($1) -; MIPS64R6O0-NEXT: move $3, $4 -; MIPS64R6O0-NEXT: scd $3, 0($1) -; MIPS64R6O0-NEXT: beqzc $3, .LBB6_1 +; MIPS64R6O0-NEXT: lld $4, 0($1) +; MIPS64R6O0-NEXT: move $5, $3 +; MIPS64R6O0-NEXT: scd $5, 0($1) +; MIPS64R6O0-NEXT: beqzc $5, .LBB6_1 ; MIPS64R6O0-NEXT: # %bb.2: # %entry -; MIPS64R6O0-NEXT: sd $25, 0($sp) # 8-byte Folded Spill +; MIPS64R6O0-NEXT: sd $2, 0($sp) # 8-byte Folded Spill +; MIPS64R6O0-NEXT: move $2, $4 ; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16 ; MIPS64R6O0-NEXT: jrc $ra ; @@ -1271,27 +1278,27 @@ ; 
MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicCmpSwap64))) ; MIPS64R6O0-NEXT: daddu $1, $1, $25 ; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicCmpSwap64))) -; MIPS64R6O0-NEXT: move $25, $5 -; MIPS64R6O0-NEXT: move $2, $4 +; MIPS64R6O0-NEXT: move $2, $5 +; MIPS64R6O0-NEXT: move $3, $4 ; MIPS64R6O0-NEXT: sd $5, 40($sp) ; MIPS64R6O0-NEXT: ld $5, 40($sp) ; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1) -; MIPS64R6O0-NEXT: ld $3, 32($sp) # 8-byte Folded Reload +; MIPS64R6O0-NEXT: ld $6, 32($sp) # 8-byte Folded Reload ; MIPS64R6O0-NEXT: .LBB7_1: # %entry ; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1 -; MIPS64R6O0-NEXT: lld $6, 0($1) -; MIPS64R6O0-NEXT: bnec $6, $4, .LBB7_3 +; MIPS64R6O0-NEXT: lld $7, 0($1) +; MIPS64R6O0-NEXT: bnec $7, $4, .LBB7_3 ; MIPS64R6O0-NEXT: # %bb.2: # %entry ; MIPS64R6O0-NEXT: # in Loop: Header=BB7_1 Depth=1 -; MIPS64R6O0-NEXT: move $7, $5 -; MIPS64R6O0-NEXT: scd $7, 0($1) -; MIPS64R6O0-NEXT: beqzc $7, .LBB7_1 +; MIPS64R6O0-NEXT: move $8, $5 +; MIPS64R6O0-NEXT: scd $8, 0($1) +; MIPS64R6O0-NEXT: beqzc $8, .LBB7_1 ; MIPS64R6O0-NEXT: .LBB7_3: # %entry ; MIPS64R6O0-NEXT: sd $2, 24($sp) # 8-byte Folded Spill -; MIPS64R6O0-NEXT: move $2, $6 -; MIPS64R6O0-NEXT: sd $25, 16($sp) # 8-byte Folded Spill -; MIPS64R6O0-NEXT: sd $6, 32($sp) # 8-byte Folded Spill -; MIPS64R6O0-NEXT: sd $3, 8($sp) # 8-byte Folded Spill +; MIPS64R6O0-NEXT: move $2, $7 +; MIPS64R6O0-NEXT: sd $3, 16($sp) # 8-byte Folded Spill +; MIPS64R6O0-NEXT: sd $7, 32($sp) # 8-byte Folded Spill +; MIPS64R6O0-NEXT: sd $6, 8($sp) # 8-byte Folded Spill ; MIPS64R6O0-NEXT: daddiu $sp, $sp, 48 ; MIPS64R6O0-NEXT: jrc $ra ; Index: test/CodeGen/Mips/atomicCmpSwapPW.ll =================================================================== --- test/CodeGen/Mips/atomicCmpSwapPW.ll +++ test/CodeGen/Mips/atomicCmpSwapPW.ll @@ -79,29 +79,29 @@ ; N64-NEXT: sll $1, $1, 0 ; N64-NEXT: move $2, $4 ; N64-NEXT: sll $2, $2, 0 -; N64-NEXT: lui $4, %highest(sym) -; N64-NEXT: daddiu $4, $4, %higher(sym) -; N64-NEXT: dsll $4, $4, 16 -; N64-NEXT: daddiu $4, $4, %hi(sym) -; N64-NEXT: dsll $4, $4, 16 -; N64-NEXT: ld $4, %lo(sym)($4) +; N64-NEXT: lui $3, %highest(sym) +; N64-NEXT: daddiu $3, $3, %higher(sym) +; N64-NEXT: dsll $3, $3, 16 +; N64-NEXT: daddiu $3, $3, %hi(sym) +; N64-NEXT: dsll $3, $3, 16 +; N64-NEXT: ld $3, %lo(sym)($3) ; N64-NEXT: sync -; N64-NEXT: lw $3, 12($sp) # 4-byte Folded Reload +; N64-NEXT: lw $6, 12($sp) # 4-byte Folded Reload ; N64-NEXT: .LBB0_1: # %entry ; N64-NEXT: # =>This Inner Loop Header: Depth=1 -; N64-NEXT: ll $6, 0($4) -; N64-NEXT: bne $6, $2, .LBB0_3 +; N64-NEXT: ll $7, 0($3) +; N64-NEXT: bne $7, $2, .LBB0_3 ; N64-NEXT: nop ; N64-NEXT: # %bb.2: # %entry ; N64-NEXT: # in Loop: Header=BB0_1 Depth=1 -; N64-NEXT: move $7, $1 -; N64-NEXT: sc $7, 0($4) -; N64-NEXT: beqz $7, .LBB0_1 +; N64-NEXT: move $8, $1 +; N64-NEXT: sc $8, 0($3) +; N64-NEXT: beqz $8, .LBB0_1 ; N64-NEXT: nop ; N64-NEXT: .LBB0_3: # %entry ; N64-NEXT: sync -; N64-NEXT: sw $6, 12($sp) # 4-byte Folded Spill -; N64-NEXT: sw $3, 8($sp) # 4-byte Folded Spill +; N64-NEXT: sw $7, 12($sp) # 4-byte Folded Spill +; N64-NEXT: sw $6, 8($sp) # 4-byte Folded Spill ; N64-NEXT: daddiu $sp, $sp, 16 ; N64-NEXT: jr $ra ; N64-NEXT: nop Index: test/CodeGen/PowerPC/anon_aggr.ll =================================================================== --- test/CodeGen/PowerPC/anon_aggr.ll +++ test/CodeGen/PowerPC/anon_aggr.ll @@ -38,11 +38,11 @@ ret i8* %array2_ptr } ; CHECK-LABEL: func2: -; CHECK-DAG: cmpld {{([0-9]+,)?}}4, 6 +; CHECK-DAG: cmpld {{([0-9]+,)?}}4, 5 ; 
CHECK-DAG: std 6, 72(1) ; CHECK-DAG: std 5, 64(1) -; CHECK-DAG: std 6, -[[OFFSET1:[0-9]+]] -; CHECK-DAG: std 5, -[[OFFSET2:[0-9]+]] +; CHECK-DAG: std 5, -[[OFFSET1:[0-9]+]] +; CHECK-DAG: std 3, -[[OFFSET2:[0-9]+]] ; CHECK: ld 3, -[[OFFSET2]](1) ; CHECK: ld 3, -[[OFFSET1]](1) Index: test/CodeGen/PowerPC/fp-int128-fp-combine.ll =================================================================== --- test/CodeGen/PowerPC/fp-int128-fp-combine.ll +++ test/CodeGen/PowerPC/fp-int128-fp-combine.ll @@ -31,7 +31,8 @@ define float @f_i128_fi_nsz(float %v) #0 { ; CHECK-LABEL: f_i128_fi_nsz: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: friz 1, 1 +; CHECK-NEXT: friz 0, 1 +; CHECK-NEXT: fmr 1, 0 ; CHECK-NEXT: blr entry: %a = fptosi float %v to i128 Index: test/CodeGen/PowerPC/fp64-to-int16.ll =================================================================== --- test/CodeGen/PowerPC/fp64-to-int16.ll +++ test/CodeGen/PowerPC/fp64-to-int16.ll @@ -5,8 +5,8 @@ define i1 @Test(double %a) { ; CHECK-LABEL: Test: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: xscvdpsxws 1, 1 -; CHECK-NEXT: mfvsrwz 3, 1 +; CHECK-NEXT: xscvdpsxws 0, 1 +; CHECK-NEXT: mfvsrwz 3, 0 ; CHECK-NEXT: xori 3, 3, 65534 ; CHECK-NEXT: cntlzw 3, 3 ; CHECK-NEXT: srwi 3, 3, 5 Index: test/CodeGen/PowerPC/vsx.ll =================================================================== --- test/CodeGen/PowerPC/vsx.ll +++ test/CodeGen/PowerPC/vsx.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -relocation-model=static -verify-machineinstrs -mcpu=pwr7 \ ; RUN: -mtriple=powerpc64-unknown-linux-gnu -mattr=+vsx \ ; RUN: -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names < %s | FileCheck %s @@ -15,1189 +16,2526 @@ ; RUN: -check-prefix=CHECK-LE %s define double @test1(double %a, double %b) { +; CHECK-LABEL: test1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xsmuldp f1, f1, f2 +; CHECK-NEXT: blr +; +; CHECK-REG-LABEL: test1: +; CHECK-REG: # %bb.0: # %entry +; CHECK-REG-NEXT: xsmuldp f1, f1, f2 +; CHECK-REG-NEXT: blr +; +; CHECK-FISL-LABEL: test1: +; CHECK-FISL: # %bb.0: # %entry +; CHECK-FISL-NEXT: xsmuldp f1, f1, f2 +; CHECK-FISL-NEXT: blr +; +; CHECK-LE-LABEL: test1: +; CHECK-LE: # %bb.0: # %entry +; CHECK-LE-NEXT: xsmuldp f1, f1, f2 +; CHECK-LE-NEXT: blr entry: %v = fmul double %a, %b ret double %v -; CHECK-LABEL: @test1 -; CHECK: xsmuldp f1, f1, f2 -; CHECK: blr -; CHECK-LE-LABEL: @test1 -; CHECK-LE: xsmuldp f1, f1, f2 -; CHECK-LE: blr } define double @test2(double %a, double %b) { +; CHECK-LABEL: test2: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xsdivdp f1, f1, f2 +; CHECK-NEXT: blr +; +; CHECK-REG-LABEL: test2: +; CHECK-REG: # %bb.0: # %entry +; CHECK-REG-NEXT: xsdivdp f1, f1, f2 +; CHECK-REG-NEXT: blr +; +; CHECK-FISL-LABEL: test2: +; CHECK-FISL: # %bb.0: # %entry +; CHECK-FISL-NEXT: xsdivdp f1, f1, f2 +; CHECK-FISL-NEXT: blr +; +; CHECK-LE-LABEL: test2: +; CHECK-LE: # %bb.0: # %entry +; CHECK-LE-NEXT: xsdivdp f1, f1, f2 +; CHECK-LE-NEXT: blr entry: %v = fdiv double %a, %b ret double %v -; CHECK-LABEL: @test2 -; CHECK: xsdivdp f1, f1, f2 -; CHECK: blr -; CHECK-LE-LABEL: @test2 -; CHECK-LE: xsdivdp f1, f1, f2 -; CHECK-LE: blr } define double @test3(double %a, double %b) { +; CHECK-LABEL: test3: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xsadddp f1, f1, f2 +; CHECK-NEXT: blr +; +; CHECK-REG-LABEL: test3: +; CHECK-REG: # %bb.0: # %entry +; CHECK-REG-NEXT: xsadddp f1, f1, f2 +; CHECK-REG-NEXT: blr +; +; CHECK-FISL-LABEL: test3: +; CHECK-FISL: # %bb.0: # %entry +; CHECK-FISL-NEXT: xsadddp f1, f1, f2 +; 
CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test3:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xsadddp f1, f1, f2
+; CHECK-LE-NEXT: blr
 entry:
   %v = fadd double %a, %b
   ret double %v
-; CHECK-LABEL: @test3
-; CHECK: xsadddp f1, f1, f2
-; CHECK: blr
-; CHECK-LE-LABEL: @test3
-; CHECK-LE: xsadddp f1, f1, f2
-; CHECK-LE: blr
 }

 define <2 x double> @test4(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: test4:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvadddp v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test4:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xvadddp v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test4:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xvadddp v2, v2, v3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test4:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xvadddp v2, v2, v3
+; CHECK-LE-NEXT: blr
 entry:
   %v = fadd <2 x double> %a, %b
   ret <2 x double> %v
-; CHECK-LABEL: @test4
-; CHECK: xvadddp v2, v2, v3
-; CHECK: blr
-; CHECK-LE-LABEL: @test4
-; CHECK-LE: xvadddp v2, v2, v3
-; CHECK-LE: blr
 }

 define <4 x i32> @test5(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test5:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxlxor v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test5:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xxlxor v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test5:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xxlxor v2, v2, v3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test5:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xxlxor v2, v2, v3
+; CHECK-LE-NEXT: blr
 entry:
   %v = xor <4 x i32> %a, %b
   ret <4 x i32> %v
-; CHECK-REG-LABEL: @test5
-; CHECK-REG: xxlxor v2, v2, v3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test5
-; CHECK-FISL: xxlxor v2, v2, v3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test5
-; CHECK-LE: xxlxor v2, v2, v3
-; CHECK-LE: blr
 }

 define <8 x i16> @test6(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test6:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxlxor v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test6:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xxlxor v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test6:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xxlxor vs0, v2, v3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test6:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xxlxor v2, v2, v3
+; CHECK-LE-NEXT: blr
 entry:
   %v = xor <8 x i16> %a, %b
   ret <8 x i16> %v
-; CHECK-REG-LABEL: @test6
-; CHECK-REG: xxlxor v2, v2, v3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test6
-; CHECK-FISL: xxlxor v2, v2, v3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test6
-; CHECK-LE: xxlxor v2, v2, v3
-; CHECK-LE: blr
 }

 define <16 x i8> @test7(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test7:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxlxor v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test7:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xxlxor v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test7:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xxlxor vs0, v2, v3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test7:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xxlxor v2, v2, v3
+; CHECK-LE-NEXT: blr
 entry:
   %v = xor <16 x i8> %a, %b
   ret <16 x i8> %v
-; CHECK-REG-LABEL: @test7
-; CHECK-REG: xxlxor v2, v2, v3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test7
-; CHECK-FISL: xxlxor v2, v2, v3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test7
-; CHECK-LE: xxlxor v2, v2, v3
-; CHECK-LE: blr
 }

 define <4 x i32> @test8(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxlor v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test8:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xxlor v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test8:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xxlor v2, v2, v3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test8:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xxlor v2, v2, v3
+; CHECK-LE-NEXT: blr
 entry:
   %v = or <4 x i32> %a, %b
   ret <4 x i32> %v
-; CHECK-REG-LABEL: @test8
-; CHECK-REG: xxlor v2, v2, v3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test8
-; CHECK-FISL: xxlor v2, v2, v3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test8
-; CHECK-LE: xxlor v2, v2, v3
-; CHECK-LE: blr
 }

 define <8 x i16> @test9(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test9:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxlor v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test9:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xxlor v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test9:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xxlor vs0, v2, v3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test9:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xxlor v2, v2, v3
+; CHECK-LE-NEXT: blr
 entry:
   %v = or <8 x i16> %a, %b
   ret <8 x i16> %v
-; CHECK-REG-LABEL: @test9
-; CHECK-REG: xxlor v2, v2, v3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test9
-; CHECK-FISL: xxlor v2, v2, v3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test9
-; CHECK-LE: xxlor v2, v2, v3
-; CHECK-LE: blr
 }

 define <16 x i8> @test10(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test10:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxlor v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test10:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xxlor v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test10:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xxlor vs0, v2, v3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test10:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xxlor v2, v2, v3
+; CHECK-LE-NEXT: blr
 entry:
   %v = or <16 x i8> %a, %b
   ret <16 x i8> %v
-; CHECK-REG-LABEL: @test10
-; CHECK-REG: xxlor v2, v2, v3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test10
-; CHECK-FISL: xxlor v2, v2, v3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test10
-; CHECK-LE: xxlor v2, v2, v3
-; CHECK-LE: blr
 }

 define <4 x i32> @test11(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test11:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxland v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test11:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xxland v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test11:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xxland v2, v2, v3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test11:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xxland v2, v2, v3
+; CHECK-LE-NEXT: blr
 entry:
   %v = and <4 x i32> %a, %b
   ret <4 x i32> %v
-; CHECK-REG-LABEL: @test11
-; CHECK-REG: xxland v2, v2, v3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test11
-; CHECK-FISL: xxland v2, v2, v3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test11
-; CHECK-LE: xxland v2, v2, v3
-; CHECK-LE: blr
 }

 define <8 x i16> @test12(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test12:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxland v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test12:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xxland v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test12:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xxland vs0, v2, v3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test12:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xxland v2, v2, v3
+; CHECK-LE-NEXT: blr
 entry:
   %v = and <8 x i16> %a, %b
   ret <8 x i16> %v
-; CHECK-REG-LABEL: @test12
-; CHECK-REG: xxland v2, v2, v3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test12
-; CHECK-FISL: xxland v2, v2, v3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test12
-; CHECK-LE: xxland v2, v2, v3
-; CHECK-LE: blr
 }

 define <16 x i8> @test13(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test13:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxland v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test13:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xxland v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test13:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xxland vs0, v2, v3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test13:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xxland v2, v2, v3
+; CHECK-LE-NEXT: blr
 entry:
   %v = and <16 x i8> %a, %b
   ret <16 x i8> %v
-; CHECK-REG-LABEL: @test13
-; CHECK-REG: xxland v2, v2, v3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test13
-; CHECK-FISL: xxland v2, v2, v3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test13
-; CHECK-LE: xxland v2, v2, v3
-; CHECK-LE: blr
 }

 define <4 x i32> @test14(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test14:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxlnor v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test14:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xxlnor v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test14:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xxlor vs0, v2, v3
+; CHECK-FISL-NEXT: xxlnor v2, v2, v3
+; CHECK-FISL-NEXT: li r3, -16
+; CHECK-FISL-NEXT: stxvd2x vs0, r1, r3 # 16-byte Folded Spill
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test14:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xxlnor v2, v2, v3
+; CHECK-LE-NEXT: blr
 entry:
   %v = or <4 x i32> %a, %b
   %w = xor <4 x i32> %v, <i32 -1, i32 -1, i32 -1, i32 -1>
   ret <4 x i32> %w
-; CHECK-REG-LABEL: @test14
-; CHECK-REG: xxlnor v2, v2, v3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test14
-; CHECK-FISL: xxlor vs0, v2, v3
-; CHECK-FISL: xxlnor v2, v2, v3
-; CHECK-FISL-NOT: lis
-; CHECK-FISL-NOT: ori
-; CHECK-FISL: li r3, -16
-; CHECK-FISL-NOT: lis
-; CHECK-FISL-NOT: ori
-; CHECK-FISL: stxvd2x vs0, r1, r3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test14
-; CHECK-LE: xxlnor v2, v2, v3
-; CHECK-LE: blr
 }

 define <8 x i16> @test15(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test15:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxlnor v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test15:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xxlnor v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test15:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xxlor vs0, v2, v3
+; CHECK-FISL-NEXT: xxlor v4, vs0, vs0
+; CHECK-FISL-NEXT: xxlnor vs0, v2, v3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: li r3, -16
+; CHECK-FISL-NEXT: stxvd2x v4, r1, r3 # 16-byte Folded Spill
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test15:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xxlnor v2, v2, v3
+; CHECK-LE-NEXT: blr
 entry:
   %v = or <8 x i16> %a, %b
   %w = xor <8 x i16> %v, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   ret <8 x i16> %w
-; CHECK-REG-LABEL: @test15
-; CHECK-REG: xxlnor v2, v2, v3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test15
-; CHECK-FISL: xxlor vs0, v2, v3
-; CHECK-FISL: xxlor v4, vs0, vs0
-; CHECK-FISL: xxlnor vs0, v2, v3
-; CHECK-FISL: xxlor v2, vs0, vs0
-; CHECK-FISL-NOT: lis
-; CHECK-FISL-NOT: ori
-; CHECK-FISL: li r3, -16
-; CHECK-FISL-NOT: lis
-; CHECK-FISL-NOT: ori
-; CHECK-FISL: stxvd2x v4, r1, r3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test15
-; CHECK-LE: xxlnor v2, v2, v3
-; CHECK-LE: blr
 }

 define <16 x i8> @test16(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxlnor v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test16:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xxlnor v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test16:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xxlor vs0, v2, v3
+; CHECK-FISL-NEXT: xxlor v4, vs0, vs0
+; CHECK-FISL-NEXT: xxlnor vs0, v2, v3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: li r3, -16
+; CHECK-FISL-NEXT: stxvd2x v4, r1, r3 # 16-byte Folded Spill
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test16:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xxlnor v2, v2, v3
+; CHECK-LE-NEXT: blr
 entry:
   %v = or <16 x i8> %a, %b
   %w = xor <16 x i8> %v, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   ret <16 x i8> %w
-; CHECK-REG-LABEL: @test16
-; CHECK-REG: xxlnor v2, v2, v3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test16
-; CHECK-FISL: xxlor vs0, v2, v3
-; CHECK-FISL: xxlor v4, vs0, vs0
-; CHECK-FISL: xxlnor vs0, v2, v3
-; CHECK-FISL: xxlor v2, vs0, vs0
-; CHECK-FISL-NOT: lis
-; CHECK-FISL-NOT: ori
-; CHECK-FISL: li r3, -16
-; CHECK-FISL-NOT: lis
-; CHECK-FISL-NOT: ori
-; CHECK-FISL: stxvd2x v4, r1, r3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test16
-; CHECK-LE: xxlnor v2, v2, v3
-; CHECK-LE: blr
 }

 define <4 x i32> @test17(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: test17:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxlandc v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test17:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xxlandc v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test17:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xxlnor vs0, v3, v3
+; CHECK-FISL-NEXT: xxland v2, v2, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test17:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xxlandc v2, v2, v3
+; CHECK-LE-NEXT: blr
 entry:
   %w = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
   %v = and <4 x i32> %a, %w
   ret <4 x i32> %v
-; CHECK-REG-LABEL: @test17
-; CHECK-REG: xxlandc v2, v2, v3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test17
-; CHECK-FISL: xxlnor v3, v3, v3
-; CHECK-FISL: xxland v2, v2, v3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test17
-; CHECK-LE: xxlandc v2, v2, v3
-; CHECK-LE: blr
 }

 define <8 x i16> @test18(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test18:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxlandc v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test18:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xxlandc v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test18:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xxlnor vs0, v3, v3
+; CHECK-FISL-NEXT: xxlor v4, vs0, vs0
+; CHECK-FISL-NEXT: xxlandc vs0, v2, v3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: li r3, -16
+; CHECK-FISL-NEXT: stxvd2x v4, r1, r3 # 16-byte Folded Spill
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test18:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xxlandc v2, v2, v3
+; CHECK-LE-NEXT: blr
 entry:
   %w = xor <8 x i16> %b, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
   %v = and <8 x i16> %a, %w
   ret <8 x i16> %v
-; CHECK-REG-LABEL: @test18
-; CHECK-REG: xxlandc v2, v2, v3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test18
-; CHECK-FISL: xxlnor vs0, v3, v3
-; CHECK-FISL: xxlor v4, vs0, vs0
-; CHECK-FISL: xxlandc vs0, v2, v3
-; CHECK-FISL: xxlor v2, vs0, vs0
-; CHECK-FISL-NOT: lis
-; CHECK-FISL-NOT: ori
-; CHECK-FISL: li r3, -16
-; CHECK-FISL-NOT: lis
-; CHECK-FISL-NOT: ori
-; CHECK-FISL: stxvd2x v4, r1, r3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test18
-; CHECK-LE: xxlandc v2, v2, v3
-; CHECK-LE: blr
 }

 define <16 x i8> @test19(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test19:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xxlandc v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test19:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xxlandc v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test19:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xxlnor vs0, v3, v3
+; CHECK-FISL-NEXT: xxlor v4, vs0, vs0
+; CHECK-FISL-NEXT: xxlandc vs0, v2, v3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: li r3, -16
+; CHECK-FISL-NEXT: stxvd2x v4, r1, r3 # 16-byte Folded Spill
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test19:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xxlandc v2, v2, v3
+; CHECK-LE-NEXT: blr
 entry:
   %w = xor <16 x i8> %b, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
   %v = and <16 x i8> %a, %w
   ret <16 x i8> %v
-; CHECK-REG-LABEL: @test19
-; CHECK-REG: xxlandc v2, v2, v3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test19
-; CHECK-FISL: xxlnor vs0, v3, v3
-; CHECK-FISL: xxlor v4, vs0, vs0
-; CHECK-FISL: xxlandc vs0, v2, v3
-; CHECK-FISL: xxlor v2, vs0, vs0
-; CHECK-FISL-NOT: lis
-; CHECK-FISL-NOT: ori
-; CHECK-FISL: li r3, -16
-; CHECK-FISL-NOT: lis
-; CHECK-FISL-NOT: ori
-; CHECK-FISL: stxvd2x v4, r1, r3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test19
-; CHECK-LE: xxlandc v2, v2, v3
-; CHECK-LE: blr
 }

 define <4 x i32> @test20(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) {
+; CHECK-LABEL: test20:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcmpequw v4, v4, v5
+; CHECK-NEXT: xxsel v2, v3, v2, v4
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test20:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: vcmpequw v4, v4, v5
+; CHECK-REG-NEXT: xxsel v2, v3, v2, v4
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test20:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: vcmpequw v4, v4, v5
+; CHECK-FISL-NEXT: xxsel v2, v3, v2, v4
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test20:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: vcmpequw v4, v4, v5
+; CHECK-LE-NEXT: xxsel v2, v3, v2, v4
+; CHECK-LE-NEXT: blr
 entry:
   %m = icmp eq <4 x i32> %c, %d
   %v = select <4 x i1> %m, <4 x i32> %a, <4 x i32> %b
   ret <4 x i32> %v
-; CHECK-REG-LABEL: @test20
-; CHECK-REG: vcmpequw v4, v4, v5
-; CHECK-REG: xxsel v2, v3, v2, v4
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test20
-; CHECK-FISL: vcmpequw v4, v4, v5
-; CHECK-FISL: xxsel v2, v3, v2, v4
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test20
-; CHECK-LE: vcmpequw v4, v4, v5
-; CHECK-LE: xxsel v2, v3, v2, v4
-; CHECK-LE: blr
 }

 define <4 x float> @test21(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d) {
+; CHECK-LABEL: test21:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvcmpeqsp vs0, v4, v5
+; CHECK-NEXT: xxsel v2, v3, v2, vs0
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test21:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xvcmpeqsp vs0, v4, v5
+; CHECK-REG-NEXT: xxsel v2, v3, v2, vs0
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test21:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xvcmpeqsp vs0, v4, v5
+; CHECK-FISL-NEXT: xxsel v2, v3, v2, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test21:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xvcmpeqsp vs0, v4, v5
+; CHECK-LE-NEXT: xxsel v2, v3, v2, vs0
+; CHECK-LE-NEXT: blr
 entry:
   %m = fcmp oeq <4 x float> %c, %d
   %v = select <4 x i1> %m, <4 x float> %a, <4 x float> %b
   ret <4 x float> %v
-; CHECK-REG-LABEL: @test21
-; CHECK-REG: xvcmpeqsp vs0, v4, v5
-; CHECK-REG: xxsel v2, v3, v2, vs0
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test21
-; CHECK-FISL: xvcmpeqsp v4, v4, v5
-; CHECK-FISL: xxsel v2, v3, v2, v4
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test21
-; CHECK-LE: xvcmpeqsp vs0, v4, v5
-; CHECK-LE: xxsel v2, v3, v2, vs0
-; CHECK-LE: blr
 }

 define <4 x float> @test22(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x float> %d) {
+; CHECK-LABEL: test22:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvcmpeqsp vs0, v5, v5
+; CHECK-NEXT: xvcmpeqsp vs1, v4, v4
+; CHECK-NEXT: xvcmpeqsp vs2, v4, v5
+; CHECK-NEXT: xxlnor vs0, vs0, vs0
+; CHECK-NEXT: xxlnor vs1, vs1, vs1
+; CHECK-NEXT: xxlor vs0, vs1, vs0
+; CHECK-NEXT: xxlor vs0, vs2, vs0
+; CHECK-NEXT: xxsel v2, v3, v2, vs0
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test22:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xvcmpeqsp vs0, v5, v5
+; CHECK-REG-NEXT: xvcmpeqsp vs1, v4, v4
+; CHECK-REG-NEXT: xvcmpeqsp vs2, v4, v5
+; CHECK-REG-NEXT: xxlnor vs0, vs0, vs0
+; CHECK-REG-NEXT: xxlnor vs1, vs1, vs1
+; CHECK-REG-NEXT: xxlor vs0, vs1, vs0
+; CHECK-REG-NEXT: xxlor vs0, vs2, vs0
+; CHECK-REG-NEXT: xxsel v2, v3, v2, vs0
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test22:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xvcmpeqsp vs0, v4, v5
+; CHECK-FISL-NEXT: xvcmpeqsp vs1, v5, v5
+; CHECK-FISL-NEXT: xxlnor vs1, vs1, vs1
+; CHECK-FISL-NEXT: xvcmpeqsp vs2, v4, v4
+; CHECK-FISL-NEXT: xxlnor vs2, vs2, vs2
+; CHECK-FISL-NEXT: xxlor vs1, vs2, vs1
+; CHECK-FISL-NEXT: xxlor vs0, vs0, vs1
+; CHECK-FISL-NEXT: xxsel v2, v3, v2, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test22:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xvcmpeqsp vs0, v5, v5
+; CHECK-LE-NEXT: xvcmpeqsp vs1, v4, v4
+; CHECK-LE-NEXT: xvcmpeqsp vs2, v4, v5
+; CHECK-LE-NEXT: xxlnor vs0, vs0, vs0
+; CHECK-LE-NEXT: xxlnor vs1, vs1, vs1
+; CHECK-LE-NEXT: xxlor vs0, vs1, vs0
+; CHECK-LE-NEXT: xxlor vs0, vs2, vs0
+; CHECK-LE-NEXT: xxsel v2, v3, v2, vs0
+; CHECK-LE-NEXT: blr
 entry:
   %m = fcmp ueq <4 x float> %c, %d
   %v = select <4 x i1> %m, <4 x float> %a, <4 x float> %b
   ret <4 x float> %v
-; CHECK-REG-LABEL: @test22
-; CHECK-REG-DAG: xvcmpeqsp vs0, v5, v5
-; CHECK-REG-DAG: xvcmpeqsp vs1, v4, v4
-; CHECK-REG-DAG: xvcmpeqsp vs2, v4, v5
-; CHECK-REG-DAG: xxlnor vs0, vs0, vs0
-; CHECK-REG-DAG: xxlnor vs1, vs1, vs1
-; CHECK-REG-DAG: xxlor vs0, vs1, vs0
-; CHECK-REG-DAG: xxlor vs0, vs2, vs0
-; CHECK-REG: xxsel v2, v3, v2, vs0
-; CHECK-REG: blr
-
-; CHECK-FISL-LABEL: @test22
-; CHECK-FISL-DAG: xvcmpeqsp vs0, v4, v5
-; CHECK-FISL-DAG: xvcmpeqsp v5, v5, v5
-; CHECK-FISL-DAG: xvcmpeqsp v4, v4, v4
-; CHECK-FISL-DAG: xxlnor v5, v5, v5
-; CHECK-FISL-DAG: xxlnor v4, v4, v4
-; CHECK-FISL-DAG: xxlor v4, v4, v5
-; CHECK-FISL-DAG: xxlor vs0, vs0, v4
-; CHECK-FISL: xxsel v2, v3, v2, vs0
-; CHECK-FISL: blr
-
-; CHECK-LE-LABEL: @test22
-; CHECK-LE-DAG: xvcmpeqsp vs0, v5, v5
-; CHECK-LE-DAG: xvcmpeqsp vs1, v4, v4
-; CHECK-LE-DAG: xvcmpeqsp vs2, v4, v5
-; CHECK-LE-DAG: xxlnor vs0, vs0, vs0
-; CHECK-LE-DAG: xxlnor vs1, vs1, vs1
-; CHECK-LE-DAG: xxlor vs0, vs1, vs0
-; CHECK-LE-DAG: xxlor vs0, vs2, vs0
-; CHECK-LE: xxsel v2, v3, v2, vs0
-; CHECK-LE: blr
+
+
 }

 define <8 x i16> @test23(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d) {
+; CHECK-LABEL: test23:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcmpequh v4, v4, v5
+; CHECK-NEXT: xxsel v2, v3, v2, v4
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test23:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: vcmpequh v4, v4, v5
+; CHECK-REG-NEXT: xxsel v2, v3, v2, v4
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test23:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: vcmpequh v4, v4, v5
+; CHECK-FISL-NEXT: xxsel vs0, v3, v2, v4
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test23:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: vcmpequh v4, v4, v5
+; CHECK-LE-NEXT: xxsel v2, v3, v2, v4
+; CHECK-LE-NEXT: blr
 entry:
   %m = icmp eq <8 x i16> %c, %d
   %v = select <8 x i1> %m, <8 x i16> %a, <8 x i16> %b
   ret <8 x i16> %v
-; CHECK-REG-LABEL: @test23
-; CHECK-REG: vcmpequh v4, v4, v5
-; CHECK-REG: xxsel v2, v3, v2, v4
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test23
-; CHECK-FISL: vcmpequh v4, v4, v5
-; CHECK-FISL: xxsel v2, v3, v2, v4
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test23
-; CHECK-LE: vcmpequh v4, v4, v5
-; CHECK-LE: xxsel v2, v3, v2, v4
-; CHECK-LE: blr
 }

 define <16 x i8> @test24(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
+; CHECK-LABEL: test24:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vcmpequb v4, v4, v5
+; CHECK-NEXT: xxsel v2, v3, v2, v4
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test24:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: vcmpequb v4, v4, v5
+; CHECK-REG-NEXT: xxsel v2, v3, v2, v4
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test24:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: vcmpequb v4, v4, v5
+; CHECK-FISL-NEXT: xxsel vs0, v3, v2, v4
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test24:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: vcmpequb v4, v4, v5
+; CHECK-LE-NEXT: xxsel v2, v3, v2, v4
+; CHECK-LE-NEXT: blr
 entry:
   %m = icmp eq <16 x i8> %c, %d
   %v = select <16 x i1> %m, <16 x i8> %a, <16 x i8> %b
   ret <16 x i8> %v
-; CHECK-REG-LABEL: @test24
-; CHECK-REG: vcmpequb v4, v4, v5
-; CHECK-REG: xxsel v2, v3, v2, v4
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test24
-; CHECK-FISL: vcmpequb v4, v4, v5
-; CHECK-FISL: xxsel v2, v3, v2, v4
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test24
-; CHECK-LE: vcmpequb v4, v4, v5
-; CHECK-LE: xxsel v2, v3, v2, v4
-; CHECK-LE: blr
 }

 define <2 x double> @test25(<2 x double> %a, <2 x double> %b, <2 x double> %c, <2 x double> %d) {
+; CHECK-LABEL: test25:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xvcmpeqdp vs0, v4, v5
+; CHECK-NEXT: xxsel v2, v3, v2, vs0
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test25:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xvcmpeqdp vs0, v4, v5
+; CHECK-REG-NEXT: xxsel v2, v3, v2, vs0
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test25:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xvcmpeqdp vs0, v4, v5
+; CHECK-FISL-NEXT: xxsel v2, v3, v2, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test25:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xvcmpeqdp v4, v4, v5
+; CHECK-LE-NEXT: xxsel v2, v3, v2, v4
+; CHECK-LE-NEXT: blr
 entry:
   %m = fcmp oeq <2 x double> %c, %d
   %v = select <2 x i1> %m, <2 x double> %a, <2 x double> %b
   ret <2 x double> %v
-; CHECK-LABEL: @test25
-; CHECK: xvcmpeqdp vs0, v4, v5
-; CHECK: xxsel v2, v3, v2, vs0
-; CHECK: blr
-; CHECK-LE-LABEL: @test25
-; CHECK-LE: xvcmpeqdp v4, v4, v5
-; CHECK-LE: xxsel v2, v3, v2, v4
-; CHECK-LE: blr
 }

 define <2 x i64> @test26(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test26:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi r3, r1, -32
+; CHECK-NEXT: addi r4, r1, -48
+; CHECK-NEXT: stxvd2x v3, 0, r3
+; CHECK-NEXT: stxvd2x v2, 0, r4
+; CHECK-NEXT: ld r3, -24(r1)
+; CHECK-NEXT: ld r4, -40(r1)
+; CHECK-NEXT: add r3, r4, r3
+; CHECK-NEXT: std r3, -8(r1)
+; CHECK-NEXT: ld r3, -32(r1)
+; CHECK-NEXT: ld r4, -48(r1)
+; CHECK-NEXT: add r3, r4, r3
+; CHECK-NEXT: std r3, -16(r1)
+; CHECK-NEXT: addi r3, r1, -16
+; CHECK-NEXT: lxvd2x v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test26:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: addi r3, r1, -32
+; CHECK-REG-NEXT: addi r4, r1, -48
+; CHECK-REG-NEXT: stxvd2x v3, 0, r3
+; CHECK-REG-NEXT: stxvd2x v2, 0, r4
+; CHECK-REG-NEXT: ld r3, -24(r1)
+; CHECK-REG-NEXT: ld r4, -40(r1)
+; CHECK-REG-NEXT: add r3, r4, r3
+; CHECK-REG-NEXT: std r3, -8(r1)
+; CHECK-REG-NEXT: ld r3, -32(r1)
+; CHECK-REG-NEXT: ld r4, -48(r1)
+; CHECK-REG-NEXT: add r3, r4, r3
+; CHECK-REG-NEXT: std r3, -16(r1)
+; CHECK-REG-NEXT: addi r3, r1, -16
+; CHECK-REG-NEXT: lxvd2x v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test26:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: addi r3, r1, -32
+; CHECK-FISL-NEXT: stxvd2x v3, 0, r3
+; CHECK-FISL-NEXT: addi r3, r1, -48
+; CHECK-FISL-NEXT: stxvd2x v2, 0, r3
+; CHECK-FISL-NEXT: ld r3, -24(r1)
+; CHECK-FISL-NEXT: ld r4, -40(r1)
+; CHECK-FISL-NEXT: add r3, r4, r3
+; CHECK-FISL-NEXT: std r3, -8(r1)
+; CHECK-FISL-NEXT: ld r3, -32(r1)
+; CHECK-FISL-NEXT: ld r4, -48(r1)
+; CHECK-FISL-NEXT: add r3, r4, r3
+; CHECK-FISL-NEXT: std r3, -16(r1)
+; CHECK-FISL-NEXT: addi r3, r1, -16
+; CHECK-FISL-NEXT: lxvd2x vs0, 0, r3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test26:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: vaddudm v2, v2, v3
+; CHECK-LE-NEXT: blr
   %v = add <2 x i64> %a, %b
   ret <2 x i64> %v
-; CHECK-LABEL: @test26
 ; Make sure we use only two stores (one for each operand).
-; CHECK: stxvd2x v3, 0, r3
-; CHECK: stxvd2x v2, 0, r4
-; CHECK-NOT: stxvd2x
 ; FIXME: The code quality here is not good; just make sure we do something for now.
-; CHECK: add r3, r4, r3
-; CHECK: add r3, r4, r3
-; CHECK: blr
-; CHECK-LE: vaddudm v2, v2, v3
-; CHECK-LE: blr
 }

 define <2 x i64> @test27(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test27:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xxland v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test27:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: xxland v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test27:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: xxland vs0, v2, v3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test27:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xxland v2, v2, v3
+; CHECK-LE-NEXT: blr
   %v = and <2 x i64> %a, %b
   ret <2 x i64> %v
-; CHECK-LABEL: @test27
-; CHECK: xxland v2, v2, v3
-; CHECK: blr
-; CHECK-LE-LABEL: @test27
-; CHECK-LE: xxland v2, v2, v3
-; CHECK-LE: blr
 }

 define <2 x double> @test28(<2 x double>* %a) {
+; CHECK-LABEL: test28:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lxvd2x v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test28:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: lxvd2x v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test28:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: lxvd2x v2, 0, r3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test28:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: lxvd2x vs0, 0, r3
+; CHECK-LE-NEXT: xxswapd v2, vs0
+; CHECK-LE-NEXT: blr
   %v = load <2 x double>, <2 x double>* %a, align 16
   ret <2 x double> %v
-; CHECK-LABEL: @test28
-; CHECK: lxvd2x v2, 0, r3
-; CHECK: blr
-; CHECK-LE-LABEL: @test28
-; CHECK-LE: lxvd2x vs0, 0, r3
-; CHECK-LE: xxswapd v2, vs0
-; CHECK-LE: blr
 }

 define void @test29(<2 x double>* %a, <2 x double> %b) {
+; CHECK-LABEL: test29:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stxvd2x v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test29:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: stxvd2x v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test29:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: stxvd2x v2, 0, r3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test29:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xxswapd vs0, v2
+; CHECK-LE-NEXT: stxvd2x vs0, 0, r3
+; CHECK-LE-NEXT: blr
   store <2 x double> %b, <2 x double>* %a, align 16
   ret void
-; CHECK-LABEL: @test29
-; CHECK: stxvd2x v2, 0, r3
-; CHECK: blr
-; CHECK-LE-LABEL: @test29
-; CHECK-LE: xxswapd vs0, v2
-; CHECK-LE: stxvd2x vs0, 0, r3
-; CHECK-LE: blr
 }

 define <2 x double> @test28u(<2 x double>* %a) {
+; CHECK-LABEL: test28u:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lxvd2x v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test28u:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: lxvd2x v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test28u:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: lxvd2x v2, 0, r3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test28u:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: lxvd2x vs0, 0, r3
+; CHECK-LE-NEXT: xxswapd v2, vs0
+; CHECK-LE-NEXT: blr
   %v = load <2 x double>, <2 x double>* %a, align 8
   ret <2 x double> %v
-; CHECK-LABEL: @test28u
-; CHECK: lxvd2x v2, 0, r3
-; CHECK: blr
-; CHECK-LE-LABEL: @test28u
-; CHECK-LE: lxvd2x vs0, 0, r3
-; CHECK-LE: xxswapd v2, vs0
-; CHECK-LE: blr
 }

 define void @test29u(<2 x double>* %a, <2 x double> %b) {
+; CHECK-LABEL: test29u:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stxvd2x v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test29u:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: stxvd2x v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test29u:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: stxvd2x v2, 0, r3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test29u:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xxswapd vs0, v2
+; CHECK-LE-NEXT: stxvd2x vs0, 0, r3
+; CHECK-LE-NEXT: blr
   store <2 x double> %b, <2 x double>* %a, align 8
   ret void
-; CHECK-LABEL: @test29u
-; CHECK: stxvd2x v2, 0, r3
-; CHECK: blr
-; CHECK-LE-LABEL: @test29u
-; CHECK-LE: xxswapd vs0, v2
-; CHECK-LE: stxvd2x vs0, 0, r3
-; CHECK-LE: blr
 }

 define <2 x i64> @test30(<2 x i64>* %a) {
+; CHECK-LABEL: test30:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lxvd2x v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test30:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: lxvd2x v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test30:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: lxvd2x vs0, 0, r3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test30:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: lxvd2x vs0, 0, r3
+; CHECK-LE-NEXT: xxswapd v2, vs0
+; CHECK-LE-NEXT: blr
   %v = load <2 x i64>, <2 x i64>* %a, align 16
   ret <2 x i64> %v
-; CHECK-REG-LABEL: @test30
-; CHECK-REG: lxvd2x v2, 0, r3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test30
-; CHECK-FISL: lxvd2x vs0, 0, r3
-; CHECK-FISL: xxlor v2, vs0, vs0
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test30
-; CHECK-LE: lxvd2x vs0, 0, r3
-; CHECK-LE: xxswapd v2, vs0
-; CHECK-LE: blr
 }

 define void @test31(<2 x i64>* %a, <2 x i64> %b) {
+; CHECK-LABEL: test31:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stxvd2x v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test31:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: stxvd2x v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test31:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: stxvd2x v2, 0, r3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test31:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xxswapd vs0, v2
+; CHECK-LE-NEXT: stxvd2x vs0, 0, r3
+; CHECK-LE-NEXT: blr
   store <2 x i64> %b, <2 x i64>* %a, align 16
   ret void
-; CHECK-LABEL: @test31
-; CHECK: stxvd2x v2, 0, r3
-; CHECK: blr
-; CHECK-LE-LABEL: @test31
-; CHECK-LE: xxswapd vs0, v2
-; CHECK-LE: stxvd2x vs0, 0, r3
-; CHECK-LE: blr
 }

 define <4 x float> @test32(<4 x float>* %a) {
+; CHECK-LABEL: test32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lxvw4x v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test32:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: lxvw4x v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test32:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: lxvw4x v2, 0, r3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test32:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: lvx v2, 0, r3
+; CHECK-LE-NEXT: blr
   %v = load <4 x float>, <4 x float>* %a, align 16
   ret <4 x float> %v
-; CHECK-REG-LABEL: @test32
-; CHECK-REG: lxvw4x v2, 0, r3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test32
-; CHECK-FISL: lxvw4x v2, 0, r3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test32
-; CHECK-LE: lvx v2, 0, r3
-; CHECK-LE-NOT: xxswapd
-; CHECK-LE: blr
 }

 define void @test33(<4 x float>* %a, <4 x float> %b) {
+; CHECK-LABEL: test33:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stxvw4x v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test33:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: stxvw4x v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test33:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: stxvw4x v2, 0, r3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test33:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: stvx v2, 0, r3
+; CHECK-LE-NEXT: blr
   store <4 x float> %b, <4 x float>* %a, align 16
   ret void
-; CHECK-REG-LABEL: @test33
-; CHECK-REG: stxvw4x v2, 0, r3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test33
-; CHECK-FISL: stxvw4x v2, 0, r3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test33
-; CHECK-LE-NOT: xxswapd
-; CHECK-LE: stvx v2, 0, r3
-; CHECK-LE: blr
 }

 define <4 x float> @test32u(<4 x float>* %a) {
+; CHECK-LABEL: test32u:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li r4, 15
+; CHECK-NEXT: lvsl v3, 0, r3
+; CHECK-NEXT: lvx v2, r3, r4
+; CHECK-NEXT: lvx v4, 0, r3
+; CHECK-NEXT: vperm v2, v4, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test32u:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: li r4, 15
+; CHECK-REG-NEXT: lvsl v3, 0, r3
+; CHECK-REG-NEXT: lvx v2, r3, r4
+; CHECK-REG-NEXT: lvx v4, 0, r3
+; CHECK-REG-NEXT: vperm v2, v4, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test32u:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: li r4, 15
+; CHECK-FISL-NEXT: lvx v2, r3, r4
+; CHECK-FISL-NEXT: lvsl v3, 0, r3
+; CHECK-FISL-NEXT: lvx v4, 0, r3
+; CHECK-FISL-NEXT: vperm v2, v4, v2, v3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test32u:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: lxvd2x vs0, 0, r3
+; CHECK-LE-NEXT: xxswapd v2, vs0
+; CHECK-LE-NEXT: blr
   %v = load <4 x float>, <4 x float>* %a, align 8
   ret <4 x float> %v
-; CHECK-LABEL: @test32u
-; CHECK-DAG: lvsl v3, 0, r3
-; CHECK-DAG: lvx v2, r3, r4
-; CHECK-DAG: lvx v4, 0, r3
-; CHECK: vperm v2, v4, v2, v3
-; CHECK: blr
-; CHECK-LE-LABEL: @test32u
-; CHECK-LE: lxvd2x vs0, 0, r3
-; CHECK-LE: xxswapd v2, vs0
-; CHECK-LE: blr
 }

 define void @test33u(<4 x float>* %a, <4 x float> %b) {
+; CHECK-LABEL: test33u:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stxvw4x v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test33u:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: stxvw4x v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test33u:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: stxvw4x v2, 0, r3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test33u:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xxswapd vs0, v2
+; CHECK-LE-NEXT: stxvd2x vs0, 0, r3
+; CHECK-LE-NEXT: blr
   store <4 x float> %b, <4 x float>* %a, align 8
   ret void
-; CHECK-REG-LABEL: @test33u
-; CHECK-REG: stxvw4x v2, 0, r3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test33u
-; CHECK-FISL: stxvw4x v2, 0, r3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test33u
-; CHECK-LE: xxswapd vs0, v2
-; CHECK-LE: stxvd2x vs0, 0, r3
-; CHECK-LE: blr
 }

 define <4 x i32> @test34(<4 x i32>* %a) {
+; CHECK-LABEL: test34:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lxvw4x v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test34:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: lxvw4x v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test34:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: lxvw4x v2, 0, r3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test34:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: lvx v2, 0, r3
+; CHECK-LE-NEXT: blr
   %v = load <4 x i32>, <4 x i32>* %a, align 16
   ret <4 x i32> %v
-; CHECK-REG-LABEL: @test34
-; CHECK-REG: lxvw4x v2, 0, r3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test34
-; CHECK-FISL: lxvw4x v2, 0, r3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test34
-; CHECK-LE: lvx v2, 0, r3
-; CHECK-LE-NOT: xxswapd
-; CHECK-LE: blr
 }

 define void @test35(<4 x i32>* %a, <4 x i32> %b) {
+; CHECK-LABEL: test35:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stxvw4x v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test35:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: stxvw4x v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test35:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: stxvw4x v2, 0, r3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test35:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: stvx v2, 0, r3
+; CHECK-LE-NEXT: blr
   store <4 x i32> %b, <4 x i32>* %a, align 16
   ret void
-; CHECK-REG-LABEL: @test35
-; CHECK-REG: stxvw4x v2, 0, r3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test35
-; CHECK-FISL: stxvw4x v2, 0, r3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test35
-; CHECK-LE-NOT: xxswapd
-; CHECK-LE: stvx v2, 0, r3
-; CHECK-LE: blr
 }

 define <2 x double> @test40(<2 x i64> %a) {
+; CHECK-LABEL: test40:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvcvuxddp v2, v2
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test40:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: xvcvuxddp v2, v2
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test40:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: xvcvuxddp v2, v2
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test40:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xvcvuxddp v2, v2
+; CHECK-LE-NEXT: blr
   %v = uitofp <2 x i64> %a to <2 x double>
   ret <2 x double> %v
-; CHECK-LABEL: @test40
-; CHECK: xvcvuxddp v2, v2
-; CHECK: blr
-; CHECK-LE-LABEL: @test40
-; CHECK-LE: xvcvuxddp v2, v2
-; CHECK-LE: blr
 }

 define <2 x double> @test41(<2 x i64> %a) {
+; CHECK-LABEL: test41:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvcvsxddp v2, v2
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test41:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: xvcvsxddp v2, v2
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test41:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: xvcvsxddp v2, v2
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test41:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xvcvsxddp v2, v2
+; CHECK-LE-NEXT: blr
   %v = sitofp <2 x i64> %a to <2 x double>
   ret <2 x double> %v
-; CHECK-LABEL: @test41
-; CHECK: xvcvsxddp v2, v2
-; CHECK: blr
-; CHECK-LE-LABEL: @test41
-; CHECK-LE: xvcvsxddp v2, v2
-; CHECK-LE: blr
 }

 define <2 x i64> @test42(<2 x double> %a) {
+; CHECK-LABEL: test42:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvcvdpuxds v2, v2
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test42:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: xvcvdpuxds v2, v2
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test42:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: xvcvdpuxds v2, v2
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test42:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xvcvdpuxds v2, v2
+; CHECK-LE-NEXT: blr
   %v = fptoui <2 x double> %a to <2 x i64>
   ret <2 x i64> %v
-; CHECK-LABEL: @test42
-; CHECK: xvcvdpuxds v2, v2
-; CHECK: blr
-; CHECK-LE-LABEL: @test42
-; CHECK-LE: xvcvdpuxds v2, v2
-; CHECK-LE: blr
 }

 define <2 x i64> @test43(<2 x double> %a) {
+; CHECK-LABEL: test43:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xvcvdpsxds v2, v2
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test43:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: xvcvdpsxds v2, v2
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test43:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: xvcvdpsxds v2, v2
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test43:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xvcvdpsxds v2, v2
+; CHECK-LE-NEXT: blr
   %v = fptosi <2 x double> %a to <2 x i64>
   ret <2 x i64> %v
-; CHECK-LABEL: @test43
-; CHECK: xvcvdpsxds v2, v2
-; CHECK: blr
-; CHECK-LE-LABEL: @test43
-; CHECK-LE: xvcvdpsxds v2, v2
-; CHECK-LE: blr
 }

 define <2 x float> @test44(<2 x i64> %a) {
+; CHECK-LABEL: test44:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi r3, r1, -16
+; CHECK-NEXT: addi r4, r1, -64
+; CHECK-NEXT: stxvd2x v2, 0, r3
+; CHECK-NEXT: ld r3, -8(r1)
+; CHECK-NEXT: std r3, -24(r1)
+; CHECK-NEXT: ld r3, -16(r1)
+; CHECK-NEXT: std r3, -32(r1)
+; CHECK-NEXT: lfd f0, -24(r1)
+; CHECK-NEXT: fcfidus f0, f0
+; CHECK-NEXT: stfs f0, -48(r1)
+; CHECK-NEXT: lfd f0, -32(r1)
+; CHECK-NEXT: addi r3, r1, -48
+; CHECK-NEXT: fcfidus f0, f0
+; CHECK-NEXT: stfs f0, -64(r1)
+; CHECK-NEXT: lxvw4x v2, 0, r3
+; CHECK-NEXT: lxvw4x v3, 0, r4
+; CHECK-NEXT: vmrghw v2, v3, v2
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test44:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: addi r3, r1, -16
+; CHECK-REG-NEXT: addi r4, r1, -64
+; CHECK-REG-NEXT: stxvd2x v2, 0, r3
+; CHECK-REG-NEXT: ld r3, -8(r1)
+; CHECK-REG-NEXT: std r3, -24(r1)
+; CHECK-REG-NEXT: ld r3, -16(r1)
+; CHECK-REG-NEXT: std r3, -32(r1)
+; CHECK-REG-NEXT: lfd f0, -24(r1)
+; CHECK-REG-NEXT: fcfidus f0, f0
+; CHECK-REG-NEXT: stfs f0, -48(r1)
+; CHECK-REG-NEXT: lfd f0, -32(r1)
+; CHECK-REG-NEXT: addi r3, r1, -48
+; CHECK-REG-NEXT: fcfidus f0, f0
+; CHECK-REG-NEXT: stfs f0, -64(r1)
+; CHECK-REG-NEXT: lxvw4x v2, 0, r3
+; CHECK-REG-NEXT: lxvw4x v3, 0, r4
+; CHECK-REG-NEXT: vmrghw v2, v3, v2
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test44:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: addi r3, r1, -16
+; CHECK-FISL-NEXT: stxvd2x v2, 0, r3
+; CHECK-FISL-NEXT: ld r3, -8(r1)
+; CHECK-FISL-NEXT: std r3, -24(r1)
+; CHECK-FISL-NEXT: ld r3, -16(r1)
+; CHECK-FISL-NEXT: std r3, -32(r1)
+; CHECK-FISL-NEXT: lfd f0, -24(r1)
+; CHECK-FISL-NEXT: fcfidus f0, f0
+; CHECK-FISL-NEXT: stfs f0, -48(r1)
+; CHECK-FISL-NEXT: lfd f0, -32(r1)
+; CHECK-FISL-NEXT: fcfidus f0, f0
+; CHECK-FISL-NEXT: stfs f0, -64(r1)
+; CHECK-FISL-NEXT: addi r3, r1, -48
+; CHECK-FISL-NEXT: lxvw4x v2, 0, r3
+; CHECK-FISL-NEXT: addi r3, r1, -64
+; CHECK-FISL-NEXT: lxvw4x v3, 0, r3
+; CHECK-FISL-NEXT: vmrghw v2, v3, v2
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test44:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xxswapd vs0, v2
+; CHECK-LE-NEXT: xxlor vs1, v2, v2
+; CHECK-LE-NEXT: xscvuxdsp f1, f1
+; CHECK-LE-NEXT: xscvuxdsp f0, f0
+; CHECK-LE-NEXT: xscvdpspn vs1, f1
+; CHECK-LE-NEXT: xscvdpspn vs0, f0
+; CHECK-LE-NEXT: xxsldwi v3, vs1, vs1, 1
+; CHECK-LE-NEXT: xxsldwi v2, vs0, vs0, 1
+; CHECK-LE-NEXT: vmrglw v2, v3, v2
+; CHECK-LE-NEXT: blr
   %v = uitofp <2 x i64> %a to <2 x float>
   ret <2 x float> %v
-; CHECK-LABEL: @test44
 ; FIXME: The code quality here looks pretty bad.
-; CHECK: blr
 }

 define <2 x float> @test45(<2 x i64> %a) {
+; CHECK-LABEL: test45:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi r3, r1, -16
+; CHECK-NEXT: addi r4, r1, -64
+; CHECK-NEXT: stxvd2x v2, 0, r3
+; CHECK-NEXT: ld r3, -8(r1)
+; CHECK-NEXT: std r3, -24(r1)
+; CHECK-NEXT: ld r3, -16(r1)
+; CHECK-NEXT: std r3, -32(r1)
+; CHECK-NEXT: lfd f0, -24(r1)
+; CHECK-NEXT: fcfids f0, f0
+; CHECK-NEXT: stfs f0, -48(r1)
+; CHECK-NEXT: lfd f0, -32(r1)
+; CHECK-NEXT: addi r3, r1, -48
+; CHECK-NEXT: fcfids f0, f0
+; CHECK-NEXT: stfs f0, -64(r1)
+; CHECK-NEXT: lxvw4x v2, 0, r3
+; CHECK-NEXT: lxvw4x v3, 0, r4
+; CHECK-NEXT: vmrghw v2, v3, v2
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test45:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: addi r3, r1, -16
+; CHECK-REG-NEXT: addi r4, r1, -64
+; CHECK-REG-NEXT: stxvd2x v2, 0, r3
+; CHECK-REG-NEXT: ld r3, -8(r1)
+; CHECK-REG-NEXT: std r3, -24(r1)
+; CHECK-REG-NEXT: ld r3, -16(r1)
+; CHECK-REG-NEXT: std r3, -32(r1)
+; CHECK-REG-NEXT: lfd f0, -24(r1)
+; CHECK-REG-NEXT: fcfids f0, f0
+; CHECK-REG-NEXT: stfs f0, -48(r1)
+; CHECK-REG-NEXT: lfd f0, -32(r1)
+; CHECK-REG-NEXT: addi r3, r1, -48
+; CHECK-REG-NEXT: fcfids f0, f0
+; CHECK-REG-NEXT: stfs f0, -64(r1)
+; CHECK-REG-NEXT: lxvw4x v2, 0, r3
+; CHECK-REG-NEXT: lxvw4x v3, 0, r4
+; CHECK-REG-NEXT: vmrghw v2, v3, v2
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test45:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: addi r3, r1, -16
+; CHECK-FISL-NEXT: stxvd2x v2, 0, r3
+; CHECK-FISL-NEXT: ld r3, -8(r1)
+; CHECK-FISL-NEXT: std r3, -24(r1)
+; CHECK-FISL-NEXT: ld r3, -16(r1)
+; CHECK-FISL-NEXT: std r3, -32(r1)
+; CHECK-FISL-NEXT: lfd f0, -24(r1)
+; CHECK-FISL-NEXT: fcfids f0, f0
+; CHECK-FISL-NEXT: stfs f0, -48(r1)
+; CHECK-FISL-NEXT: lfd f0, -32(r1)
+; CHECK-FISL-NEXT: fcfids f0, f0
+; CHECK-FISL-NEXT: stfs f0, -64(r1)
+; CHECK-FISL-NEXT: addi r3, r1, -48
+; CHECK-FISL-NEXT: lxvw4x v2, 0, r3
+; CHECK-FISL-NEXT: addi r3, r1, -64
+; CHECK-FISL-NEXT: lxvw4x v3, 0, r3
+; CHECK-FISL-NEXT: vmrghw v2, v3, v2
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test45:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xxswapd vs0, v2
+; CHECK-LE-NEXT: xxlor vs1, v2, v2
+; CHECK-LE-NEXT: xscvsxdsp f1, f1
+; CHECK-LE-NEXT: xscvsxdsp f0, f0
+; CHECK-LE-NEXT: xscvdpspn vs1, f1
+; CHECK-LE-NEXT: xscvdpspn vs0, f0
+; CHECK-LE-NEXT: xxsldwi v3, vs1, vs1, 1
+; CHECK-LE-NEXT: xxsldwi v2, vs0, vs0, 1
+; CHECK-LE-NEXT: vmrglw v2, v3, v2
+; CHECK-LE-NEXT: blr
   %v = sitofp <2 x i64> %a to <2 x float>
   ret <2 x float> %v
-; CHECK-LABEL: @test45
 ; FIXME: The code quality here looks pretty bad.
-; CHECK: blr
 }

 define <2 x i64> @test46(<2 x float> %a) {
+; CHECK-LABEL: test46:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi r3, r1, -48
+; CHECK-NEXT: stxvw4x v2, 0, r3
+; CHECK-NEXT: lfs f0, -44(r1)
+; CHECK-NEXT: xscvdpuxds f0, f0
+; CHECK-NEXT: stfd f0, -32(r1)
+; CHECK-NEXT: lfs f0, -48(r1)
+; CHECK-NEXT: xscvdpuxds f0, f0
+; CHECK-NEXT: stfd f0, -24(r1)
+; CHECK-NEXT: ld r3, -32(r1)
+; CHECK-NEXT: std r3, -8(r1)
+; CHECK-NEXT: ld r3, -24(r1)
+; CHECK-NEXT: std r3, -16(r1)
+; CHECK-NEXT: addi r3, r1, -16
+; CHECK-NEXT: lxvd2x v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test46:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: addi r3, r1, -48
+; CHECK-REG-NEXT: stxvw4x v2, 0, r3
+; CHECK-REG-NEXT: lfs f0, -44(r1)
+; CHECK-REG-NEXT: xscvdpuxds f0, f0
+; CHECK-REG-NEXT: stfd f0, -32(r1)
+; CHECK-REG-NEXT: lfs f0, -48(r1)
+; CHECK-REG-NEXT: xscvdpuxds f0, f0
+; CHECK-REG-NEXT: stfd f0, -24(r1)
+; CHECK-REG-NEXT: ld r3, -32(r1)
+; CHECK-REG-NEXT: std r3, -8(r1)
+; CHECK-REG-NEXT: ld r3, -24(r1)
+; CHECK-REG-NEXT: std r3, -16(r1)
+; CHECK-REG-NEXT: addi r3, r1, -16
+; CHECK-REG-NEXT: lxvd2x v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test46:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: addi r3, r1, -48
+; CHECK-FISL-NEXT: stxvw4x v2, 0, r3
+; CHECK-FISL-NEXT: lfs f0, -44(r1)
+; CHECK-FISL-NEXT: xscvdpuxds f0, f0
+; CHECK-FISL-NEXT: stfd f0, -32(r1)
+; CHECK-FISL-NEXT: lfs f0, -48(r1)
+; CHECK-FISL-NEXT: xscvdpuxds f0, f0
+; CHECK-FISL-NEXT: stfd f0, -24(r1)
+; CHECK-FISL-NEXT: ld r3, -32(r1)
+; CHECK-FISL-NEXT: std r3, -8(r1)
+; CHECK-FISL-NEXT: ld r3, -24(r1)
+; CHECK-FISL-NEXT: std r3, -16(r1)
+; CHECK-FISL-NEXT: addi r3, r1, -16
+; CHECK-FISL-NEXT: lxvd2x vs1, 0, r3
+; CHECK-FISL-NEXT: xxlor v2, vs1, vs1
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test46:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xxsldwi vs0, v2, v2, 3
+; CHECK-LE-NEXT: xxswapd vs1, v2
+; CHECK-LE-NEXT: xscvspdpn f0, vs0
+; CHECK-LE-NEXT: xscvspdpn f1, vs1
+; CHECK-LE-NEXT: xxmrghd vs0, vs1, vs0
+; CHECK-LE-NEXT: xvcvdpuxds v2, vs0
+; CHECK-LE-NEXT: blr
   %v = fptoui <2 x float> %a to <2 x i64>
   ret <2 x i64> %v
-; CHECK-LABEL: @test46
 ; FIXME: The code quality here looks pretty bad.
-; CHECK: blr
 }

 define <2 x i64> @test47(<2 x float> %a) {
+; CHECK-LABEL: test47:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi r3, r1, -48
+; CHECK-NEXT: stxvw4x v2, 0, r3
+; CHECK-NEXT: lfs f0, -44(r1)
+; CHECK-NEXT: xscvdpsxds f0, f0
+; CHECK-NEXT: stfd f0, -32(r1)
+; CHECK-NEXT: lfs f0, -48(r1)
+; CHECK-NEXT: xscvdpsxds f0, f0
+; CHECK-NEXT: stfd f0, -24(r1)
+; CHECK-NEXT: ld r3, -32(r1)
+; CHECK-NEXT: std r3, -8(r1)
+; CHECK-NEXT: ld r3, -24(r1)
+; CHECK-NEXT: std r3, -16(r1)
+; CHECK-NEXT: addi r3, r1, -16
+; CHECK-NEXT: lxvd2x v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test47:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: addi r3, r1, -48
+; CHECK-REG-NEXT: stxvw4x v2, 0, r3
+; CHECK-REG-NEXT: lfs f0, -44(r1)
+; CHECK-REG-NEXT: xscvdpsxds f0, f0
+; CHECK-REG-NEXT: stfd f0, -32(r1)
+; CHECK-REG-NEXT: lfs f0, -48(r1)
+; CHECK-REG-NEXT: xscvdpsxds f0, f0
+; CHECK-REG-NEXT: stfd f0, -24(r1)
+; CHECK-REG-NEXT: ld r3, -32(r1)
+; CHECK-REG-NEXT: std r3, -8(r1)
+; CHECK-REG-NEXT: ld r3, -24(r1)
+; CHECK-REG-NEXT: std r3, -16(r1)
+; CHECK-REG-NEXT: addi r3, r1, -16
+; CHECK-REG-NEXT: lxvd2x v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test47:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: addi r3, r1, -48
+; CHECK-FISL-NEXT: stxvw4x v2, 0, r3
+; CHECK-FISL-NEXT: lfs f0, -44(r1)
+; CHECK-FISL-NEXT: xscvdpsxds f0, f0
+; CHECK-FISL-NEXT: stfd f0, -32(r1)
+; CHECK-FISL-NEXT: lfs f0, -48(r1)
+; CHECK-FISL-NEXT: xscvdpsxds f0, f0
+; CHECK-FISL-NEXT: stfd f0, -24(r1)
+; CHECK-FISL-NEXT: ld r3, -32(r1)
+; CHECK-FISL-NEXT: std r3, -8(r1)
+; CHECK-FISL-NEXT: ld r3, -24(r1)
+; CHECK-FISL-NEXT: std r3, -16(r1)
+; CHECK-FISL-NEXT: addi r3, r1, -16
+; CHECK-FISL-NEXT: lxvd2x vs1, 0, r3
+; CHECK-FISL-NEXT: xxlor v2, vs1, vs1
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test47:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xxsldwi vs0, v2, v2, 3
+; CHECK-LE-NEXT: xxswapd vs1, v2
+; CHECK-LE-NEXT: xscvspdpn f0, vs0
+; CHECK-LE-NEXT: xscvspdpn f1, vs1
+; CHECK-LE-NEXT: xxmrghd vs0, vs1, vs0
+; CHECK-LE-NEXT: xvcvdpsxds v2, vs0
+; CHECK-LE-NEXT: blr
   %v = fptosi <2 x float> %a to <2 x i64>
   ret <2 x i64> %v
-; CHECK-LABEL: @test47
 ; FIXME: The code quality here looks pretty bad.
-; CHECK: blr
 }

 define <2 x double> @test50(double* %a) {
+; CHECK-LABEL: test50:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lxvdsx v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test50:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: lxvdsx v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test50:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: lxvdsx v2, 0, r3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test50:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: lxvdsx v2, 0, r3
+; CHECK-LE-NEXT: blr
   %v = load double, double* %a, align 8
   %w = insertelement <2 x double> undef, double %v, i32 0
   %x = insertelement <2 x double> %w, double %v, i32 1
   ret <2 x double> %x
-; CHECK-LABEL: @test50
-; CHECK: lxvdsx v2, 0, r3
-; CHECK: blr
-; CHECK-LE-LABEL: @test50
-; CHECK-LE: lxvdsx v2, 0, r3
-; CHECK-LE: blr
 }

 define <2 x double> @test51(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: test51:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xxspltd v2, v2, 0
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test51:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: xxspltd v2, v2, 0
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test51:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: xxspltd v2, v2, 0
+; CHECK-FISL-NEXT: li r3, -16
+; CHECK-FISL-NEXT: stxvd2x v3, r1, r3 # 16-byte Folded Spill
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test51:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xxspltd v2, v2, 1
+; CHECK-LE-NEXT: blr
   %v = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 0>
   ret <2 x double> %v
-; CHECK-LABEL: @test51
-; CHECK: xxspltd v2, v2, 0
-; CHECK: blr
-; CHECK-LE-LABEL: @test51
-; CHECK-LE: xxspltd v2, v2, 1
-; CHECK-LE: blr
 }

 define <2 x double> @test52(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: test52:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xxmrghd v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test52:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: xxmrghd v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test52:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: xxmrghd v2, v2, v3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test52:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xxmrgld v2, v3, v2
+; CHECK-LE-NEXT: blr
   %v = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 2>
   ret <2 x double> %v
-; CHECK-LABEL: @test52
-; CHECK: xxmrghd v2, v2, v3
-; CHECK: blr
-; CHECK-LE-LABEL: @test52
-; CHECK-LE: xxmrgld v2, v3, v2
-; CHECK-LE: blr
 }

 define <2 x double> @test53(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: test53:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xxmrghd v2, v3, v2
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test53:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: xxmrghd v2, v3, v2
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test53:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: xxmrghd v2, v3, v2
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test53:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xxmrgld v2, v2, v3
+; CHECK-LE-NEXT: blr
   %v = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 2, i32 0>
   ret <2 x double> %v
-; CHECK-LABEL: @test53
-; CHECK: xxmrghd v2, v3, v2
-; CHECK: blr
-; CHECK-LE-LABEL: @test53
-; CHECK-LE: xxmrgld v2, v2, v3
-; CHECK-LE: blr
 }

 define <2 x double> @test54(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: test54:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xxpermdi v2, v2, v3, 2
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test54:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: xxpermdi v2, v2, v3, 2
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test54:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: xxpermdi v2, v2, v3, 2
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test54:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xxpermdi v2, v3, v2, 2
+; CHECK-LE-NEXT: blr
   %v = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 2>
   ret <2 x double> %v
-; CHECK-LABEL: @test54
-; CHECK: xxpermdi v2, v2, v3, 2
-; CHECK: blr
-; CHECK-LE-LABEL: @test54
-; CHECK-LE: xxpermdi v2, v3, v2, 2
-; CHECK-LE: blr
 }

 define <2 x double> @test55(<2 x double> %a, <2 x double> %b) {
+; CHECK-LABEL: test55:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xxmrgld v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test55:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: xxmrgld v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test55:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: xxmrgld v2, v2, v3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test55:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xxmrghd v2, v3, v2
+; CHECK-LE-NEXT: blr
   %v = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 1, i32 3>
   ret <2 x double> %v
-; CHECK-LABEL: @test55
-; CHECK: xxmrgld v2, v2, v3
-; CHECK: blr
-; CHECK-LE-LABEL: @test55
-; CHECK-LE: xxmrghd v2, v3, v2
-; CHECK-LE: blr
 }

 define <2 x i64> @test56(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test56:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xxmrgld v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test56:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: xxmrgld v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test56:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: xxmrgld v2, v2, v3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test56:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xxmrghd v2, v3, v2
+; CHECK-LE-NEXT: blr
   %v = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 1, i32 3>
   ret <2 x i64> %v
-; CHECK-LABEL: @test56
-; CHECK: xxmrgld v2, v2, v3
-; CHECK: blr
-; CHECK-LE-LABEL: @test56
-; CHECK-LE: xxmrghd v2, v3, v2
-; CHECK-LE: blr
 }

 define <2 x i64> @test60(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test60:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi r3, r1, -32
+; CHECK-NEXT: addi r4, r1, -48
+; CHECK-NEXT: stxvd2x v3, 0, r3
+; CHECK-NEXT: stxvd2x v2, 0, r4
+; CHECK-NEXT: lwz r3, -20(r1)
+; CHECK-NEXT: ld r4, -40(r1)
+; CHECK-NEXT: sld r3, r4, r3
+; CHECK-NEXT: std r3, -8(r1)
+; CHECK-NEXT: lwz r3, -28(r1)
+; CHECK-NEXT: ld r4, -48(r1)
+; CHECK-NEXT: sld r3, r4, r3
+; CHECK-NEXT: std r3, -16(r1)
+; CHECK-NEXT: addi r3, r1, -16
+; CHECK-NEXT: lxvd2x v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test60:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: addi r3, r1, -32
+; CHECK-REG-NEXT: addi r4, r1, -48
+; CHECK-REG-NEXT: stxvd2x v3, 0, r3
+; CHECK-REG-NEXT: stxvd2x v2, 0, r4
+; CHECK-REG-NEXT: lwz r3, -20(r1)
+; CHECK-REG-NEXT: ld r4, -40(r1)
+; CHECK-REG-NEXT: sld r3, r4, r3
+; CHECK-REG-NEXT: std r3, -8(r1)
+; CHECK-REG-NEXT: lwz r3, -28(r1)
+; CHECK-REG-NEXT: ld r4, -48(r1)
+; CHECK-REG-NEXT: sld r3, r4, r3
+; CHECK-REG-NEXT: std r3, -16(r1)
+; CHECK-REG-NEXT: addi r3, r1, -16
+; CHECK-REG-NEXT: lxvd2x v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test60:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: addi r3, r1, -32
+; CHECK-FISL-NEXT: stxvd2x v3, 0, r3
+; CHECK-FISL-NEXT: addi r3, r1, -48
+; CHECK-FISL-NEXT: stxvd2x v2, 0, r3
+; CHECK-FISL-NEXT: lwz r4, -20(r1)
+; CHECK-FISL-NEXT: ld r3, -40(r1)
+; CHECK-FISL-NEXT: sld r3, r3, r4
+; CHECK-FISL-NEXT: std r3, -8(r1)
+; CHECK-FISL-NEXT: lwz r4, -28(r1)
+; CHECK-FISL-NEXT: ld r3, -48(r1)
+; CHECK-FISL-NEXT: sld r3, r3, r4
+; CHECK-FISL-NEXT: std r3, -16(r1)
+; CHECK-FISL-NEXT: addi r3, r1, -16
+; CHECK-FISL-NEXT: lxvd2x vs0, 0, r3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test60:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: vsld v2, v2, v3
+; CHECK-LE-NEXT: blr
   %v = shl <2 x i64> %a, %b
   ret <2 x i64> %v
-; CHECK-LABEL: @test60
 ; This should scalarize, and the current code quality is not good.
-; CHECK: stxvd2x v3, 0, r3
-; CHECK: stxvd2x v2, 0, r4
-; CHECK: sld r3, r4, r3
-; CHECK: sld r3, r4, r3
-; CHECK: lxvd2x v2, 0, r3
-; CHECK: blr
 }

 define <2 x i64> @test61(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test61:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi r3, r1, -32
+; CHECK-NEXT: addi r4, r1, -48
+; CHECK-NEXT: stxvd2x v3, 0, r3
+; CHECK-NEXT: stxvd2x v2, 0, r4
+; CHECK-NEXT: lwz r3, -20(r1)
+; CHECK-NEXT: ld r4, -40(r1)
+; CHECK-NEXT: srd r3, r4, r3
+; CHECK-NEXT: std r3, -8(r1)
+; CHECK-NEXT: lwz r3, -28(r1)
+; CHECK-NEXT: ld r4, -48(r1)
+; CHECK-NEXT: srd r3, r4, r3
+; CHECK-NEXT: std r3, -16(r1)
+; CHECK-NEXT: addi r3, r1, -16
+; CHECK-NEXT: lxvd2x v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test61:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: addi r3, r1, -32
+; CHECK-REG-NEXT: addi r4, r1, -48
+; CHECK-REG-NEXT: stxvd2x v3, 0, r3
+; CHECK-REG-NEXT: stxvd2x v2, 0, r4
+; CHECK-REG-NEXT: lwz r3, -20(r1)
+; CHECK-REG-NEXT: ld r4, -40(r1)
+; CHECK-REG-NEXT: srd r3, r4, r3
+; CHECK-REG-NEXT: std r3, -8(r1)
+; CHECK-REG-NEXT: lwz r3, -28(r1)
+; CHECK-REG-NEXT: ld r4, -48(r1)
+; CHECK-REG-NEXT: srd r3, r4, r3
+; CHECK-REG-NEXT: std r3, -16(r1)
+; CHECK-REG-NEXT: addi r3, r1, -16
+; CHECK-REG-NEXT: lxvd2x v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test61:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: addi r3, r1, -32
+; CHECK-FISL-NEXT: stxvd2x v3, 0, r3
+; CHECK-FISL-NEXT: addi r3, r1, -48
+; CHECK-FISL-NEXT: stxvd2x v2, 0, r3
+; CHECK-FISL-NEXT: lwz r4, -20(r1)
+; CHECK-FISL-NEXT: ld r3, -40(r1)
+; CHECK-FISL-NEXT: srd r3, r3, r4
+; CHECK-FISL-NEXT: std r3, -8(r1)
+; CHECK-FISL-NEXT: lwz r4, -28(r1)
+; CHECK-FISL-NEXT: ld r3, -48(r1)
+; CHECK-FISL-NEXT: srd r3, r3, r4
+; CHECK-FISL-NEXT: std r3, -16(r1)
+; CHECK-FISL-NEXT: addi r3, r1, -16
+; CHECK-FISL-NEXT: lxvd2x vs0, 0, r3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test61:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: vsrd v2, v2, v3
+; CHECK-LE-NEXT: blr
   %v = lshr <2 x i64> %a, %b
   ret <2 x i64> %v
-; CHECK-LABEL: @test61
 ; This should scalarize, and the current code quality is not good.
-; CHECK: stxvd2x v3, 0, r3
-; CHECK: stxvd2x v2, 0, r4
-; CHECK: srd r3, r4, r3
-; CHECK: srd r3, r4, r3
-; CHECK: lxvd2x v2, 0, r3
-; CHECK: blr
 }

 define <2 x i64> @test62(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test62:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi r3, r1, -32
+; CHECK-NEXT: addi r4, r1, -48
+; CHECK-NEXT: stxvd2x v3, 0, r3
+; CHECK-NEXT: stxvd2x v2, 0, r4
+; CHECK-NEXT: lwz r3, -20(r1)
+; CHECK-NEXT: ld r4, -40(r1)
+; CHECK-NEXT: srad r3, r4, r3
+; CHECK-NEXT: std r3, -8(r1)
+; CHECK-NEXT: lwz r3, -28(r1)
+; CHECK-NEXT: ld r4, -48(r1)
+; CHECK-NEXT: srad r3, r4, r3
+; CHECK-NEXT: std r3, -16(r1)
+; CHECK-NEXT: addi r3, r1, -16
+; CHECK-NEXT: lxvd2x v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test62:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: addi r3, r1, -32
+; CHECK-REG-NEXT: addi r4, r1, -48
+; CHECK-REG-NEXT: stxvd2x v3, 0, r3
+; CHECK-REG-NEXT: stxvd2x v2, 0, r4
+; CHECK-REG-NEXT: lwz r3, -20(r1)
+; CHECK-REG-NEXT: ld r4, -40(r1)
+; CHECK-REG-NEXT: srad r3, r4, r3
+; CHECK-REG-NEXT: std r3, -8(r1)
+; CHECK-REG-NEXT: lwz r3, -28(r1)
+; CHECK-REG-NEXT: ld r4, -48(r1)
+; CHECK-REG-NEXT: srad r3, r4, r3
+; CHECK-REG-NEXT: std r3, -16(r1)
+; CHECK-REG-NEXT: addi r3, r1, -16
+; CHECK-REG-NEXT: lxvd2x v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test62:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: addi r3, r1, -32
+; CHECK-FISL-NEXT: stxvd2x v3, 0, r3
+; CHECK-FISL-NEXT: addi r3, r1, -48
+; CHECK-FISL-NEXT: stxvd2x v2, 0, r3
+; CHECK-FISL-NEXT: lwz r4, -20(r1)
+; CHECK-FISL-NEXT: ld r3, -40(r1)
+; CHECK-FISL-NEXT: srad r3, r3, r4
+; CHECK-FISL-NEXT: std r3, -8(r1)
+; CHECK-FISL-NEXT: lwz r4, -28(r1)
+; CHECK-FISL-NEXT: ld r3, -48(r1)
+; CHECK-FISL-NEXT: srad r3, r3, r4
+; CHECK-FISL-NEXT: std r3, -16(r1)
+; CHECK-FISL-NEXT: addi r3, r1, -16
+; CHECK-FISL-NEXT: lxvd2x vs0, 0, r3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test62:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: vsrad v2, v2, v3
+; CHECK-LE-NEXT: blr
   %v = ashr <2 x i64> %a, %b
   ret <2 x i64> %v
-; CHECK-LABEL: @test62
 ; This should scalarize, and the current code quality is not good.
-; CHECK: stxvd2x v3, 0, r3
-; CHECK: stxvd2x v2, 0, r4
-; CHECK: srad r3, r4, r3
-; CHECK: srad r3, r4, r3
-; CHECK: lxvd2x v2, 0, r3
-; CHECK: blr
 }

 define double @test63(<2 x double> %a) {
+; CHECK-LABEL: test63:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xxlor f1, v2, v2
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test63:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: xxlor f1, v2, v2
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test63:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: xxlor f0, v2, v2
+; CHECK-FISL-NEXT: fmr f1, f0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test63:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xxswapd vs1, v2
+; CHECK-LE-NEXT: # kill: def $f1 killed $f1 killed $vsl1
+; CHECK-LE-NEXT: blr
   %v = extractelement <2 x double> %a, i32 0
   ret double %v
-; CHECK-REG-LABEL: @test63
-; CHECK-REG: xxlor f1, v2, v2
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test63
-; CHECK-FISL: xxlor f0, v2, v2
-; CHECK-FISL: fmr f1, f0
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test63
-; CHECK-LE: xxswapd vs1, v2
-; CHECK-LE: blr
 }

 define double @test64(<2 x double> %a) {
+; CHECK-LABEL: test64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xxswapd vs1, v2
+; CHECK-NEXT: # kill: def $f1 killed $f1 killed $vsl1
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test64:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: xxswapd vs1, v2
+; CHECK-REG-NEXT: # kill: def $f1 killed $f1 killed $vsl1
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test64:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: xxswapd vs0, v2
+; CHECK-FISL-NEXT: fmr f1, f0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test64:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xxlor f1, v2, v2
+; CHECK-LE-NEXT: blr
   %v = extractelement <2 x double> %a, i32 1
   ret double %v
-; CHECK-REG-LABEL: @test64
-; CHECK-REG: xxswapd vs1, v2
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test64
-; CHECK-FISL: xxswapd v2, v2
-; CHECK-FISL: xxlor f0, v2, v2
-; CHECK-FISL: fmr f1, f0
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test64
-; CHECK-LE: xxlor f1, v2, v2
 }

 define <2 x i1> @test65(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test65:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vcmpequw v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test65:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: vcmpequw v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test65:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: vcmpequw v2, v2, v3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test65:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: vcmpequd v2, v2, v3
+; CHECK-LE-NEXT: blr
   %w = icmp eq <2 x i64> %a, %b
   ret <2 x i1> %w
-; CHECK-REG-LABEL: @test65
-; CHECK-REG: vcmpequw v2, v2, v3
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test65
-; CHECK-FISL: vcmpequw v2, v2, v3
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test65
-; CHECK-LE: vcmpequd v2, v2, v3
-; CHECK-LE: blr
 }

 define <2 x i1> @test66(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test66:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vcmpequw v2, v2, v3
+; CHECK-NEXT: xxlnor v2, v2, v2
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test66:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: vcmpequw v2, v2, v3
+; CHECK-REG-NEXT: xxlnor v2, v2, v2
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test66:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: vcmpequw v2, v2, v3
+; CHECK-FISL-NEXT: xxlnor vs0, v2, v2
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test66:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: vcmpequd v2, v2, v3
+; CHECK-LE-NEXT: xxlnor v2, v2, v2
+; CHECK-LE-NEXT: blr
   %w = icmp ne <2 x i64> %a, %b
   ret <2 x i1> %w
-; CHECK-REG-LABEL: @test66
-; CHECK-REG: vcmpequw v2, v2, v3
-; CHECK-REG: xxlnor v2, v2, v2
-; CHECK-REG: blr
-; CHECK-FISL-LABEL: @test66
-; CHECK-FISL: vcmpequw v2, v2, v3
-; CHECK-FISL: xxlnor v2, v2, v2
-; CHECK-FISL: blr
-; CHECK-LE-LABEL: @test66
-; CHECK-LE: vcmpequd v2, v2, v3
-; CHECK-LE: xxlnor v2, v2, v2
-; CHECK-LE: blr
 }

 define <2 x i1> @test67(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: test67:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi r3, r1, -32
+; CHECK-NEXT: addi r4, r1, -48
+; CHECK-NEXT: stxvd2x v3, 0, r3
+; CHECK-NEXT: stxvd2x v2, 0, r4
+; CHECK-NEXT: ld r3, -24(r1)
+; CHECK-NEXT: ld r4, -40(r1)
+; CHECK-NEXT: cmpld r4, r3
+; CHECK-NEXT: li r3, 0
+; CHECK-NEXT: li r4, -1
+; CHECK-NEXT: isel r5, r4, r3, lt
+; CHECK-NEXT: std r5, -8(r1)
+; CHECK-NEXT: ld r5, -32(r1)
+; CHECK-NEXT: ld r6, -48(r1)
+; CHECK-NEXT: cmpld r6, r5
+; CHECK-NEXT: isel r3, r4, r3, lt
+; CHECK-NEXT: std r3, -16(r1)
+; CHECK-NEXT: addi r3, r1, -16
+; CHECK-NEXT: lxvd2x v2, 0, r3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test67:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: addi r3, r1, -32
+; CHECK-REG-NEXT: addi r4, r1, -48
+; CHECK-REG-NEXT: stxvd2x v3, 0, r3
+; CHECK-REG-NEXT: stxvd2x v2, 0, r4
+; CHECK-REG-NEXT: ld r3, -24(r1)
+; CHECK-REG-NEXT: ld r4, -40(r1)
+; CHECK-REG-NEXT: cmpld r4, r3
+; CHECK-REG-NEXT: li r3, 0
+; CHECK-REG-NEXT: li r4, -1
+; CHECK-REG-NEXT: isel r5, r4, r3, lt
+; CHECK-REG-NEXT: std r5, -8(r1)
+; CHECK-REG-NEXT: ld r5, -32(r1)
+; CHECK-REG-NEXT: ld r6, -48(r1)
+; CHECK-REG-NEXT: cmpld r6, r5
+; CHECK-REG-NEXT: isel r3, r4, r3, lt
+; CHECK-REG-NEXT: std r3, -16(r1)
+; CHECK-REG-NEXT: addi r3, r1, -16
+; CHECK-REG-NEXT: lxvd2x v2, 0, r3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test67:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: addi r3, r1, -32
+; CHECK-FISL-NEXT: stxvd2x v3, 0, r3
+; CHECK-FISL-NEXT: addi r3, r1, -48
+; CHECK-FISL-NEXT: stxvd2x v2, 0, r3
+; CHECK-FISL-NEXT: ld r3, -24(r1)
+; CHECK-FISL-NEXT: ld r4, -40(r1)
+; CHECK-FISL-NEXT: cmpld r4, r3
+; CHECK-FISL-NEXT: li r3, 0
+; CHECK-FISL-NEXT: li r4, -1
+; CHECK-FISL-NEXT: isel r5, r4, r3, lt
+; CHECK-FISL-NEXT: std r5, -8(r1)
+; CHECK-FISL-NEXT: ld r5, -32(r1)
+; CHECK-FISL-NEXT: ld r6, -48(r1)
+; CHECK-FISL-NEXT: cmpld r6, r5
+; CHECK-FISL-NEXT: isel r3, r4, r3, lt
+; CHECK-FISL-NEXT: std r3, -16(r1)
+; CHECK-FISL-NEXT: addi r3, r1, -16
+; CHECK-FISL-NEXT: lxvd2x vs0, 0, r3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test67:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: vcmpgtud v2, v3, v2
+; CHECK-LE-NEXT: blr
   %w = icmp ult <2 x i64> %a, %b
   ret <2 x i1> %w
-; CHECK-LABEL: @test67
 ; This should scalarize, and the current code quality is not good.
-; CHECK: stxvd2x v3, 0, r3
-; CHECK: stxvd2x v2, 0, r4
-; CHECK: cmpld r4, r3
-; CHECK: cmpld r6, r5
-; CHECK: lxvd2x v2, 0, r3
-; CHECK: blr
-; CHECK-LE-LABEL: @test67
-; CHECK-LE: vcmpgtud v2, v3, v2
-; CHECK-LE: blr
}
define <2 x double> @test68(<2 x i32> %a) {
+; CHECK-LABEL: test68:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xxmrghw vs0, v2, v2
+; CHECK-NEXT: xvcvsxwdp v2, vs0
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test68:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: xxmrghw vs0, v2, v2
+; CHECK-REG-NEXT: xvcvsxwdp v2, vs0
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test68:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: xxmrghw vs0, v2, v2
+; CHECK-FISL-NEXT: xvcvsxwdp v2, vs0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test68:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: xxmrglw v2, v2, v2
+; CHECK-LE-NEXT: xvcvsxwdp v2, v2
+; CHECK-LE-NEXT: blr
%w = sitofp <2 x i32> %a to <2 x double>
ret <2 x double> %w
-; CHECK-LABEL: @test68
-; CHECK: xxmrghw vs0, v2, v2
-; CHECK: xvcvsxwdp v2, vs0
-; CHECK: blr
-; CHECK-LE-LABEL: @test68
-; CHECK-LE: xxmrglw v2, v2, v2
-; CHECK-LE: xvcvsxwdp v2, v2
-; CHECK-LE: blr
}
; This gets scalarized so the code isn't great
define <2 x double> @test69(<2 x i16> %a) {
+; CHECK-LABEL: test69:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addis r3, r2, .LCPI63_0@toc@ha
+; CHECK-NEXT: addi r3, r3, .LCPI63_0@toc@l
+; CHECK-NEXT: lxvw4x v3, 0, r3
+; CHECK-NEXT: addi r3, r1, -32
+; CHECK-NEXT: vperm v2, v2, v2, v3
+; CHECK-NEXT: stxvd2x v2, 0, r3
+; CHECK-NEXT: lha r3, -18(r1)
+; CHECK-NEXT: std r3, -8(r1)
+; CHECK-NEXT: lha r3, -26(r1)
+; CHECK-NEXT: std r3, -16(r1)
+; CHECK-NEXT: addi r3, r1, -16
+; CHECK-NEXT: lxvd2x v2, 0, r3
+; CHECK-NEXT: xvcvsxddp v2, v2
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test69:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: addis r3, r2, .LCPI63_0@toc@ha
+; CHECK-REG-NEXT: addi r3, r3, .LCPI63_0@toc@l
+; CHECK-REG-NEXT: lxvw4x v3, 0, r3
+; CHECK-REG-NEXT: addi r3, r1, -32
+; CHECK-REG-NEXT: vperm v2, v2, v2, v3
+; CHECK-REG-NEXT: stxvd2x v2, 0, r3
+; CHECK-REG-NEXT: lha r3, -18(r1)
+; CHECK-REG-NEXT: std r3, -8(r1)
+; CHECK-REG-NEXT: lha r3, -26(r1)
+; CHECK-REG-NEXT: std r3, -16(r1)
+; CHECK-REG-NEXT: addi r3, r1, -16
+; CHECK-REG-NEXT: lxvd2x v2, 0, r3
+; CHECK-REG-NEXT: xvcvsxddp v2, v2
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test69:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: addis r3, r2, .LCPI63_0@toc@ha
+; CHECK-FISL-NEXT: addi r3, r3, .LCPI63_0@toc@l
+; CHECK-FISL-NEXT: lxvw4x v3, 0, r3
+; CHECK-FISL-NEXT: vperm v2, v2, v2, v3
+; CHECK-FISL-NEXT: addi r3, r1, -32
+; CHECK-FISL-NEXT: stxvd2x v2, 0, r3
+; CHECK-FISL-NEXT: lha r3, -18(r1)
+; CHECK-FISL-NEXT: std r3, -8(r1)
+; CHECK-FISL-NEXT: lha r3, -26(r1)
+; CHECK-FISL-NEXT: std r3, -16(r1)
+; CHECK-FISL-NEXT: addi r3, r1, -16
+; CHECK-FISL-NEXT: lxvd2x vs0, 0, r3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: xvcvsxddp v2, v2
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test69:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: addis r3, r2, .LCPI63_0@toc@ha
+; CHECK-LE-NEXT: addi r3, r3, .LCPI63_0@toc@l
+; CHECK-LE-NEXT: lvx v3, 0, r3
+; CHECK-LE-NEXT: addis r3, r2, .LCPI63_1@toc@ha
+; CHECK-LE-NEXT: addi r3, r3, .LCPI63_1@toc@l
+; CHECK-LE-NEXT: lxvd2x vs0, 0, r3
+; CHECK-LE-NEXT: vperm v2, v2, v2, v3
+; CHECK-LE-NEXT: xxswapd v3, vs0
+; CHECK-LE-NEXT: vsld v2, v2, v3
+; CHECK-LE-NEXT: vsrad v2, v2, v3
+; CHECK-LE-NEXT: xvcvsxddp v2, v2
+; CHECK-LE-NEXT: blr
%w = sitofp <2 x i16> %a to <2 x double>
ret <2 x double> %w
-; CHECK-LABEL: @test69
-; CHECK-DAG: lxvd2x v2, 0, r3
-; CHECK-DAG: xvcvsxddp v2, v2
-; CHECK: blr
-; CHECK-LE-LABEL: @test69
-; CHECK-LE: vperm
-; CHECK-LE: vsld
-; CHECK-LE: vsrad
-; CHECK-LE: xvcvsxddp v2, v2
-; CHECK-LE: blr
}
; This gets scalarized so the code isn't great
define <2 x double> @test70(<2 x i8> %a) {
+; CHECK-LABEL: test70:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addis r3, r2, .LCPI64_0@toc@ha
+; CHECK-NEXT: addi r3, r3, .LCPI64_0@toc@l
+; CHECK-NEXT: lxvw4x v3, 0, r3
+; CHECK-NEXT: addi r3, r1, -32
+; CHECK-NEXT: vperm v2, v2, v2, v3
+; CHECK-NEXT: stxvd2x v2, 0, r3
+; CHECK-NEXT: ld r3, -24(r1)
+; CHECK-NEXT: extsb r3, r3
+; CHECK-NEXT: std r3, -8(r1)
+; CHECK-NEXT: ld r3, -32(r1)
+; CHECK-NEXT: extsb r3, r3
+; CHECK-NEXT: std r3, -16(r1)
+; CHECK-NEXT: addi r3, r1, -16
+; CHECK-NEXT: lxvd2x v2, 0, r3
+; CHECK-NEXT: xvcvsxddp v2, v2
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test70:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: addis r3, r2, .LCPI64_0@toc@ha
+; CHECK-REG-NEXT: addi r3, r3, .LCPI64_0@toc@l
+; CHECK-REG-NEXT: lxvw4x v3, 0, r3
+; CHECK-REG-NEXT: addi r3, r1, -32
+; CHECK-REG-NEXT: vperm v2, v2, v2, v3
+; CHECK-REG-NEXT: stxvd2x v2, 0, r3
+; CHECK-REG-NEXT: ld r3, -24(r1)
+; CHECK-REG-NEXT: extsb r3, r3
+; CHECK-REG-NEXT: std r3, -8(r1)
+; CHECK-REG-NEXT: ld r3, -32(r1)
+; CHECK-REG-NEXT: extsb r3, r3
+; CHECK-REG-NEXT: std r3, -16(r1)
+; CHECK-REG-NEXT: addi r3, r1, -16
+; CHECK-REG-NEXT: lxvd2x v2, 0, r3
+; CHECK-REG-NEXT: xvcvsxddp v2, v2
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test70:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: addis r3, r2, .LCPI64_0@toc@ha
+; CHECK-FISL-NEXT: addi r3, r3, .LCPI64_0@toc@l
+; CHECK-FISL-NEXT: lxvw4x v3, 0, r3
+; CHECK-FISL-NEXT: vperm v2, v2, v2, v3
+; CHECK-FISL-NEXT: addi r3, r1, -32
+; CHECK-FISL-NEXT: stxvd2x v2, 0, r3
+; CHECK-FISL-NEXT: ld r3, -24(r1)
+; CHECK-FISL-NEXT: extsb r3, r3
+; CHECK-FISL-NEXT: std r3, -8(r1)
+; CHECK-FISL-NEXT: ld r3, -32(r1)
+; CHECK-FISL-NEXT: extsb r3, r3
+; CHECK-FISL-NEXT: std r3, -16(r1)
+; CHECK-FISL-NEXT: addi r3, r1, -16
+; CHECK-FISL-NEXT: lxvd2x vs0, 0, r3
+; CHECK-FISL-NEXT: xxlor v2, vs0, vs0
+; CHECK-FISL-NEXT: xvcvsxddp v2, v2
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test70:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: addis r3, r2, .LCPI64_0@toc@ha
+; CHECK-LE-NEXT: addi r3, r3, .LCPI64_0@toc@l
+; CHECK-LE-NEXT: lvx v3, 0, r3
+; CHECK-LE-NEXT: addis r3, r2, .LCPI64_1@toc@ha
+; CHECK-LE-NEXT: addi r3, r3, .LCPI64_1@toc@l
+; CHECK-LE-NEXT: lxvd2x vs0, 0, r3
+; CHECK-LE-NEXT: vperm v2, v2, v2, v3
+; CHECK-LE-NEXT: xxswapd v3, vs0
+; CHECK-LE-NEXT: vsld v2, v2, v3
+; CHECK-LE-NEXT: vsrad v2, v2, v3
+; CHECK-LE-NEXT: xvcvsxddp v2, v2
+; CHECK-LE-NEXT: blr
%w = sitofp <2 x i8> %a to <2 x double>
ret <2 x double> %w
-; CHECK-LABEL: @test70
-; CHECK-DAG: lxvd2x v2, 0, r3
-; CHECK-DAG: xvcvsxddp v2, v2
-; CHECK: blr
-; CHECK-LE-LABEL: @test70
-; CHECK-LE: vperm
-; CHECK-LE: vsld
-; CHECK-LE: vsrad
-; CHECK-LE: xvcvsxddp v2, v2
-; CHECK-LE: blr
}
; This gets scalarized so the code isn't great
define <2 x i32> @test80(i32 %v) {
+; CHECK-LABEL: test80:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi r4, r1, -16
+; CHECK-NEXT: stw r3, -16(r1)
+; CHECK-NEXT: addis r3, r2, .LCPI65_0@toc@ha
+; CHECK-NEXT: lxvw4x vs0, 0, r4
+; CHECK-NEXT: addi r3, r3, .LCPI65_0@toc@l
+; CHECK-NEXT: lxvw4x v3, 0, r3
+; CHECK-NEXT: xxspltw v2, vs0, 0
+; CHECK-NEXT: vadduwm v2, v2, v3
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test80:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: addi r4, r1, -16
+; CHECK-REG-NEXT: stw r3, -16(r1)
+; CHECK-REG-NEXT: addis r3, r2, .LCPI65_0@toc@ha
+; CHECK-REG-NEXT: lxvw4x vs0, 0, r4
+; CHECK-REG-NEXT: addi r3, r3, .LCPI65_0@toc@l
+; CHECK-REG-NEXT: lxvw4x v3, 0, r3
+; CHECK-REG-NEXT: xxspltw v2, vs0, 0
+; CHECK-REG-NEXT: vadduwm v2, v2, v3
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test80:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: mr r4, r3
+; CHECK-FISL-NEXT: stw r4, -16(r1)
+; CHECK-FISL-NEXT: addi r3, r1, -16
+; CHECK-FISL-NEXT: lxvw4x vs0, 0, r3
+; CHECK-FISL-NEXT: xxspltw v2, vs0, 0
+; CHECK-FISL-NEXT: addis r3, r2, .LCPI65_0@toc@ha
+; CHECK-FISL-NEXT: addi r3, r3, .LCPI65_0@toc@l
+; CHECK-FISL-NEXT: lxvw4x v3, 0, r3
+; CHECK-FISL-NEXT: vadduwm v2, v2, v3
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test80:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: mtvsrd f0, r3
+; CHECK-LE-NEXT: addis r4, r2, .LCPI65_0@toc@ha
+; CHECK-LE-NEXT: addi r3, r4, .LCPI65_0@toc@l
+; CHECK-LE-NEXT: xxswapd vs0, vs0
+; CHECK-LE-NEXT: lvx v3, 0, r3
+; CHECK-LE-NEXT: xxspltw v2, vs0, 3
+; CHECK-LE-NEXT: vadduwm v2, v2, v3
+; CHECK-LE-NEXT: blr
%b1 = insertelement <2 x i32> undef, i32 %v, i32 0
%b2 = shufflevector <2 x i32> %b1, <2 x i32> undef, <2 x i32> zeroinitializer
%i = add <2 x i32> %b2, <i32 2, i32 3>
ret <2 x i32> %i
-; CHECK-REG-LABEL: @test80
-; CHECK-REG-DAG: stw r3, -16(r1)
-; CHECK-REG-DAG: addi r4, r1, -16
-; CHECK-REG: addis r3, r2, .LCPI65_0@toc@ha
-; CHECK-REG-DAG: addi r3, r3, .LCPI65_0@toc@l
-; CHECK-REG-DAG: lxvw4x vs0, 0, r4
-; CHECK-REG-DAG: lxvw4x v3, 0, r3
-; CHECK-REG: xxspltw v2, vs0, 0
-; CHECK-REG: vadduwm v2, v2, v3
-; CHECK-REG-NOT: stxvw4x
-; CHECK-REG: blr
-
-; CHECK-FISL-LABEL: @test80
-; CHECK-FISL: mr r4, r3
-; CHECK-FISL: stw r4, -16(r1)
-; CHECK-FISL: addi r3, r1, -16
-; CHECK-FISL-DAG: lxvw4x vs0, 0, r3
-; CHECK-FISL-DAG: xxspltw v2, vs0, 0
-; CHECK-FISL: addis r3, r2, .LCPI65_0@toc@ha
-; CHECK-FISL: addi r3, r3, .LCPI65_0@toc@l
-; CHECK-FISL-DAG: lxvw4x v3, 0, r3
-; CHECK-FISL: vadduwm
-; CHECK-FISL-NOT: stxvw4x
-; CHECK-FISL: blr
-
-; CHECK-LE-LABEL: @test80
-; CHECK-LE-DAG: mtvsrd f0, r3
-; CHECK-LE-DAG: xxswapd vs0, vs0
-; CHECK-LE-DAG: addi r3, r4, .LCPI65_0@toc@l
-; CHECK-LE-DAG: lvx v3, 0, r3
-; CHECK-LE-DAG: xxspltw v2, vs0, 3
-; CHECK-LE-NOT: xxswapd v3,
-; CHECK-LE: vadduwm v2, v2, v3
-; CHECK-LE: blr
+
+
}
define <2 x double> @test81(<4 x float> %b) {
+; CHECK-LABEL: test81:
+; CHECK: # %bb.0:
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test81:
+; CHECK-REG: # %bb.0:
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test81:
+; CHECK-FISL: # %bb.0:
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test81:
+; CHECK-LE: # %bb.0:
+; CHECK-LE-NEXT: blr
%w = bitcast <4 x float> %b to <2 x double>
ret <2 x double> %w
-; CHECK-LABEL: @test81
-; CHECK: blr
-; CHECK-LE-LABEL: @test81
-; CHECK-LE: blr
}
define double @test82(double %a, double %b, double %c, double %d) {
+; CHECK-LABEL: test82:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: xscmpudp cr0, f3, f4
+; CHECK-NEXT: beqlr cr0
+; CHECK-NEXT: # %bb.1: # %entry
+; CHECK-NEXT: fmr f1, f2
+; CHECK-NEXT: blr
+;
+; CHECK-REG-LABEL: test82:
+; CHECK-REG: # %bb.0: # %entry
+; CHECK-REG-NEXT: xscmpudp cr0, f3, f4
+; CHECK-REG-NEXT: beqlr cr0
+; CHECK-REG-NEXT: # %bb.1: # %entry
+; CHECK-REG-NEXT: fmr f1, f2
+; CHECK-REG-NEXT: blr
+;
+; CHECK-FISL-LABEL: test82:
+; CHECK-FISL: # %bb.0: # %entry
+; CHECK-FISL-NEXT: xscmpudp cr0, f3, f4
+; CHECK-FISL-NEXT: stfd f2, -8(r1) # 8-byte Folded Spill
+; CHECK-FISL-NEXT: stfd f1, -16(r1) # 8-byte Folded Spill
+; CHECK-FISL-NEXT: beq cr0, .LBB67_2
+; CHECK-FISL-NEXT: # %bb.1: # %entry
+; CHECK-FISL-NEXT: lfd f0, -8(r1) # 8-byte Folded Reload
+; CHECK-FISL-NEXT: stfd f0, -16(r1) # 8-byte Folded Spill
+; CHECK-FISL-NEXT: .LBB67_2: # %entry
+; CHECK-FISL-NEXT: lfd f0, -16(r1) # 8-byte Folded Reload
+; CHECK-FISL-NEXT: fmr f1, f0
+; CHECK-FISL-NEXT: blr
+;
+; CHECK-LE-LABEL: test82:
+; CHECK-LE: # %bb.0: # %entry
+; CHECK-LE-NEXT: xscmpudp cr0, f3, f4
+; CHECK-LE-NEXT: beqlr cr0
+; CHECK-LE-NEXT: # %bb.1: # %entry
+; CHECK-LE-NEXT: fmr f1, f2
+; CHECK-LE-NEXT: blr
entry:
%m = fcmp oeq double %c, %d
%v = select i1 %m, double %a, double %b
ret double %v
-; CHECK-REG-LABEL: @test82
-; CHECK-REG: xscmpudp cr0, f3, f4
-; CHECK-REG: beqlr cr0
-; CHECK-FISL-LABEL: @test82
-; CHECK-FISL: xscmpudp cr0, f3, f4
-; CHECK-FISL: beq cr0
-; CHECK-LE-LABEL: @test82
-; CHECK-LE: xscmpudp cr0, f3, f4
-; CHECK-LE: beqlr cr0
}
Index: test/CodeGen/SystemZ/swifterror.ll
===================================================================
--- test/CodeGen/SystemZ/swifterror.ll
+++ test/CodeGen/SystemZ/swifterror.ll
@@ -16,7 +16,7 @@
; CHECK-O0-LABEL: foo:
; CHECK-O0: lghi %r2, 16
; CHECK-O0: brasl %r14, malloc
-; CHECK-O0: lgr %r9, %r2
+; CHECK-O0: lgr %r0, %r2
; CHECK-O0: mvi 8(%r2), 1
entry:
%call = call i8* @malloc(i64 16)
Index: test/CodeGen/X86/atomic-unordered.ll
===================================================================
--- test/CodeGen/X86/atomic-unordered.ll
+++ test/CodeGen/X86/atomic-unordered.ll
@@ -422,9 +422,8 @@
define i64 @load_fold_add1(i64* %p) {
; CHECK-O0-LABEL: load_fold_add1:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: addq $15, %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: addq $15, %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_add1:
@@ -459,9 +458,8 @@
define i64 @load_fold_add3(i64* %p1, i64* %p2) {
; CHECK-O0-LABEL: load_fold_add3:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: addq (%rsi), %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: addq (%rsi), %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_add3:
@@ -480,9 +478,8 @@
define i64 @load_fold_sub1(i64* %p) {
; CHECK-O0-LABEL: load_fold_sub1:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: subq $15, %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: subq $15, %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_sub1:
@@ -499,9 +496,8 @@
define i64 @load_fold_sub2(i64* %p, i64 %v2) {
; CHECK-O0-LABEL: load_fold_sub2:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: subq %rsi, %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: subq %rsi, %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_sub2:
@@ -518,9 +514,8 @@
define i64 @load_fold_sub3(i64* %p1, i64* %p2) {
; CHECK-O0-LABEL: load_fold_sub3:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: subq (%rsi), %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: subq (%rsi), %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_sub3:
@@ -575,9 +570,8 @@
define i64 @load_fold_mul3(i64* %p1, i64* %p2) {
; CHECK-O0-LABEL: load_fold_mul3:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: imulq (%rsi), %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: imulq (%rsi), %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_mul3:
@@ -598,8 +592,8 @@
; CHECK-O0: # %bb.0:
; CHECK-O0-NEXT: movq (%rdi), %rax
; CHECK-O0-NEXT: cqto
-; CHECK-O0-NEXT: movl $15, %edi
-; CHECK-O0-NEXT: idivq %rdi
+; CHECK-O0-NEXT: movl $15, %ecx
+; CHECK-O0-NEXT: idivq %rcx
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_sdiv1:
@@ -668,8 +662,8 @@
; CHECK-O0-NEXT: movq (%rdi), %rax
; CHECK-O0-NEXT: xorl %ecx, %ecx
; CHECK-O0-NEXT: movl %ecx, %edx
-; CHECK-O0-NEXT: movl $15, %edi
-; CHECK-O0-NEXT: divq %rdi
+; CHECK-O0-NEXT: movl $15, %esi
+; CHECK-O0-NEXT: divq %rsi
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_udiv1:
@@ -735,8 +729,8 @@
; CHECK-O0: # %bb.0:
; CHECK-O0-NEXT: movq (%rdi), %rax
; CHECK-O0-NEXT: cqto
-; CHECK-O0-NEXT: movl $15, %edi
-; CHECK-O0-NEXT: idivq %rdi
+; CHECK-O0-NEXT: movl $15, %ecx
+; CHECK-O0-NEXT: idivq %rcx
; CHECK-O0-NEXT: movq %rdx, %rax
; CHECK-O0-NEXT: retq
;
@@ -814,8 +808,8 @@
; CHECK-O0-NEXT: movq (%rdi), %rax
; CHECK-O0-NEXT: xorl %ecx, %ecx
; CHECK-O0-NEXT: movl %ecx, %edx
-; CHECK-O0-NEXT: movl $15, %edi
-; CHECK-O0-NEXT: divq %rdi
+; CHECK-O0-NEXT: movl $15, %esi
+; CHECK-O0-NEXT: divq %rsi
; CHECK-O0-NEXT: movq %rdx, %rax
; CHECK-O0-NEXT: retq
;
@@ -888,9 +882,8 @@
define i64 @load_fold_shl1(i64* %p) {
; CHECK-O0-LABEL: load_fold_shl1:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: shlq $15, %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: shlq $15, %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_shl1:
@@ -907,11 +900,10 @@
define i64 @load_fold_shl2(i64* %p, i64 %v2) {
; CHECK-O0-LABEL: load_fold_shl2:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
+; CHECK-O0-NEXT: movq (%rdi), %rax
; CHECK-O0-NEXT: movq %rsi, %rcx
; CHECK-O0-NEXT: # kill: def $cl killed $rcx
-; CHECK-O0-NEXT: shlq %cl, %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: shlq %cl, %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_shl2:
@@ -930,11 +922,10 @@
define i64 @load_fold_shl3(i64* %p1, i64* %p2) {
; CHECK-O0-LABEL: load_fold_shl3:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
+; CHECK-O0-NEXT: movq (%rdi), %rax
; CHECK-O0-NEXT: movq (%rsi), %rcx
; CHECK-O0-NEXT: # kill: def $cl killed $rcx
-; CHECK-O0-NEXT: shlq %cl, %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: shlq %cl, %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_shl3:
@@ -954,9 +945,8 @@
define i64 @load_fold_lshr1(i64* %p) {
; CHECK-O0-LABEL: load_fold_lshr1:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: shrq $15, %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: shrq $15, %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_lshr1:
@@ -973,11 +963,10 @@
define i64 @load_fold_lshr2(i64* %p, i64 %v2) {
; CHECK-O0-LABEL: load_fold_lshr2:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
+; CHECK-O0-NEXT: movq (%rdi), %rax
; CHECK-O0-NEXT: movq %rsi, %rcx
; CHECK-O0-NEXT: # kill: def $cl killed $rcx
-; CHECK-O0-NEXT: shrq %cl, %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: shrq %cl, %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_lshr2:
@@ -996,11 +985,10 @@
define i64 @load_fold_lshr3(i64* %p1, i64* %p2) {
; CHECK-O0-LABEL: load_fold_lshr3:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
+; CHECK-O0-NEXT: movq (%rdi), %rax
; CHECK-O0-NEXT: movq (%rsi), %rcx
; CHECK-O0-NEXT: # kill: def $cl killed $rcx
-; CHECK-O0-NEXT: shrq %cl, %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: shrq %cl, %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_lshr3:
@@ -1020,9 +1008,8 @@
define i64 @load_fold_ashr1(i64* %p) {
; CHECK-O0-LABEL: load_fold_ashr1:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: sarq $15, %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: sarq $15, %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_ashr1:
@@ -1039,11 +1026,10 @@
define i64 @load_fold_ashr2(i64* %p, i64 %v2) {
; CHECK-O0-LABEL: load_fold_ashr2:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
+; CHECK-O0-NEXT: movq (%rdi), %rax
; CHECK-O0-NEXT: movq %rsi, %rcx
; CHECK-O0-NEXT: # kill: def $cl killed $rcx
-; CHECK-O0-NEXT: sarq %cl, %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: sarq %cl, %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_ashr2:
@@ -1062,11 +1048,10 @@
define i64 @load_fold_ashr3(i64* %p1, i64* %p2) {
; CHECK-O0-LABEL: load_fold_ashr3:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
+; CHECK-O0-NEXT: movq (%rdi), %rax
; CHECK-O0-NEXT: movq (%rsi), %rcx
; CHECK-O0-NEXT: # kill: def $cl killed $rcx
-; CHECK-O0-NEXT: sarq %cl, %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: sarq %cl, %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_ashr3:
@@ -1086,9 +1071,8 @@
define i64 @load_fold_and1(i64* %p) {
; CHECK-O0-LABEL: load_fold_and1:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: andq $15, %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: andq $15, %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_and1:
@@ -1123,9 +1107,8 @@
define i64 @load_fold_and3(i64* %p1, i64* %p2) {
; CHECK-O0-LABEL: load_fold_and3:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: andq (%rsi), %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: andq (%rsi), %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_and3:
@@ -1144,9 +1127,8 @@
define i64 @load_fold_or1(i64* %p) {
; CHECK-O0-LABEL: load_fold_or1:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: orq $15, %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: orq $15, %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_or1:
@@ -1181,9 +1163,8 @@
define i64 @load_fold_or3(i64* %p1, i64* %p2) {
; CHECK-O0-LABEL: load_fold_or3:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: orq (%rsi), %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: orq (%rsi), %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_or3:
@@ -1202,9 +1183,8 @@
define i64 @load_fold_xor1(i64* %p) {
; CHECK-O0-LABEL: load_fold_xor1:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: xorq $15, %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: xorq $15, %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_xor1:
@@ -1239,9 +1219,8 @@
define i64 @load_fold_xor3(i64* %p1, i64* %p2) {
; CHECK-O0-LABEL: load_fold_xor3:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: xorq (%rsi), %rdi
-; CHECK-O0-NEXT: movq %rdi, %rax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: xorq (%rsi), %rax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_xor3:
@@ -1260,10 +1239,11 @@
define i1 @load_fold_icmp1(i64* %p) {
; CHECK-O0-LABEL: load_fold_icmp1:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: subq $15, %rdi
-; CHECK-O0-NEXT: sete %al
-; CHECK-O0-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: subq $15, %rax
+; CHECK-O0-NEXT: sete %cl
+; CHECK-O0-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT: movb %cl, %al
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_icmp1:
@@ -1281,10 +1261,11 @@
define i1 @load_fold_icmp2(i64* %p, i64 %v2) {
; CHECK-O0-LABEL: load_fold_icmp2:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: subq %rsi, %rdi
-; CHECK-O0-NEXT: sete %al
-; CHECK-O0-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: subq %rsi, %rax
+; CHECK-O0-NEXT: sete %cl
+; CHECK-O0-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT: movb %cl, %al
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_icmp2:
@@ -1302,11 +1283,12 @@
define i1 @load_fold_icmp3(i64* %p1, i64* %p2) {
; CHECK-O0-LABEL: load_fold_icmp3:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: movq (%rsi), %rsi
-; CHECK-O0-NEXT: subq %rsi, %rdi
-; CHECK-O0-NEXT: sete %al
-; CHECK-O0-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: movq (%rsi), %rcx
+; CHECK-O0-NEXT: subq %rcx, %rax
+; CHECK-O0-NEXT: sete %dl
+; CHECK-O0-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT: movb %dl, %al
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: load_fold_icmp3:
@@ -1950,8 +1932,9 @@
define i32 @fold_trunc(i64* %p) {
; CHECK-O0-LABEL: fold_trunc:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: movl %edi, %eax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: movl %eax, %ecx
+; CHECK-O0-NEXT: movl %ecx, %eax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: fold_trunc:
@@ -1968,9 +1951,10 @@
define i32 @fold_trunc_add(i64* %p, i32 %v2) {
; CHECK-O0-LABEL: fold_trunc_add:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: movl %edi, %eax
-; CHECK-O0-NEXT: addl %esi, %eax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: movl %eax, %ecx
+; CHECK-O0-NEXT: addl %esi, %ecx
+; CHECK-O0-NEXT: movl %ecx, %eax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: fold_trunc_add:
@@ -1989,9 +1973,10 @@
define i32 @fold_trunc_and(i64* %p, i32 %v2) {
; CHECK-O0-LABEL: fold_trunc_and:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: movl %edi, %eax
-; CHECK-O0-NEXT: andl %esi, %eax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: movl %eax, %ecx
+; CHECK-O0-NEXT: andl %esi, %ecx
+; CHECK-O0-NEXT: movl %ecx, %eax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: fold_trunc_and:
@@ -2010,9 +1995,10 @@
define i32 @fold_trunc_or(i64* %p, i32 %v2) {
; CHECK-O0-LABEL: fold_trunc_or:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: movl %edi, %eax
-; CHECK-O0-NEXT: orl %esi, %eax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: movl %eax, %ecx
+; CHECK-O0-NEXT: orl %esi, %ecx
+; CHECK-O0-NEXT: movl %ecx, %eax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: fold_trunc_or:
@@ -2032,12 +2018,12 @@
define i32 @split_load(i64* %p) {
; CHECK-O0-LABEL: split_load:
; CHECK-O0: # %bb.0:
-; CHECK-O0-NEXT: movq (%rdi), %rdi
-; CHECK-O0-NEXT: movb %dil, %al
-; CHECK-O0-NEXT: shrq $32, %rdi
-; CHECK-O0-NEXT: movb %dil, %cl
-; CHECK-O0-NEXT: orb %cl, %al
-; CHECK-O0-NEXT: movzbl %al, %eax
+; CHECK-O0-NEXT: movq (%rdi), %rax
+; CHECK-O0-NEXT: movb %al, %cl
+; CHECK-O0-NEXT: shrq $32, %rax
+; CHECK-O0-NEXT: movb %al, %dl
+; CHECK-O0-NEXT: orb %dl, %cl
+; CHECK-O0-NEXT: movzbl %cl, %eax
; CHECK-O0-NEXT: retq
;
; CHECK-O3-LABEL: split_load:
Index: test/CodeGen/X86/avx-load-store.ll
===================================================================
--- test/CodeGen/X86/avx-load-store.ll
+++ test/CodeGen/X86/avx-load-store.ll
@@ -45,15 +45,15 @@
; CHECK_O0-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK_O0-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK_O0-NEXT: callq dummy
-; CHECK_O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
+; CHECK_O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; CHECK_O0-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
-; CHECK_O0-NEXT: vmovapd %ymm0, (%rdx)
-; CHECK_O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; CHECK_O0-NEXT: vmovapd %ymm0, (%rax)
+; CHECK_O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; CHECK_O0-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
-; CHECK_O0-NEXT: vmovaps %ymm1, (%rsi)
-; CHECK_O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
+; CHECK_O0-NEXT: vmovaps %ymm1, (%rcx)
+; CHECK_O0-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; CHECK_O0-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload
-; CHECK_O0-NEXT: vmovdqa %ymm2, (%rdi)
+; CHECK_O0-NEXT: vmovdqa %ymm2, (%rdx)
; CHECK_O0-NEXT: addq $152, %rsp
; CHECK_O0-NEXT: vzeroupper
; CHECK_O0-NEXT: retq
Index: test/CodeGen/X86/extend-set-cc-uses-dbg.ll
===================================================================
--- test/CodeGen/X86/extend-set-cc-uses-dbg.ll
+++ test/CodeGen/X86/extend-set-cc-uses-dbg.ll
@@ -7,8 +7,8 @@
bb:
%tmp = load i32, i32* %p, align 4, !dbg !7
; CHECK: $eax = MOV32rm killed {{.*}} $rdi, {{.*}} debug-location !7 :: (load 4 from %ir.p)
- ; CHECK-NEXT: $edi = MOV32rr killed $eax, implicit-def $rdi, debug-location !7
- ; CHECK-NEXT: $rcx = MOV64rr $rdi, debug-location !7
+ ; CHECK-NEXT: $ecx = MOV32rr killed $eax, implicit-def $rcx, debug-location !7
+ ; CHECK-NEXT: $rdx = MOV64rr $rcx, debug-location !7
switch i32 %tmp, label %bb7 [
i32 0, label %bb1
Index: test/CodeGen/X86/fast-isel-select.ll
===================================================================
--- test/CodeGen/X86/fast-isel-select.ll
+++ test/CodeGen/X86/fast-isel-select.ll
@@ -11,12 +11,12 @@
; CHECK: ## %bb.0:
; CHECK-NEXT: movb %sil, %al
; CHECK-NEXT: movb %dil, %cl
-; CHECK-NEXT: xorl %esi, %esi
+; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: subb %al, %cl
; CHECK-NEXT: testb $1, %cl
-; CHECK-NEXT: movl $1204476887, %edi ## imm = 0x47CADBD7
-; CHECK-NEXT: cmovnel %edi, %esi
-; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: movl $1204476887, %esi ## imm = 0x47CADBD7
+; CHECK-NEXT: cmovnel %esi, %edx
+; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: retq
%shuffleInternal15257_8932 = sub i1 %exchSub2211_, %trunc_8766
%counter_diff1345 = select i1 %shuffleInternal15257_8932, i32 1204476887, i32 0
Index: test/CodeGen/X86/fast-isel-x86-64.ll
===================================================================
--- test/CodeGen/X86/fast-isel-x86-64.ll
+++ test/CodeGen/X86/fast-isel-x86-64.ll
@@ -13,7 +13,7 @@
}
; CHECK-LABEL: test1:
-; CHECK: andl $8,
+; CHECK: andl $8,
; rdar://9289512 - The load should fold into the compare.
@@ -119,14 +119,14 @@
%Y = udiv i32 %X, 8
ret i32 %Y
; CHECK-LABEL: test10:
-; CHECK: shrl $3,
+; CHECK: shrl $3,
}
define i32 @test11(i32 %X) nounwind {
%Y = sdiv exact i32 %X, 8
ret i32 %Y
; CHECK-LABEL: test11:
-; CHECK: sarl $3,
+; CHECK: sarl $3,
}
@@ -168,7 +168,7 @@
call void @test13f(i1 zeroext %tobool) noredzone
ret void
; CHECK-LABEL: test14:
-; CHECK: andb $1,
+; CHECK: andb $1,
; CHECK: callq
}
@@ -227,7 +227,7 @@
; CHECK: movl (%rdi), %eax
; CHECK: callq _foo
; CHECK: cmpl $5, %eax
-; CHECK-NEXT: je
+; CHECK-NEXT: je
}
; Check that 0.0 is materialized using xorps
@@ -299,8 +299,8 @@
; CHECK-LABEL: test23:
; CHECK: movq %rdi, [[STACK:[0-9]+\(%rsp\)]]
; CHECK: call
-; CHECK: movq [[STACK]], %rdi
-; CHECK: movq %rdi, %rax
+; CHECK: movq [[STACK]], %rcx
+; CHECK: movq %rcx, %rax
; CHECK: ret
}
Index: test/CodeGen/X86/pr27591.ll
===================================================================
--- test/CodeGen/X86/pr27591.ll
+++ test/CodeGen/X86/pr27591.ll
@@ -9,8 +9,9 @@
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: cmpl $0, %edi
; CHECK-NEXT: setne %al
-; CHECK-NEXT: movzbl %al, %edi
-; CHECK-NEXT: andl $1, %edi
+; CHECK-NEXT: movzbl %al, %ecx
+; CHECK-NEXT: andl $1, %ecx
+; CHECK-NEXT: movl %ecx, %edi
; CHECK-NEXT: callq callee1
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
@@ -26,9 +27,10 @@
; CHECK-NEXT: pushq %rax
; CHECK-NEXT: cmpl $0, %edi
; CHECK-NEXT: setne %al
-; CHECK-NEXT: movzbl %al, %edi
-; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: negl %edi
+; CHECK-NEXT: movzbl %al, %ecx
+; CHECK-NEXT: andl $1, %ecx
+; CHECK-NEXT: negl %ecx
+; CHECK-NEXT: movl %ecx, %edi
; CHECK-NEXT: callq callee2
; CHECK-NEXT: popq %rax
; CHECK-NEXT: retq
Index: test/CodeGen/X86/swift-return.ll
===================================================================
--- test/CodeGen/X86/swift-return.ll
+++ test/CodeGen/X86/swift-return.ll
@@ -28,10 +28,10 @@
; CHECK-O0-NEXT: movl %edi, {{[0-9]+}}(%rsp)
; CHECK-O0-NEXT: movl {{[0-9]+}}(%rsp), %edi
; CHECK-O0-NEXT: callq gen
-; CHECK-O0-NEXT: movswl %ax, %edi
-; CHECK-O0-NEXT: movsbl %dl, %ecx
-; CHECK-O0-NEXT: addl %ecx, %edi
-; CHECK-O0-NEXT: movw %di, %ax
+; CHECK-O0-NEXT: movswl %ax, %ecx
+; CHECK-O0-NEXT: movsbl %dl, %esi
+; CHECK-O0-NEXT: addl %esi, %ecx
+; CHECK-O0-NEXT: movw %cx, %ax
; CHECK-O0-NEXT: popq %rcx
; CHECK-O0-NEXT: .cfi_def_cfa_offset 8
; CHECK-O0-NEXT: retq
@@ -79,16 +79,16 @@
; CHECK-O0-NEXT: movl {{[0-9]+}}(%rsp), %edi
; CHECK-O0-NEXT: movq %rsp, %rax
; CHECK-O0-NEXT: callq gen2
-; CHECK-O0-NEXT: movl {{[0-9]+}}(%rsp), %edi
; CHECK-O0-NEXT: movl {{[0-9]+}}(%rsp), %ecx
; CHECK-O0-NEXT: movl {{[0-9]+}}(%rsp), %edx
-; CHECK-O0-NEXT: movl (%rsp), %esi
+; CHECK-O0-NEXT: movl {{[0-9]+}}(%rsp), %esi
+; CHECK-O0-NEXT: movl (%rsp), %edi
; CHECK-O0-NEXT: movl {{[0-9]+}}(%rsp), %r8d
-; CHECK-O0-NEXT: addl %r8d, %esi
-; CHECK-O0-NEXT: addl %edx, %esi
-; CHECK-O0-NEXT: addl %ecx, %esi
-; CHECK-O0-NEXT: addl %edi, %esi
-; CHECK-O0-NEXT: movl %esi, %eax
+; CHECK-O0-NEXT: addl %r8d, %edi
+; CHECK-O0-NEXT: addl %esi, %edi
+; CHECK-O0-NEXT: addl %edx, %edi
+; CHECK-O0-NEXT: addl %ecx, %edi
+; CHECK-O0-NEXT: movl %edi, %eax
; CHECK-O0-NEXT: addq $24, %rsp
; CHECK-O0-NEXT: .cfi_def_cfa_offset 8
; CHECK-O0-NEXT: retq
Index: test/DebugInfo/X86/op_deref.ll
===================================================================
--- test/DebugInfo/X86/op_deref.ll
+++ test/DebugInfo/X86/op_deref.ll
@@ -6,10 +6,10 @@
; RUN: | FileCheck %s -check-prefix=CHECK -check-prefix=DWARF3
; DWARF4: DW_AT_location [DW_FORM_sec_offset] (0x00000000
-; DWARF4-NEXT: {{.*}}: DW_OP_breg2 RCX+0, DW_OP_deref
+; DWARF4-NEXT: {{.*}}: DW_OP_breg1 RDX+0, DW_OP_deref
; DWARF3: DW_AT_location [DW_FORM_data4] (0x00000000
-; DWARF3-NEXT: {{.*}}: DW_OP_breg2 RCX+0, DW_OP_deref
+; DWARF3-NEXT: {{.*}}: DW_OP_breg1 RDX+0, DW_OP_deref
; CHECK-NOT: DW_TAG
; CHECK: DW_AT_name [DW_FORM_strp] ( .debug_str[0x00000067] = "vla")
@@ -17,8 +17,8 @@
; Check the DEBUG_VALUE comments for good measure.
; RUN: llc -O0 -mtriple=x86_64-apple-darwin %s -o - -filetype=asm | FileCheck %s -check-prefix=ASM-CHECK
; vla should have a register-indirect address at one point.
-; ASM-CHECK: DEBUG_VALUE: vla <- [DW_OP_deref] [$rcx+0]
-; ASM-CHECK: DW_OP_breg2
+; ASM-CHECK: DEBUG_VALUE: vla <- [DW_OP_deref] [$rdx+0]
+; ASM-CHECK: DW_OP_breg1
; RUN: llvm-as %s -o - | llvm-dis - | FileCheck %s --check-prefix=PRETTY-PRINT
; PRETTY-PRINT: DIExpression(DW_OP_deref)