Index: llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
===================================================================
--- llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -1393,29 +1393,93 @@
   }
 
   for (MachineOperand *Op : HardenOpRegs) {
-    auto *OpRC = MRI->getRegClass(Op->getReg());
-
     unsigned OpReg = Op->getReg();
+    auto *OpRC = MRI->getRegClass(OpReg);
     unsigned TmpReg = MRI->createVirtualRegister(OpRC);
 
-    if (!EFLAGSLive) {
-      // Merge our potential poison state into the value with an or.
-      auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), TmpReg)
-                     .addReg(StateReg)
-                     .addReg(OpReg);
-      OrI->addRegisterDead(X86::EFLAGS, TRI);
+    // If this is a vector register, we'll need somewhat custom logic to handle
+    // hardening it.
+    if (OpRC->hasSuperClassEq(&X86::VR128RegClass) ||
+        OpRC->hasSuperClassEq(&X86::VR256RegClass)) {
+      assert(Subtarget->hasAVX2() &&
+             "We can only have vector registers involved in addressing "
+             "starting with AVX2.");
+
+      bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128RegClass);
+
+      // Move our state into a vector register.
+      // FIXME: We could skip this at the cost of longer encodings with AVX-512
+      // but that doesn't seem likely worth it.
+      unsigned VStateReg = MRI->createVirtualRegister(&X86::VR128RegClass);
+      auto MovI =
+          BuildMI(MBB, InsertPt, Loc, TII->get(X86::VMOV64toPQIrr), VStateReg)
+              .addReg(StateReg);
+      (void)MovI;
+      ++NumInstsInserted;
+      LLVM_DEBUG(dbgs() << "  Inserting mov: "; MovI->dump(); dbgs() << "\n");
+
+      // Broadcast it across the vector register.
+      unsigned VBStateReg = MRI->createVirtualRegister(OpRC);
+      auto BroadcastI = BuildMI(MBB, InsertPt, Loc,
+                                TII->get(Is128Bit ? X86::VPBROADCASTQrr
+                                                  : X86::VPBROADCASTQYrr),
+                                VBStateReg)
+                            .addReg(VStateReg);
+      (void)BroadcastI;
+      ++NumInstsInserted;
+      LLVM_DEBUG(dbgs() << "  Inserting broadcast: "; BroadcastI->dump();
+                 dbgs() << "\n");
+
+      // Merge our potential poison state into the value with a vector or.
+      auto OrI =
+          BuildMI(MBB, InsertPt, Loc,
+                  TII->get(Is128Bit ? X86::VPORrr : X86::VPORYrr), TmpReg)
+              .addReg(VBStateReg)
+              .addReg(OpReg);
+      (void)OrI;
       ++NumInstsInserted;
       LLVM_DEBUG(dbgs() << "  Inserting or: "; OrI->dump(); dbgs() << "\n");
-    } else {
-      // We need to avoid touching EFLAGS so shift out all but the least
-      // significant bit using the instruction that doesn't update flags.
-      auto ShiftI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHRX64rr), TmpReg)
-                        .addReg(OpReg)
-                        .addReg(StateReg);
-      (void)ShiftI;
+    } else if (OpRC->hasSuperClassEq(&X86::VR512RegClass)) {
+      // Broadcast our state into a vector register.
+      unsigned VStateReg = MRI->createVirtualRegister(OpRC);
+      auto BroadcastI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::VPBROADCASTQrZr), VStateReg)
+                            .addReg(StateReg);
+      (void)BroadcastI;
       ++NumInstsInserted;
-      LLVM_DEBUG(dbgs() << "  Inserting shrx: "; ShiftI->dump();
+      LLVM_DEBUG(dbgs() << "  Inserting broadcast: "; BroadcastI->dump();
                  dbgs() << "\n");
+
+      // Merge our potential poison state into the value with a vector or.
+      auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::VPORQZrr), TmpReg)
+                     .addReg(VStateReg)
+                     .addReg(OpReg);
+      ++NumInstsInserted;
+      LLVM_DEBUG(dbgs() << "  Inserting or: "; OrI->dump(); dbgs() << "\n");
+    } else {
+      // FIXME: Need to support GR32 here for 32-bit code.
+      assert(OpRC->hasSuperClassEq(&X86::GR64RegClass) &&
+             "Not a supported register class for address hardening!");
+
+      if (!EFLAGSLive) {
+        // Merge our potential poison state into the value with an or.
+        auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), TmpReg)
+                       .addReg(StateReg)
+                       .addReg(OpReg);
+        OrI->addRegisterDead(X86::EFLAGS, TRI);
+        ++NumInstsInserted;
+        LLVM_DEBUG(dbgs() << "  Inserting or: "; OrI->dump(); dbgs() << "\n");
+      } else {
+        // We need to avoid touching EFLAGS so shift out all but the least
+        // significant bit using the instruction that doesn't update flags.
+        auto ShiftI =
+            BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHRX64rr), TmpReg)
+                .addReg(OpReg)
+                .addReg(StateReg);
+        (void)ShiftI;
+        ++NumInstsInserted;
+        LLVM_DEBUG(dbgs() << "  Inserting shrx: "; ShiftI->dump();
+                   dbgs() << "\n");
+      }
     }
 
     // Record this register as checked and update the operand.
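[Editorial note, not part of the patch] The new VR128/VR256 path above hardens a vector index the same way the scalar path hardens a GR64 operand: the predicate state is moved into an xmm register (vmovq), broadcast across the lanes (vpbroadcastq), and or-ed into the index register (vpor) before the gather executes. A minimal standalone IR sketch that exercises this path is given below; it reuses the intrinsic declared in the new test, but the function and value names are illustrative only, and the exact sequence llc emits may differ from the hand-written comments.

; Illustrative sketch only: a single 128-bit gather whose <4 x i32> index
; operand the pass is expected to harden with vmovq + vpbroadcastq + vpor,
; and whose i8* base it is expected to harden with a scalar or.
declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, i8*, <4 x i32>, <4 x float>, i8)

define <4 x float> @single_gather(i8* %base, <4 x i32> %index, <4 x float> %mask) nounwind "target-features"="+avx,+avx2" {
entry:
  %v = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> zeroinitializer, i8* %base, <4 x i32> %index, <4 x float> %mask, i8 4)
  ret <4 x float> %v
}

Running this through the same RUN line as the test below (llc -mtriple=x86_64-unknown-linux-gnu -x86-speculative-load-hardening) should show the emitted hardening sequence.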
Index: llvm/test/CodeGen/X86/speculative-load-hardening-gather.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/X86/speculative-load-hardening-gather.ll
@@ -0,0 +1,270 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-speculative-load-hardening | FileCheck %s
+;
+; FIXME: It would be much nicer to do all of this by writing generic IR gathers
+; that were overly wide and letting them legalize down to whatever fits on the
+; target. Sadly, we have to do this with intrinsics.
+
+declare void @sink4f(<4 x float>)
+declare void @sink8f(<8 x float>)
+declare void @sink2d(<2 x double>)
+declare void @sink4d(<4 x double>)
+declare void @sink4i32(<4 x i32>)
+declare void @sink2i64(<2 x i64>)
+declare void @sink8i32(<8 x i32>)
+declare void @sink4i64(<4 x i64>)
+
+declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, i8*, <4 x i32>, <4 x float>, i8)
+declare <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float>, i8*, <2 x i64>, <4 x float>, i8)
+declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, i8*, <4 x i32>, <2 x double>, i8)
+declare <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double>, i8*, <2 x i64>, <2 x double>, i8)
+declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, i8*, <8 x i32>, <8 x float>, i8)
+declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float>, i8*, <4 x i64>, <4 x float>, i8)
+declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, i8*, <4 x i32>, <4 x double>, i8)
+declare <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double>, i8*, <4 x i64>, <4 x double>, i8)
+declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, i8*, <4 x i32>, <4 x i32>, i8)
+declare <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32>, i8*, <2 x i64>, <4 x i32>, i8)
+declare <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64>, i8*, <4 x i32>, <2 x i64>, i8)
+declare <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64>, i8*, <2 x i64>, <2 x i64>, i8)
+declare <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32>, i8*, <8 x i32>, <8 x i32>, i8)
+declare <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32>, i8*, <4 x i64>, <4 x i32>, i8)
+declare <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64>, i8*, <4 x i32>, <4 x i64>, i8)
+declare <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64>, i8*, <4 x i64>, <4 x i64>, i8)
+
+define void @test_avx2(i32 %a, i8* %base, <4 x i32> %indices4i32, <4 x float> %mask4f, <4 x i32> %mask4i32, <2 x i64> %indices2i64, <2 x double> %mask2d, <2 x i64> %mask2i64, <8 x i32> %indices8i32, <8 x float> %mask8f, <8 x i32> %mask8i32, <4 x i64> %indices4i64, <4 x double> %mask4d, <4 x i64> %mask4i64) nounwind "target-features"="+avx,+avx2" {
+; CHECK-LABEL: test_avx2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rbp
+; CHECK-NEXT:    movq %rsp, %rbp
+; CHECK-NEXT:    andq $-32, %rsp
+; CHECK-NEXT:    subq $384, %rsp # imm = 0x180
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    movq $-1, %rcx
+; CHECK-NEXT:    sarq $63, %rax
+; CHECK-NEXT:    testl %edi, %edi
+; CHECK-NEXT:    je .LBB0_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    cmoveq %rcx, %rax
+; CHECK-NEXT:    jmp .LBB0_3
+; CHECK-NEXT:  .LBB0_2: # %then
+; CHECK-NEXT:    cmovneq %rcx, %rax
+; CHECK-NEXT:    vmovapd 80(%rbp), %ymm9
+; CHECK-NEXT:    vmovdqa 48(%rbp), %ymm10
+; CHECK-NEXT:    orq %rax, %rsi
+; CHECK-NEXT:    vmovq %rax, %xmm8
+; CHECK-NEXT:    vpbroadcastq %xmm8, %xmm8
+; CHECK-NEXT:    vpor %xmm0, %xmm8, %xmm8
+; CHECK-NEXT:    vpxor %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    vmovaps %xmm1, %xmm11
+; CHECK-NEXT:    vgatherdps %xmm11, (%rsi,%xmm8,4), %xmm0
+; CHECK-NEXT:    vmovq %rax, %xmm11
+; CHECK-NEXT:    vpbroadcastq %xmm11, %xmm11
+; CHECK-NEXT:    vpor %xmm3, %xmm11, %xmm11
+; CHECK-NEXT:    vxorps %xmm12, %xmm12, %xmm12
+; CHECK-NEXT:    vmovaps %xmm1, %xmm3
+; CHECK-NEXT:    vgatherqps %xmm3, (%rsi,%xmm11,4), %xmm12
+; CHECK-NEXT:    vmovaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vxorps %xmm12, %xmm12, %xmm12
+; CHECK-NEXT:    vmovapd %xmm4, %xmm3
+; CHECK-NEXT:    vgatherdpd %xmm3, (%rsi,%xmm8,8), %xmm12
+; CHECK-NEXT:    vmovapd %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vxorpd %xmm3, %xmm3, %xmm3
+; CHECK-NEXT:    vgatherqpd %xmm4, (%rsi,%xmm11,8), %xmm3
+; CHECK-NEXT:    vmovapd %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vmovq %rax, %xmm3
+; CHECK-NEXT:    vpbroadcastq %xmm3, %ymm3
+; CHECK-NEXT:    vpor %ymm6, %ymm3, %ymm3
+; CHECK-NEXT:    vxorpd %xmm4, %xmm4, %xmm4
+; CHECK-NEXT:    vgatherdps %ymm7, (%rsi,%ymm3,4), %ymm4
+; CHECK-NEXT:    vmovaps %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT:    vmovq %rax, %xmm4
+; CHECK-NEXT:    vpbroadcastq %xmm4, %ymm4
+; CHECK-NEXT:    vpor %ymm10, %ymm4, %ymm4
+; CHECK-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; CHECK-NEXT:    vgatherqps %xmm1, (%rsi,%ymm4,4), %xmm6
+; CHECK-NEXT:    vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vmovapd %ymm9, %ymm1
+; CHECK-NEXT:    vxorps %xmm6, %xmm6, %xmm6
+; CHECK-NEXT:    vgatherdpd %ymm1, (%rsi,%xmm8,8), %ymm6
+; CHECK-NEXT:    vmovapd %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vgatherqpd %ymm9, (%rsi,%ymm4,8), %ymm1
+; CHECK-NEXT:    vmovapd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT:    vmovdqa %xmm2, %xmm1
+; CHECK-NEXT:    vxorpd %xmm6, %xmm6, %xmm6
+; CHECK-NEXT:    vpgatherdd %xmm1, (%rsi,%xmm8,4), %xmm6
+; CHECK-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vmovdqa %xmm2, %xmm1
+; CHECK-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; CHECK-NEXT:    vpgatherqd %xmm1, (%rsi,%xmm11,4), %xmm6
+; CHECK-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vpxor %xmm6, %xmm6, %xmm6
+; CHECK-NEXT:    vmovdqa %xmm5, %xmm1
+; CHECK-NEXT:    vpgatherdq %xmm1, (%rsi,%xmm8,8), %xmm6
+; CHECK-NEXT:    vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vpgatherqq %xmm5, (%rsi,%xmm11,8), %xmm1
+; CHECK-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vmovdqa 16(%rbp), %ymm1
+; CHECK-NEXT:    vpxor %xmm5, %xmm5, %xmm5
+; CHECK-NEXT:    vpgatherdd %ymm1, (%rsi,%ymm3,4), %ymm5
+; CHECK-NEXT:    vmovdqa %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT:    vpgatherqd %xmm2, (%rsi,%ymm4,4), %xmm1
+; CHECK-NEXT:    vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT:    vmovdqa 112(%rbp), %ymm1
+; CHECK-NEXT:    vmovdqa %ymm1, %ymm2
+; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; CHECK-NEXT:    vpgatherdq %ymm2, (%rsi,%xmm8,8), %ymm3
+; CHECK-NEXT:    vmovdqa %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; CHECK-NEXT:    vpgatherqq %ymm1, (%rsi,%ymm4,8), %ymm2
+; CHECK-NEXT:    vmovdqa %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
+; CHECK-NEXT:    shlq $47, %rax
+; CHECK-NEXT:    orq %rax, %rsp
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    callq sink4f
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    sarq $63, %rax
+; CHECK-NEXT:    shlq $47, %rax
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    orq %rax, %rsp
+; CHECK-NEXT:    callq sink4f
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    sarq $63, %rax
+; CHECK-NEXT:    shlq $47, %rax
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    orq %rax, %rsp
+; CHECK-NEXT:    callq sink2d
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    sarq $63, %rax
+; CHECK-NEXT:    shlq $47, %rax
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    orq %rax, %rsp
+; CHECK-NEXT:    callq sink2d
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    sarq $63, %rax
+; CHECK-NEXT:    shlq $47, %rax
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT:    orq %rax, %rsp
+; CHECK-NEXT:    callq sink8f
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    sarq $63, %rax
+; CHECK-NEXT:    shlq $47, %rax
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    orq %rax, %rsp
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    callq sink4f
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    sarq $63, %rax
+; CHECK-NEXT:    shlq $47, %rax
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT:    orq %rax, %rsp
+; CHECK-NEXT:    callq sink4d
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    sarq $63, %rax
+; CHECK-NEXT:    shlq $47, %rax
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT:    orq %rax, %rsp
+; CHECK-NEXT:    callq sink4d
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    sarq $63, %rax
+; CHECK-NEXT:    shlq $47, %rax
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    orq %rax, %rsp
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    callq sink4i32
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    sarq $63, %rax
+; CHECK-NEXT:    shlq $47, %rax
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    orq %rax, %rsp
+; CHECK-NEXT:    callq sink4i32
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    sarq $63, %rax
+; CHECK-NEXT:    shlq $47, %rax
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    orq %rax, %rsp
+; CHECK-NEXT:    callq sink2i64
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    sarq $63, %rax
+; CHECK-NEXT:    shlq $47, %rax
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    orq %rax, %rsp
+; CHECK-NEXT:    callq sink2i64
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    sarq $63, %rax
+; CHECK-NEXT:    shlq $47, %rax
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT:    orq %rax, %rsp
+; CHECK-NEXT:    callq sink8i32
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    sarq $63, %rax
+; CHECK-NEXT:    shlq $47, %rax
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; CHECK-NEXT:    orq %rax, %rsp
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    callq sink4i32
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    sarq $63, %rax
+; CHECK-NEXT:    shlq $47, %rax
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT:    orq %rax, %rsp
+; CHECK-NEXT:    callq sink4i64
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    sarq $63, %rax
+; CHECK-NEXT:    shlq $47, %rax
+; CHECK-NEXT:    vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
+; CHECK-NEXT:    orq %rax, %rsp
+; CHECK-NEXT:    callq sink4i64
+; CHECK-NEXT:    movq %rsp, %rax
+; CHECK-NEXT:    sarq $63, %rax
+; CHECK-NEXT:  .LBB0_3: # %exit
+; CHECK-NEXT:    shlq $47, %rax
+; CHECK-NEXT:    orq %rax, %rsp
+; CHECK-NEXT:    movq %rbp, %rsp
+; CHECK-NEXT:    popq %rbp
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+entry:
+  %a.cmp = icmp eq i32 %a, 0
+  br i1 %a.cmp, label %then, label %exit
+
+then:
+  %f1 = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> zeroinitializer, i8* %base, <4 x i32> %indices4i32, <4 x float> %mask4f, i8 4)
+  %f2 = call <4 x float> @llvm.x86.avx2.gather.q.ps(<4 x float> zeroinitializer, i8* %base, <2 x i64> %indices2i64, <4 x float> %mask4f, i8 4)
+  %f3 = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> zeroinitializer, i8* %base, <4 x i32> %indices4i32, <2 x double> %mask2d, i8 8)
+  %f4 = call <2 x double> @llvm.x86.avx2.gather.q.pd(<2 x double> zeroinitializer, i8* %base, <2 x i64> %indices2i64, <2 x double> %mask2d, i8 8)
+  %f5 = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> zeroinitializer, i8* %base, <8 x i32> %indices8i32, <8 x float> %mask8f, i8 4)
+  %f6 = call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> zeroinitializer, i8* %base, <4 x i64> %indices4i64, <4 x float> %mask4f, i8 4)
+  %f7 = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> zeroinitializer, i8* %base, <4 x i32> %indices4i32, <4 x double> %mask4d, i8 8)
+  %f8 = call <4 x double> @llvm.x86.avx2.gather.q.pd.256(<4 x double> zeroinitializer, i8* %base, <4 x i64> %indices4i64, <4 x double> %mask4d, i8 8)
+  %i1 = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> zeroinitializer, i8* %base, <4 x i32> %indices4i32, <4 x i32> %mask4i32, i8 4)
+  %i2 = call <4 x i32> @llvm.x86.avx2.gather.q.d(<4 x i32> zeroinitializer, i8* %base, <2 x i64> %indices2i64, <4 x i32> %mask4i32, i8 4)
+  %i3 = call <2 x i64> @llvm.x86.avx2.gather.d.q(<2 x i64> zeroinitializer, i8* %base, <4 x i32> %indices4i32, <2 x i64> %mask2i64, i8 8)
+  %i4 = call <2 x i64> @llvm.x86.avx2.gather.q.q(<2 x i64> zeroinitializer, i8* %base, <2 x i64> %indices2i64, <2 x i64> %mask2i64, i8 8)
+  %i5 = call <8 x i32> @llvm.x86.avx2.gather.d.d.256(<8 x i32> zeroinitializer, i8* %base, <8 x i32> %indices8i32, <8 x i32> %mask8i32, i8 4)
+  %i6 = call <4 x i32> @llvm.x86.avx2.gather.q.d.256(<4 x i32> zeroinitializer, i8* %base, <4 x i64> %indices4i64, <4 x i32> %mask4i32, i8 4)
+  %i7 = call <4 x i64> @llvm.x86.avx2.gather.d.q.256(<4 x i64> zeroinitializer, i8* %base, <4 x i32> %indices4i32, <4 x i64> %mask4i64, i8 8)
+  %i8 = call <4 x i64> @llvm.x86.avx2.gather.q.q.256(<4 x i64> zeroinitializer, i8* %base, <4 x i64> %indices4i64, <4 x i64> %mask4i64, i8 8)
+  call void @sink4f(<4 x float> %f1)
+  call void @sink4f(<4 x float> %f2)
+  call void @sink2d(<2 x double> %f3)
+  call void @sink2d(<2 x double> %f4)
+  call void @sink8f(<8 x float> %f5)
+  call void @sink4f(<4 x float> %f6)
+  call void @sink4d(<4 x double> %f7)
+  call void @sink4d(<4 x double> %f8)
+  call void @sink4i32(<4 x i32> %i1)
+  call void @sink4i32(<4 x i32> %i2)
+  call void @sink2i64(<2 x i64> %i3)
+  call void @sink2i64(<2 x i64> %i4)
+  call void @sink8i32(<8 x i32> %i5)
+  call void @sink4i32(<4 x i32> %i6)
+  call void @sink4i64(<4 x i64> %i7)
+  call void @sink4i64(<4 x i64> %i8)
+  br label %exit
+
+exit:
+  ret void
+}
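[Editorial note, not part of the patch] For contrast with the vector paths exercised by the gather test above, the pre-existing scalar fallback (the final else branch of the C++ hunk) can be seen with an ordinary load. Per the code and its comments, the pass ors the predicate state directly into the 64-bit pointer when EFLAGS is dead and falls back to shrx when EFLAGS is live, analogous to the orq %rax, %rsi hardening of %base visible in the CHECK lines above. The sketch below is illustrative only; the function and value names are made up.

; Illustrative sketch only: a scalar load under a branch. The GR64 path in the
; hunk above hardens the %ptr base register rather than any vector register.
define i32 @scalar_load(i32 %a, i32* %ptr) nounwind {
entry:
  %cmp = icmp eq i32 %a, 0
  br i1 %cmp, label %then, label %exit

then:
  %v = load i32, i32* %ptr
  br label %exit

exit:
  %r = phi i32 [ %v, %then ], [ 0, %entry ]
  ret i32 %r
}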