Index: lib/Target/X86/X86FastISel.cpp
===================================================================
--- lib/Target/X86/X86FastISel.cpp
+++ lib/Target/X86/X86FastISel.cpp
@@ -84,7 +84,7 @@
   bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT,
                           DebugLoc DL);
 
   bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, MachineMemOperand *MMO,
-                       unsigned &ResultReg);
+                       unsigned &ResultReg, bool Aligned = false);
 
   bool X86FastEmitStore(EVT VT, const Value *Val, const X86AddressMode &AM,
                         MachineMemOperand *MMO = nullptr, bool Aligned = false);
@@ -327,7 +327,8 @@
 /// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
 /// Return true and the result register by reference if it is possible.
 bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
-                                  MachineMemOperand *MMO, unsigned &ResultReg) {
+                                  MachineMemOperand *MMO, unsigned &ResultReg,
+                                  bool Aligned) {
   // Get opcode and regclass of the output for the given load instruction.
   unsigned Opc = 0;
   const TargetRegisterClass *RC = nullptr;
@@ -372,6 +373,30 @@
   case MVT::f80:
     // No f80 support yet.
     return false;
+  case MVT::v4f32:
+    if (Aligned)
+      Opc = Subtarget->hasAVX() ? X86::VMOVAPSrm : X86::MOVAPSrm;
+    else
+      Opc = Subtarget->hasAVX() ? X86::VMOVUPSrm : X86::MOVUPSrm;
+    RC = &X86::VR128RegClass;
+    break;
+  case MVT::v2f64:
+    if (Aligned)
+      Opc = Subtarget->hasAVX() ? X86::VMOVAPDrm : X86::MOVAPDrm;
+    else
+      Opc = Subtarget->hasAVX() ? X86::VMOVUPDrm : X86::MOVUPDrm;
+    RC = &X86::VR128RegClass;
+    break;
+  case MVT::v4i32:
+  case MVT::v2i64:
+  case MVT::v8i16:
+  case MVT::v16i8:
+    if (Aligned)
+      Opc = Subtarget->hasAVX() ? X86::VMOVDQArm : X86::MOVDQArm;
+    else
+      Opc = Subtarget->hasAVX() ? X86::VMOVDQUrm : X86::MOVDQUrm;
+    RC = &X86::VR128RegClass;
+    break;
   }
 
   ResultReg = createResultReg(RC);
@@ -1068,8 +1093,15 @@
   if (!X86SelectAddress(Ptr, AM))
     return false;
 
+  unsigned Alignment = LI->getAlignment();
+  unsigned ABIAlignment = DL.getABITypeAlignment(LI->getType());
+  if (Alignment == 0) // Ensure that codegen never sees alignment 0.
+    Alignment = ABIAlignment;
+  bool Aligned = Alignment >= ABIAlignment;
+
   unsigned ResultReg = 0;
-  if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg))
+  if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg,
+                       Aligned))
     return false;
 
   updateValueMap(I, ResultReg);
Index: test/CodeGen/X86/fast-isel-vecload.ll
===================================================================
--- test/CodeGen/X86/fast-isel-vecload.ll
+++ test/CodeGen/X86/fast-isel-vecload.ll
@@ -0,0 +1,125 @@
+; RUN: llc -O0 -fast-isel -fast-isel-abort=1 -mtriple=x86_64-unknown-unknown -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE --check-prefix=ALL
+; RUN: llc -O0 -fast-isel -fast-isel-abort=1 -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s --check-prefix=AVX --check-prefix=ALL
+
+; Verify that fast-isel knows how to select aligned/unaligned vector loads.
+; Also verify that the selected load instruction is in the correct domain.
+
+define <16 x i8> @test_v16i8(<16 x i8>* %V) {
+; ALL-LABEL: test_v16i8:
+; SSE: movdqa (%rdi), %xmm0
+; AVX: vmovdqa (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <16 x i8>, <16 x i8>* %V, align 16
+  ret <16 x i8> %0
+}
+
+define <8 x i16> @test_v8i16(<8 x i16>* %V) {
+; ALL-LABEL: test_v8i16:
+; SSE: movdqa (%rdi), %xmm0
+; AVX: vmovdqa (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <8 x i16>, <8 x i16>* %V, align 16
+  ret <8 x i16> %0
+}
+
+define <4 x i32> @test_v4i32(<4 x i32>* %V) {
+; ALL-LABEL: test_v4i32:
+; SSE: movdqa (%rdi), %xmm0
+; AVX: vmovdqa (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %V, align 16
+  ret <4 x i32> %0
+}
+
+define <2 x i64> @test_v2i64(<2 x i64>* %V) {
+; ALL-LABEL: test_v2i64:
+; SSE: movdqa (%rdi), %xmm0
+; AVX: vmovdqa (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <2 x i64>, <2 x i64>* %V, align 16
+  ret <2 x i64> %0
+}
+
+define <16 x i8> @test_v16i8_unaligned(<16 x i8>* %V) {
+; ALL-LABEL: test_v16i8_unaligned:
+; SSE: movdqu (%rdi), %xmm0
+; AVX: vmovdqu (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <16 x i8>, <16 x i8>* %V, align 4
+  ret <16 x i8> %0
+}
+
+define <8 x i16> @test_v8i16_unaligned(<8 x i16>* %V) {
+; ALL-LABEL: test_v8i16_unaligned:
+; SSE: movdqu (%rdi), %xmm0
+; AVX: vmovdqu (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <8 x i16>, <8 x i16>* %V, align 4
+  ret <8 x i16> %0
+}
+
+define <4 x i32> @test_v4i32_unaligned(<4 x i32>* %V) {
+; ALL-LABEL: test_v4i32_unaligned:
+; SSE: movdqu (%rdi), %xmm0
+; AVX: vmovdqu (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %V, align 4
+  ret <4 x i32> %0
+}
+
+define <2 x i64> @test_v2i64_unaligned(<2 x i64>* %V) {
+; ALL-LABEL: test_v2i64_unaligned:
+; SSE: movdqu (%rdi), %xmm0
+; AVX: vmovdqu (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <2 x i64>, <2 x i64>* %V, align 4
+  ret <2 x i64> %0
+}
+
+define <4 x float> @test_v4f32(<4 x float>* %V) {
+; ALL-LABEL: test_v4f32:
+; SSE: movaps (%rdi), %xmm0
+; AVX: vmovaps (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <4 x float>, <4 x float>* %V, align 16
+  ret <4 x float> %0
+}
+
+define <2 x double> @test_v2f64(<2 x double>* %V) {
+; ALL-LABEL: test_v2f64:
+; SSE: movapd (%rdi), %xmm0
+; AVX: vmovapd (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <2 x double>, <2 x double>* %V, align 16
+  ret <2 x double> %0
+}
+
+define <4 x float> @test_v4f32_unaligned(<4 x float>* %V) {
+; ALL-LABEL: test_v4f32_unaligned:
+; SSE: movups (%rdi), %xmm0
+; AVX: vmovups (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <4 x float>, <4 x float>* %V, align 4
+  ret <4 x float> %0
+}
+
+define <2 x double> @test_v2f64_unaligned(<2 x double>* %V) {
+; ALL-LABEL: test_v2f64_unaligned:
+; SSE: movupd (%rdi), %xmm0
+; AVX: vmovupd (%rdi), %xmm0
+; ALL-NEXT: retq
+entry:
+  %0 = load <2 x double>, <2 x double>* %V, align 4
+  ret <2 x double> %0
+}