Index: include/llvm/Target/TargetLowering.h
===================================================================
--- include/llvm/Target/TargetLowering.h
+++ include/llvm/Target/TargetLowering.h
@@ -275,8 +275,21 @@
   /// efficiently, casting the load to a smaller vector of larger types and
   /// loading is more efficient, however, this can be undone by optimizations in
   /// dag combiner.
-  virtual bool isLoadBitCastBeneficial(EVT /* Load */,
-                                       EVT /* Bitcast */) const {
+  virtual bool isLoadBitCastBeneficial(EVT LoadVT,
+                                       EVT BitcastVT) const {
+    // Don't do if we could do an indexed load on the original type, but not on
+    // the new one.
+    if (!LoadVT.isSimple() || !BitcastVT.isSimple())
+      return true;
+
+    MVT LoadMVT = LoadVT.getSimpleVT();
+
+    // Don't bother doing this if it's just going to be promoted again later, as
+    // doing so might interfere with other combines.
+    if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
+        getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
+      return false;
+
     return true;
   }
 
Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -7298,11 +7298,12 @@
       (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)) &&
       TLI.isLoadBitCastBeneficial(N0.getValueType(), VT)) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
-    unsigned Align = DAG.getDataLayout().getABITypeAlignment(
-        VT.getTypeForEVT(*DAG.getContext()));
     unsigned OrigAlign = LN0->getAlignment();
 
-    if (Align <= OrigAlign) {
+    bool Fast = false;
+    if (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
+                               LN0->getAddressSpace(), OrigAlign, &Fast) &&
+        Fast) {
       SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(),
                                  LN0->getBasePtr(), LN0->getPointerInfo(),
                                  LN0->isVolatile(), LN0->isNonTemporal(),
Index: lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- lib/Target/X86/X86ISelLowering.cpp
+++ lib/Target/X86/X86ISelLowering.cpp
@@ -1353,6 +1353,15 @@
     setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
     setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
 
+    setOperationAction(ISD::LOAD, MVT::v8i1, Promote);
+    AddPromotedToType(ISD::LOAD, MVT::v8i1, MVT::i8);
+
+    setOperationAction(ISD::LOAD, MVT::v32i1, Promote);
+    AddPromotedToType(ISD::LOAD, MVT::v32i1, MVT::i32);
+
+    setOperationAction(ISD::LOAD, MVT::v64i1, Promote);
+    AddPromotedToType(ISD::LOAD, MVT::v64i1, MVT::i64);
+
     setOperationAction(ISD::FADD, MVT::v16f32, Legal);
     setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
     setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
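The DAGCombiner hunk above replaces a conservative ABI-alignment comparison: instead of folding (bitcast (load)) only when the new type's ABI alignment does not exceed the load's, it asks the target whether an access of the new type is allowed at the load's original alignment and address space, and is fast. A minimal sketch of that check factored into a standalone helper; the helper name is hypothetical, but the calls are the ones used in the hunk:

// Hypothetical helper (not part of the patch): decide whether a load of
// type VT may replace LN0's load. The target must both allow the memory
// access at the load's original alignment and report the access as fast.
static bool canRewriteLoadType(const TargetLowering &TLI, SelectionDAG &DAG,
                               EVT VT, const LoadSDNode *LN0) {
  bool Fast = false;
  return TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                                LN0->getAddressSpace(), LN0->getAlignment(),
                                &Fast) &&
         Fast;
}

This relaxation is what the AMDGPU tests below rely on: their loads are only align 4, which the old ABI-alignment comparison would have rejected even though the target handles such accesses fine.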
Index: test/CodeGen/AMDGPU/reduce-load-width-alignment.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/reduce-load-width-alignment.ll
@@ -0,0 +1,38 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}reduce_i64_load_align_4_width_to_i32:
+; GCN: buffer_load_dword [[VAL:v[0-9]+]]
+; GCN: v_and_b32_e32 v{{[0-9]+}}, 0x12d687, [[VAL]]
+; GCN: buffer_store_dwordx2
+define void @reduce_i64_load_align_4_width_to_i32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+  %a = load i64, i64 addrspace(1)* %in, align 4
+  %and = and i64 %a, 1234567
+  store i64 %and, i64 addrspace(1)* %out, align 8
+  ret void
+}
+
+; GCN-LABEL: {{^}}reduce_i64_align_4_bitcast_v2i32_elt0:
+; GCN: buffer_load_dword [[VAL:v[0-9]+]]
+; GCN: buffer_store_dword [[VAL]]
+define void @reduce_i64_align_4_bitcast_v2i32_elt0(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+  %a = load i64, i64 addrspace(1)* %in, align 4
+  %vec = bitcast i64 %a to <2 x i32>
+  %elt0 = extractelement <2 x i32> %vec, i32 0
+  store i32 %elt0, i32 addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}reduce_i64_align_4_bitcast_v2i32_elt1:
+; GCN: buffer_load_dword [[VAL:v[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0 offset:4
+; GCN: buffer_store_dword [[VAL]]
+define void @reduce_i64_align_4_bitcast_v2i32_elt1(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+  %a = load i64, i64 addrspace(1)* %in, align 4
+  %vec = bitcast i64 %a to <2 x i32>
+  %elt0 = extractelement <2 x i32> %vec, i32 1
+  store i32 %elt0, i32 addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }
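On the hook side, the new default isLoadBitCastBeneficial() declines the fold whenever the load type is registered as Promote with exactly the bitcast type as its promoted type, which is what the X86 hunk sets up for the AVX-512 mask types. A minimal sketch of how a target opts in, assuming a hypothetical MyTargetLowering class (the class and method names are invented; the two calls are the same ones the X86 hunk uses):

// Hypothetical target setup: promote v8i1 loads to i8. Once this is
// registered, the default isLoadBitCastBeneficial(v8i1, i8) returns false,
// so the DAG combiner leaves (i8 (bitcast (load v8i1))) for the type
// legalizer to handle instead of rewriting the load itself.
void MyTargetLowering::setMaskLoadActions() {
  setOperationAction(ISD::LOAD, MVT::v8i1, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8i1, MVT::i8);
}

Because the promotion and the bitcast produce the same integer type, letting the combiner fire first would only duplicate work the legalizer is about to do, and could block other combines that match the unpromoted load.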