Index: include/llvm/Target/TargetLowering.h
===================================================================
--- include/llvm/Target/TargetLowering.h
+++ include/llvm/Target/TargetLowering.h
@@ -1720,6 +1720,12 @@
     return false;
   }
 
+  // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
+  // even if the vector itself has multiple uses.
+  virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
+    return false;
+  }
+
   //===--------------------------------------------------------------------===//
   // Runtime Library hooks
   //
Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -11886,7 +11886,24 @@
   }
 
   SDValue EltNo = N->getOperand(1);
-  bool ConstEltNo = isa<ConstantSDNode>(EltNo);
+  ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
+
+  // extract_vector_elt (build_vector x, y), 1 -> y
+  if (ConstEltNo &&
+      InVec.getOpcode() == ISD::BUILD_VECTOR &&
+      TLI.isTypeLegal(VT) &&
+      (InVec.hasOneUse() ||
+       TLI.aggressivelyPreferBuildVectorSources(VT))) {
+    SDValue Elt = InVec.getOperand(ConstEltNo->getZExtValue());
+    EVT InEltVT = Elt.getValueType();
+
+    // Sometimes build_vector's scalar input types do not match result type.
+    if (NVT == InEltVT)
+      return Elt;
+
+    // TODO: It may be useful to truncate if free if the build_vector implicitly
+    // converts.
+  }
 
   // Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT.
   // We only perform this optimization before the op legalization phase because
@@ -11894,13 +11911,11 @@
   // patterns. For example on AVX, extracting elements from a wide vector
   // without using extract_subvector. However, if we can find an underlying
   // scalar value, then we can always use that.
-  if (InVec.getOpcode() == ISD::VECTOR_SHUFFLE
-      && ConstEltNo) {
-    int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
+  if (ConstEltNo && InVec.getOpcode() == ISD::VECTOR_SHUFFLE) {
     int NumElem = VT.getVectorNumElements();
     ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(InVec);
     // Find the new index to extract from.
-    int OrigElt = SVOp->getMaskElt(Elt);
+    int OrigElt = SVOp->getMaskElt(ConstEltNo->getZExtValue());
 
     // Extracting an undef index is undef.
     if (OrigElt == -1)
Index: lib/Target/AMDGPU/AMDGPUISelLowering.h
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -138,6 +138,7 @@
   bool storeOfVectorConstantIsCheap(EVT MemVT,
                                     unsigned NumElem,
                                     unsigned AS) const override;
+  bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override;
   bool isCheapToSpeculateCttz() const override;
   bool isCheapToSpeculateCtlz() const override;
 
Index: lib/Target/AMDGPU/AMDGPUISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -533,6 +533,18 @@
   return true;
 }
 
+bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
+  // There are few operations which truly have vector input operands. Any vector
+  // operation is going to involve operations on each component, and a
+  // build_vector will be a copy per element, so it always makes sense to use a
+  // build_vector input in place of the extracted element to avoid a copy into a
+  // super register.
+  //
+  // We should probably only do this if all users are extracts only, but this
+  // should be the common case.
+  return true;
+}
+
 bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
   // Truncate is just accessing a subregister.
   return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
Index: test/CodeGen/AArch64/fold-constants.ll
===================================================================
--- test/CodeGen/AArch64/fold-constants.ll
+++ test/CodeGen/AArch64/fold-constants.ll
@@ -3,9 +3,6 @@
 define i64 @dotests_616() {
 ; CHECK-LABEL: dotests_616
 ; CHECK: movi d0, #0000000000000000
-; CHECK-NEXT: umov w8, v0.b[2]
-; CHECK-NEXT: sbfx w8, w8, #0, #1
-; CHECK-NEXT: fmov s0, w8
 ; CHECK-NEXT: fmov x0, d0
 ; CHECK-NEXT: ret
 entry:
Index: test/CodeGen/AMDGPU/ds_read2.ll
===================================================================
--- test/CodeGen/AMDGPU/ds_read2.ll
+++ test/CodeGen/AMDGPU/ds_read2.ll
@@ -216,10 +216,8 @@
   ret void
 }
 
-; We should be able to merge in this case, but probably not worth the effort.
-; SI-NOT: ds_read2_b32
-; SI: ds_read_b32
-; SI: ds_read_b32
+; SI-LABEL: {{^}}read2_ptr_is_subreg_f32:
+; SI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset1:8{{$}}
 ; SI: s_endpgm
 define void @read2_ptr_is_subreg_f32(float addrspace(1)* %out) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
Index: test/CodeGen/AMDGPU/fceil64.ll
===================================================================
--- test/CodeGen/AMDGPU/fceil64.ll
+++ test/CodeGen/AMDGPU/fceil64.ll
@@ -17,12 +17,12 @@
 ; SI: s_lshr_b64
 ; SI: s_not_b64
 ; SI: s_and_b64
-; SI: cmp_gt_i32
-; SI: cndmask_b32
-; SI: cndmask_b32
-; SI: cmp_lt_i32
-; SI: cndmask_b32
-; SI: cndmask_b32
+; SI-DAG: cmp_gt_i32
+; SI-DAG: cndmask_b32
+; SI-DAG: cndmask_b32
+; SI-DAG: cmp_lt_i32
+; SI-DAG: cndmask_b32
+; SI-DAG: cndmask_b32
 ; SI-DAG: v_cmp_lt_f64
 ; SI-DAG: v_cmp_lg_f64
 ; SI: s_and_b64
Index: test/CodeGen/AMDGPU/ftrunc.f64.ll
===================================================================
--- test/CodeGen/AMDGPU/ftrunc.f64.ll
+++ test/CodeGen/AMDGPU/ftrunc.f64.ll
@@ -29,12 +29,12 @@
 ; SI: s_lshr_b64
 ; SI: s_not_b64
 ; SI: s_and_b64
-; SI: cmp_gt_i32
-; SI: cndmask_b32
-; SI: cndmask_b32
-; SI: cmp_lt_i32
-; SI: cndmask_b32
-; SI: cndmask_b32
+; SI-DAG: cmp_gt_i32
+; SI-DAG: cndmask_b32
+; SI-DAG: cndmask_b32
+; SI-DAG: cmp_lt_i32
+; SI-DAG: cndmask_b32
+; SI-DAG: cndmask_b32
 ; SI: s_endpgm
 define void @ftrunc_f64(double addrspace(1)* %out, double %x) {
   %y = call double @llvm.trunc.f64(double %x) nounwind readnone
Index: test/CodeGen/AMDGPU/gep-address-space.ll
===================================================================
--- test/CodeGen/AMDGPU/gep-address-space.ll
+++ test/CodeGen/AMDGPU/gep-address-space.ll
@@ -11,24 +11,35 @@
   ret void
 }
 
-define void @use_gep_address_space_large_offset([1024 x i32] addrspace(3)* %array) nounwind {
 ; CHECK-LABEL: {{^}}use_gep_address_space_large_offset:
 ; The LDS offset will be 65536 bytes, which is larger than the size of LDS on
 ; SI, which is why it is being OR'd with the base pointer.
 ; SI: s_or_b32
 ; CI: s_add_i32
 ; CHECK: ds_write_b32
+define void @use_gep_address_space_large_offset([1024 x i32] addrspace(3)* %array) nounwind {
   %p = getelementptr [1024 x i32], [1024 x i32] addrspace(3)* %array, i16 0, i16 16384
   store i32 99, i32 addrspace(3)* %p
   ret void
 }
 
-define void @gep_as_vector_v4(<4 x [1024 x i32] addrspace(3)*> %array) nounwind {
 ; CHECK-LABEL: {{^}}gep_as_vector_v4:
-; CHECK: s_add_i32
-; CHECK: s_add_i32
-; CHECK: s_add_i32
-; CHECK: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+
+; CHECK-DAG: v_mov_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}
+; CHECK-DAG: v_mov_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}
+; CHECK-DAG: v_mov_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}
+; CHECK-DAG: v_mov_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}
+
+; CI-DAG: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:64
+; CI-DAG: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:64
+; CI-DAG: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:64
+; CI-DAG: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:64
+; CHECK: s_endpgm
+define void @gep_as_vector_v4(<4 x [1024 x i32] addrspace(3)*> %array) nounwind {
   %p = getelementptr [1024 x i32], <4 x [1024 x i32] addrspace(3)*> %array, <4 x i16> zeroinitializer, <4 x i16> <i16 16, i16 16, i16 16, i16 16>
   %p0 = extractelement <4 x i32 addrspace(3)*> %p, i32 0
   %p1 = extractelement <4 x i32 addrspace(3)*> %p, i32 1
@@ -41,10 +52,15 @@
   ret void
 }
 
-define void @gep_as_vector_v2(<2 x [1024 x i32] addrspace(3)*> %array) nounwind {
 ; CHECK-LABEL: {{^}}gep_as_vector_v2:
-; CHECK: s_add_i32
-; CHECK: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; CHECK-DAG: v_mov_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}
+; CHECK-DAG: v_mov_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}
+; CI-DAG: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:64
+; CI-DAG: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:64
+; CHECK: s_endpgm
+define void @gep_as_vector_v2(<2 x [1024 x i32] addrspace(3)*> %array) nounwind {
   %p = getelementptr [1024 x i32], <2 x [1024 x i32] addrspace(3)*> %array, <2 x i16> zeroinitializer, <2 x i16> <i16 16, i16 16>
   %p0 = extractelement <2 x i32 addrspace(3)*> %p, i32 0
   %p1 = extractelement <2 x i32 addrspace(3)*> %p, i32 1