Index: lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
===================================================================
--- lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
+++ lib/Transforms/Vectorize/LoadStoreVectorizer.cpp
@@ -65,7 +65,9 @@
   bool run();
 
 private:
-  Value *getPointerOperand(Value *I);
+  Value *getPointerOperand(Value *I) const;
+
+  GetElementPtrInst *getSourceGEP(Value *Src) const;
 
   unsigned getPointerAddressSpace(Value *I);
 
@@ -215,7 +217,7 @@
   return Changed;
 }
 
-Value *Vectorizer::getPointerOperand(Value *I) {
+Value *Vectorizer::getPointerOperand(Value *I) const {
   if (LoadInst *LI = dyn_cast<LoadInst>(I))
     return LI->getPointerOperand();
   if (StoreInst *SI = dyn_cast<StoreInst>(I))
@@ -231,6 +233,19 @@
   return -1;
 }
 
+GetElementPtrInst *Vectorizer::getSourceGEP(Value *Src) const {
+  // First strip pointer bitcasts. Make sure the pointee size is the same with
+  // and without casts.
+  // TODO: a stride set by the add instruction below can match the difference
+  // in pointee type size here. Currently it will not be vectorized.
+  Value *SrcPtr = getPointerOperand(Src);
+  Value *SrcBase = SrcPtr->stripPointerCasts();
+  if (DL.getTypeStoreSize(SrcPtr->getType()->getPointerElementType()) ==
+      DL.getTypeStoreSize(SrcBase->getType()->getPointerElementType()))
+    SrcPtr = SrcBase;
+  return dyn_cast<GetElementPtrInst>(SrcPtr);
+}
+
 // FIXME: Merge with llvm::isConsecutiveAccess
 bool Vectorizer::isConsecutiveAccess(Value *A, Value *B) {
   Value *PtrA = getPointerOperand(A);
@@ -283,8 +298,8 @@
 
   // Look through GEPs after checking they're the same except for the last
   // index.
-  GetElementPtrInst *GEPA = dyn_cast<GetElementPtrInst>(getPointerOperand(A));
-  GetElementPtrInst *GEPB = dyn_cast<GetElementPtrInst>(getPointerOperand(B));
+  GetElementPtrInst *GEPA = getSourceGEP(A);
+  GetElementPtrInst *GEPB = getSourceGEP(B);
   if (!GEPA || !GEPB || GEPA->getNumOperands() != GEPB->getNumOperands())
     return false;
   unsigned FinalIndex = GEPA->getNumOperands() - 1;
Index: test/Transforms/LoadStoreVectorizer/AMDGPU/gep-bitcast.ll
===================================================================
--- /dev/null
+++ test/Transforms/LoadStoreVectorizer/AMDGPU/gep-bitcast.ll
@@ -0,0 +1,83 @@
+; RUN: opt -S -mtriple=amdgcn--amdhsa -load-store-vectorizer < %s | FileCheck %s
+
+; Check that the vectorizer can find a GEP through a bitcast.
+; CHECK-LABEL: @vect_zext_bitcast_f32_to_i32_idx
+; CHECK: load <4 x i32>
+define void @vect_zext_bitcast_f32_to_i32_idx(float addrspace(1)* %arg1, i32 %base) {
+  %add1 = add nuw i32 %base, 0
+  %zext1 = zext i32 %add1 to i64
+  %gep1 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 %zext1
+  %f2i1 = bitcast float addrspace(1)* %gep1 to i32 addrspace(1)*
+  %load1 = load i32, i32 addrspace(1)* %f2i1, align 4
+  %add2 = add nuw i32 %base, 1
+  %zext2 = zext i32 %add2 to i64
+  %gep2 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 %zext2
+  %f2i2 = bitcast float addrspace(1)* %gep2 to i32 addrspace(1)*
+  %load2 = load i32, i32 addrspace(1)* %f2i2, align 4
+  %add3 = add nuw i32 %base, 2
+  %zext3 = zext i32 %add3 to i64
+  %gep3 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 %zext3
+  %f2i3 = bitcast float addrspace(1)* %gep3 to i32 addrspace(1)*
+  %load3 = load i32, i32 addrspace(1)* %f2i3, align 4
+  %add4 = add nuw i32 %base, 3
+  %zext4 = zext i32 %add4 to i64
+  %gep4 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 %zext4
+  %f2i4 = bitcast float addrspace(1)* %gep4 to i32 addrspace(1)*
+  %load4 = load i32, i32 addrspace(1)* %f2i4, align 4
+  ret void
+}
+
+; CHECK-LABEL: @vect_zext_bitcast_i8_st1_to_i32_idx
+; CHECK: load i32
+; CHECK: load i32
+; CHECK: load i32
+; CHECK: load i32
+define void @vect_zext_bitcast_i8_st1_to_i32_idx(i8 addrspace(1)* %arg1, i32 %base) {
+  %add1 = add nuw i32 %base, 0
+  %zext1 = zext i32 %add1 to i64
+  %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %arg1, i64 %zext1
+  %f2i1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
+  %load1 = load i32, i32 addrspace(1)* %f2i1, align 4
+  %add2 = add nuw i32 %base, 1
+  %zext2 = zext i32 %add2 to i64
+  %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %arg1, i64 %zext2
+  %f2i2 = bitcast i8 addrspace(1)* %gep2 to i32 addrspace(1)*
+  %load2 = load i32, i32 addrspace(1)* %f2i2, align 4
+  %add3 = add nuw i32 %base, 2
+  %zext3 = zext i32 %add3 to i64
+  %gep3 = getelementptr inbounds i8, i8 addrspace(1)* %arg1, i64 %zext3
+  %f2i3 = bitcast i8 addrspace(1)* %gep3 to i32 addrspace(1)*
+  %load3 = load i32, i32 addrspace(1)* %f2i3, align 4
+  %add4 = add nuw i32 %base, 3
+  %zext4 = zext i32 %add4 to i64
+  %gep4 = getelementptr inbounds i8, i8 addrspace(1)* %arg1, i64 %zext4
+  %f2i4 = bitcast i8 addrspace(1)* %gep4 to i32 addrspace(1)*
+  %load4 = load i32, i32 addrspace(1)* %f2i4, align 4
+  ret void
+}
+
+; TODO: This can be vectorized, but currently the vectorizer is unable to do it.
+; CHECK-LABEL: @vect_zext_bitcast_i8_st4_to_i32_idx
+define void @vect_zext_bitcast_i8_st4_to_i32_idx(i8 addrspace(1)* %arg1, i32 %base) {
+  %add1 = add nuw i32 %base, 0
+  %zext1 = zext i32 %add1 to i64
+  %gep1 = getelementptr inbounds i8, i8 addrspace(1)* %arg1, i64 %zext1
+  %f2i1 = bitcast i8 addrspace(1)* %gep1 to i32 addrspace(1)*
+  %load1 = load i32, i32 addrspace(1)* %f2i1, align 4
+  %add2 = add nuw i32 %base, 4
+  %zext2 = zext i32 %add2 to i64
+  %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %arg1, i64 %zext2
+  %f2i2 = bitcast i8 addrspace(1)* %gep2 to i32 addrspace(1)*
+  %load2 = load i32, i32 addrspace(1)* %f2i2, align 4
+  %add3 = add nuw i32 %base, 8
+  %zext3 = zext i32 %add3 to i64
+  %gep3 = getelementptr inbounds i8, i8 addrspace(1)* %arg1, i64 %zext3
+  %f2i3 = bitcast i8 addrspace(1)* %gep3 to i32 addrspace(1)*
+  %load3 = load i32, i32 addrspace(1)* %f2i3, align 4
+  %add4 = add nuw i32 %base, 16
+  %zext4 = zext i32 %add4 to i64
+  %gep4 = getelementptr inbounds i8, i8 addrspace(1)* %arg1, i64 %zext4
+  %f2i4 = bitcast i8 addrspace(1)* %gep4 to i32 addrspace(1)*
+  %load4 = load i32, i32 addrspace(1)* %f2i4, align 4
+  ret void
+}
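
For reference, below is a hand-written sketch (not actual pass output) of roughly what the first test is expected to become once the GEPs are found through the bitcasts: the four consecutive i32 loads are merged into a single <4 x i32> load through a bitcast of the lowest-address pointer. The %vec_ptr and %vec names are illustrative only; the pass may choose different value names and may emit follow-up extractelement instructions for any uses of the scalar loads.

define void @vect_zext_bitcast_f32_to_i32_idx(float addrspace(1)* %arg1, i32 %base) {
  %add1 = add nuw i32 %base, 0
  %zext1 = zext i32 %add1 to i64
  %gep1 = getelementptr inbounds float, float addrspace(1)* %arg1, i64 %zext1
  %f2i1 = bitcast float addrspace(1)* %gep1 to i32 addrspace(1)*
  ; The four scalar loads collapse into one vector load (names are illustrative).
  %vec_ptr = bitcast i32 addrspace(1)* %f2i1 to <4 x i32> addrspace(1)*
  %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vec_ptr, align 4
  ret void
}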