diff --git a/llvm/lib/IR/Operator.cpp b/llvm/lib/IR/Operator.cpp
--- a/llvm/lib/IR/Operator.cpp
+++ b/llvm/lib/IR/Operator.cpp
@@ -45,6 +45,11 @@
     if (OpC->isZero())
       continue;
 
+    // Scalable vectors are multiplied by a runtime constant (vscale).
+    if (auto *VecTy = dyn_cast<VectorType>(GTI.getIndexedType()))
+      if (VecTy->isScalable())
+        return false;
+
     // Handle a struct index, which adds its field offset to the pointer.
     if (StructType *STy = GTI.getStructTypeOrNull()) {
       unsigned ElementIdx = OpC->getZExtValue();
diff --git a/llvm/test/Transforms/InstCombine/gep-vector.ll b/llvm/test/Transforms/InstCombine/gep-vector.ll
--- a/llvm/test/Transforms/InstCombine/gep-vector.ll
+++ b/llvm/test/Transforms/InstCombine/gep-vector.ll
@@ -134,3 +134,24 @@
   %gep = getelementptr inbounds [4 x i32], [4 x i32] addrspace(3)* %asc, i64 %y, i64 %z
   ret i32 addrspace(3)* %gep
 }
+
+; Negative test - avoid folding the bitcast and gep into a gep on i8*, because the '16' must be scaled by 'vscale'.
+
+define i8* @test_accumulate_constant_offset_vscale_nonzero(<vscale x 16 x i1> %pg, i8* %base) {
+; CHECK-LABEL: @test_accumulate_constant_offset_vscale_nonzero
+; CHECK-NEXT: %bc = bitcast i8* %base to <vscale x 16 x i8>*
+; CHECK-NEXT: %gep = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %bc, i64 1, i64 4
+; CHECK-NEXT: ret i8* %gep
+  %bc = bitcast i8* %base to <vscale x 16 x i8>*
+  %gep = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %bc, i64 1, i64 4
+  ret i8* %gep
+}
+
+define i8* @test_accumulate_constant_offset_vscale_zero(<vscale x 16 x i1> %pg, i8* %base) {
+; CHECK-LABEL: @test_accumulate_constant_offset_vscale_zero
+; CHECK-NEXT: %[[RES:.*]] = getelementptr i8, i8* %base, i64 4
+; CHECK-NEXT: ret i8* %[[RES]]
+  %bc = bitcast i8* %base to <vscale x 16 x i8>*
+  %gep = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %bc, i64 0, i64 4
+  ret i8* %gep
+}
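
For reference, below is a minimal standalone sketch (not part of the patch) that builds the two GEPs from the test above with the C++ API and queries GEPOperator::accumulateConstantOffset directly. It is written against a recent LLVM with opaque pointers, so the bitcast from the .ll test is not needed; the module name, function name, and demo harness are made up for illustration. The first GEP should report that no constant offset can be accumulated (its leading index of 1 is scaled by vscale * 16 bytes), while the second should fold to a byte offset of 4, matching the CHECK lines above.

// Sketch only: demonstrates accumulateConstantOffset on scalable-vector GEPs.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("gep-vscale-demo", Ctx);
  const DataLayout &DL = M.getDataLayout();

  // <vscale x 16 x i8>: the element count is 16 * vscale, known only at
  // runtime, so stepping over a whole vector has no compile-time byte size.
  auto *ScalableTy = ScalableVectorType::get(Type::getInt8Ty(Ctx), 16);

  // A throwaway function so the GEP instructions have somewhere to live.
  auto *FnTy = FunctionType::get(Type::getVoidTy(Ctx),
                                 {PointerType::getUnqual(Ctx)}, false);
  auto *Fn = Function::Create(FnTy, Function::ExternalLinkage, "demo", M);
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", Fn));
  Value *Base = Fn->getArg(0);

  // gep <vscale x 16 x i8>, ptr %base, i64 1, i64 4  ->  offset = vscale*16 + 4
  Value *NonZero =
      B.CreateGEP(ScalableTy, Base, {B.getInt64(1), B.getInt64(4)}, "nonzero");
  // gep <vscale x 16 x i8>, ptr %base, i64 0, i64 4  ->  offset = 4
  Value *Zero =
      B.CreateGEP(ScalableTy, Base, {B.getInt64(0), B.getInt64(4)}, "zero");
  B.CreateRetVoid();

  for (Value *V : {NonZero, Zero}) {
    APInt Offset(DL.getIndexSizeInBits(/*AddressSpace=*/0), 0);
    if (cast<GEPOperator>(V)->accumulateConstantOffset(DL, Offset))
      errs() << V->getName() << ": constant byte offset "
             << Offset.getSExtValue() << "\n";
    else
      errs() << V->getName() << ": offset depends on vscale, not folded\n";
  }
  return 0;
}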