Index: lib/IR/ConstantFold.cpp =================================================================== --- lib/IR/ConstantFold.cpp +++ lib/IR/ConstantFold.cpp @@ -545,7 +545,11 @@ opc != Instruction::AddrSpaceCast && // Do not fold bitcast (gep) with inrange index, as this loses // information. - !cast<GEPOperator>(CE)->getInRangeIndex().hasValue()) { + !cast<GEPOperator>(CE)->getInRangeIndex().hasValue() && + // Do not fold if the gep type is a vector, as bitcasting + // operand 0 of a vector gep will result in a bitcast between + // different sizes. + !CE->getType()->isVectorTy()) { // If all of the indexes in the GEP are null values, there is no pointer // adjustment going on. We might as well cast the source pointer. bool isAllNull = true; Index: test/Analysis/ConstantFolding/gep-zeroinit-vector.ll =================================================================== --- test/Analysis/ConstantFolding/gep-zeroinit-vector.ll +++ test/Analysis/ConstantFolding/gep-zeroinit-vector.ll @@ -1,15 +1,15 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt < %s -constprop -S -o - | FileCheck %s -; Testcase that point out faulty bitcast that cast between different sizes. -; See "bitcast ([1 x %rec8]* @a to <2 x i16*>)" in checks below +; Testcase that verifies that we don't get a faulty bitcast that casts between +; different sizes.
%rec8 = type { i16 } @a = global [1 x %rec8] zeroinitializer define <2 x i16*> @test_gep() { ; CHECK-LABEL: @test_gep( -; CHECK-NEXT: ret <2 x i16*> bitcast ([1 x %rec8]* @a to <2 x i16*>) +; CHECK-NEXT: ret <2 x i16*> <i16* getelementptr inbounds (%rec8, %rec8* extractelement (<2 x %rec8*> getelementptr ([1 x %rec8], [1 x %rec8]* @a, <2 x i64> zeroinitializer, <2 x i64> zeroinitializer), i32 0), i32 0, i32 0), i16* getelementptr inbounds (%rec8, %rec8* extractelement (<2 x %rec8*> getelementptr ([1 x %rec8], [1 x %rec8]* @a, <2 x i64> zeroinitializer, <2 x i64> zeroinitializer), i32 1), i32 0, i32 0)> ; %A = getelementptr [1 x %rec8], [1 x %rec8]* @a, <2 x i16> zeroinitializer, <2 x i64> zeroinitializer %B = bitcast <2 x %rec8*> %A to <2 x i16*> Index: test/Transforms/LoopVectorize/X86/constant-fold.ll =================================================================== --- test/Transforms/LoopVectorize/X86/constant-fold.ll +++ test/Transforms/LoopVectorize/X86/constant-fold.ll @@ -1,8 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -loop-vectorize -S -mtriple=x86_64-- -o - %s | FileCheck %s -; Testcase that point out faulty bitcast that cast between different sizes. -; See "bitcast ([1 x %rec8]* @a to <2 x i16*>)" in checks below +; Testcase that verifies that we don't get a faulty bitcast that casts between +; different sizes.
%rec8 = type { i16 } @@ -28,7 +28,7 @@ ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr [2 x i16*], [2 x i16*]* @b, i16 0, i64 [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i16*, i16** [[TMP3]], i32 0 ; CHECK-NEXT: [[TMP5:%.*]] = bitcast i16** [[TMP4]] to <2 x i16*>* -; CHECK-NEXT: store <2 x i16*> bitcast ([1 x %rec8]* @a to <2 x i16*>), <2 x i16*>* [[TMP5]], align 8 +; CHECK-NEXT: store <2 x i16*> <i16* getelementptr inbounds (%rec8, %rec8* extractelement (<2 x %rec8*> getelementptr ([1 x %rec8], [1 x %rec8]* @a, <2 x i16> zeroinitializer, <2 x i64> zeroinitializer), i32 0), i32 0, i32 0), i16* getelementptr inbounds (%rec8, %rec8* extractelement (<2 x %rec8*> getelementptr ([1 x %rec8], [1 x %rec8]* @a, <2 x i16> zeroinitializer, <2 x i64> zeroinitializer), i32 1), i32 0, i32 0)>, <2 x i16*>* [[TMP5]], align 8 ; CHECK-NEXT: [[INDEX_NEXT]] = add i32 [[INDEX]], 2 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 2 ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !0