Index: test/CodeGen/ARM/fold-sext-sextload.ll
===================================================================
--- test/CodeGen/ARM/fold-sext-sextload.ll
+++ test/CodeGen/ARM/fold-sext-sextload.ll
@@ -1,15 +1,14 @@
 ; RUN: llc -mtriple armv7 %s -stop-before=livedebugvalues -o - | FileCheck %s
-define <4 x i8> @i(<4 x i8>*) !dbg !8 {
-  %2 = load <4 x i8>, <4 x i8>* %0, align 4, !dbg !14
+define <4 x i8> @i(<4 x i8>*, <4 x i8>) !dbg !8 {
+  %3 = load <4 x i8>, <4 x i8>* %0, align 4, !dbg !14
   ; CHECK: $[[reg:.*]] = VLD1LNd32 {{.*}} debug-location !14 :: (load 4 from %ir.0)
-  ; CHECK-NEXT: VMOVLsv8i16 {{.*}} $[[reg]], {{.*}} debug-location !14
-  ; CHECK-NEXT: VMOVLsv4i32 {{.*}} $[[reg]], {{.*}} debug-location !14
-
-  %3 = sdiv <4 x i8> zeroinitializer, %2, !dbg !15
-  call void @llvm.dbg.value(metadata <4 x i8> %2, metadata !11, metadata !DIExpression()), !dbg !14
-  call void @llvm.dbg.value(metadata <4 x i8> %3, metadata !13, metadata !DIExpression()), !dbg !15
-  ret <4 x i8> %3, !dbg !16
+  ; CHECK: VMOVLsv8i16 {{.*}} $[[reg]], {{.*}} debug-location !14
+  ; CHECK: VMOVLsv4i32 {{.*}} $[[reg]], {{.*}} debug-location !14
+  %4 = sdiv <4 x i8> %1, %3, !dbg !15
+  call void @llvm.dbg.value(metadata <4 x i8> %3, metadata !11, metadata !DIExpression()), !dbg !14
+  call void @llvm.dbg.value(metadata <4 x i8> %4, metadata !13, metadata !DIExpression()), !dbg !15
+  ret <4 x i8> %4, !dbg !16
 }
 
 declare void @llvm.dbg.value(metadata, metadata, metadata)
 
Index: test/CodeGen/ARM/fold-zext-zextload.ll
===================================================================
--- test/CodeGen/ARM/fold-zext-zextload.ll
+++ test/CodeGen/ARM/fold-zext-zextload.ll
@@ -1,15 +1,14 @@
 ; RUN: llc -mtriple armv7 %s -stop-before=livedebugvalues -o - | FileCheck %s
-define <4 x i8> @i(<4 x i8>*) !dbg !8 {
-  %2 = load <4 x i8>, <4 x i8>* %0, align 4, !dbg !14
+define <4 x i8> @i(<4 x i8>*, <4 x i8>) !dbg !8 {
+  %3 = load <4 x i8>, <4 x i8>* %0, align 4, !dbg !14
   ; CHECK: $[[reg:.*]] = VLD1LNd32 {{.*}} debug-location !14 :: (load 4 from %ir.0)
   ; CHECK-NEXT: VMOVLuv8i16 {{.*}} $[[reg]], {{.*}} debug-location !14
   ; CHECK-NEXT: VMOVLuv4i32 {{.*}} $[[reg]], {{.*}} debug-location !14
-
-  %3 = udiv <4 x i8> zeroinitializer, %2, !dbg !15
-  call void @llvm.dbg.value(metadata <4 x i8> %2, metadata !11, metadata !DIExpression()), !dbg !14
-  call void @llvm.dbg.value(metadata <4 x i8> %3, metadata !13, metadata !DIExpression()), !dbg !15
-  ret <4 x i8> %3, !dbg !16
+  %4 = udiv <4 x i8> %1, %3, !dbg !15
+  call void @llvm.dbg.value(metadata <4 x i8> %3, metadata !11, metadata !DIExpression()), !dbg !14
+  call void @llvm.dbg.value(metadata <4 x i8> %4, metadata !13, metadata !DIExpression()), !dbg !15
+  ret <4 x i8> %4, !dbg !16
 }
 
 declare void @llvm.dbg.value(metadata, metadata, metadata)
 
Index: test/CodeGen/ARM/vector-extend-narrow.ll
===================================================================
--- test/CodeGen/ARM/vector-extend-narrow.ll
+++ test/CodeGen/ARM/vector-extend-narrow.ll
@@ -48,7 +48,7 @@
 }
 
 ; CHECK-LABEL: i:
-define <4 x i8> @i(<4 x i8>* %x) {
+define <4 x i8> @i(<4 x i8>* %x, <4 x i8> %y) {
   ; Note: vld1 here is reasonably important. Mixing VFP and NEON
   ; instructions is bad on some cores
   ; CHECK: vld1
@@ -59,7 +59,7 @@
   ; CHECK: vmul
   ; CHECK: vmovn
   %1 = load <4 x i8>, <4 x i8>* %x, align 4
-  %2 = sdiv <4 x i8> zeroinitializer, %1
+  %2 = sdiv <4 x i8> %y, %1
   ret <4 x i8> %2
 }
 ; CHECK-LABEL: j: