Index: lib/Transforms/InstCombine/InstCombineCalls.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -121,8 +121,6 @@
     return MI;
   }
 
-  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
-  // load/store.
   ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
   if (!MemOpLength) return nullptr;
 
@@ -132,9 +130,15 @@
   // case.
   uint64_t Size = MemOpLength->getLimitedValue();
   assert(Size && "0-sized memory transferring should be removed already.");
+  if (!isPowerOf2_64(Size))
+    return nullptr;
 
-  if (Size > 8 || (Size&(Size-1)))
-    return nullptr;  // If not 1/2/4/8 bytes, exit.
+  // Since we don't have perfect knowledge here, make some assumptions: assume
+  // the maximum allowed stores for memcpy operation is the same size as the
+  // largest legal integer size.
+  unsigned LargestInt = DL.getLargestLegalIntTypeSizeInBits();
+  if (!LargestInt || Size > LargestInt/8)
+    return nullptr;
 
   // Use an integer load+store unless we can find something better.
   unsigned SrcAddrSp =
Index: test/DebugInfo/X86/array2.ll
===================================================================
--- test/DebugInfo/X86/array2.ll
+++ test/DebugInfo/X86/array2.ll
@@ -16,7 +16,9 @@
 ; Test that we correctly lower dbg.declares for arrays.
 ;
 ; CHECK: define i32 @main
-; CHECK: call void @llvm.dbg.value(metadata i32 42, metadata ![[ARRAY:[0-9]+]], metadata !DIExpression(DW_OP_LLVM_fragment, 0, 32))
+; CHECK: tail call void @llvm.dbg.value(metadata i32 [[ARGC:%.*]], i64 0, metadata !22, metadata !12), !dbg !23
+; CHECK: tail call void @llvm.dbg.value(metadata i8** [[ARGV:%.*]], i64 0, metadata !24, metadata !12), !dbg !23
+; CHECK: tail call void @llvm.dbg.value(metadata i32 42, metadata ![[ARRAY:[0-9]+]], metadata !DIExpression(DW_OP_LLVM_fragment, 0, 32))
 ; CHECK: ![[ARRAY]] = !DILocalVariable(name: "array",{{.*}} line: 6
 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.9.0"
Index: test/Transforms/InstCombine/2007-10-10-EliminateMemCpy.ll
===================================================================
--- test/Transforms/InstCombine/2007-10-10-EliminateMemCpy.ll
+++ test/Transforms/InstCombine/2007-10-10-EliminateMemCpy.ll
@@ -1,6 +1,6 @@
 ; RUN: opt < %s -instcombine -S | not grep call
 ; RUN: opt < %s -O3 -S | not grep xyz
-target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"
+target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128-n32"
 
 @.str = internal constant [4 x i8] c"xyz\00"            ; <[4 x i8]*> [#uses=1]
Index: test/Transforms/InstCombine/alloca.ll
===================================================================
--- test/Transforms/InstCombine/alloca.ll
+++ test/Transforms/InstCombine/alloca.ll
@@ -144,7 +144,6 @@
 entry:
   %inalloca.save = call i8* @llvm.stacksave()
   %argmem = alloca inalloca <{ %struct_type }>
-; CHECK: alloca inalloca i64, align 8
   %0 = getelementptr inbounds <{ %struct_type }>, <{ %struct_type }>* %argmem, i32 0, i32 0
   %1 = bitcast %struct_type* %0 to i8*
   %2 = bitcast %struct_type* %a to i8*
Index: test/Transforms/InstCombine/element-atomic-memintrins.ll
===================================================================
--- test/Transforms/InstCombine/element-atomic-memintrins.ll
+++ test/Transforms/InstCombine/element-atomic-memintrins.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -instcombine -S < %s | FileCheck %s
+target datalayout = "e-p:64:64:64-p1:32:32:32-p2:16:16:16-n8:16:32:64"
 
 ;; ---- memset -----
 
 ; Ensure 0-length memset is removed
@@ -97,7 +98,7 @@
 ; Check that a memmove from a global constant is converted into a memcpy
 define void @test_memmove_to_memcpy(i8* %dest) {
 ; CHECK-LABEL: @test_memmove_to_memcpy(
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST:%.*]], i8* align 16 getelementptr inbounds ([32 x i8], [32 x i8]* @gconst, i64 0, i64 0), i32 32, i32 1)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 16 getelementptr inbounds ([32 x i8], [32 x i8]* @gconst, i64 0, i64 0), i32 32, i32 1)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 getelementptr inbounds ([32 x i8], [32 x i8]* @gconst, i64 0, i64 0), i32 32, i32 1)
@@ -146,7 +147,7 @@
 ; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
 ; CHECK-NEXT:    [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 1
 ; CHECK-NEXT:    store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 1
-; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 16, i32 1)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 16, i32 1)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 1, i32 1)
@@ -171,7 +172,7 @@
 ; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i8* [[DEST]] to i64*
 ; CHECK-NEXT:    [[TMP9:%.*]] = load atomic i64, i64* [[TMP7]] unordered, align 2
 ; CHECK-NEXT:    store atomic i64 [[TMP9]], i64* [[TMP8]] unordered, align 2
-; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 16, i32 2)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 16, i32 2)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 2, i32 2)
@@ -191,7 +192,7 @@
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8* [[DEST]] to i64*
 ; CHECK-NEXT:    [[TMP6:%.*]] = load atomic i64, i64* [[TMP4]] unordered, align 4
 ; CHECK-NEXT:    store atomic i64 [[TMP6]], i64* [[TMP5]] unordered, align 4
-; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 16, i32 4)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 16, i32 4)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 4, i32 4)
@@ -206,7 +207,7 @@
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[DEST:%.*]] to i64*
 ; CHECK-NEXT:    [[TMP3:%.*]] = load atomic i64, i64* [[TMP1]] unordered, align 8
 ; CHECK-NEXT:    store atomic i64 [[TMP3]], i64* [[TMP2]] unordered, align 8
-; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 [[DEST]], i8* align 8 [[SRC]], i32 16, i32 8)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 16, i32 8)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 8, i32 8)
@@ -216,7 +217,7 @@
 
 define void @test_memmove_loadstore_16(i8* %dest, i8* %src) {
 ; CHECK-LABEL: @test_memmove_loadstore_16(
-; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 [[DEST:%.*]], i8* align 16 [[SRC:%.*]], i32 16, i32 16)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 16, i32 16)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 16, i32 16)
@@ -270,7 +271,8 @@
 ; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
 ; CHECK-NEXT:    [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 1
 ; CHECK-NEXT:    store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 1
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 16, i32 1)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 16, i32 1)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 128, i32 1)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 1, i32 1)
@@ -278,6 +280,7 @@
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 4, i32 1)
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 8, i32 1)
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 16, i32 1)
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 128, i32 1)
   ret void
 }
 
@@ -295,13 +298,13 @@
 ; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i8* [[DEST]] to i64*
 ; CHECK-NEXT:    [[TMP9:%.*]] = load atomic i64, i64* [[TMP7]] unordered, align 2
 ; CHECK-NEXT:    store atomic i64 [[TMP9]], i64* [[TMP8]] unordered, align 2
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 16, i32 2)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 128, i32 2)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 2, i32 2)
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 4, i32 2)
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 8, i32 2)
-  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 16, i32 2)
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 128, i32 2)
   ret void
 }
 
@@ -315,12 +318,12 @@
 ; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8* [[DEST]] to i64*
 ; CHECK-NEXT:    [[TMP6:%.*]] = load atomic i64, i64* [[TMP4]] unordered, align 4
 ; CHECK-NEXT:    store atomic i64 [[TMP6]], i64* [[TMP5]] unordered, align 4
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 16, i32 4)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 128, i32 4)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 4, i32 4)
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 8, i32 4)
-  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 16, i32 4)
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 128, i32 4)
   ret void
 }
 
@@ -330,20 +333,20 @@
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[DEST:%.*]] to i64*
 ; CHECK-NEXT:    [[TMP3:%.*]] = load atomic i64, i64* [[TMP1]] unordered, align 8
 ; CHECK-NEXT:    store atomic i64 [[TMP3]], i64* [[TMP2]] unordered, align 8
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 [[DEST]], i8* align 8 [[SRC]], i32 16, i32 8)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 [[DEST]], i8* align 8 [[SRC]], i32 128, i32 8)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 8, i32 8)
-  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 16, i32 8)
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 128, i32 8)
   ret void
 }
 
 define void @test_memcpy_loadstore_16(i8* %dest, i8* %src) {
 ; CHECK-LABEL: @test_memcpy_loadstore_16(
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 [[DEST:%.*]], i8* align 16 [[SRC:%.*]], i32 16, i32 16)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 [[DEST:%.*]], i8* align 16 [[SRC:%.*]], i32 128, i32 16)
 ; CHECK-NEXT:    ret void
 ;
-  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 16, i32 16)
+  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 128, i32 16)
   ret void
 }
Index: test/Transforms/InstCombine/memcpy-to-load.ll
===================================================================
--- test/Transforms/InstCombine/memcpy-to-load.ll
+++ test/Transforms/InstCombine/memcpy-to-load.ll
@@ -1,5 +1,4 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -instcombine -S | FileCheck %s --check-prefix=ALL --check-prefix=NODL
 ; RUN: opt < %s -instcombine -S -data-layout=n32 | FileCheck %s --check-prefix=ALL --check-prefix=I32
 ; RUN: opt < %s -instcombine -S -data-layout=n32:64 | FileCheck %s --check-prefix=ALL --check-prefix=I64
 ; RUN: opt < %s -instcombine -S -data-layout=n32:64:128 | FileCheck %s --check-prefix=ALL --check-prefix=I128
@@ -65,22 +64,49 @@
 }
 
 define void @copy_8_bytes(i8* %d, i8* %s) {
-; ALL-LABEL: @copy_8_bytes(
-; ALL-NEXT:    [[TMP1:%.*]] = bitcast i8* [[S:%.*]] to i64*
-; ALL-NEXT:    [[TMP2:%.*]] = bitcast i8* [[D:%.*]] to i64*
-; ALL-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
-; ALL-NEXT:    store i64 [[TMP3]], i64* [[TMP2]], align 1
-; ALL-NEXT:    ret void
+; I32-LABEL: @copy_8_bytes(
+; I32-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[D:%.*]], i8* align 1 [[S:%.*]], i32 8, i1 false)
+; I32-NEXT:    ret void
+;
+; For datalayout with largest legal integer type size of 4 bytes, all memcpy with size less than 8 bytes (and power-of-2) will be expanded inline with load/store
+; I64-LABEL: @copy_8_bytes(
+; I64-NEXT:    [[TMP1:%.*]] = bitcast i8* [[S:%.*]] to i64*
+; I64-NEXT:    [[TMP2:%.*]] = bitcast i8* [[D:%.*]] to i64*
+; I64-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
+; I64-NEXT:    store i64 [[TMP3]], i64* [[TMP2]], align 1
+; I64-NEXT:    ret void
+;
+; I128-LABEL: @copy_8_bytes(
+; I128-NEXT:    [[TMP1:%.*]] = bitcast i8* [[S:%.*]] to i64*
+; I128-NEXT:    [[TMP2:%.*]] = bitcast i8* [[D:%.*]] to i64*
+; I128-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 1
+; I128-NEXT:    store i64 [[TMP3]], i64* [[TMP2]], align 1
+; I128-NEXT:    ret void
 ;
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 8, i1 false)
   ret void
 }
 
 define void @copy_16_bytes(i8* %d, i8* %s) {
-; ALL-LABEL: @copy_16_bytes(
-; ALL-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[D:%.*]], i8* align 1 [[S:%.*]], i32 16, i1 false)
-; ALL-NEXT:    ret void
+; I32-LABEL: @copy_16_bytes(
+; I32-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[D:%.*]], i8* align 1 [[S:%.*]], i32 16, i1 false)
+; I32-NEXT:    ret void
 ;
+; I64-LABEL: @copy_16_bytes(
+; I64-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 [[D:%.*]], i8* align 1 [[S:%.*]], i32 16, i1 false)
+; I64-NEXT:    ret void
+
+; For datalayout with largest legal integer type size of 4 bytes, all memcpy with size less than 8 bytes (and power-of-2) will be expanded inline with load/store
+; I128-LABEL: @copy_16_bytes(
+; I128-NEXT:    [[TMP1:%.*]] = bitcast i8* [[S:%.*]] to i128*
+; I128-NEXT:    [[TMP2:%.*]] = bitcast i8* [[D:%.*]] to i128*
+; I128-NEXT:    [[TMP3:%.*]] = load i128, i128* [[TMP1]], align 1
+; I128-NEXT:    store i128 [[TMP3]], i128* [[TMP2]], align 1
+; I128-NEXT:    ret void
+;
+; For datalayout with largest legal integer type size of 8 bytes, all memcpy with size less than 16 bytes (and power-of-2) will be expanded inline with load/store
+; For datalayout with largest legal integer type size of 16 bytes, all memcpy with size less than 32 bytes (and power-of-2) will be expanded inline with load/store
+
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 16, i1 false)
   ret void
 }
Index: test/Transforms/InstCombine/memmove.ll
===================================================================
--- test/Transforms/InstCombine/memmove.ll
+++ test/Transforms/InstCombine/memmove.ll
@@ -1,5 +1,6 @@
 ; This test makes sure that memmove instructions are properly eliminated.
 ;
+target datalayout = "e-p:64:64:64-p1:32:32:32-p2:16:16:16-n8:16:32:64"
 ; RUN: opt < %s -instcombine -S | FileCheck %s
 
 @S = internal constant [33 x i8] c"panic: restorelist inconsistency\00"         ; <[33 x i8]*> [#uses=1]
Index: test/Transforms/InstCombine/pr31990_wrong_memcpy.ll
===================================================================
--- test/Transforms/InstCombine/pr31990_wrong_memcpy.ll
+++ test/Transforms/InstCombine/pr31990_wrong_memcpy.ll
@@ -1,4 +1,5 @@
 ; RUN: opt -S -instcombine %s -o - | FileCheck %s
+target datalayout = "e-p:64:64:64-p1:32:32:32-p2:16:16:16-n8:16:32:64"
 
 ; Regression test of PR31990. A memcpy of one byte, copying 0xff, was
 ; replaced with a single store of an i4 0xf.
Index: test/Transforms/InstCombine/snprintf.ll
===================================================================
--- test/Transforms/InstCombine/snprintf.ll
+++ test/Transforms/InstCombine/snprintf.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -instcombine -S | FileCheck %s
+; RUN: opt -data-layout=n32 < %s -instcombine -S | FileCheck %s
 
 @.str = private unnamed_addr constant [4 x i8] c"str\00", align 1
 @.str.1 = private unnamed_addr constant [3 x i8] c"%%\00", align 1
Index: test/Transforms/InstCombine/sprintf-1.ll
===================================================================
--- test/Transforms/InstCombine/sprintf-1.ll
+++ test/Transforms/InstCombine/sprintf-1.ll
@@ -3,7 +3,7 @@
 ; RUN: opt < %s -instcombine -S | FileCheck %s
 ; RUN: opt < %s -mtriple xcore-xmos-elf -instcombine -S | FileCheck %s -check-prefix=CHECK-IPRINTF
 
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n32"
 
 @hello_world = constant [13 x i8] c"hello world\0A\00"
 @null = constant [1 x i8] zeroinitializer
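
For reference, a minimal standalone sketch (not part of the patch itself) of the behavior the new gating is meant to produce, written in the style of the updated memcpy-to-load.ll test. It assumes opt -instcombine with a datalayout whose largest legal integer type is i64 (the n32:64 spec); the function names and checks below are illustrative only.

; Sketch, assuming n32:64: an 8-byte memcpy fits the largest legal integer and
; should become an i64 load/store pair, while a 16-byte memcpy exceeds it and
; should be left as a call.
; RUN: opt < %s -instcombine -S -data-layout=n32:64 | FileCheck %s

declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i1)

define void @expand_8_bytes(i8* %d, i8* %s) {
; CHECK-LABEL: @expand_8_bytes(
; CHECK-NOT: call void @llvm.memcpy
; CHECK: load i64
; CHECK: store i64
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 8, i1 false)
  ret void
}

define void @keep_16_bytes(i8* %d, i8* %s) {
; CHECK-LABEL: @keep_16_bytes(
; CHECK: call void @llvm.memcpy
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %d, i8* %s, i32 16, i1 false)
  ret void
}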