Index: llvm/trunk/test/CodeGen/ARM/vzip.ll
===================================================================
--- llvm/trunk/test/CodeGen/ARM/vzip.ll
+++ llvm/trunk/test/CodeGen/ARM/vzip.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s

 define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
@@ -20,11 +21,11 @@
 define <16 x i8> @vzipi8_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ; CHECK-LABEL: vzipi8_Qres:
 ; CHECK: @ BB#0:
-; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1]
-; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0]
-; CHECK-NEXT: vzip.8 [[LDR0]], [[LDR1]]
-; CHECK-NEXT: vmov r0, r1, [[LDR0]]
-; CHECK-NEXT: vmov r2, r3, [[LDR1]]
+; CHECK-NEXT: vldr d17, [r1]
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vzip.8 d16, d17
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
 ; CHECK-NEXT: mov pc, lr
 	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = load <8 x i8>, <8 x i8>* %B
@@ -52,11 +53,11 @@
 define <8 x i16> @vzipi16_Qres(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 ; CHECK-LABEL: vzipi16_Qres:
 ; CHECK: @ BB#0:
-; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1]
-; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0]
-; CHECK-NEXT: vzip.16 [[LDR0]], [[LDR1]]
-; CHECK-NEXT: vmov r0, r1, [[LDR0]]
-; CHECK-NEXT: vmov r2, r3, [[LDR1]]
+; CHECK-NEXT: vldr d17, [r1]
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vzip.16 d16, d17
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
 ; CHECK-NEXT: mov pc, lr
 	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = load <4 x i16>, <4 x i16>* %B
@@ -220,11 +221,11 @@
 define <16 x i8> @vzipi8_undef_Qres(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 ; CHECK-LABEL: vzipi8_undef_Qres:
 ; CHECK: @ BB#0:
-; CHECK-NEXT: vldr [[LDR1:d[0-9]+]], [r1]
-; CHECK-NEXT: vldr [[LDR0:d[0-9]+]], [r0]
-; CHECK-NEXT: vzip.8 [[LDR0]], [[LDR1]]
-; CHECK-NEXT: vmov r0, r1, [[LDR0]]
-; CHECK-NEXT: vmov r2, r3, [[LDR1]]
+; CHECK-NEXT: vldr d17, [r1]
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vzip.8 d16, d17
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
 ; CHECK-NEXT: mov pc, lr
 	%tmp1 = load <8 x i8>, <8 x i8>* %A
 	%tmp2 = load <8 x i8>, <8 x i8>* %B
@@ -266,9 +267,15 @@
 }

 define <8 x i16> @vzip_lower_shufflemask_undef(<4 x i16>* %A, <4 x i16>* %B) {
+; CHECK-LABEL: vzip_lower_shufflemask_undef:
+; CHECK: @ BB#0: @ %entry
+; CHECK-NEXT: vldr d17, [r1]
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vzip.16 d16, d17
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
 entry:
- ; CHECK-LABEL: vzip_lower_shufflemask_undef
- ; CHECK: vzip
 	%tmp1 = load <4 x i16>, <4 x i16>* %A
 	%tmp2 = load <4 x i16>, <4 x i16>* %B
 	%0 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <8 x i32>
@@ -276,30 +283,45 @@
 }

 define <4 x i32> @vzip_lower_shufflemask_zeroed(<2 x i32>* %A) {
+; CHECK-LABEL: vzip_lower_shufflemask_zeroed:
+; CHECK: @ BB#0: @ %entry
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vdup.32 q9, d16[0]
+; CHECK-NEXT: vzip.32 q8, q9
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
 entry:
- ; CHECK-LABEL: vzip_lower_shufflemask_zeroed
- ; CHECK-NOT: vtrn
- ; CHECK: vzip
 	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%0 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp1, <4 x i32>
 	ret <4 x i32> %0
 }

 define <4 x i32> @vzip_lower_shufflemask_vuzp(<2 x i32>* %A) {
+; CHECK-LABEL: vzip_lower_shufflemask_vuzp:
+; CHECK: @ BB#0: @ %entry
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vdup.32 q9, d16[0]
+; CHECK-NEXT: vzip.32 q8, q9
+; CHECK-NEXT: vmov r0, r1, d16
+; CHECK-NEXT: vmov r2, r3, d17
+; CHECK-NEXT: mov pc, lr
 entry:
- ; CHECK-LABEL: vzip_lower_shufflemask_vuzp
- ; CHECK-NOT: vuzp
- ; CHECK: vzip
 	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%0 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp1, <4 x i32>
 	ret <4 x i32> %0
 }

 define void @vzip_undef_rev_shufflemask_vtrn(<2 x i32>* %A, <4 x i32>* %B) {
+; CHECK-LABEL: vzip_undef_rev_shufflemask_vtrn:
+; CHECK: @ BB#0: @ %entry
+; CHECK-NEXT: vldr d16, [r0]
+; CHECK-NEXT: vorr q9, q8, q8
+; CHECK-NEXT: vzip.32 q8, q9
+; CHECK-NEXT: vext.32 q8, q8, q8, #2
+; CHECK-NEXT: vst1.64 {d16, d17}, [r1]
+; CHECK-NEXT: mov pc, lr
 entry:
- ; CHECK-LABEL: vzip_undef_rev_shufflemask_vtrn
- ; CHECK-NOT: vtrn
- ; CHECK: vzip
 	%tmp1 = load <2 x i32>, <2 x i32>* %A
 	%0 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <4 x i32>
 	store <4 x i32> %0, <4 x i32>* %B
@@ -307,10 +329,16 @@
 }

 define void @vzip_vext_factor(<8 x i16>* %A, <4 x i16>* %B) {
+; CHECK-LABEL: vzip_vext_factor:
+; CHECK: @ BB#0: @ %entry
+; CHECK-NEXT: vld1.64 {d16, d17}, [r0]
+; CHECK-NEXT: vext.16 d16, d16, d17, #3
+; CHECK-NEXT: vext.16 d17, d16, d16, #1
+; CHECK-NEXT: vzip.16 d16, d17
+; CHECK-NEXT: vext.16 d16, d16, d16, #1
+; CHECK-NEXT: vstr d16, [r1]
+; CHECK-NEXT: mov pc, lr
 entry:
- ; CHECK-LABEL: vzip_vext_factor
- ; CHECK: vext.16 d16, d16, d17, #3
- ; CHECK: vzip
 	%tmp1 = load <8 x i16>, <8 x i16>* %A
 	%0 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <4 x i32>
 	store <4 x i16> %0, <4 x i16>* %B
@@ -318,12 +346,14 @@
 }

 define <8 x i8> @vdup_zip(i8* nocapture readonly %x, i8* nocapture readonly %y) {
+; CHECK-LABEL: vdup_zip:
+; CHECK: @ BB#0: @ %entry
+; CHECK-NEXT: vld1.8 {d16[]}, [r1]
+; CHECK-NEXT: vld1.8 {d17[]}, [r0]
+; CHECK-NEXT: vzip.8 d17, d16
+; CHECK-NEXT: vmov r0, r1, d17
+; CHECK-NEXT: mov pc, lr
 entry:
- ; CHECK-LABEL: vdup_zip:
- ; CHECK: vld1.8
- ; CHECK-NEXT: vld1.8
- ; CHECK-NEXT: vzip.8
- ; CHECK-NEXT: vmov r0, r1
 	%0 = load i8, i8* %x, align 1
 	%1 = insertelement <8 x i8> undef, i8 %0, i32 0
 	%lane = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32>
Index: llvm/trunk/utils/update_llc_test_checks.py
===================================================================
--- llvm/trunk/utils/update_llc_test_checks.py
+++ llvm/trunk/utils/update_llc_test_checks.py
@@ -28,6 +28,13 @@
 SCRUB_WHITESPACE_RE = re.compile(r'(?!^(| \w))[ \t]+', flags=re.M)
 SCRUB_TRAILING_WHITESPACE_RE = re.compile(r'[ \t]+$', flags=re.M)
+SCRUB_KILL_COMMENT_RE = re.compile(r'^ *#+ +kill:.*\n')
+
+ASM_FUNCTION_X86_RE = re.compile(
+    r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n[^:]*?'
+    r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
+    r'^\s*(?:[^:\n]+?:\s*\n\s*\.size|\.cfi_endproc|\.globl|\.comm|\.(?:sub)?section)',
+    flags=(re.M | re.S))
 SCRUB_X86_SHUFFLES_RE = (
     re.compile(
         r'^(\s*\w+) [^#\n]+#+ ((?:[xyz]mm\d+|mem)( \{%k\d+\}( \{z\})?)? = .*)$',
         flags=re.M))
@@ -35,20 +42,21 @@
 SCRUB_X86_SP_RE = re.compile(r'\d+\(%(esp|rsp)\)')
 SCRUB_X86_RIP_RE = re.compile(r'[.\w]+\(%rip\)')
 SCRUB_X86_LCP_RE = re.compile(r'\.LCPI[0-9]+_[0-9]+')
-SCRUB_KILL_COMMENT_RE = re.compile(r'^ *#+ +kill:.*\n')
+
+ASM_FUNCTION_ARM_RE = re.compile(
+    r'^(?P<func>[0-9a-zA-Z_]+):\n' # f: (name of function)
+    r'\s+\.fnstart\n' # .fnstart
+    r'(?P<body>.*?)\n' # (body of the function)
+    r'.Lfunc_end[0-9]+:\n', # .Lfunc_end0:
+    flags=(re.M | re.S))

 RUN_LINE_RE = re.compile('^\s*;\s*RUN:\s*(.*)$')
 IR_FUNCTION_RE = re.compile('^\s*define\s+(?:internal\s+)?[^@]*@(\w+)\s*\(')
-ASM_FUNCTION_RE = re.compile(
-    r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n[^:]*?'
-    r'(?P<body>^##?[ \t]+[^:]+:.*?)\s*'
-    r'^\s*(?:[^:\n]+?:\s*\n\s*\.size|\.cfi_endproc|\.globl|\.comm|\.(?:sub)?section)',
-    flags=(re.M | re.S))
 CHECK_PREFIX_RE = re.compile('--check-prefix=(\S+)')
 CHECK_RE = re.compile(r'^\s*;\s*([^:]+?)(?:-NEXT|-NOT|-DAG|-LABEL)?:')


-def scrub_asm(asm):
+def scrub_asm_x86(asm):
   # Scrub runs of whitespace out of the assembly, but leave the leading
   # whitespace in place.
   asm = SCRUB_WHITESPACE_RE.sub(r' ', asm)
@@ -68,14 +76,31 @@
   asm = SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
   return asm

+def scrub_asm_arm(asm):
+  # Scrub runs of whitespace out of the assembly, but leave the leading
+  # whitespace in place.
+  asm = SCRUB_WHITESPACE_RE.sub(r' ', asm)
+  # Expand the tabs used for indentation.
+  asm = string.expandtabs(asm, 2)
+  # Strip kill operands inserted into the asm.
+  asm = SCRUB_KILL_COMMENT_RE.sub('', asm)
+  # Strip trailing whitespace.
+  asm = SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
+  return asm
+
 # Build up a dictionary of all the function bodies.
 def build_function_body_dictionary(raw_tool_output, prefixes, func_dict,
                                    verbose):
-  for m in ASM_FUNCTION_RE.finditer(raw_tool_output):
+  is_arm = re.compile(r'\n\s+\.syntax unified\n').search(raw_tool_output)
+  function_re = ASM_FUNCTION_ARM_RE if is_arm else ASM_FUNCTION_X86_RE
+  for m in function_re.finditer(raw_tool_output):
     if not m:
       continue
     func = m.group('func')
-    scrubbed_body = scrub_asm(m.group('body'))
+    if is_arm:
+      scrubbed_body = scrub_asm_arm(m.group('body'))
+    else:
+      scrubbed_body = scrub_asm_x86(m.group('body'))
     if func.startswith('stress'):
       # We only use the last line of the function body for stress tests.
       scrubbed_body = '\n'.join(scrubbed_body.splitlines()[-1:])
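
The shape of the change: the script sniffs the raw llc output for the ARM directive `.syntax unified` to pick a function-matching regex, then extracts each body between the `func:` / `.fnstart` header and the `.Lfunc_end<N>:` label before scrubbing it. The following standalone sketch is not part of the patch; the two regexes are copied from the diff above, while the sample assembly and the driver around them are hand-written for illustration:

# Minimal sketch (not part of the patch): the ARM function regex and the
# is_arm sniff from the diff, run on a small hand-written llc-style output.
import re

ASM_FUNCTION_ARM_RE = re.compile(
    r'^(?P<func>[0-9a-zA-Z_]+):\n' # f: (name of function)
    r'\s+\.fnstart\n'              # .fnstart
    r'(?P<body>.*?)\n'             # (body of the function)
    r'.Lfunc_end[0-9]+:\n',        # .Lfunc_end0:
    flags=(re.M | re.S))

raw_tool_output = (
    '\t.text\n'
    '\t.syntax unified\n'
    'vzipi8_Qres:\n'
    '\t.fnstart\n'
    '@ BB#0:\n'
    '\tvldr\td17, [r1]\n'
    '\tvldr\td16, [r0]\n'
    '\tvzip.8\td16, d17\n'
    '\tvmov\tr0, r1, d16\n'
    '\tvmov\tr2, r3, d17\n'
    '\tmov\tpc, lr\n'
    '.Lfunc_end0:\n')

# Same ARM-vs-x86 sniff the patch adds to build_function_body_dictionary().
is_arm = re.compile(r'\n\s+\.syntax unified\n').search(raw_tool_output)
assert is_arm

for m in ASM_FUNCTION_ARM_RE.finditer(raw_tool_output):
    # The patch feeds the body through scrub_asm_arm(); on this sample the
    # only step that changes anything is the 2-column tab expansion (there
    # are no kill comments and no trailing whitespace to strip).
    print(m.group('func'))                # vzipi8_Qres
    print(m.group('body').expandtabs(2))  # '@ BB#0:' plus six instructions

Note how much simpler the ARM pattern can be than ASM_FUNCTION_X86_RE: ARM output brackets every function between `.fnstart` and a `.Lfunc_endN:` label, so there is no need to anchor on the `# @func` comment or on `.size`/`.cfi_endproc` the way the x86 pattern does.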