diff --git a/llvm/test/CodeGen/ARM/fast-isel-inline-asm.ll b/llvm/test/CodeGen/ARM/fast-isel-inline-asm.ll
--- a/llvm/test/CodeGen/ARM/fast-isel-inline-asm.ll
+++ b/llvm/test/CodeGen/ARM/fast-isel-inline-asm.ll
@@ -1,34 +1,30 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
-; RUN: llc -opaque-pointers=0 -fast-isel < %s | FileCheck %s
+; RUN: llc -fast-isel < %s | FileCheck %s
 target datalayout = "e-m:o-p:32:32-i1:8:32-i8:8:32-i16:16:32-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
 target triple = "thumbv7-apple-ios5.0.0"
 
 %0 = type opaque
 
-; Make sure that the inline asm starts right after the call to bar.
-define void @test_inline_asm_sideeffect(%0* %call) {
+; Make sure that there are no unexpected instructions between the call to bar
+; and the inline asm.
+define void @test_inline_asm_sideeffect(ptr %call) {
 ; CHECK-LABEL: test_inline_asm_sideeffect:
 ; CHECK:       @ %bb.0:
 ; CHECK-NEXT:    push {r4, r7, lr}
 ; CHECK-NEXT:    add r7, sp, #4
 ; CHECK-NEXT:    mov r4, r0
 ; CHECK-NEXT:    bl _bar
+; CHECK-NEXT:    mov r0, r4
 ; CHECK-NEXT:    @ InlineAsm Start
 ; CHECK-NEXT:    mov r7, r7 @ marker
 ; CHECK-NEXT:    @ InlineAsm End
-; CHECK-NEXT:    movw r0, :lower16:(L_foo$non_lazy_ptr-(LPC0_0+4))
-; CHECK-NEXT:    movt r0, :upper16:(L_foo$non_lazy_ptr-(LPC0_0+4))
-; CHECK-NEXT:  LPC0_0:
-; CHECK-NEXT:    add r0, pc
-; CHECK-NEXT:    ldr r1, [r0]
-; CHECK-NEXT:    mov r0, r4
-; CHECK-NEXT:    blx r1
+; CHECK-NEXT:    bl _foo
 ; CHECK-NEXT:    pop {r4, r7, pc}
   call void @bar()
   call void asm sideeffect "mov\09r7, r7\09\09@ marker", ""()
-  %1 = call %0* bitcast (i8* (i8*)* @foo to %0* (%0*)*)(%0* %call)
+  %1 = call ptr @foo(ptr %call)
   ret void
 }
 
-declare i8* @foo(i8*)
+declare ptr @foo(ptr)
 declare void @bar()