diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -24,6 +24,7 @@
 #include "clang/Basic/PrettyStackTrace.h"
 #include "clang/Basic/SourceManager.h"
 #include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/IR/Assumptions.h"
@@ -2325,6 +2326,84 @@
   }
 }
 
+static void EmitAsmStores(
+    CodeGenFunction &CGF,
+    const AsmStmt &S,
+    const llvm::ArrayRef<llvm::Value *> RegResults,
+    const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
+    const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
+    const llvm::ArrayRef<LValue> ResultRegDests,
+    const llvm::ArrayRef<QualType> ResultRegQualTys,
+    const llvm::BitVector &ResultTypeRequiresCast,
+    const llvm::BitVector &ResultRegIsFlagReg) {
+  CGBuilderTy &Builder = CGF.Builder;
+  CodeGenModule &CGM = CGF.CGM;
+  llvm::LLVMContext &CTX = CGF.getLLVMContext();
+
+  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
+    llvm::Value *Tmp = RegResults[i];
+    llvm::Type *TruncTy = ResultTruncRegTypes[i];
+
+    if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
+      // Target must guarantee the Value `Tmp` here is lowered to a boolean
+      // value.
+      llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
+      llvm::Value *IsBooleanValue =
+          Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
+      llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
+      Builder.CreateCall(FnAssume, IsBooleanValue);
+    }
+
+    // If the result type of the LLVM IR asm doesn't match the result type of
+    // the expression, do the conversion.
+    if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
+
+      // Truncate the integer result to the right size, note that TruncTy can be
+      // a pointer.
+      if (TruncTy->isFloatingPointTy())
+        Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
+      else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
+        uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
+        Tmp = Builder.CreateTrunc(Tmp,
+                                  llvm::IntegerType::get(CTX, (unsigned)ResSize));
+        Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
+      } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
+        uint64_t TmpSize = CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
+        Tmp = Builder.CreatePtrToInt(Tmp,
+                                     llvm::IntegerType::get(CTX, (unsigned)TmpSize));
+        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
+      } else if (TruncTy->isIntegerTy()) {
+        Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
+      } else if (TruncTy->isVectorTy()) {
+        Tmp = Builder.CreateBitCast(Tmp, TruncTy);
+      }
+    }
+
+    LValue Dest = ResultRegDests[i];
+    // ResultTypeRequiresCast elements correspond to the first
+    // ResultTypeRequiresCast.size() elements of RegResults.
+    if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
+      unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
+      Address A = Builder.CreateElementBitCast(Dest.getAddress(CGF),
+                                               ResultRegTypes[i]);
+      if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
+        Builder.CreateStore(Tmp, A);
+        continue;
+      }
+
+      QualType Ty = CGF.getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
+      if (Ty.isNull()) {
+        const Expr *OutExpr = S.getOutputExpr(i);
+        CGM.getDiags().Report(OutExpr->getExprLoc(),
+                              diag::err_store_value_to_reg);
+        return;
+      }
+      Dest = CGF.MakeAddrLValue(A, Ty);
+    }
+    CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
+  }
+}
+
 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
   // Pop all cleanup blocks at the end of the asm statement.
   CodeGenFunction::RunCleanupsScope Cleanups(*this);
@@ -2625,7 +2704,7 @@
   SmallVector<llvm::BasicBlock *, 16> Transfer;
   llvm::BasicBlock *Fallthrough = nullptr;
   bool IsGCCAsmGoto = false;
-  if (const auto *GS =  dyn_cast<GCCAsmStmt>(&S)) {
+  if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
     IsGCCAsmGoto = GS->isAsmGoto();
     if (IsGCCAsmGoto) {
       for (const auto *E : GS->labels()) {
@@ -2719,24 +2798,51 @@
       FTy, AsmString, Constraints, HasSideEffect,
       /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
   std::vector<llvm::Value *> RegResults;
+  llvm::CallBrInst *CBR;
+  // A 2D vector, indexed by indirect destination, then ResultReg.
+  std::vector<std::vector<llvm::Value *>> CBRRegResults;
   if (IsGCCAsmGoto) {
-    llvm::CallBrInst *Result =
-        Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
+    CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
     EmitBlock(Fallthrough);
-    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
-                      ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
-                      ResultRegTypes, ArgElemTypes, *this, RegResults);
+    UpdateAsmCallInst(*CBR, HasSideEffect, false, ReadOnly, ReadNone,
+                      InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
+                      *this, RegResults);
+    // Because we are emitting code top to bottom, we don't have enough
+    // information at this point to know precisely whether we have a critical
+    // edge. If we have outputs, split all indirect destinations.
+    if (RegResults.size()) {
+      CBRRegResults.resize(CBR->getNumIndirectDests());
+      for (unsigned i = 0, e = CBR->getNumIndirectDests(); i != e; ++i) {
+        CBRRegResults[i].resize(ResultRegTypes.size());
+        llvm::BasicBlock *Dest = CBR->getIndirectDest(i);
+        llvm::Twine SynthName = Dest->getName() + ".split";
+        llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
+        llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
+        Builder.SetInsertPoint(SynthBB);
+
+        if (ResultRegTypes.size() == 1) {
+          CBRRegResults[i][0] = CBR;
+        } else {
+          for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
+            llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
+            CBRRegResults[i][j] = Tmp;
+          }
+        }
+        EmitBranch(Dest);
+        EmitBlock(SynthBB);
+        CBR->setIndirectDest(i, SynthBB);
+      }
+    }
   } else if (HasUnwindClobber) {
     llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
     UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
                       InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
                       *this, RegResults);
   } else {
-    llvm::CallInst *Result =
-        Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
-    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
-                      ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
-                      ResultRegTypes, ArgElemTypes, *this, RegResults);
+    llvm::CallInst *Result = Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
+    UpdateAsmCallInst(*Result, HasSideEffect, false, ReadOnly, ReadNone,
+                      InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
+                      *this, RegResults);
   }
 
   assert(RegResults.size() == ResultRegTypes.size());
@@ -2746,67 +2852,26 @@
   // in which case its size may grow.
   assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
   assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
-  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
-    llvm::Value *Tmp = RegResults[i];
-    llvm::Type *TruncTy = ResultTruncRegTypes[i];
-
-    if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
-      // Target must guarantee the Value `Tmp` here is lowered to a boolean
-      // value.
-      llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
-      llvm::Value *IsBooleanValue =
-          Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
-      llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
-      Builder.CreateCall(FnAssume, IsBooleanValue);
-    }
-
-    // If the result type of the LLVM IR asm doesn't match the result type of
-    // the expression, do the conversion.
-    if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
-
-      // Truncate the integer result to the right size, note that TruncTy can be
-      // a pointer.
-      if (TruncTy->isFloatingPointTy())
-        Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
-      else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
-        uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
-        Tmp = Builder.CreateTrunc(Tmp,
-            llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
-        Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
-      } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
-        uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
-        Tmp = Builder.CreatePtrToInt(Tmp,
-            llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
-        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
-      } else if (TruncTy->isIntegerTy()) {
-        Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
-      } else if (TruncTy->isVectorTy()) {
-        Tmp = Builder.CreateBitCast(Tmp, TruncTy);
-      }
-    }
-
-    LValue Dest = ResultRegDests[i];
-    // ResultTypeRequiresCast elements correspond to the first
-    // ResultTypeRequiresCast.size() elements of RegResults.
-    if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
-      unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
-      Address A = Builder.CreateElementBitCast(Dest.getAddress(*this),
-                                               ResultRegTypes[i]);
-      if (getTargetHooks().isScalarizableAsmOperand(*this, TruncTy)) {
-        Builder.CreateStore(Tmp, A);
+  EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
+                ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
+                ResultRegIsFlagReg);
+
+  if (IsGCCAsmGoto && CBRRegResults.size()) {
+    for (unsigned i = 0, e = CBR->getNumIndirectDests(); i != e; ++i) {
+      assert(CBRRegResults[i].size() == ResultRegTypes.size());
+      // If we happen to share the same indirect and default dest, don't re-add
+      // stores. That was done for the default destination in the above call to
+      // EmitAsmStores.
+      llvm::BasicBlock *Succ = CBR->getIndirectDest(i);
+      if (Succ == CBR->getDefaultDest())
         continue;
-      }
-
-      QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
-      if (Ty.isNull()) {
-        const Expr *OutExpr = S.getOutputExpr(i);
-        CGM.getDiags().Report(OutExpr->getExprLoc(),
-                              diag::err_store_value_to_reg);
-        return;
-      }
-      Dest = MakeAddrLValue(A, Ty);
+      llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
+      Builder.SetInsertPoint(Succ, --(Succ->end()));
+      EmitAsmStores(*this, S, CBRRegResults[i], ResultRegTypes, ResultTruncRegTypes,
+                    ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
+                    ResultRegIsFlagReg);
     }
-    EmitStoreThroughLValue(RValue::get(Tmp), Dest);
   }
 }
diff --git a/clang/test/CodeGen/asm-goto.c b/clang/test/CodeGen/asm-goto.c
--- a/clang/test/CodeGen/asm-goto.c
+++ b/clang/test/CodeGen/asm-goto.c
@@ -22,12 +22,12 @@ int test2(int cond) {
   // CHECK-LABEL: define{{.*}} i32 @test2(
   // CHECK: callbr i32 asm sideeffect
-  // CHECK: to label %asm.fallthrough [label %label_true, label %loop]
+  // CHECK: to label %asm.fallthrough [label %label_true.split, label %loop.split]
   // CHECK-LABEL: asm.fallthrough:
   asm volatile goto("testl %0, %0; jne %l2;" : "=r"(cond) : "r"(cond) :: label_true, loop);
   asm volatile goto("testl %0, %0; jne %l3;" : "=r"(cond) : "r"(cond) :: label_true, loop);
   // CHECK: callbr i32 asm sideeffect
-  // CHECK: to label %asm.fallthrough1 [label %label_true, label %loop]
+  // CHECK: to label %asm.fallthrough1 [label %label_true.split2, label %loop.split3]
   // CHECK-LABEL: asm.fallthrough1:
   return 0;
 loop:
@@ -39,13 +39,13 @@ int test3(int out1, int out2) {
   // CHECK-LABEL: define{{.*}} i32 @test3(
   // CHECK: callbr { i32, i32 } asm sideeffect
-  // CHECK: to label %asm.fallthrough [label %label_true, label %loop]
+  // CHECK: to label %asm.fallthrough [label %label_true.split, label %loop.split]
   // CHECK-LABEL: asm.fallthrough:
   asm volatile goto("testl %0, %0; jne %l3;" : "=r"(out1), "=r"(out2) : "r"(out1) :: label_true, loop);
   asm volatile goto("testl %0, %0; jne %l4;" : "=r"(out1), "=r"(out2) : "r"(out1) :: label_true, loop);
   // CHECK: callbr { i32, i32 } asm sideeffect
-  // CHECK: to label %asm.fallthrough2 [label %label_true, label %loop]
-  // CHECK-LABEL: asm.fallthrough2:
+  // CHECK: to label %asm.fallthrough6 [label %label_true.split11, label %loop.split14]
+  // CHECK-LABEL: asm.fallthrough6:
   return 0;
 loop:
   return 0;
@@ -56,15 +56,15 @@ int test4(int out1, int out2) {
   // CHECK-LABEL: define{{.*}} i32 @test4(
   // CHECK: callbr { i32, i32 } asm sideeffect "jne ${5:l}", "={si},={di},r,0,1,!i,!i
-  // CHECK: to label %asm.fallthrough [label %label_true, label %loop]
+  // CHECK: to label %asm.fallthrough [label %label_true.split, label %loop.split]
   // CHECK-LABEL: asm.fallthrough:
   if (out1 < out2)
     asm volatile goto("jne %l5" : "+S"(out1), "+D"(out2) : "r"(out1) :: label_true, loop);
   else
     asm volatile goto("jne %l7" : "+S"(out1), "+D"(out2) : "r"(out1), "r"(out2) :: label_true, loop);
   // CHECK: callbr { i32, i32 } asm sideeffect "jne ${7:l}", "={si},={di},r,r,0,1,!i,!i
-  // CHECK: to label %asm.fallthrough2 [label %label_true, label %loop]
-  // CHECK-LABEL: asm.fallthrough2:
+  // CHECK: to label %asm.fallthrough6 [label %label_true.split11, label %loop.split14]
+  // CHECK-LABEL: asm.fallthrough6:
   return out1 + out2;
 loop:
   return -1;
@@ -75,7 +75,7 @@ int test5(int addr, int size, int limit) {
   // CHECK-LABEL: define{{.*}} i32 @test5(
   // CHECK: callbr i32 asm "add $1,$0 ; jc ${4:l} ; cmp $2,$0 ; ja ${4:l} ; ", "=r,imr,imr,0,!i
-  // CHECK: to label %asm.fallthrough [label %t_err]
+  // CHECK: to label %asm.fallthrough [label %t_err.split]
%asm.fallthrough [label %t_err] + // CHECK: to label %asm.fallthrough [label %t_err.split] // CHECK-LABEL: asm.fallthrough: asm goto( "add %1,%0 ; " @@ -93,7 +93,7 @@ int test6(int out1) { // CHECK-LABEL: define{{.*}} i32 @test6( // CHECK: callbr i32 asm sideeffect "testl $0, $0; testl $1, $1; jne ${3:l}", "={si},r,0,!i,!i,{{.*}} - // CHECK: to label %asm.fallthrough [label %label_true, label %landing] + // CHECK: to label %asm.fallthrough [label %label_true.split, label %landing.split] // CHECK-LABEL: asm.fallthrough: // CHECK-LABEL: landing: int out2 = 42; @@ -111,7 +111,7 @@ void *test7(void) { // CHECK-LABEL: define{{.*}} ptr @test7( // CHECK: %1 = callbr ptr asm "# $0\0A\09# ${2:l}", "=r,0,!i,~{dirflag},~{fpsr},~{flags}"(ptr %0) - // CHECK-NEXT: to label %asm.fallthrough [label %foo] + // CHECK-NEXT: to label %asm.fallthrough [label %foo.split] void *p = &&foo; asm goto ("# %0\n\t# %l2":"+r"(p):::foo); foo: @@ -123,7 +123,7 @@ void *test8(void) { // CHECK-LABEL: define{{.*}} ptr @test8( // CHECK: %1 = callbr ptr asm "# $0\0A\09# ${2:l}", "=r,0,!i,~{dirflag},~{fpsr},~{flags}"(ptr %0) - // CHECK-NEXT: to label %asm.fallthrough [label %foo] + // CHECK-NEXT: to label %asm.fallthrough [label %foo.split] void *p = &&foo; asm goto ("# %0\n\t# %l[foo]":"+r"(p):::foo); foo: diff --git a/clang/test/CodeGen/asm-goto2.c b/clang/test/CodeGen/asm-goto2.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/asm-goto2.c @@ -0,0 +1,156 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py +// REQUIRES: x86-registered-target +// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -O0 -emit-llvm %s -o - | FileCheck %s + +// CHECK-LABEL: @test0( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[RET:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[TMP0:%.*]] = callbr i32 asm "", "=r,!i,~{dirflag},~{fpsr},~{flags}"() #[[ATTR1:[0-9]+]] +// CHECK-NEXT: to label [[ASM_FALLTHROUGH:%.*]] [label %z.split], !srcloc !2 +// CHECK: asm.fallthrough: +// CHECK-NEXT: store i32 [[TMP0]], ptr [[RET]], align 4 +// CHECK-NEXT: store i32 42, ptr [[RET]], align 4 +// CHECK-NEXT: br label [[Z:%.*]] +// CHECK: z: +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[RET]], align 4 +// CHECK-NEXT: ret i32 [[TMP1]] +// CHECK: z.split: +// CHECK-NEXT: store i32 [[TMP0]], ptr [[RET]], align 4 +// CHECK-NEXT: br label [[Z]] +// +int test0 (void) { + int ret; + asm goto ("" : "=r"(ret):::z); + ret = 42; +z: + return ret; +} + +// CHECK-LABEL: @test1( +// CHECK-NEXT: entry: +// CHECK-NEXT: [[RET:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[B:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[TMP0:%.*]] = callbr { i32, i32 } asm "", "=r,=r,!i,~{dirflag},~{fpsr},~{flags}"() #[[ATTR1]] +// CHECK-NEXT: to label [[ASM_FALLTHROUGH:%.*]] [label %z.split], !srcloc !3 +// CHECK: asm.fallthrough: +// CHECK-NEXT: [[ASMRESULT:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0 +// CHECK-NEXT: [[ASMRESULT1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1 +// CHECK-NEXT: store i32 [[ASMRESULT]], ptr [[RET]], align 4 +// CHECK-NEXT: store i32 [[ASMRESULT1]], ptr [[B]], align 4 +// CHECK-NEXT: store i32 42, ptr [[RET]], align 4 +// CHECK-NEXT: br label [[Z:%.*]] +// CHECK: z: +// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[RET]], align 4 +// CHECK-NEXT: ret i32 [[TMP1]] +// CHECK: z.split: +// CHECK-NEXT: [[ASMRESULT2:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0 +// CHECK-NEXT: [[ASMRESULT3:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1 +// CHECK-NEXT: store i32 [[ASMRESULT2]], ptr [[RET]], align 4 +// CHECK-NEXT: store i32 [[ASMRESULT3]], ptr [[B]], 
+// CHECK-NEXT:    br label [[Z]]
+//
+int test1 (void) {
+  int ret, b;
+  asm goto ("" : "=r"(ret), "=r"(b):::z);
+  ret = 42;
+z:
+  return ret;
+}
+
+// CHECK-LABEL: @test2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[RET:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[B:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = callbr { i32, i32 } asm "", "=r,=r,!i,~{dirflag},~{fpsr},~{flags}"() #[[ATTR1]]
+// CHECK-NEXT:    to label [[ASM_FALLTHROUGH:%.*]] [label %z.split], !srcloc !4
+// CHECK:       asm.fallthrough:
+// CHECK-NEXT:    [[ASMRESULT:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0
+// CHECK-NEXT:    [[ASMRESULT1:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
+// CHECK-NEXT:    store i32 [[ASMRESULT]], ptr [[RET]], align 4
+// CHECK-NEXT:    store i32 [[ASMRESULT1]], ptr [[B]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = callbr { i32, i32 } asm "", "=r,=r,!i,~{dirflag},~{fpsr},~{flags}"() #[[ATTR1]]
+// CHECK-NEXT:    to label [[ASM_FALLTHROUGH4:%.*]] [label %z.split9], !srcloc !5
+// CHECK:       asm.fallthrough4:
+// CHECK-NEXT:    [[ASMRESULT5:%.*]] = extractvalue { i32, i32 } [[TMP1]], 0
+// CHECK-NEXT:    [[ASMRESULT6:%.*]] = extractvalue { i32, i32 } [[TMP1]], 1
+// CHECK-NEXT:    store i32 [[ASMRESULT5]], ptr [[RET]], align 4
+// CHECK-NEXT:    store i32 [[ASMRESULT6]], ptr [[B]], align 4
+// CHECK-NEXT:    br label [[Z:%.*]]
+// CHECK:       z:
+// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[RET]], align 4
+// CHECK-NEXT:    ret i32 [[TMP2]]
+// CHECK:       z.split:
+// CHECK-NEXT:    [[ASMRESULT2:%.*]] = extractvalue { i32, i32 } [[TMP0]], 0
+// CHECK-NEXT:    [[ASMRESULT3:%.*]] = extractvalue { i32, i32 } [[TMP0]], 1
+// CHECK-NEXT:    store i32 [[ASMRESULT2]], ptr [[RET]], align 4
+// CHECK-NEXT:    store i32 [[ASMRESULT3]], ptr [[B]], align 4
+// CHECK-NEXT:    br label [[Z]]
+// CHECK:       z.split9:
+// CHECK-NEXT:    [[ASMRESULT7:%.*]] = extractvalue { i32, i32 } [[TMP1]], 0
+// CHECK-NEXT:    [[ASMRESULT8:%.*]] = extractvalue { i32, i32 } [[TMP1]], 1
+// CHECK-NEXT:    store i32 [[ASMRESULT7]], ptr [[RET]], align 4
+// CHECK-NEXT:    store i32 [[ASMRESULT8]], ptr [[B]], align 4
+// CHECK-NEXT:    br label [[Z]]
+//
+int test2 (void) {
+  int ret, b;
+  asm goto ("" : "=r"(ret), "=r"(b):::z);
+  asm goto ("" : "=r"(ret), "=r"(b):::z);
+z:
+  return ret;
+}
+// CHECK-LABEL: @test3(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    [[OUT1_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    store i32 [[OUT1:%.*]], ptr [[OUT1_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = callbr i32 asm "", "=r,!i,!i,~{dirflag},~{fpsr},~{flags}"() #[[ATTR1]]
+// CHECK-NEXT:    to label [[ASM_FALLTHROUGH:%.*]] [label [[LABEL_TRUE_SPLIT:%.*]], label %loop.split], !srcloc !6
+// CHECK:       asm.fallthrough:
+// CHECK-NEXT:    store i32 [[TMP0]], ptr [[OUT1_ADDR]], align 4
+// CHECK-NEXT:    store i32 0, ptr [[RETVAL]], align 4
+// CHECK-NEXT:    br label [[RETURN:%.*]]
+// CHECK:       label_true.split:
+// CHECK-NEXT:    store i32 [[TMP0]], ptr [[OUT1_ADDR]], align 4
+// CHECK-NEXT:    br label [[LABEL_TRUE:%.*]]
+// CHECK:       loop.split:
+// CHECK-NEXT:    store i32 [[TMP0]], ptr [[OUT1_ADDR]], align 4
+// CHECK-NEXT:    br label [[LOOP:%.*]]
+// CHECK:       loop:
+// CHECK-NEXT:    store i32 0, ptr [[RETVAL]], align 4
+// CHECK-NEXT:    br label [[RETURN]]
+// CHECK:       label_true:
+// CHECK-NEXT:    store i32 1, ptr [[RETVAL]], align 4
+// CHECK-NEXT:    br label [[RETURN]]
+// CHECK:       return:
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[RETVAL]], align 4
+// CHECK-NEXT:    ret i32 [[TMP1]]
+//
+int test3 (int out1) {
+  asm goto("" : "=r"(out1)::: label_true, loop);
+  return 0;
+loop:
+  return 0;
+label_true:
+  return 1;
+}
+
+// CHECK-LABEL: @test4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[X:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    br label [[FOO:%.*]]
+// CHECK:       foo:
+// CHECK-NEXT:    [[TMP0:%.*]] = callbr i32 asm "", "=r,!i,~{dirflag},~{fpsr},~{flags}"() #[[ATTR1]]
+// CHECK-NEXT:    to label [[ASM_FALLTHROUGH:%.*]] [label %foo.split], !srcloc !7
+// CHECK:       asm.fallthrough:
+// CHECK-NEXT:    store i32 [[TMP0]], ptr [[X]], align 4
+// CHECK-NEXT:    ret void
+// CHECK:       foo.split:
+// CHECK-NEXT:    store i32 [[TMP0]], ptr [[X]], align 4
+// CHECK-NEXT:    br label [[FOO]]
+//
+void test4 (void) {
+  int x;
+foo:
+  asm goto ("" : "=r"(x):::foo);
+}
diff --git a/clang/test/Modules/asm-goto.c b/clang/test/Modules/asm-goto.c
--- a/clang/test/Modules/asm-goto.c
+++ b/clang/test/Modules/asm-goto.c
@@ -5,7 +5,7 @@
 // CHECK-LABEL: define {{.*}} @foo(
 // CHECK: callbr {{.*}} "=r,!i{{.*}}()
-// CHECK-NEXT: to label %asm.fallthrough [label %indirect]
+// CHECK-NEXT: to label %asm.fallthrough [label %indirect.split]
 int bar(void) {
   return foo();
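
Note for reviewers (not part of the patch): the user-visible pattern this change addresses is an `asm goto` whose output constraints are also consumed at an indirect label, which is what test0 in the new clang/test/CodeGen/asm-goto2.c exercises. A minimal sketch, assuming an x86-64 target with GNU C extensions enabled (the function name is illustrative):

int output_on_indirect_edge(void) {
  int ret;
  /* One output constraint and one indirect destination. */
  asm goto ("" : "=r"(ret) : : : z);
  ret = 42;     /* fallthrough path overwrites the output */
z:
  return ret;   /* if the asm branched to z, ret must still hold the asm output */
}

Because EmitAsmStmt emits code top to bottom, it cannot yet tell whether the edge from the callbr to `z` is critical, so it unconditionally synthesizes a `z.split` block, emits the extractvalue/store sequence for each output there via the new EmitAsmStores helper, and retargets the callbr's indirect destination to the split block. An indirect destination that happens to equal the default destination is skipped so the stores are not emitted twice.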