diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp --- a/clang/lib/CodeGen/CGStmt.cpp +++ b/clang/lib/CodeGen/CGStmt.cpp @@ -2013,36 +2013,95 @@ return Result; } -/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared -/// as using a particular register add that as a constraint that will be used -/// in this asm stmt. -static std::string -AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, - const TargetInfo &Target, CodeGenModule &CGM, - const AsmStmt &Stmt, const bool EarlyClobber, - std::string *GCCReg = nullptr) { +/// Is it valid to apply a register constraint for a variable marked with +/// the "register asm" construct? +/// If so, "Register" is optionally set to the +/// register name. +static bool +ShouldApplyRegisterVariableConstraint(const Expr &AsmExpr, + std::string *Register = nullptr) { + const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr); if (!AsmDeclRef) - return Constraint; + return false; const ValueDecl &Value = *AsmDeclRef->getDecl(); const VarDecl *Variable = dyn_cast<VarDecl>(&Value); if (!Variable) - return Constraint; + return false; if (Variable->getStorageClass() != SC_Register) - return Constraint; + return false; AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>(); if (!Attr) + return false; + + if (Register != nullptr) + // Set the register to return from Attr. + *Register = Attr->getLabel().str(); + return true; +} + +/// AddVariableConstraints: +/// Look at AsmExpr and if it is a variable declared as using a particular +/// register add that as a constraint that will be used in this asm stmt. +/// Whether it can be used or not depends on querying +/// ShouldApplyRegisterVariableConstraint(). Also check whether the "hard +/// register" inline asm constraint (i.e. "{reg-name}") is specified. If so, add +/// that as a constraint that will be used in this asm stmt. +static std::string +AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr, + const TargetInfo &Target, CodeGenModule &CGM, + const AsmStmt &Stmt, const bool EarlyClobber, + std::string *GCCReg = nullptr) { + // Do we have the "hard register" inline asm constraint? + bool ApplyHardRegisterConstraint = + Constraint[0] == '{' || (EarlyClobber && Constraint[1] == '{'); + + // Do we have "register asm" on a variable? + std::string Reg = ""; + bool ApplyRegisterVariableConstraint = + ShouldApplyRegisterVariableConstraint(AsmExpr, &Reg); + + // Diagnose the scenario where we apply both the register variable constraint + // and the hard register constraint as an unsupported error. + // Why? Because we could have a situation where the register passed in through + // {...} and the register passed in through the "register asm" construct could + // be different, and in this case, there's no way for the compiler to know + // which one to emit. + // FIXME: Should we add one additional level of granularity, where the compiler + // can forgive the user if they mention the same register to be used through + // both "register asm" and the hard register inline asm constraint ({...})? + if (ApplyHardRegisterConstraint && ApplyRegisterVariableConstraint) { + CGM.ErrorUnsupported(&Stmt, "__asm__"); return Constraint; - StringRef Register = Attr->getLabel(); - assert(Target.isValidGCCRegisterName(Register)); + } + + if (!ApplyHardRegisterConstraint && !ApplyRegisterVariableConstraint) + return Constraint; + // We're using validateOutputConstraint here because we only care if // this is a register constraint.
TargetInfo::ConstraintInfo Info(Constraint, ""); - if (Target.validateOutputConstraint(Info) && - !Info.allowsRegister()) { + if (Target.validateOutputConstraint(Info) && !Info.allowsRegister()) { CGM.ErrorUnsupported(&Stmt, "__asm__"); return Constraint; } + + if (ApplyHardRegisterConstraint) { + int Start = EarlyClobber ? 2 : 1; + int End = Constraint.find('}'); + Reg = Constraint.substr(Start, End - Start); + // If we don't have a valid register name, simply return the constraint. + // For example: There are some targets like X86 that use a constraint such + // as "@cca", which is validated and then converted into {@cca}. Now this + // isn't necessarily a "GCC Register", but in terms of emission, it is + // valid since it is lowered appropriately in the X86 backend. For the {..} + // constraint, we shouldn't be too strict and error out if the register + // itself isn't a valid "GCC register". + if (!Target.isValidGCCRegisterName(Reg)) + return Constraint; + } + + StringRef Register(Reg); // Canonicalize the register here before returning it. Register = Target.getNormalizedGCCRegisterName(Register); if (GCCReg != nullptr) diff --git a/clang/test/CodeGen/SystemZ/systemz-inline-asm-02.c b/clang/test/CodeGen/SystemZ/systemz-inline-asm-02.c --- a/clang/test/CodeGen/SystemZ/systemz-inline-asm-02.c +++ b/clang/test/CodeGen/SystemZ/systemz-inline-asm-02.c @@ -5,9 +5,15 @@ // Test that an error is given if a physreg is defined by multiple operands. int test_physreg_defs(void) { register int l __asm__("r7") = 0; + int m; // CHECK: error: multiple outputs to hard register: r7 - __asm__("" : "+r"(l), "=r"(l)); + __asm__("" + : "+r"(l), "=r"(l)); - return l; + // CHECK: error: multiple outputs to hard register: r6 + __asm__("" + : "+{r6}"(m), "={r6}"(m)); + + return l + m; } diff --git a/clang/test/CodeGen/SystemZ/systemz-inline-asm.c b/clang/test/CodeGen/SystemZ/systemz-inline-asm.c --- a/clang/test/CodeGen/SystemZ/systemz-inline-asm.c +++ b/clang/test/CodeGen/SystemZ/systemz-inline-asm.c @@ -134,12 +134,25 @@ int test_physregs(void) { // CHECK-LABEL: define{{.*}} signext i32 @test_physregs() register int l __asm__("r7") = 0; + int m = 0; // CHECK: call i32 asm "lr $0, $1", "={r7},{r7}" - __asm__("lr %0, %1" : "+r"(l)); + __asm__("lr %0, %1" + : "+r"(l)); // CHECK: call i32 asm "$0 $1 $2", "={r7},{r7},{r7}" - __asm__("%0 %1 %2" : "+r"(l) : "r"(l)); + __asm__("%0 %1 %2" + : "+r"(l) + : "r"(l)); - return l; + // CHECK: call i32 asm "lr $0, $1", "={r6},{r6}" + __asm__("lr %0, %1" + : "+{r6}"(m)); + + // CHECK: call i32 asm "$0 $1 $2", "={r6},{r6},{r6}" + __asm__("%0 %1 %2" + : "+{r6}"(m) + : "{r6}"(m)); + + return l + m; } diff --git a/clang/test/CodeGen/aarch64-inline-asm.c b/clang/test/CodeGen/aarch64-inline-asm.c --- a/clang/test/CodeGen/aarch64-inline-asm.c +++ b/clang/test/CodeGen/aarch64-inline-asm.c @@ -77,6 +77,14 @@ void test_tied_earlyclobber(void) { register int a asm("x1"); - asm("" : "+&r"(a)); + asm("" + : "+&r"(a)); + // CHECK: call i32 asm "", "=&{x1},0"(i32 %0) +} + +void test_tied_earlyclobber2(void) { + int a; + asm("" + : "+&{x1}"(a)); // CHECK: call i32 asm "", "=&{x1},0"(i32 %0) } diff --git a/clang/test/CodeGen/asm-goto.c b/clang/test/CodeGen/asm-goto.c --- a/clang/test/CodeGen/asm-goto.c +++ b/clang/test/CodeGen/asm-goto.c @@ -55,14 +55,18 @@ int test4(int out1, int out2) { - // CHECK-LABEL: define{{.*}} i32 @test4( - // CHECK: callbr { i32, i32 } asm sideeffect "jne ${3:l}", "={si},={di},r,X,X,0,1 + // CHECK: callbr { i32, i32 } asm sideeffect "jne ${3:l}",
"={si},={di},r,X,X,{si},{di} // CHECK: to label %asm.fallthrough [label %label_true, label %loop] // CHECK-LABEL: asm.fallthrough: if (out1 < out2) - asm volatile goto("jne %l3" : "+S"(out1), "+D"(out2) : "r"(out1) :: label_true, loop); + asm volatile goto("jne %l3" + : "+S"(out1), "+D"(out2) + : "r"(out1)::label_true, loop); else - asm volatile goto("jne %l5" : "+S"(out1), "+D"(out2) : "r"(out1), "r"(out2) :: label_true, loop); - // CHECK: callbr { i32, i32 } asm sideeffect "jne ${5:l}", "={si},={di},r,r,X,X,0,1 + asm volatile goto("jne %l5" + : "+S"(out1), "+D"(out2) + : "r"(out1), "r"(out2)::label_true, loop); + // CHECK: callbr { i32, i32 } asm sideeffect "jne ${5:l}", "={si},={di},r,r,X,X,{si},{di} // CHECK: to label %asm.fallthrough2 [label %label_true, label %loop] // CHECK-LABEL: asm.fallthrough2: return out1 + out2; @@ -92,7 +96,7 @@ int test6(int out1) { // CHECK-LABEL: define{{.*}} i32 @test6( - // CHECK: callbr i32 asm sideeffect "testl $0, $0; testl $1, $1; jne ${2:l}", "={si},r,X,X,0,{{.*}} i8* blockaddress(@test6, %label_true), i8* blockaddress(@test6, %landing) + // CHECK: callbr i32 asm sideeffect "testl $0, $0; testl $1, $1; jne ${2:l}", "={si},r,X,X,{si},{{.*}} i8* blockaddress(@test6, %label_true), i8* blockaddress(@test6, %landing) // CHECK: to label %asm.fallthrough [label %label_true, label %landing] // CHECK-LABEL: asm.fallthrough: // CHECK-LABEL: landing: diff --git a/clang/test/CodeGen/ms-intrinsics.c b/clang/test/CodeGen/ms-intrinsics.c --- a/clang/test/CodeGen/ms-intrinsics.c +++ b/clang/test/CodeGen/ms-intrinsics.c @@ -36,12 +36,12 @@ return __movsb(Dest, Src, Count); } // CHECK-I386-LABEL: define{{.*}} void @test__movsb -// CHECK-I386: call { i8*, i8*, i32 } asm sideeffect "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i8* %Dest, i8* %Src, i32 %Count) +// CHECK-I386: call { i8*, i8*, i32 } asm sideeffect "rep movsb", "={di},={si},={cx},{di},{si},{cx},~{memory},~{dirflag},~{fpsr},~{flags}"(i8* %Dest, i8* %Src, i32 %Count) // CHECK-I386: ret void // CHECK-I386: } // CHECK-X64-LABEL: define{{.*}} void @test__movsb -// CHECK-X64: call { i8*, i8*, i64 } asm sideeffect "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i8* %Dest, i8* %Src, i64 %Count) +// CHECK-X64: call { i8*, i8*, i64 } asm sideeffect "rep movsb", "={di},={si},={cx},{di},{si},{cx},~{memory},~{dirflag},~{fpsr},~{flags}"(i8* %Dest, i8* %Src, i64 %Count) // CHECK-X64: ret void // CHECK-X64: } @@ -49,12 +49,12 @@ return __stosw(Dest, Data, Count); } // CHECK-I386-LABEL: define{{.*}} void @test__stosw -// CHECK-I386: call { i16*, i32 } asm sideeffect "rep stosw", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i16 %Data, i16* %Dest, i32 %Count) +// CHECK-I386: call { i16*, i32 } asm sideeffect "rep stosw", "={di},={cx},{ax},{di},{cx},~{memory},~{dirflag},~{fpsr},~{flags}"(i16 %Data, i16* %Dest, i32 %Count) // CHECK-I386: ret void // CHECK-I386: } // CHECK-X64-LABEL: define{{.*}} void @test__stosw -// CHECK-X64: call { i16*, i64 } asm sideeffect "rep stosw", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i16 %Data, i16* %Dest, i64 %Count) +// CHECK-X64: call { i16*, i64 } asm sideeffect "rep stosw", "={di},={cx},{ax},{di},{cx},~{memory},~{dirflag},~{fpsr},~{flags}"(i16 %Data, i16* %Dest, i64 %Count) // CHECK-X64: ret void // CHECK-X64: } @@ -62,12 +62,12 @@ return __movsw(Dest, Src, Count); } // CHECK-I386-LABEL: define{{.*}} void @test__movsw -// CHECK-I386: call { i16*, i16*, i32 } asm sideeffect "rep movsw", 
"={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i16* %Dest, i16* %Src, i32 %Count) +// CHECK-I386: call { i16*, i16*, i32 } asm sideeffect "rep movsw", "={di},={si},={cx},{di},{si},{cx},~{memory},~{dirflag},~{fpsr},~{flags}"(i16* %Dest, i16* %Src, i32 %Count) // CHECK-I386: ret void // CHECK-I386: } // CHECK-X64-LABEL: define{{.*}} void @test__movsw -// CHECK-X64: call { i16*, i16*, i64 } asm sideeffect "rep movsw", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i16* %Dest, i16* %Src, i64 %Count) +// CHECK-X64: call { i16*, i16*, i64 } asm sideeffect "rep movsw", "={di},={si},={cx},{di},{si},{cx},~{memory},~{dirflag},~{fpsr},~{flags}"(i16* %Dest, i16* %Src, i64 %Count) // CHECK-X64: ret void // CHECK-X64: } @@ -75,12 +75,12 @@ return __stosd(Dest, Data, Count); } // CHECK-I386-LABEL: define{{.*}} void @test__stosd -// CHECK-I386: call { i32*, i32 } asm sideeffect "rep stosl", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i32 %Data, i32* %Dest, i32 %Count) +// CHECK-I386: call { i32*, i32 } asm sideeffect "rep stosl", "={di},={cx},{ax},{di},{cx},~{memory},~{dirflag},~{fpsr},~{flags}"(i32 %Data, i32* %Dest, i32 %Count) // CHECK-I386: ret void // CHECK-I386: } // CHECK-X64-LABEL: define{{.*}} void @test__stosd -// CHECK-X64: call { i32*, i64 } asm sideeffect "rep stosl", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i32 %Data, i32* %Dest, i64 %Count) +// CHECK-X64: call { i32*, i64 } asm sideeffect "rep stosl", "={di},={cx},{ax},{di},{cx},~{memory},~{dirflag},~{fpsr},~{flags}"(i32 %Data, i32* %Dest, i64 %Count) // CHECK-X64: ret void // CHECK-X64: } @@ -88,12 +88,12 @@ return __movsd(Dest, Src, Count); } // CHECK-I386-LABEL: define{{.*}} void @test__movsd -// CHECK-I386: call { i32*, i32*, i32 } asm sideeffect "rep movsl", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Dest, i32* %Src, i32 %Count) +// CHECK-I386: call { i32*, i32*, i32 } asm sideeffect "rep movsl", "={di},={si},={cx},{di},{si},{cx},~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Dest, i32* %Src, i32 %Count) // CHECK-I386: ret void // CHECK-I386: } // CHECK-X64-LABEL: define{{.*}} void @test__movsd -// CHECK-X64: call { i32*, i32*, i64 } asm sideeffect "rep movsl", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Dest, i32* %Src, i64 %Count) +// CHECK-X64: call { i32*, i32*, i64 } asm sideeffect "rep movsl", "={di},={si},={cx},{di},{si},{cx},~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Dest, i32* %Src, i64 %Count) // CHECK-X64: ret void // CHECK-X64: } @@ -102,7 +102,7 @@ return __stosq(Dest, Data, Count); } // CHECK-X64-LABEL: define{{.*}} void @test__stosq -// CHECK-X64: call { i64*, i64 } asm sideeffect "rep stosq", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i64 %Data, i64* %Dest, i64 %Count) +// CHECK-X64: call { i64*, i64 } asm sideeffect "rep stosq", "={di},={cx},{ax},{di},{cx},~{memory},~{dirflag},~{fpsr},~{flags}"(i64 %Data, i64* %Dest, i64 %Count) // CHECK-X64: ret void // CHECK-X64: } @@ -110,7 +110,7 @@ return __movsq(Dest, Src, Count); } // CHECK-X64-LABEL: define{{.*}} void @test__movsq -// CHECK-X64: call { i64*, i64*, i64 } asm sideeffect "rep movsq", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %Dest, i64* %Src, i64 %Count) +// CHECK-X64: call { i64*, i64*, i64 } asm sideeffect "rep movsq", "={di},={si},={cx},{di},{si},{cx},~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %Dest, i64* %Src, i64 %Count) // CHECK-X64: ret void // CHECK-X64: } #endif @@ -636,14 +636,14 @@ } 
long test_InterlockedCompareExchange_HLEAcquire(long volatile *Destination, long Exchange, long Comparand) { -// CHECK-INTEL: define{{.*}} i32 @test_InterlockedCompareExchange_HLEAcquire(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comparand) -// CHECK-INTEL: call i32 asm sideeffect ".byte 0xf2 ; lock ; cmpxchg $2, $1", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Destination, i32 %Exchange, i32 %Comparand, i32* %Destination) + // CHECK-INTEL: define{{.*}} i32 @test_InterlockedCompareExchange_HLEAcquire(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comparand) + // CHECK-INTEL: call i32 asm sideeffect ".byte 0xf2 ; lock ; cmpxchg $2, $1", "={ax},=*m,r,{ax},*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Destination, i32 %Exchange, i32 %Comparand, i32* %Destination) return _InterlockedCompareExchange_HLEAcquire(Destination, Exchange, Comparand); } long test_InterlockedCompareExchange_HLERelease(long volatile *Destination, - long Exchange, long Comparand) { -// CHECK-INTEL: define{{.*}} i32 @test_InterlockedCompareExchange_HLERelease(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comparand) -// CHECK-INTEL: call i32 asm sideeffect ".byte 0xf3 ; lock ; cmpxchg $2, $1", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Destination, i32 %Exchange, i32 %Comparand, i32* %Destination) + long Exchange, long Comparand) { + // CHECK-INTEL: define{{.*}} i32 @test_InterlockedCompareExchange_HLERelease(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comparand) + // CHECK-INTEL: call i32 asm sideeffect ".byte 0xf3 ; lock ; cmpxchg $2, $1", "={ax},=*m,r,{ax},*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i32* %Destination, i32 %Exchange, i32 %Comparand, i32* %Destination) return _InterlockedCompareExchange_HLERelease(Destination, Exchange, Comparand); } #endif @@ -660,14 +660,14 @@ } __int64 test_InterlockedCompareExchange64_HLEAcquire(__int64 volatile *Destination, __int64 Exchange, __int64 Comparand) { -// CHECK-X64: define{{.*}} i64 @test_InterlockedCompareExchange64_HLEAcquire(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comparand) -// CHECK-X64: call i64 asm sideeffect ".byte 0xf2 ; lock ; cmpxchg $2, $1", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %Destination, i64 %Exchange, i64 %Comparand, i64* %Destination) + // CHECK-X64: define{{.*}} i64 @test_InterlockedCompareExchange64_HLEAcquire(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comparand) + // CHECK-X64: call i64 asm sideeffect ".byte 0xf2 ; lock ; cmpxchg $2, $1", "={ax},=*m,r,{ax},*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %Destination, i64 %Exchange, i64 %Comparand, i64* %Destination) return _InterlockedCompareExchange64_HLEAcquire(Destination, Exchange, Comparand); } __int64 test_InterlockedCompareExchange64_HLERelease(__int64 volatile *Destination, __int64 Exchange, __int64 Comparand) { -// CHECK-X64: define{{.*}} i64 @test_InterlockedCompareExchange64_HLERelease(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comparand) -// CHECK-X64: call i64 asm sideeffect ".byte 0xf3 ; lock ; cmpxchg $2, $1", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %Destination, i64 %Exchange, i64 %Comparand, i64* %Destination) + // CHECK-X64: define{{.*}} i64 @test_InterlockedCompareExchange64_HLERelease(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comparand) + // CHECK-X64: call i64 asm 
sideeffect ".byte 0xf3 ; lock ; cmpxchg $2, $1", "={ax},=*m,r,{ax},*m,~{memory},~{dirflag},~{fpsr},~{flags}"(i64* %Destination, i64 %Exchange, i64 %Comparand, i64* %Destination) return _InterlockedCompareExchange64_HLERelease(Destination, Exchange, Comparand); } #endif diff --git a/clang/test/CodeGen/z-hard-register-inline-asm.c b/clang/test/CodeGen/z-hard-register-inline-asm.c new file mode 100644 --- /dev/null +++ b/clang/test/CodeGen/z-hard-register-inline-asm.c @@ -0,0 +1,48 @@ +// RUN: %clang_cc1 -triple s390x-ibm-linux -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple s390x-ibm-zos -emit-llvm -o - %s | FileCheck %s + +void f1() { + int a, b; + register int c asm("r1"); + register int d asm("r2"); + + // CHECK-COUNT-2: call i32 asm "lhi $0,5\0A", "={r1}" + __asm("lhi %0,5\n" + : "={r1}"(a) + : + :); + __asm("lhi %0,5\n" + : "=r"(c) + : + :); + + // CHECK-COUNT-2: call i32 asm "lgr $0,$1\0A", "={r1},{r2}" + __asm("lgr %0,%1\n" + : "={r1}"(a) + : "{r2}"(b) + :); + __asm("lgr %0,%1\n" + : "=r"(c) + : "r"(d) + :); + + // CHECK-COUNT-2: call i32 asm "lgr $0,$1\0A", "={r1},{r2}" + __asm("lgr %0,%1\n" + : "={%r1}"(a) + : "{%r2}"(b) + :); + __asm("lgr %0,%1\n" + : "={r1}"(a) + : "{%r2}"(b) + :); + + // CHECK-COUNT-2: call i32 asm "lgr $0,$1\0A", "=&{r1},{r2}" + __asm("lgr %0,%1\n" + : "=&{r1}"(a) + : "{%r2}"(b) + :); + __asm("lgr %0,%1\n" + : "=&r"(c) + : "r"(d) + :); +}