Index: clang/lib/Basic/Targets/SystemZ.h =================================================================== --- clang/lib/Basic/Targets/SystemZ.h +++ clang/lib/Basic/Targets/SystemZ.h @@ -72,6 +72,28 @@ bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &info) const override; + std::string convertConstraint(const char *&Constraint) const override { + switch(Constraint[0]) { + case 'p': // Keep 'p' constraint. + return std::string("p"); + case 'Z': + switch (Constraint[1]) { + default: + break; + case 'Q': // Address with base and unsigned 12-bit displacement + case 'R': // Likewise, plus an index + case 'S': // Address with base and signed 20-bit displacement + case 'T': // Likewise, plus an index + // "^" hints LLVM that this is a two-letter constraint. + // "Constraint++" advances the string pointer + // to the next constraint. + return std::string("^") + std::string(Constraint++, 2); + } + break; + } + return TargetInfo::convertConstraint(Constraint); + } + const char *getClobbers() const override { // FIXME: Is this really right? 
return ""; Index: clang/lib/Basic/Targets/SystemZ.cpp =================================================================== --- clang/lib/Basic/Targets/SystemZ.cpp +++ clang/lib/Basic/Targets/SystemZ.cpp @@ -59,6 +59,18 @@ default: return false; + case 'Z': + switch (Name[1]) { + default: + return false; + case 'Q': // Address with base and unsigned 12-bit displacement + case 'R': // Likewise, plus an index + case 'S': // Address with base and signed 20-bit displacement + case 'T': // Likewise, plus an index + break; + } + LLVM_FALLTHROUGH; + case 'p': // Address case 'a': // Address register case 'd': // Data register (equivalent to 'r') case 'f': // Floating-point register Index: clang/test/CodeGen/SystemZ/systemz-inline-asm-03.c =================================================================== --- /dev/null +++ clang/test/CodeGen/SystemZ/systemz-inline-asm-03.c @@ -0,0 +1,147 @@ +// RUN: %clang_cc1 -triple s390x-linux-gnu -O2 -emit-llvm -o - %s 2>&1 \ +// RUN: | FileCheck %s +// REQUIRES: systemz-registered-target + +long *A; +long Idx; +unsigned long Addr; + +unsigned long fun_BD12_Q() { +// CHECK-LABEL: define{{.*}} i64 @fun_BD12_Q() +// CHECK: call i64 asm "lay $0, $1", "=r,^ZQ"(i64* nonnull %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "ZQ" (&A[100])); + return Addr; +} + +unsigned long fun_BD12_R() { +// CHECK-LABEL: define{{.*}} i64 @fun_BD12_R() +// CHECK: call i64 asm "lay $0, $1", "=r,^ZR"(i64* nonnull %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "ZR" (&A[100])); + return Addr; +} + +unsigned long fun_BD12_S() { +// CHECK-LABEL: define{{.*}} i64 @fun_BD12_S() +// CHECK: call i64 asm "lay $0, $1", "=r,^ZS"(i64* nonnull %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "ZS" (&A[100])); + return Addr; +} + +unsigned long fun_BD12_T() { +// CHECK-LABEL: define{{.*}} i64 @fun_BD12_T() +// CHECK: call i64 asm "lay $0, $1", "=r,^ZT"(i64* nonnull %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "ZT" (&A[100])); + return Addr; +} + +unsigned long fun_BD12_p() { +// 
CHECK-LABEL: define{{.*}} i64 @fun_BD12_p() +// CHECK: call i64 asm "lay $0, $1", "=r,p"(i64* nonnull %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "p" (&A[100])); + return Addr; +} + +unsigned long fun_BDX12_Q() { +// CHECK-LABEL: define{{.*}} i64 @fun_BDX12_Q() +// CHECK: call i64 asm "lay $0, $1", "=r,^ZQ"(i64* %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "ZQ" (&A[Idx + 100])); + return Addr; +} + +unsigned long fun_BDX12_R() { +// CHECK-LABEL: define{{.*}} i64 @fun_BDX12_R() +// CHECK: call i64 asm "lay $0, $1", "=r,^ZR"(i64* %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "ZR" (&A[Idx + 100])); + return Addr; +} + +unsigned long fun_BDX12_S() { +// CHECK-LABEL: define{{.*}} i64 @fun_BDX12_S() +// CHECK: call i64 asm "lay $0, $1", "=r,^ZS"(i64* %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "ZS" (&A[Idx + 100])); + return Addr; +} + +unsigned long fun_BDX12_T() { +// CHECK-LABEL: define{{.*}} i64 @fun_BDX12_T() +// CHECK: call i64 asm "lay $0, $1", "=r,^ZT"(i64* %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "ZT" (&A[Idx + 100])); + return Addr; +} + +unsigned long fun_BDX12_p() { +// CHECK-LABEL: define{{.*}} i64 @fun_BDX12_p() +// CHECK: call i64 asm "lay $0, $1", "=r,p"(i64* %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "p" (&A[Idx + 100])); + return Addr; +} + +unsigned long fun_BD20_Q() { +// CHECK-LABEL: define{{.*}} i64 @fun_BD20_Q() +// CHECK: call i64 asm "lay $0, $1", "=r,^ZQ"(i64* nonnull %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "ZQ" (&A[1000])); + return Addr; +} + +unsigned long fun_BD20_R() { +// CHECK-LABEL: define{{.*}} i64 @fun_BD20_R() +// CHECK: call i64 asm "lay $0, $1", "=r,^ZR"(i64* nonnull %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "ZR" (&A[1000])); + return Addr; +} + +unsigned long fun_BD20_S() { +// CHECK-LABEL: define{{.*}} i64 @fun_BD20_S() +// CHECK: call i64 asm "lay $0, $1", "=r,^ZS"(i64* nonnull %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "ZS" (&A[1000])); + return Addr; +} + +unsigned long fun_BD20_T() { +// CHECK-LABEL: 
define{{.*}} i64 @fun_BD20_T() +// CHECK: call i64 asm "lay $0, $1", "=r,^ZT"(i64* nonnull %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "ZT" (&A[1000])); + return Addr; +} + +unsigned long fun_BD20_p() { +// CHECK-LABEL: define{{.*}} i64 @fun_BD20_p() +// CHECK: call i64 asm "lay $0, $1", "=r,p"(i64* nonnull %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "p" (&A[1000])); + return Addr; +} + +unsigned long fun_BDX20_Q() { +// CHECK-LABEL: define{{.*}} i64 @fun_BDX20_Q() +// CHECK: call i64 asm "lay $0, $1", "=r,^ZQ"(i64* %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "ZQ" (&A[Idx + 1000])); + return Addr; +} + +unsigned long fun_BDX20_R() { +// CHECK-LABEL: define{{.*}} i64 @fun_BDX20_R() +// CHECK: call i64 asm "lay $0, $1", "=r,^ZR"(i64* %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "ZR" (&A[Idx + 1000])); + return Addr; +} + +unsigned long fun_BDX20_S() { +// CHECK-LABEL: define{{.*}} i64 @fun_BDX20_S() +// CHECK: call i64 asm "lay $0, $1", "=r,^ZS"(i64* %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "ZS" (&A[Idx + 1000])); + return Addr; +} + +unsigned long fun_BDX20_T() { +// CHECK-LABEL: define{{.*}} i64 @fun_BDX20_T() +// CHECK: call i64 asm "lay $0, $1", "=r,^ZT"(i64* %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "ZT" (&A[Idx + 1000])); + return Addr; +} + +unsigned long fun_BDX20_p() { +// CHECK-LABEL: define{{.*}} i64 @fun_BDX20_p() +// CHECK: call i64 asm "lay $0, $1", "=r,p"(i64* %arrayidx) + asm("lay %0, %1" : "=r" (Addr) : "p" (&A[Idx + 1000])); + return Addr; +} Index: llvm/include/llvm/CodeGen/TargetLowering.h =================================================================== --- llvm/include/llvm/CodeGen/TargetLowering.h +++ llvm/include/llvm/CodeGen/TargetLowering.h @@ -4265,6 +4265,11 @@ return InlineAsm::Constraint_Unknown; } + bool isAddress(StringRef ConstraintCode) const { + return getInlineAsmMemConstraint(ConstraintCode) > + InlineAsm::Constraint_FirstAddressCode; + } + /// Try to replace an X constraint, which matches anything, with another 
that /// has more specific requirements based on the type of the corresponding /// operand. This returns null if there is no replacement to make. Index: llvm/include/llvm/IR/InlineAsm.h =================================================================== --- llvm/include/llvm/IR/InlineAsm.h +++ llvm/include/llvm/IR/InlineAsm.h @@ -235,12 +235,15 @@ Kind_RegDefEarlyClobber = 3, // Early-clobber output register, "=&r". Kind_Clobber = 4, // Clobbered register, "~r". Kind_Imm = 5, // Immediate. - Kind_Mem = 6, // Memory operand, "m". + Kind_Mem = 6, // Memory operand, "m", or an address. // Memory constraint codes. // These could be tablegenerated but there's little need to do that since // there's plenty of space in the encoding to support the union of all // constraint codes for all targets. + // Addresses are included here as they need to be treated the same by the + // backend; the only difference is that they are not used to actually + // access memory by the instruction. Constraint_Unknown = 0, Constraint_es, Constraint_i, @@ -263,7 +266,15 @@ Constraint_Z, Constraint_ZC, Constraint_Zy, - Constraints_Max = Constraint_Zy, + + Constraint_FirstAddressCode, + Constraint_Addr_p, + Constraint_Addr_ZQ, + Constraint_Addr_ZR, + Constraint_Addr_ZS, + Constraint_Addr_ZT, + + Constraints_Max = Constraint_Addr_ZT, Constraints_ShiftAmount = 16, Flag_MatchingOperand = 0x80000000 @@ -448,6 +459,16 @@ return "ZC"; case InlineAsm::Constraint_Zy: return "Zy"; + case InlineAsm::Constraint_Addr_p: + return "addr_p"; + case InlineAsm::Constraint_Addr_ZQ: + return "addrp_zq"; + case InlineAsm::Constraint_Addr_ZR: + return "addrp_zr"; + case InlineAsm::Constraint_Addr_ZS: + return "addrp_zs"; + case InlineAsm::Constraint_Addr_ZT: + return "addrp_zt"; default: llvm_unreachable("Unknown memory constraint"); } Index: llvm/lib/CodeGen/CodeGenPrepare.cpp =================================================================== --- llvm/lib/CodeGen/CodeGenPrepare.cpp +++ 
llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -5627,6 +5627,7 @@ // Compute the constraint code and ConstraintType to use. TLI->ComputeConstraintToUse(OpInfo, SDValue()); + // TODO: Addresses (InlineAsm::Constraint_Addr_...) could also be handled. if (OpInfo.ConstraintType == TargetLowering::C_Memory && OpInfo.isIndirect) { Value *OpVal = CS->getArgOperand(ArgNo++); Index: llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp =================================================================== --- llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -8253,7 +8253,8 @@ return true; for (const auto &Code : Codes) - if (TLI.getConstraintType(Code) == TargetLowering::C_Memory) + if (TLI.getConstraintType(Code) == TargetLowering::C_Memory && + !TLI.isAddress(Code)) return true; return false; @@ -8526,12 +8527,14 @@ Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect; } - void update(const TargetLowering::AsmOperandInfo &OpInfo) { + void update(const TargetLowering::AsmOperandInfo &OpInfo, + const TargetLowering &TLI) { // Ideally, we would only check against memory constraints. However, the // meaning of an Other constraint can be target-specific and we can't easily // reason about it. Therefore, be conservative and set MayLoad/MayStore // for Other constraints as well. 
- if (OpInfo.ConstraintType == TargetLowering::C_Memory || + if ((OpInfo.ConstraintType == TargetLowering::C_Memory && + !TLI.isAddress(OpInfo.ConstraintCode)) || OpInfo.ConstraintType == TargetLowering::C_Other) { if (OpInfo.Type == InlineAsm::isInput) Flags |= InlineAsm::Extra_MayLoad; @@ -8634,7 +8637,7 @@ "' expects an integer constant " "expression"); - ExtraInfo.update(T); + ExtraInfo.update(T, TLI); } // We won't need to flush pending loads if this asm doesn't touch @@ -8674,7 +8677,8 @@ TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG); if (OpInfo.ConstraintType == TargetLowering::C_Memory && - OpInfo.Type == InlineAsm::isClobber) + (OpInfo.Type == InlineAsm::isClobber || + TLI.isAddress(OpInfo.ConstraintCode))) continue; // If this is a memory input, and if the operand is not indirect, do what we @@ -8738,6 +8742,10 @@ } return false; }; + assert((!TLI.isAddress(OpInfo.ConstraintCode) || + (OpInfo.Type == InlineAsm::isInput && + !OpInfo.isMatchingInputConstraint())) && + "Only address as input operand is supported."); switch (OpInfo.Type) { case InlineAsm::isOutput: @@ -8871,7 +8879,8 @@ } if (OpInfo.ConstraintType == TargetLowering::C_Memory) { - assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!"); + assert((OpInfo.isIndirect || TLI.isAddress(OpInfo.ConstraintCode)) && + "Operand must be indirect mem or address to be a mem!"); assert(InOperandVal.getValueType() == TLI.getPointerTy(DAG.getDataLayout()) && "Memory operands expect pointer values"); Index: llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp =================================================================== --- llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp +++ llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp @@ -1684,16 +1684,19 @@ llvm_unreachable("Unexpected asm memory constraint"); case InlineAsm::Constraint_i: case InlineAsm::Constraint_Q: + case InlineAsm::Constraint_Addr_ZQ: // Accept an address with a short displacement, but no index. 
Form = SystemZAddressingMode::FormBD; DispRange = SystemZAddressingMode::Disp12Only; break; case InlineAsm::Constraint_R: + case InlineAsm::Constraint_Addr_ZR: // Accept an address with a short displacement and an index. Form = SystemZAddressingMode::FormBDXNormal; DispRange = SystemZAddressingMode::Disp12Only; break; case InlineAsm::Constraint_S: + case InlineAsm::Constraint_Addr_ZS: // Accept an address with a long displacement, but no index. Form = SystemZAddressingMode::FormBD; DispRange = SystemZAddressingMode::Disp20Only; @@ -1701,6 +1704,8 @@ case InlineAsm::Constraint_T: case InlineAsm::Constraint_m: case InlineAsm::Constraint_o: + case InlineAsm::Constraint_Addr_p: + case InlineAsm::Constraint_Addr_ZT: // Accept an address with a long displacement and an index. // m works the same as T, as this is the most general case. // We don't really have any special handling of "offsettable" Index: llvm/lib/Target/SystemZ/SystemZISelLowering.h =================================================================== --- llvm/lib/Target/SystemZ/SystemZISelLowering.h +++ llvm/lib/Target/SystemZ/SystemZISelLowering.h @@ -498,6 +498,21 @@ return InlineAsm::Constraint_S; case 'T': return InlineAsm::Constraint_T; + case 'p': + return InlineAsm::Constraint_Addr_p; + } + } else if (ConstraintCode.size() == 2 && ConstraintCode[0] == 'Z') { + switch (ConstraintCode[1]) { + default: + break; + case 'Q': + return InlineAsm::Constraint_Addr_ZQ; + case 'R': + return InlineAsm::Constraint_Addr_ZR; + case 'S': + return InlineAsm::Constraint_Addr_ZS; + case 'T': + return InlineAsm::Constraint_Addr_ZT; } } return TargetLowering::getInlineAsmMemConstraint(ConstraintCode); Index: llvm/lib/Target/SystemZ/SystemZISelLowering.cpp =================================================================== --- llvm/lib/Target/SystemZ/SystemZISelLowering.cpp +++ llvm/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -1022,6 +1022,7 @@ case 'S': // Memory with base and signed 20-bit displacement case 'T': // 
Likewise, plus an index case 'm': // Equivalent to 'T'. + case 'p': // Address. return C_Memory; case 'I': // Unsigned 8-bit constant @@ -1031,6 +1032,17 @@ case 'M': // 0x7fffffff return C_Immediate; + default: + break; + } + } else if (Constraint.size() == 2 && Constraint[0] == 'Z') { + switch (Constraint[1]) { + case 'Q': // Address with base and unsigned 12-bit displacement + case 'R': // Likewise, plus an index + case 'S': // Address with base and signed 20-bit displacement + case 'T': // Likewise, plus an index + return C_Memory; + default: break; } Index: llvm/test/CodeGen/SystemZ/inline-asm-addr.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/SystemZ/inline-asm-addr.ll @@ -0,0 +1,265 @@ +; RUN: llc -mtriple=s390x-linux-gnu < %s | FileCheck %s + +@Addr = global i64 0, align 8 +@A = global i64* null, align 8 +@Idx = global i64 0, align 8 + +define i64 @fun_BD12_Q() { +; CHECK-LABEL: fun_BD12_Q: +; CHECK: #APP +; CHECK: lay %r2, 800(%r1) +entry: + %0 = load i64*, i64** @A + %arrayidx = getelementptr inbounds i64, i64* %0, i64 100 + %1 = tail call i64 asm "lay $0, $1", "=r,^ZQ"(i64* nonnull %arrayidx) + store i64 %1, i64* @Addr + ret i64 %1 +} + +define i64 @fun_BD12_R() { +; CHECK-LABEL: fun_BD12_R: +; CHECK: #APP +; CHECK: lay %r2, 800(%r1) +entry: + %0 = load i64*, i64** @A + %arrayidx = getelementptr inbounds i64, i64* %0, i64 100 + %1 = tail call i64 asm "lay $0, $1", "=r,^ZR"(i64* nonnull %arrayidx) + store i64 %1, i64* @Addr + ret i64 %1 +} + +define i64 @fun_BD12_S() { +; CHECK-LABEL: fun_BD12_S: +; CHECK: #APP +; CHECK: lay %r2, 800(%r1) +entry: + %0 = load i64*, i64** @A + %arrayidx = getelementptr inbounds i64, i64* %0, i64 100 + %1 = tail call i64 asm "lay $0, $1", "=r,^ZS"(i64* nonnull %arrayidx) + store i64 %1, i64* @Addr + ret i64 %1 +} + +define i64 @fun_BD12_T() { +; CHECK-LABEL: fun_BD12_T: +; CHECK: #APP +; CHECK: lay %r2, 800(%r1) +entry: + %0 = load i64*, i64** @A + %arrayidx = 
getelementptr inbounds i64, i64* %0, i64 100 + %1 = tail call i64 asm "lay $0, $1", "=r,^ZT"(i64* nonnull %arrayidx) + store i64 %1, i64* @Addr + ret i64 %1 +} + +define i64 @fun_BD12_p() { +; CHECK-LABEL: fun_BD12_p: +; CHECK: #APP +; CHECK: lay %r2, 800(%r1) +entry: + %0 = load i64*, i64** @A + %arrayidx = getelementptr inbounds i64, i64* %0, i64 100 + %1 = tail call i64 asm "lay $0, $1", "=r,p"(i64* nonnull %arrayidx) + store i64 %1, i64* @Addr + ret i64 %1 +} + +define i64 @fun_BDX12_Q() { +; CHECK-LABEL: fun_BDX12_Q: +; CHECK: #APP +; CHECK: lay %r2, 800(%r2) +entry: + %0 = load i64*, i64** @A + %1 = load i64, i64* @Idx + %add = add nsw i64 %1, 100 + %arrayidx = getelementptr inbounds i64, i64* %0, i64 %add + %2 = tail call i64 asm "lay $0, $1", "=r,^ZQ"(i64* %arrayidx) + store i64 %2, i64* @Addr + ret i64 %2 +} + +define i64 @fun_BDX12_R() { +; CHECK-LABEL: fun_BDX12_R: +; CHECK: #APP +; CHECK: lay %r2, 800(%r1,%r2) +entry: + %0 = load i64*, i64** @A + %1 = load i64, i64* @Idx + %add = add nsw i64 %1, 100 + %arrayidx = getelementptr inbounds i64, i64* %0, i64 %add + %2 = tail call i64 asm "lay $0, $1", "=r,^ZR"(i64* %arrayidx) + store i64 %2, i64* @Addr + ret i64 %2 +} + +define i64 @fun_BDX12_S() { +; CHECK-LABEL: fun_BDX12_S: +; CHECK: #APP +; CHECK: lay %r2, 800(%r2) +entry: + %0 = load i64*, i64** @A + %1 = load i64, i64* @Idx + %add = add nsw i64 %1, 100 + %arrayidx = getelementptr inbounds i64, i64* %0, i64 %add + %2 = tail call i64 asm "lay $0, $1", "=r,^ZS"(i64* %arrayidx) + store i64 %2, i64* @Addr + ret i64 %2 +} + +define i64 @fun_BDX12_T() { +; CHECK-LABEL: fun_BDX12_T: +; CHECK: #APP +; CHECK: lay %r2, 800(%r1,%r2) +entry: + %0 = load i64*, i64** @A + %1 = load i64, i64* @Idx + %add = add nsw i64 %1, 100 + %arrayidx = getelementptr inbounds i64, i64* %0, i64 %add + %2 = tail call i64 asm "lay $0, $1", "=r,^ZT"(i64* %arrayidx) + store i64 %2, i64* @Addr + ret i64 %2 +} + +define i64 @fun_BDX12_p() { +; CHECK-LABEL: fun_BDX12_p: +; CHECK: #APP +; 
CHECK: lay %r2, 800(%r1,%r2) +entry: + %0 = load i64*, i64** @A + %1 = load i64, i64* @Idx + %add = add nsw i64 %1, 100 + %arrayidx = getelementptr inbounds i64, i64* %0, i64 %add + %2 = tail call i64 asm "lay $0, $1", "=r,p"(i64* %arrayidx) + store i64 %2, i64* @Addr + ret i64 %2 +} + +define i64 @fun_BD20_Q() { +; CHECK-LABEL: fun_BD20_Q: +; CHECK: #APP +; CHECK: lay %r2, 0(%r2) +entry: + %0 = load i64*, i64** @A + %arrayidx = getelementptr inbounds i64, i64* %0, i64 1000 + %1 = tail call i64 asm "lay $0, $1", "=r,^ZQ"(i64* nonnull %arrayidx) + store i64 %1, i64* @Addr + ret i64 %1 +} + +define i64 @fun_BD20_R() { +; CHECK-LABEL: fun_BD20_R: +; CHECK: #APP +; CHECK: lay %r2, 0(%r2) +entry: + %0 = load i64*, i64** @A + %arrayidx = getelementptr inbounds i64, i64* %0, i64 1000 + %1 = tail call i64 asm "lay $0, $1", "=r,^ZR"(i64* nonnull %arrayidx) + store i64 %1, i64* @Addr + ret i64 %1 +} + +define i64 @fun_BD20_S() { +; CHECK-LABEL: fun_BD20_S: +; CHECK: #APP +; CHECK: lay %r2, 8000(%r1) +entry: + %0 = load i64*, i64** @A + %arrayidx = getelementptr inbounds i64, i64* %0, i64 1000 + %1 = tail call i64 asm "lay $0, $1", "=r,^ZS"(i64* nonnull %arrayidx) + store i64 %1, i64* @Addr + ret i64 %1 +} + +define i64 @fun_BD20_T() { +; CHECK-LABEL: fun_BD20_T: +; CHECK: #APP +; CHECK: lay %r2, 8000(%r1) +entry: + %0 = load i64*, i64** @A + %arrayidx = getelementptr inbounds i64, i64* %0, i64 1000 + %1 = tail call i64 asm "lay $0, $1", "=r,^ZT"(i64* nonnull %arrayidx) + store i64 %1, i64* @Addr + ret i64 %1 +} + +define i64 @fun_BD20_p() { +; CHECK-LABEL: fun_BD20_p: +; CHECK: #APP +; CHECK: lay %r2, 8000(%r1) +entry: + %0 = load i64*, i64** @A + %arrayidx = getelementptr inbounds i64, i64* %0, i64 1000 + %1 = tail call i64 asm "lay $0, $1", "=r,p"(i64* nonnull %arrayidx) + store i64 %1, i64* @Addr + ret i64 %1 +} + +define i64 @fun_BDX20_Q() { +; CHECK-LABEL: fun_BDX20_Q: +; CHECK: #APP +; CHECK: lay %r2, 0(%r1) +entry: + %0 = load i64*, i64** @A + %1 = load i64, i64* @Idx 
+ %add = add nsw i64 %1, 1000 + %arrayidx = getelementptr inbounds i64, i64* %0, i64 %add + %2 = tail call i64 asm "lay $0, $1", "=r,^ZQ"(i64* %arrayidx) + store i64 %2, i64* @Addr + ret i64 %2 +} + +define i64 @fun_BDX20_R() { +; CHECK-LABEL: fun_BDX20_R: +; CHECK: #APP +; CHECK: lay %r2, 0(%r1) +entry: + %0 = load i64*, i64** @A + %1 = load i64, i64* @Idx + %add = add nsw i64 %1, 1000 + %arrayidx = getelementptr inbounds i64, i64* %0, i64 %add + %2 = tail call i64 asm "lay $0, $1", "=r,^ZR"(i64* %arrayidx) + store i64 %2, i64* @Addr + ret i64 %2 +} + +define i64 @fun_BDX20_S() { +; CHECK-LABEL: fun_BDX20_S: +; CHECK: #APP +; CHECK: lay %r2, 8000(%r2) +entry: + %0 = load i64*, i64** @A + %1 = load i64, i64* @Idx + %add = add nsw i64 %1, 1000 + %arrayidx = getelementptr inbounds i64, i64* %0, i64 %add + %2 = tail call i64 asm "lay $0, $1", "=r,^ZS"(i64* %arrayidx) + store i64 %2, i64* @Addr + ret i64 %2 +} + +define i64 @fun_BDX20_T() { +; CHECK-LABEL: fun_BDX20_T: +; CHECK: #APP +; CHECK: lay %r2, 8000(%r1,%r2) +entry: + %0 = load i64*, i64** @A + %1 = load i64, i64* @Idx + %add = add nsw i64 %1, 1000 + %arrayidx = getelementptr inbounds i64, i64* %0, i64 %add + %2 = tail call i64 asm "lay $0, $1", "=r,^ZT"(i64* %arrayidx) + store i64 %2, i64* @Addr + ret i64 %2 +} + +define i64 @fun_BDX20_p() { +; CHECK-LABEL: fun_BDX20_p: +; CHECK: #APP +; CHECK: lay %r2, 8000(%r1,%r2) +entry: + %0 = load i64*, i64** @A + %1 = load i64, i64* @Idx + %add = add nsw i64 %1, 1000 + %arrayidx = getelementptr inbounds i64, i64* %0, i64 %add + %2 = tail call i64 asm "lay $0, $1", "=r,p"(i64* %arrayidx) + store i64 %2, i64* @Addr + ret i64 %2 +}