diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -3506,46 +3506,29 @@
   case 'i':    // Simple Integer or Relocatable Constant
   case 'n':    // Simple Integer
   case 's': {  // Relocatable Constant
-    // These operands are interested in values of the form (GV+C), where C may
-    // be folded in as an offset of GV, or it may be explicitly added.  Also, it
-    // is possible and fine if either GV or C are missing.
-    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
-    GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op);
-
-    // If we have "(add GV, C)", pull out GV/C
-    if (Op.getOpcode() == ISD::ADD) {
-      C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
-      GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(0));
-      if (!C || !GA) {
-        C = dyn_cast<ConstantSDNode>(Op.getOperand(0));
-        GA = dyn_cast<GlobalAddressSDNode>(Op.getOperand(1));
-      }
-      if (!C || !GA) {
-        C = nullptr;
-        GA = nullptr;
-      }
-    }
-
-    // If we find a valid operand, map to the TargetXXX version so that the
-    // value itself doesn't get selected.
-    if (GA) {   // Either &GV or &GV+C
-      if (ConstraintLetter != 'n') {
-        int64_t Offs = GA->getOffset();
-        if (C) Offs += C->getZExtValue();
-        Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(),
-                                                 C ? SDLoc(C) : SDLoc(),
-                                                 Op.getValueType(), Offs));
-      }
-      return;
-    }
-    if (C) {   // just C, no GV.
-      // Simple constants are not allowed for 's'.
-      if (ConstraintLetter != 's') {
-        // gcc prints these as sign extended.  Sign extend value to 64 bits
-        // now; without this it would get ZExt'd later in
-        // ScheduleDAGSDNodes::EmitNode, which is very generic.
-        Ops.push_back(DAG.getTargetConstant(C->getSExtValue(),
+
+    GlobalAddressSDNode *GA;
+    ConstantSDNode *C;
+    int64_t Offset = 0;
+
+    // Match (GA) or (C) or (GA+C) or (GA-C) or ((GA+C)+C) or (((GA+C)+C)+C),
+    // etc., since getelementpointer is variadic.
+    while (1) {
+      if ((GA = dyn_cast<GlobalAddressSDNode>(Op)) && ConstraintLetter != 'n') {
+        Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
+                                                 GA->getValueType(0),
+                                                 Offset + GA->getOffset()));
+        return;
+      } else if ((C = dyn_cast<ConstantSDNode>(Op)) && ConstraintLetter != 's') {
+        Ops.push_back(DAG.getTargetConstant(Offset + C->getSExtValue(),
                                             SDLoc(C), MVT::i64));
+        return;
+      } else if (Op.getOpcode() == ISD::ADD || Op.getOpcode() == ISD::SUB) {
+        if ((C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))) {
+          Offset += (Op.getOpcode() == ISD::ADD ? 1 : -1) * C->getZExtValue();
+          Op = Op.getOperand(0);
+          continue;
+        }
       }
       return;
     }
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -43811,40 +43811,11 @@
     // If we are in non-pic codegen mode, we allow the address of a global (with
     // an optional displacement) to be used with 'i'.
-    GlobalAddressSDNode *GA = nullptr;
-    int64_t Offset = 0;
-
-    // Match either (GA), (GA+C), (GA+C1+C2), etc.
-    while (1) {
-      if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
-        Offset += GA->getOffset();
-        break;
-      } else if (Op.getOpcode() == ISD::ADD) {
-        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
-          Offset += C->getZExtValue();
-          Op = Op.getOperand(0);
-          continue;
-        }
-      } else if (Op.getOpcode() == ISD::SUB) {
-        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
-          Offset += -C->getZExtValue();
-          Op = Op.getOperand(0);
-          continue;
-        }
-      }
-
-      // Otherwise, this isn't something we can handle, reject it.
-      return;
-    }
-
-    const GlobalValue *GV = GA->getGlobal();
-
-    // If we require an extra load to get this address, as in PIC mode, we
-    // can't accept it.
-    if (isGlobalStubReference(Subtarget.classifyGlobalReference(GV)))
-      return;
-
-    Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
-                                        GA->getValueType(0), Offset);
+    if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
+      // If we require an extra load to get this address, as in PIC mode, we
+      // can't accept it.
+      if (isGlobalStubReference(
+              Subtarget.classifyGlobalReference(GA->getGlobal())))
+        return;
     break;
   }
   }
diff --git a/llvm/test/CodeGen/AArch64/inline-asm-multilevel-gep.ll b/llvm/test/CodeGen/AArch64/inline-asm-multilevel-gep.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/inline-asm-multilevel-gep.ll
@@ -0,0 +1,12 @@
+; RUN: llc < %s -mtriple aarch64-gnu-linux | FileCheck %s
+
+; @foo is a 2d array of i32s, ex.
+; i32 foo [2][2]
+@foo = internal global [2 x [2 x i32]] zeroinitializer, align 4
+
+define void @bar() {
+; access foo[1][1]
+; CHECK: // foo+12
+  tail call void asm sideeffect "// ${0:c}", "i"(i32* getelementptr inbounds ([2 x [2 x i32]], [2 x [2 x i32]]* @foo, i64 0, i64 1, i64 1))
+  ret void
+}
diff --git a/llvm/test/CodeGen/PowerPC/inline-asm-multilevel-gep.ll b/llvm/test/CodeGen/PowerPC/inline-asm-multilevel-gep.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/inline-asm-multilevel-gep.ll
@@ -0,0 +1,12 @@
+; RUN: llc < %s -mtriple ppc32-- | FileCheck %s
+
+; @foo is a 2d array of i32s, ex.
+; i32 foo [2][2]
+@foo = internal global [2 x [2 x i32]] zeroinitializer, align 4
+
+define void @bar() {
+; access foo[1][1]
+; CHECK: # foo+12
+  tail call void asm sideeffect "# ${0:c}", "i"(i32* getelementptr inbounds ([2 x [2 x i32]], [2 x [2 x i32]]* @foo, i64 0, i64 1, i64 1))
+  ret void
+}
diff --git a/llvm/test/CodeGen/X86/inline-asm-multilevel-gep.ll b/llvm/test/CodeGen/X86/inline-asm-multilevel-gep.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/X86/inline-asm-multilevel-gep.ll
@@ -0,0 +1,12 @@
+; RUN: llc < %s -mtriple x86_64-gnu-linux | FileCheck %s
+
+; @foo is a 2d array of i32s, ex.
+; i32 foo [2][2]
+@foo = internal global [2 x [2 x i32]] zeroinitializer, align 4
+
+define void @bar() {
+; access foo[1][1]
+; CHECK: # foo+12
+  tail call void asm sideeffect "# ${0:c}", "i"(i32* getelementptr inbounds ([2 x [2 x i32]], [2 x [2 x i32]]* @foo, i64 0, i64 1, i64 1))
+  ret void
+}