diff --git a/llvm/lib/Target/VE/VEAsmPrinter.cpp b/llvm/lib/Target/VE/VEAsmPrinter.cpp
--- a/llvm/lib/Target/VE/VEAsmPrinter.cpp
+++ b/llvm/lib/Target/VE/VEAsmPrinter.cpp
@@ -63,6 +63,8 @@
   void printOperand(const MachineInstr *MI, int OpNum, raw_ostream &OS);
   bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
                        const char *ExtraCode, raw_ostream &O) override;
+  bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+                             const char *ExtraCode, raw_ostream &O) override;
 };
 } // end of anonymous namespace
 
@@ -362,6 +364,9 @@
   case MachineOperand::MO_Register:
     O << "%" << StringRef(getRegisterName(MO.getReg())).lower();
     break;
+  case MachineOperand::MO_Immediate:
+    O << (int)MO.getImm();
+    break;
   default:
     llvm_unreachable("<unknown operand type>");
   }
@@ -389,6 +394,34 @@
   return false;
 }
 
+bool VEAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+                                         const char *ExtraCode,
+                                         raw_ostream &O) {
+  if (ExtraCode && ExtraCode[0])
+    return true; // Unknown modifier
+
+  if (MI->getOperand(OpNo+1).isImm() &&
+      MI->getOperand(OpNo+1).getImm() == 0) {
+    // don't print "+0"
+  } else {
+    printOperand(MI, OpNo+1, O);
+  }
+  if (MI->getOperand(OpNo).isImm() &&
+      MI->getOperand(OpNo).getImm() == 0) {
+    if (MI->getOperand(OpNo+1).isImm() &&
+        MI->getOperand(OpNo+1).getImm() == 0) {
+      O << "0";
+    } else {
+      // don't print "(0)"
+    }
+  } else {
+    O << "(";
+    printOperand(MI, OpNo, O);
+    O << ")";
+  }
+  return false;
+}
+
 // Force static initialization.
 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeVEAsmPrinter() {
   RegisterAsmPrinter<VEAsmPrinter> X(getTheVETarget());
diff --git a/llvm/lib/Target/VE/VEISelDAGToDAG.cpp b/llvm/lib/Target/VE/VEISelDAGToDAG.cpp
--- a/llvm/lib/Target/VE/VEISelDAGToDAG.cpp
+++ b/llvm/lib/Target/VE/VEISelDAGToDAG.cpp
@@ -142,6 +142,12 @@
   bool selectADDRri(SDValue N, SDValue &Base, SDValue &Offset);
   bool selectADDRzi(SDValue N, SDValue &Base, SDValue &Offset);
 
+  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
+  /// inline asm expressions.
+  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+                                    unsigned ConstraintID,
+                                    std::vector<SDValue> &OutOps) override;
+
   StringRef getPassName() const override {
     return "VE DAG->DAG Pattern Instruction Selection";
   }
@@ -380,6 +386,33 @@
   SelectCode(N);
 }
 
+/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
+/// inline asm expressions.
+bool
+VEDAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op,
+                                             unsigned ConstraintID,
+                                             std::vector<SDValue> &OutOps) {
+  SDValue Op0, Op1;
+  switch (ConstraintID) {
+  default:
+    llvm_unreachable("Unexpected asm memory constraint");
+  case InlineAsm::Constraint_o:
+  case InlineAsm::Constraint_m: // memory
+    // Try to match ADDRri since reg+imm style is safe for all VE instructions
+    // with a memory operand.
+    if (selectADDRri(Op, Op0, Op1)) {
+      OutOps.push_back(Op0);
+      OutOps.push_back(Op1);
+      return false;
+    }
+    // Otherwise, require the address to be in a register and immediate 0.
+    OutOps.push_back(Op);
+    OutOps.push_back(CurDAG->getTargetConstant(0, SDLoc(Op), MVT::i32));
+    return false;
+  }
+  return true;
+}
+
 SDNode *VEDAGToDAGISel::getGlobalBaseReg() {
   Register GlobalBaseReg = Subtarget->getInstrInfo()->getGlobalBaseReg(MF);
   return CurDAG
diff --git a/llvm/lib/Target/VE/VERegisterInfo.cpp b/llvm/lib/Target/VE/VERegisterInfo.cpp
--- a/llvm/lib/Target/VE/VERegisterInfo.cpp
+++ b/llvm/lib/Target/VE/VERegisterInfo.cpp
@@ -121,6 +121,7 @@
 {
   using namespace llvm::VE;
   switch (MI.getOpcode()) {
+  case INLINEASM:
   case RRCAS_multi_cases(TS1AML):
   case RRCAS_multi_cases(TS1AMW):
   case RRCAS_multi_cases(CASL):
diff --git a/llvm/test/CodeGen/VE/Scalar/inlineasm-mem-gv.ll b/llvm/test/CodeGen/VE/Scalar/inlineasm-mem-gv.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/VE/Scalar/inlineasm-mem-gv.ll
@@ -0,0 +1,17 @@
+; RUN: llc < %s -mtriple=ve | FileCheck %s
+
+@A = dso_local global i64 0, align 8
+
+define i64 @leam(i64 %x) nounwind {
+; CHECK-LABEL: leam:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    lea %s0, A@lo
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s0, A@hi(, %s0)
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    lea %s0, (%s0)
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    b.l.t (, %s10)
+  %asmtmp = tail call i64 asm "lea $0, $1", "=r,*m"(i64* elementtype(i64) @A) nounwind
+  ret i64 %asmtmp
+}
diff --git a/llvm/test/CodeGen/VE/Scalar/inlineasm-mem-lo.ll b/llvm/test/CodeGen/VE/Scalar/inlineasm-mem-lo.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/VE/Scalar/inlineasm-mem-lo.ll
@@ -0,0 +1,14 @@
+; RUN: llc < %s -mtriple=ve | FileCheck %s
+
+define i64 @leam(i64 %x) nounwind {
+; CHECK-LABEL: leam:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    lea %s0, 8(%s11)
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    adds.l %s11, 16, %s11
+; CHECK-NEXT:    b.l.t (, %s10)
+  %z = alloca i64, align 8
+  %asmtmp = tail call i64 asm "lea $0, $1", "=r,*m"(i64* elementtype(i64) %z) nounwind
+  ret i64 %asmtmp
+}
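
For context, a minimal C-level sketch of the source this lowering serves; it is illustrative only (the function and variable names are hypothetical) and mirrors the "=r,*m" IR used in the tests above:

    // Hypothetical example. When compiled for the VE target, the "m" constraint
    // is matched by SelectInlineAsmMemoryOperand (ADDRri if possible, otherwise
    // the address register with a zero displacement) and is printed by
    // PrintAsmMemoryOperand in the "disp(base)" form shown in the CHECK lines.
    long A;
    long load_addr_of_A(void) {
      long out;
      __asm__("lea %0, %1" : "=r"(out) : "m"(A));
      return out;
    }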