diff --git a/llvm/lib/Target/VE/VEAsmPrinter.cpp b/llvm/lib/Target/VE/VEAsmPrinter.cpp
--- a/llvm/lib/Target/VE/VEAsmPrinter.cpp
+++ b/llvm/lib/Target/VE/VEAsmPrinter.cpp
@@ -60,6 +60,9 @@
   static const char *getRegisterName(unsigned RegNo) {
     return VEInstPrinter::getRegisterName(RegNo);
   }
+  void printOperand(const MachineInstr *MI, int OpNum, raw_ostream &OS);
+  bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+                       const char *ExtraCode, raw_ostream &O) override;
 };
 } // end of anonymous namespace
 
@@ -349,6 +352,40 @@
   } while ((++I != E) && I->isInsideBundle()); // Delay slot check.
 }
 
+void VEAsmPrinter::printOperand(const MachineInstr *MI, int OpNum,
+                                raw_ostream &O) {
+  const MachineOperand &MO = MI->getOperand(OpNum);
+
+  switch (MO.getType()) {
+  case MachineOperand::MO_Register:
+    O << "%" << StringRef(getRegisterName(MO.getReg())).lower();
+    break;
+  default:
+    llvm_unreachable("<unknown operand type>");
+  }
+}
+
+// PrintAsmOperand - Print out an operand for an inline asm expression.
+bool VEAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+                                   const char *ExtraCode, raw_ostream &O) {
+  if (ExtraCode && ExtraCode[0]) {
+    if (ExtraCode[1] != 0)
+      return true; // Unknown modifier.
+
+    switch (ExtraCode[0]) {
+    default:
+      // See if this is a generic print operand
+      return AsmPrinter::PrintAsmOperand(MI, OpNo, ExtraCode, O);
+    case 'r':
+      break;
+    }
+  }
+
+  printOperand(MI, OpNo, O);
+
+  return false;
+}
+
 // Force static initialization.
 extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeVEAsmPrinter() {
   RegisterAsmPrinter<VEAsmPrinter> X(getTheVETarget());
diff --git a/llvm/lib/Target/VE/VEISelLowering.h b/llvm/lib/Target/VE/VEISelLowering.h
--- a/llvm/lib/Target/VE/VEISelLowering.h
+++ b/llvm/lib/Target/VE/VEISelLowering.h
@@ -126,6 +126,14 @@
                                       MachineMemOperand::Flags Flags,
                                       bool *Fast) const override;
 
+  /// Inline Assembly {
+
+  std::pair<unsigned, const TargetRegisterClass *>
+  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+                               StringRef Constraint, MVT VT) const override;
+
+  /// } Inline Assembly
+
   /// Target Optimization {
 
   // SX-Aurora VE's s/udiv is 5-9 times slower than multiply.
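Note (reviewer sketch, not part of the patch): a minimal C++ snippet that would exercise the new asm-printer hooks above, assuming a clang front end targeting the ve triple; the function name and the 8-byte offset are made up. Each "r"-constrained operand reaches VEAsmPrinter::PrintAsmOperand, and printOperand writes the allocated scalar register in lower case (e.g. "%s0"), which is the form the test added below checks for.

// Hypothetical user code (not in this patch): GCC-style extended asm with
// "r" constraints. The asm printer substitutes the allocated VE scalar
// registers for %0 and %1 inside the emitted #APP/#NO_APP block.
long lea_offset(long x) {
  long y;
  asm("lea %0, 8(%1)" : "=r"(y) : "r"(x)); // emits e.g. "lea %s0, 8(%s0)"
  return y;
}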
diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -1546,3 +1546,26 @@
 
   return SDValue();
 }
+
+//===----------------------------------------------------------------------===//
+// VE Inline Assembly Support
+//===----------------------------------------------------------------------===//
+
+std::pair<unsigned, const TargetRegisterClass *>
+VETargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+                                               StringRef Constraint,
+                                               MVT VT) const {
+  const TargetRegisterClass *RC = nullptr;
+  if (Constraint.size() == 1) {
+    switch (Constraint[0]) {
+    default:
+      return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
+    case 'r':
+      RC = &VE::I64RegClass;
+      break;
+    }
+    return std::make_pair(0U, RC);
+  }
+
+  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
+}
diff --git a/llvm/test/CodeGen/VE/Scalar/inlineasm-lea.ll b/llvm/test/CodeGen/VE/Scalar/inlineasm-lea.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/VE/Scalar/inlineasm-lea.ll
@@ -0,0 +1,56 @@
+; RUN: llc < %s -mtriple=ve | FileCheck %s
+
+define i64 @lea1a(i64 %x) nounwind {
+; CHECK-LABEL: lea1a:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    lea %s0, (%s0)
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    or %s11, 0, %s9
+  %asmtmp = tail call i64 asm "lea $0, ($1)", "=r,r"(i64 %x) nounwind
+  ret i64 %asmtmp
+}
+
+define i64 @lea1b(i64 %x) nounwind {
+; CHECK-LABEL: lea1b:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    lea %s0, (, %s0)
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    or %s11, 0, %s9
+  %asmtmp = tail call i64 asm "lea $0, (, $1)", "=r,r"(i64 %x) nounwind
+  ret i64 %asmtmp
+}
+
+define i64 @lea2(i64 %x, i64 %y) nounwind {
+; CHECK-LABEL: lea2:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    lea %s0, (%s0, %s1)
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    or %s11, 0, %s9
+  %asmtmp = tail call i64 asm "lea $0, ($1, $2)", "=r,r,r"(i64 %x, i64 %y) nounwind
+  ret i64 %asmtmp
+}
+
+define i64 @lea3(i64 %x, i64 %y) nounwind {
+; CHECK-LABEL: lea3:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    lea %s0, 2048(%s0, %s1)
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    or %s11, 0, %s9
+  %asmtmp = tail call i64 asm "lea $0, 2048($1, $2)", "=r,r,r"(i64 %x, i64 %y) nounwind
+  ret i64 %asmtmp
+}
+
+define i64 @leasl3(i64 %x, i64 %y) nounwind {
+; CHECK-LABEL: leasl3:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    lea.sl %s0, 2048(%s1, %s0)
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    or %s11, 0, %s9
+  %asmtmp = tail call i64 asm "lea.sl $0, 2048($1, $2)", "=r,r,r"(i64 %y, i64 %x) nounwind
+  ret i64 %asmtmp
+}
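Note (reviewer sketch, not part of the patch): a source-level counterpart to the lea3 test above, assuming a clang front end for VE; the function name is made up. Every "=r"/"r" constraint is resolved by the new getRegForInlineAsmConstraint to VE::I64RegClass, so the result, base, and index are all allocated 64-bit scalar (%s) registers.

// Hypothetical user code (not in this patch) corresponding to the lea3 test.
// A front end would lower this to roughly the IR the test checks:
//   call i64 asm "lea $0, 2048($1, $2)", "=r,r,r"(i64 %x, i64 %y)
long lea_base_index(long x, long y) {
  long r;
  asm("lea %0, 2048(%1, %2)" : "=r"(r) : "r"(x), "r"(y));
  return r;
}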