diff --git a/llvm/lib/Target/VE/VEAsmPrinter.cpp b/llvm/lib/Target/VE/VEAsmPrinter.cpp
--- a/llvm/lib/Target/VE/VEAsmPrinter.cpp
+++ b/llvm/lib/Target/VE/VEAsmPrinter.cpp
@@ -377,6 +377,7 @@
       // See if this is a generic print operand
       return AsmPrinter::PrintAsmOperand(MI, OpNo, ExtraCode, O);
     case 'r':
+    case 'v':
       break;
     }
   }
diff --git a/llvm/lib/Target/VE/VEISelLowering.h b/llvm/lib/Target/VE/VEISelLowering.h
--- a/llvm/lib/Target/VE/VEISelLowering.h
+++ b/llvm/lib/Target/VE/VEISelLowering.h
@@ -128,6 +128,7 @@
 
   /// Inline Assembly {
 
+  ConstraintType getConstraintType(StringRef Constraint) const override;
   std::pair<unsigned, const TargetRegisterClass *>
   getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                StringRef Constraint, MVT VT) const override;
diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -1551,6 +1551,19 @@
 // VE Inline Assembly Support
 //===----------------------------------------------------------------------===//
 
+VETargetLowering::ConstraintType
+VETargetLowering::getConstraintType(StringRef Constraint) const {
+  if (Constraint.size() == 1) {
+    switch (Constraint[0]) {
+    default:
+      break;
+    case 'v': // vector registers
+      return C_RegisterClass;
+    }
+  }
+  return TargetLowering::getConstraintType(Constraint);
+}
+
 std::pair<unsigned, const TargetRegisterClass *>
 VETargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
@@ -1563,6 +1576,9 @@
   case 'r':
     RC = &VE::I64RegClass;
     break;
+  case 'v':
+    RC = &VE::V64RegClass;
+    break;
   }
   return std::make_pair(0U, RC);
 }
diff --git a/llvm/test/CodeGen/VE/Scalar/inlineasm-vldvst.ll b/llvm/test/CodeGen/VE/Scalar/inlineasm-vldvst.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/VE/Scalar/inlineasm-vldvst.ll
@@ -0,0 +1,74 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+define void @vld(i8* %p, i64 %i) nounwind {
+; CHECK-LABEL: vld:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vld %v0, %s1, %s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    or %s11, 0, %s9
+  %lvl = tail call i64 asm sideeffect "lea $0, 256", "=r"() nounwind
+  tail call void asm sideeffect "lvl $0", "r"(i64 %lvl) nounwind
+  tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
+  ret void
+}
+
+define void @vldvst(i8* %p, i64 %i) nounwind {
+; CHECK-LABEL: vldvst:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vld %v0, %s1, %s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vst %v0, %s1, %s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    or %s11, 0, %s9
+  %lvl = tail call i64 asm sideeffect "lea $0, 256", "=r"() nounwind
+  tail call void asm sideeffect "lvl $0", "r"(i64 %lvl) nounwind
+  %1 = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
+  tail call void asm sideeffect "vst $0, $2, $1", "v,r,r"(<256 x double> %1, i8* %p, i64 %i) nounwind
+  ret void
+}
+
+define void @vld2vst2(i8* %p, i64 %i) nounwind {
+; CHECK-LABEL: vld2vst2:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    lea %s2, 256
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    lvl %s2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vld %v0, %s1, %s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vld %v1, %s1, %s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vst %v0, %s1, %s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vst %v1, %s1, %s0
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    or %s11, 0, %s9
+  %lvl = tail call i64 asm sideeffect "lea $0, 256", "=r"() nounwind
+  tail call void asm sideeffect "lvl $0", "r"(i64 %lvl) nounwind
+  %1 = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
+  %2 = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
+  tail call void asm sideeffect "vst $0, $2, $1", "v,r,r"(<256 x double> %1, i8* %p, i64 %i) nounwind
+  tail call void asm sideeffect "vst $0, $2, $1", "v,r,r"(<256 x double> %2, i8* %p, i64 %i) nounwind
+  ret void
+}
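
For quick reference, the effect of the patch as exercised by the tests above: an inline-asm operand of type <256 x double> constrained with "v" (or "=v" for an output) is allocated from the V64 register class, and the 'v' case added to PrintAsmOperand keeps the default register printing for such operands. Below is a minimal sketch of the IR-level usage, distilled from the tests; the function name @copy_v and the operand names are illustrative only, and, as in the tests, the vector length register is set via lvl before any vector instruction is issued.

; Assumes -mtriple=ve -mattr=+vpu, as in the RUN line above.
define void @copy_v(i8* %p, i64 %stride) nounwind {
  ; Set the vector length to 256 elements before the vector load/store.
  %vl = tail call i64 asm sideeffect "lea $0, 256", "=r"() nounwind
  tail call void asm sideeffect "lvl $0", "r"(i64 %vl) nounwind
  ; "=v" / "v" bind the <256 x double> value to a VE vector register (V64 class).
  %v = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %stride) nounwind
  tail call void asm sideeffect "vst $0, $2, $1", "v,r,r"(<256 x double> %v, i8* %p, i64 %stride) nounwind
  ret void
}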