diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -8083,7 +8083,16 @@
   // Get the actual register value type. This is important, because the user
   // may have asked for (e.g.) the AX register in i32 type. We need to
   // remember that AX is actually i16 to get the right extension.
-  const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
+  MVT RegVT = *TRI.legalclasstypes_begin(*RC);
+  // If the first legal type's size does not match the constraint's, scan the
+  // class's full legal-type list for an exact ConstraintVT match so that e.g.
+  // an LMUL>1 vector constraint picks the matching register value type.
+  if (RegVT.getSizeInBits() != RefOpInfo.ConstraintVT.getSizeInBits()) {
+    auto E = TRI.legalclasstypes_end(*RC);
+    auto I = std::find_if(TRI.legalclasstypes_begin(*RC), E,
+                          [&RefOpInfo](const MVT::SimpleValueType &VTy) {
+                            return MVT(VTy) == RefOpInfo.ConstraintVT;
+                          });
+    if (I != E)
+      RegVT = *I;
+  }
 
   if (OpInfo.ConstraintVT != MVT::Other) {
     // If this is an FP operand in an integer register (or visa versa), or more
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5655,6 +5655,7 @@
     default:
       break;
     case 'f':
+    case 'v':
       return C_RegisterClass;
     case 'I':
     case 'J':
@@ -5685,6 +5686,13 @@
       if (Subtarget.hasStdExtD() && VT == MVT::f64)
         return std::make_pair(0U, &RISCV::FPR64RegClass);
       break;
+    case 'v':
+      for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
+                             &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
+        if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
+          return std::make_pair(0U, RC);
+      }
+      break;
     default:
       break;
     }
@@ -5784,6 +5792,54 @@
     }
   }
 
+  if (Subtarget.hasStdExtV()) {
+    Register VReg = StringSwitch<Register>(Constraint.lower())
+                        .Case("{v0}", RISCV::V0)
+                        .Case("{v1}", RISCV::V1)
+                        .Case("{v2}", RISCV::V2)
+                        .Case("{v3}", RISCV::V3)
+                        .Case("{v4}", RISCV::V4)
+                        .Case("{v5}", RISCV::V5)
+                        .Case("{v6}", RISCV::V6)
+                        .Case("{v7}", RISCV::V7)
+                        .Case("{v8}", RISCV::V8)
+                        .Case("{v9}", RISCV::V9)
+                        .Case("{v10}", RISCV::V10)
+                        .Case("{v11}", RISCV::V11)
+                        .Case("{v12}", RISCV::V12)
+                        .Case("{v13}", RISCV::V13)
+                        .Case("{v14}", RISCV::V14)
+                        .Case("{v15}", RISCV::V15)
+                        .Case("{v16}", RISCV::V16)
+                        .Case("{v17}", RISCV::V17)
+                        .Case("{v18}", RISCV::V18)
+                        .Case("{v19}", RISCV::V19)
+                        .Case("{v20}", RISCV::V20)
+                        .Case("{v21}", RISCV::V21)
+                        .Case("{v22}", RISCV::V22)
+                        .Case("{v23}", RISCV::V23)
+                        .Case("{v24}", RISCV::V24)
+                        .Case("{v25}", RISCV::V25)
+                        .Case("{v26}", RISCV::V26)
+                        .Case("{v27}", RISCV::V27)
+                        .Case("{v28}", RISCV::V28)
+                        .Case("{v29}", RISCV::V29)
+                        .Case("{v30}", RISCV::V30)
+                        .Case("{v31}", RISCV::V31)
+                        .Default(RISCV::NoRegister);
+    if (VReg != RISCV::NoRegister) {
+      if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
+        return std::make_pair(VReg, &RISCV::VRRegClass);
+      for (const auto *RC :
+           {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
+        if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
+          VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
+          return std::make_pair(VReg, RC);
+        }
+      }
+    }
+  }
+
   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll b/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/inline-asm.ll
@@ -0,0 +1,410 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v < %s \
+; RUN:   --verify-machineinstrs | FileCheck %s
+
+define <vscale x 1 x i1> @test_1xi1(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2) nounwind {
+; CHECK-LABEL: test_1xi1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2)
+  ret <vscale x 1 x i1> %0
+}
+
+define <vscale x 2 x i1> @test_2xi1(<vscale x 2 x i1> %in, <vscale x 2 x i1> %in2) nounwind {
+; CHECK-LABEL: test_2xi1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 2 x i1> %in, <vscale x 2 x i1> %in2)
+  ret <vscale x 2 x i1> %0
+}
+
+define <vscale x 4 x i1> @test_4xi1(<vscale x 4 x i1> %in, <vscale x 4 x i1> %in2) nounwind {
+; CHECK-LABEL: test_4xi1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 4 x i1> %in, <vscale x 4 x i1> %in2)
+  ret <vscale x 4 x i1> %0
+}
+
+define <vscale x 8 x i1> @test_8xi1(<vscale x 8 x i1> %in, <vscale x 8 x i1> %in2) nounwind {
+; CHECK-LABEL: test_8xi1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 8 x i1> %in, <vscale x 8 x i1> %in2)
+  ret <vscale x 8 x i1> %0
+}
+
+define <vscale x 16 x i1> @test_16xi1(<vscale x 16 x i1> %in, <vscale x 16 x i1> %in2) nounwind {
+; CHECK-LABEL: test_16xi1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 16 x i1> %in, <vscale x 16 x i1> %in2)
+  ret <vscale x 16 x i1> %0
+}
+
+define <vscale x 32 x i1> @test_32xi1(<vscale x 32 x i1> %in, <vscale x 32 x i1> %in2) nounwind {
+; CHECK-LABEL: test_32xi1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 32 x i1> %in, <vscale x 32 x i1> %in2)
+  ret <vscale x 32 x i1> %0
+}
+
+define <vscale x 64 x i1> @test_64xi1(<vscale x 64 x i1> %in, <vscale x 64 x i1> %in2) nounwind {
+; CHECK-LABEL: test_64xi1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vmand.mm v0, v0, v8
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 64 x i1> %in, <vscale x 64 x i1> %in2)
+  ret <vscale x 64 x i1> %0
+}
+
+define <vscale x 1 x i64> @test_1xi64(<vscale x 1 x i64> %in, <vscale x 1 x i64> %in2) nounwind {
+; CHECK-LABEL: test_1xi64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i64> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 1 x i64> %in, <vscale x 1 x i64> %in2)
+  ret <vscale x 1 x i64> %0
+}
+
+define <vscale x 2 x i64> @test_2xi64(<vscale x 2 x i64> %in, <vscale x 2 x i64> %in2) nounwind {
+; CHECK-LABEL: test_2xi64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i64> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 2 x i64> %in, <vscale x 2 x i64> %in2)
+  ret <vscale x 2 x i64> %0
+}
+
+define <vscale x 4 x i64> @test_4xi64(<vscale x 4 x i64> %in, <vscale x 4 x i64> %in2) nounwind {
+; CHECK-LABEL: test_4xi64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i64> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 4 x i64> %in, <vscale x 4 x i64> %in2)
+  ret <vscale x 4 x i64> %0
+}
+
+define <vscale x 8 x i64> @test_8xi64(<vscale x 8 x i64> %in, <vscale x 8 x i64> %in2) nounwind {
+; CHECK-LABEL: test_8xi64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i64> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 8 x i64> %in, <vscale x 8 x i64> %in2)
+  ret <vscale x 8 x i64> %0
+}
+
+define <vscale x 1 x i32> @test_1xi32(<vscale x 1 x i32> %in, <vscale x 1 x i32> %in2) nounwind {
+; CHECK-LABEL: test_1xi32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 1 x i32> %in, <vscale x 1 x i32> %in2)
+  ret <vscale x 1 x i32> %0
+}
+
+define <vscale x 2 x i32> @test_2xi32(<vscale x 2 x i32> %in, <vscale x 2 x i32> %in2) nounwind {
+; CHECK-LABEL: test_2xi32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 2 x i32> %in, <vscale x 2 x i32> %in2)
+  ret <vscale x 2 x i32> %0
+}
+
+define <vscale x 4 x i32> @test_4xi32(<vscale x 4 x i32> %in, <vscale x 4 x i32> %in2) nounwind {
+; CHECK-LABEL: test_4xi32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 4 x i32> %in, <vscale x 4 x i32> %in2)
+  ret <vscale x 4 x i32> %0
+}
+
+define <vscale x 8 x i32> @test_8xi32(<vscale x 8 x i32> %in, <vscale x 8 x i32> %in2) nounwind {
+; CHECK-LABEL: test_8xi32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 8 x i32> %in, <vscale x 8 x i32> %in2)
+  ret <vscale x 8 x i32> %0
+}
+
+define <vscale x 16 x i32> @test_16xi32(<vscale x 16 x i32> %in, <vscale x 16 x i32> %in2) nounwind {
+; CHECK-LABEL: test_16xi32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 16 x i32> %in, <vscale x 16 x i32> %in2)
+  ret <vscale x 16 x i32> %0
+}
+
+define <vscale x 1 x i16> @test_1xi16(<vscale x 1 x i16> %in, <vscale x 1 x i16> %in2) nounwind {
+; CHECK-LABEL: test_1xi16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 1 x i16> %in, <vscale x 1 x i16> %in2)
+  ret <vscale x 1 x i16> %0
+}
+
+define <vscale x 2 x i16> @test_2xi16(<vscale x 2 x i16> %in, <vscale x 2 x i16> %in2) nounwind {
+; CHECK-LABEL: test_2xi16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 2 x i16> %in, <vscale x 2 x i16> %in2)
+  ret <vscale x 2 x i16> %0
+}
+
+define <vscale x 4 x i16> @test_4xi16(<vscale x 4 x i16> %in, <vscale x 4 x i16> %in2) nounwind {
+; CHECK-LABEL: test_4xi16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 4 x i16> %in, <vscale x 4 x i16> %in2)
+  ret <vscale x 4 x i16> %0
+}
+
+define <vscale x 8 x i16> @test_8xi16(<vscale x 8 x i16> %in, <vscale x 8 x i16> %in2) nounwind {
+; CHECK-LABEL: test_8xi16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 8 x i16> %in, <vscale x 8 x i16> %in2)
+  ret <vscale x 8 x i16> %0
+}
+
+define <vscale x 16 x i16> @test_16xi16(<vscale x 16 x i16> %in, <vscale x 16 x i16> %in2) nounwind {
+; CHECK-LABEL: test_16xi16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 16 x i16> %in, <vscale x 16 x i16> %in2)
+  ret <vscale x 16 x i16> %0
+}
+
+define <vscale x 32 x i16> @test_32xi16(<vscale x 32 x i16> %in, <vscale x 32 x i16> %in2) nounwind {
+; CHECK-LABEL: test_32xi16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 32 x i16> %in, <vscale x 32 x i16> %in2)
+  ret <vscale x 32 x i16> %0
+}
+
+define <vscale x 1 x i8> @test_1xi8(<vscale x 1 x i8> %in, <vscale x 1 x i8> %in2) nounwind {
+; CHECK-LABEL: test_1xi8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 1 x i8> %in, <vscale x 1 x i8> %in2)
+  ret <vscale x 1 x i8> %0
+}
+
+define <vscale x 2 x i8> @test_2xi8(<vscale x 2 x i8> %in, <vscale x 2 x i8> %in2) nounwind {
+; CHECK-LABEL: test_2xi8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 2 x i8> %in, <vscale x 2 x i8> %in2)
+  ret <vscale x 2 x i8> %0
+}
+
+define <vscale x 4 x i8> @test_4xi8(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2) nounwind {
+; CHECK-LABEL: test_4xi8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2)
+  ret <vscale x 4 x i8> %0
+}
+
+define <vscale x 8 x i8> @test_8xi8(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2) nounwind {
+; CHECK-LABEL: test_8xi8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 8 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2)
+  ret <vscale x 8 x i8> %0
+}
+
+define <vscale x 16 x i8> @test_16xi8(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2) nounwind {
+; CHECK-LABEL: test_16xi8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v10
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 16 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2)
+  ret <vscale x 16 x i8> %0
+}
+
+define <vscale x 32 x i8> @test_32xi8(<vscale x 32 x i8> %in, <vscale x 32 x i8> %in2) nounwind {
+; CHECK-LABEL: test_32xi8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v12
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 32 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 32 x i8> %in, <vscale x 32 x i8> %in2)
+  ret <vscale x 32 x i8> %0
+}
+
+define <vscale x 64 x i8> @test_64xi8(<vscale x 64 x i8> %in, <vscale x 64 x i8> %in2) nounwind {
+; CHECK-LABEL: test_64xi8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v8, v8, v16
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 64 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 64 x i8> %in, <vscale x 64 x i8> %in2)
+  ret <vscale x 64 x i8> %0
+}
+
+define <vscale x 1 x float> @test_specify_reg_mf2(<vscale x 1 x float> %in, <vscale x 1 x float> %in2) nounwind {
+; CHECK-LABEL: test_specify_reg_mf2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v2, v9
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v0, v1, v2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    vmv1r.v v8, v0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x float> asm "vadd.vv $0, $1, $2", "={v0},{v1},{v2}"(<vscale x 1 x float> %in, <vscale x 1 x float> %in2)
+  ret <vscale x 1 x float> %0
+}
+
+define <vscale x 2 x float> @test_specify_reg_m1(<vscale x 2 x float> %in, <vscale x 2 x float> %in2) nounwind {
+; CHECK-LABEL: test_specify_reg_m1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v2, v9
+; CHECK-NEXT:    vmv1r.v v1, v8
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v0, v1, v2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    vmv1r.v v8, v0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 2 x float> asm "vadd.vv $0, $1, $2", "={v0},{v1},{v2}"(<vscale x 2 x float> %in, <vscale x 2 x float> %in2)
+  ret <vscale x 2 x float> %0
+}
+
+define <vscale x 4 x float> @test_specify_reg_m2(<vscale x 4 x float> %in, <vscale x 4 x float> %in2) nounwind {
+; CHECK-LABEL: test_specify_reg_m2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv2r.v v4, v10
+; CHECK-NEXT:    vmv2r.v v2, v8
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vadd.vv v0, v2, v4
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    vmv2r.v v8, v0
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 4 x float> asm "vadd.vv $0, $1, $2", "={v0},{v2},{v4}"(<vscale x 4 x float> %in, <vscale x 4 x float> %in2)
+  ret <vscale x 4 x float> %0
+}
+
+define <vscale x 1 x i1> @test_specify_reg_mask(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2) nounwind {
+; CHECK-LABEL: test_specify_reg_mask:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v2, v8
+; CHECK-NEXT:    vmv1r.v v1, v0
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    vmand.mm v0, v1, v2
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call <vscale x 1 x i1> asm "vmand.mm $0, $1, $2", "={v0},{v1},{v2}"(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2)
+  ret <vscale x 1 x i1> %0
+}