diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -129,6 +129,15 @@
   unsigned RVVBitsMin = RVVVectorBitsMinOpt;
   unsigned RVVBitsMax = RVVVectorBitsMaxOpt;
 
+  Attribute VScaleRangeAttr = F.getFnAttribute(Attribute::VScaleRange);
+  if (VScaleRangeAttr.isValid()) {
+    if (!RVVVectorBitsMinOpt.getNumOccurrences())
+      RVVBitsMin = VScaleRangeAttr.getVScaleRangeMin() * RISCV::RVVBitsPerBlock;
+    std::optional<unsigned> VScaleMax = VScaleRangeAttr.getVScaleRangeMax();
+    if (VScaleMax.has_value() && !RVVVectorBitsMaxOpt.getNumOccurrences())
+      RVVBitsMax = *VScaleMax * RISCV::RVVBitsPerBlock;
+  }
+
   if (RVVBitsMin != -1U) {
     // FIXME: Change to >= 32 when VLEN = 32 is supported.
     assert((RVVBitsMin == 0 || (RVVBitsMin >= 64 && RVVBitsMin <= 65536 &&
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll
@@ -0,0 +1,168 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s
+
+define <512 x i8> @vadd_v512i8_zvl128(<512 x i8> %a, <512 x i8> %b) #0 {
+; CHECK-LABEL: vadd_v512i8_zvl128:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    li a4, 40
+; CHECK-NEXT:    mul a2, a2, a4
+; CHECK-NEXT:    sub sp, sp, a2
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    li a4, 24
+; CHECK-NEXT:    mul a2, a2, a4
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 16
+; CHECK-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a2, a2, 5
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 16
+; CHECK-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT:    li a2, 128
+; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT:    addi a2, a3, 128
+; CHECK-NEXT:    addi a4, a3, 384
+; CHECK-NEXT:    vle8.v v8, (a4)
+; CHECK-NEXT:    csrr a4, vlenb
+; CHECK-NEXT:    slli a4, a4, 4
+; CHECK-NEXT:    add a4, sp, a4
+; CHECK-NEXT:    addi a4, a4, 16
+; CHECK-NEXT:    vs8r.v v8, (a4) # Unknown-size Folded Spill
+; CHECK-NEXT:    addi a4, a1, 128
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    addi a1, a3, 256
+; CHECK-NEXT:    vle8.v v8, (a1)
+; CHECK-NEXT:    vle8.v v16, (a4)
+; CHECK-NEXT:    vle8.v v24, (a2)
+; CHECK-NEXT:    vle8.v v0, (a3)
+; CHECK-NEXT:    addi a1, sp, 16
+; CHECK-NEXT:    vs8r.v v0, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vl8r.v v0, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vadd.vv v8, v0, v8
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 3
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 4
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vadd.vv v16, v16, v8
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    li a2, 24
+; CHECK-NEXT:    mul a1, a1, a2
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vadd.vv v8, v8, v24
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 5
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    addi a1, sp, 16
+; CHECK-NEXT:    vl8r.v v0, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vadd.vv v0, v24, v0
+; CHECK-NEXT:    vse8.v v0, (a0)
+; CHECK-NEXT:    addi a1, a0, 384
+; CHECK-NEXT:    vse8.v v16, (a1)
+; CHECK-NEXT:    addi a1, a0, 256
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a2, a2, 3
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 16
+; CHECK-NEXT:    vl8r.v v16, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT:    vse8.v v16, (a1)
+; CHECK-NEXT:    addi a0, a0, 128
+; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    li a1, 40
+; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    add sp, sp, a0
+; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    ret
+  %c = add <512 x i8> %a, %b
+  ret <512 x i8> %c
+}
+
+define <512 x i8> @vadd_v512i8_zvl256(<512 x i8> %a, <512 x i8> %b) #1 {
+; CHECK-LABEL: vadd_v512i8_zvl256:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a1, a0, 256
+; CHECK-NEXT:    li a2, 256
+; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT:    vle8.v v24, (a0)
+; CHECK-NEXT:    vle8.v v0, (a1)
+; CHECK-NEXT:    vadd.vv v8, v8, v24
+; CHECK-NEXT:    vadd.vv v16, v16, v0
+; CHECK-NEXT:    ret
+  %c = add <512 x i8> %a, %b
+  ret <512 x i8> %c
+}
+
+define <512 x i8> @vadd_v512i8_zvl512(<512 x i8> %a, <512 x i8> %b) #2 {
+; CHECK-LABEL: vadd_v512i8_zvl512:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 512
+; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v8, v16
+; CHECK-NEXT:    ret
+  %c = add <512 x i8> %a, %b
+  ret <512 x i8> %c
+}
+
+define <512 x i8> @vadd_v512i8_zvl1024(<512 x i8> %a, <512 x i8> %b) #3 {
+; CHECK-LABEL: vadd_v512i8_zvl1024:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 512
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v8, v12
+; CHECK-NEXT:    ret
+  %c = add <512 x i8> %a, %b
+  ret <512 x i8> %c
+}
+
+define <512 x i8> @vadd_v512i8_zvl2048(<512 x i8> %a, <512 x i8> %b) #4 {
+; CHECK-LABEL: vadd_v512i8_zvl2048:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 512
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v8, v10
+; CHECK-NEXT:    ret
+  %c = add <512 x i8> %a, %b
+  ret <512 x i8> %c
+}
+
+define <512 x i8> @vadd_v512i8_zvl4096(<512 x i8> %a, <512 x i8> %b) #5 {
+; CHECK-LABEL: vadd_v512i8_zvl4096:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 512
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vadd.vv v8, v8, v9
+; CHECK-NEXT:    ret
+  %c = add <512 x i8> %a, %b
+  ret <512 x i8> %c
+}
+
+attributes #0 = { vscale_range(2,1024) }
+attributes #1 = { vscale_range(4,1024) }
+attributes #2 = { vscale_range(8,1024) }
+attributes #3 = { vscale_range(16,1024) }
+attributes #4 = { vscale_range(32,1024) }
+attributes #5 = { vscale_range(64,1024) }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
@@ -2353,5 +2353,5 @@
   ret %res
 }
 
-attributes #0 = { vscale_range(2,2) }
+attributes #0 = { vscale_range(2,0) }
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/short-trip-count.ll b/llvm/test/Transforms/LoopVectorize/RISCV/short-trip-count.ll
--- a/llvm/test/Transforms/LoopVectorize/RISCV/short-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/short-trip-count.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=riscv64 -mattr=+v -passes=loop-vectorize < %s | FileCheck %s
+; RUN: opt -S -mtriple=riscv64 -mattr=+zve32x -passes=loop-vectorize < %s | FileCheck %s
 
 define void @small_trip_count_min_vlen_128(ptr nocapture %a) nounwind vscale_range(4,1024) {
 ; CHECK-LABEL: @small_trip_count_min_vlen_128(
@@ -52,8 +52,6 @@
   ret void
 }
 
-; Note: This test uses a vscale_range starting at 1, which is technically incompatibile
-; with +v. If we expose a target hook for minimum vlen, this example will need reworked
 define void @small_trip_count_min_vlen_32(ptr nocapture %a) nounwind vscale_range(1,1024) {
 ; CHECK-LABEL: @small_trip_count_min_vlen_32(
 ; CHECK-NEXT:  entry: