diff --git a/clang/lib/CodeGen/Targets/RISCV.cpp b/clang/lib/CodeGen/Targets/RISCV.cpp
--- a/clang/lib/CodeGen/Targets/RISCV.cpp
+++ b/clang/lib/CodeGen/Targets/RISCV.cpp
@@ -8,7 +8,6 @@
 #include "ABIInfoImpl.h"
 #include "TargetInfo.h"
-#include "llvm/TargetParser/RISCVTargetParser.h"
 
 using namespace clang;
 using namespace clang::CodeGen;
@@ -315,11 +314,15 @@
   assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
 
-  const auto *BT = VT->getElementType()->castAs<BuiltinType>();
-  unsigned EltSize = getContext().getTypeSize(BT);
+  auto VScale =
+      getContext().getTargetInfo().getVScaleRange(getContext().getLangOpts());
+  // The MinNumElts is simplified from equation:
+  // NumElts / VScale =
+  //  (EltSize * NumElts / (VScale * RVVBitsPerBlock))
+  //    * (RVVBitsPerBlock / EltSize)
   llvm::ScalableVectorType *ResType =
-      llvm::ScalableVectorType::get(CGT.ConvertType(VT->getElementType()),
-                                    llvm::RISCV::RVVBitsPerBlock / EltSize);
+      llvm::ScalableVectorType::get(CGT.ConvertType(VT->getElementType()),
+                                    VT->getNumElements() / VScale->first);
   return ABIArgInfo::getDirect(ResType);
 }
diff --git a/clang/test/CodeGen/attr-riscv-rvv-vector-bits-codegen.c b/clang/test/CodeGen/attr-riscv-rvv-vector-bits-codegen.c
--- a/clang/test/CodeGen/attr-riscv-rvv-vector-bits-codegen.c
+++ b/clang/test/CodeGen/attr-riscv-rvv-vector-bits-codegen.c
@@ -42,8 +42,8 @@
 // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr [[TMP0]], align 8
 // CHECK-NEXT: store <8 x i32> [[TMP1]], ptr [[RETVAL]], align 8
 // CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr [[RETVAL]], align 8
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP2]], i64 0)
-// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP2]], i64 0)
+// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t test_ptr_to_global() {
   fixed_int32m1_t *global_vec_ptr;
@@ -63,8 +63,8 @@
 // CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, ptr [[ARRAYIDX]], align 8
 // CHECK-NEXT: store <8 x i32> [[TMP1]], ptr [[RETVAL]], align 8
 // CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, ptr [[RETVAL]], align 8
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP2]], i64 0)
-// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP2]], i64 0)
+// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m1_t array_arg(fixed_int32m1_t arr[]) {
   return arr[0];
@@ -76,14 +76,14 @@
 // CHECK-NEXT: [[VEC_ADDR:%.*]] = alloca <vscale x 2 x i32>, align 4
 // CHECK-NEXT: store <vscale x 2 x i32> [[VEC:%.*]], ptr [[VEC_ADDR]], align 4
 // CHECK-NEXT: [[TMP0:%.*]] = load <8 x i32>, ptr @global_vec, align 8
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP0]], i64 0)
+// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP0]], i64 0)
 // CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 2 x i32>, ptr [[VEC_ADDR]], align 4
-// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[CASTSCALABLESVE]], <vscale x 2 x i32> [[TMP1]], i64 8)
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[TMP2]], i64 0)
-// CHECK-NEXT: store <8 x i32> [[CASTFIXEDSVE]], ptr [[RETVAL]], align 8
+// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[CAST_SCALABLE]], <vscale x 2 x i32> [[TMP1]], i64 8)
+// CHECK-NEXT: [[CAST_FIXED:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[TMP2]], i64 0)
+// CHECK-NEXT: store <8 x i32> [[CAST_FIXED]], ptr [[RETVAL]], align 8
 // CHECK-NEXT: [[TMP3:%.*]] = load <8 x i32>, ptr [[RETVAL]], align 8
-// CHECK-NEXT: [[CASTSCALABLESVE1:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP3]], i64 0)
-// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE1]]
+// CHECK-NEXT: [[CAST_SCALABLE1:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[TMP3]], i64 0)
+// CHECK-NEXT: ret <vscale x 2 x i32> [[CAST_SCALABLE1]]
 //
 fixed_int32m1_t test_cast(vint32m1_t vec) {
   return __riscv_vadd(global_vec, vec, __riscv_v_fixed_vlen/32);
@@ -98,8 +98,8 @@
 // CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr [[TMP0]], align 8
 // CHECK-NEXT: store <16 x i32> [[TMP1]], ptr [[RETVAL]], align 8
 // CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr [[RETVAL]], align 8
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v16i32(<vscale x 2 x i32> undef, <16 x i32> [[TMP2]], i64 0)
-// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP2]], i64 0)
+// CHECK-NEXT: ret <vscale x 4 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m2_t test_ptr_to_global_m2() {
   fixed_int32m2_t *global_vec_ptr;
@@ -119,8 +119,8 @@
 // CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, ptr [[ARRAYIDX]], align 8
 // CHECK-NEXT: store <16 x i32> [[TMP1]], ptr [[RETVAL]], align 8
 // CHECK-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr [[RETVAL]], align 8
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v16i32(<vscale x 2 x i32> undef, <16 x i32> [[TMP2]], i64 0)
-// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP2]], i64 0)
+// CHECK-NEXT: ret <vscale x 4 x i32> [[CAST_SCALABLE]]
 //
 fixed_int32m2_t array_arg_m2(fixed_int32m2_t arr[]) {
   return arr[0];
@@ -132,14 +132,14 @@
 // CHECK-NEXT: [[VEC_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 4
 // CHECK-NEXT: store <vscale x 4 x i32> [[VEC:%.*]], ptr [[VEC_ADDR]], align 4
 // CHECK-NEXT: [[TMP0:%.*]] = load <16 x i32>, ptr @global_vec_m2, align 8
-// CHECK-NEXT: [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP0]], i64 0)
+// CHECK-NEXT: [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP0]], i64 0)
 // CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[VEC_ADDR]], align 4
-// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[TMP1]], i64 16)
-// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP2]], i64 0)
-// CHECK-NEXT: store <16 x i32> [[CASTFIXEDSVE]], ptr [[RETVAL]], align 8
+// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[CAST_SCALABLE]], <vscale x 4 x i32> [[TMP1]], i64 16)
+// CHECK-NEXT: [[CAST_FIXED:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP2]], i64 0)
+// CHECK-NEXT: store <16 x i32> [[CAST_FIXED]], ptr [[RETVAL]], align 8
 // CHECK-NEXT: [[TMP3:%.*]] = load <16 x i32>, ptr [[RETVAL]], align 8
-// CHECK-NEXT: [[CASTSCALABLESVE1:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v16i32(<vscale x 2 x i32> undef, <16 x i32> [[TMP3]], i64 0)
-// CHECK-NEXT: ret <vscale x 2 x i32> [[CASTSCALABLESVE1]]
+// CHECK-NEXT: [[CAST_SCALABLE1:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP3]], i64 0)
+// CHECK-NEXT: ret <vscale x 4 x i32> [[CAST_SCALABLE1]]
 //
 fixed_int32m2_t test_cast_m2(vint32m2_t vec) {
   return __riscv_vadd(global_vec_m2, vec, __riscv_v_fixed_vlen/16);
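
A note on the arithmetic behind the new `VT->getNumElements() / VScale->first` formula. The standalone sketch below is not part of the patch; it assumes `RVVBitsPerBlock == 64` and `__riscv_v_fixed_vlen == 256` (the configuration implied by the `<8 x i32>` m1 and `<16 x i32>` m2 fixed types in the test above). It checks the identity quoted in the code comment and shows why the m2 coercions become `nxv4i32`, while the old `RVVBitsPerBlock / EltSize` expression ignored the element count and always produced `nxv2i32` for i32 elements, which is what the removed m2 CHECK lines expected.

// Standalone sketch (not Clang code) of the MinNumElts arithmetic,
// assuming RVVBitsPerBlock == 64 and __riscv_v_fixed_vlen == 256.
#include <cassert>

int main() {
  const unsigned RVVBitsPerBlock = 64;
  const unsigned FixedVLen = 256;                         // __riscv_v_fixed_vlen
  const unsigned VScale = FixedVLen / RVVBitsPerBlock;    // vscale = 4

  const unsigned EltSize = 32;                            // i32 elements
  const unsigned NumEltsM1 = 1 * FixedVLen / EltSize;     // fixed_int32m1_t: 8 lanes
  const unsigned NumEltsM2 = 2 * FixedVLen / EltSize;     // fixed_int32m2_t: 16 lanes

  // New formula: MinNumElts = NumElts / VScale.
  assert(NumEltsM1 / VScale == 2);  // coerced to <vscale x 2 x i32> (nxv2i32)
  assert(NumEltsM2 / VScale == 4);  // coerced to <vscale x 4 x i32> (nxv4i32)

  // The identity from the patch comment: NumElts / VScale ==
  //   (EltSize * NumElts / (VScale * RVVBitsPerBlock)) * (RVVBitsPerBlock / EltSize)
  assert(NumEltsM2 / VScale ==
         (EltSize * NumEltsM2 / (VScale * RVVBitsPerBlock)) *
             (RVVBitsPerBlock / EltSize));

  // Old formula: depends only on the element size, so every i32 fixed type
  // was coerced to nxv2i32 regardless of LMUL.
  assert(RVVBitsPerBlock / EltSize == 2);
  return 0;
}

With vscale fixed at 4, the m1 type's 8 lanes still coerce to nxv2i32 (those CHECK lines only pick up the CASTSCALABLESVE-to-CAST_SCALABLE rename), while the m2 type's 16 lanes now coerce to nxv4i32, matching the updated m2 CHECK lines.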