diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -2036,7 +2036,7 @@
 
   // Only handle legal scalar cases. Anything else requires too much work.
   Type *Ty = CountZeros->getType();
-  unsigned SizeInBits = Ty->getPrimitiveSizeInBits();
+  unsigned SizeInBits = Ty->getScalarSizeInBits();
   if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
     return false;
 
diff --git a/llvm/test/Transforms/CodeGenPrepare/RISCV/cttz-ctlz.ll b/llvm/test/Transforms/CodeGenPrepare/RISCV/cttz-ctlz.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/CodeGenPrepare/RISCV/cttz-ctlz.ll
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -codegenprepare < %s | FileCheck %s
+
+target triple = "riscv64-unknown-unknown"
+
+; Check that despeculating count-zeros intrinsics doesn't crash when those
+; intrinsics use scalable types.
+
+define <vscale x 4 x i64> @cttz_nxv4i64(<vscale x 4 x i64> %x) {
+; CHECK-LABEL: @cttz_nxv4i64(
+; CHECK-NEXT:    [[Z:%.*]] = call <vscale x 4 x i64> @llvm.cttz.nxv4i64(<vscale x 4 x i64> [[X:%.*]], i1 false)
+; CHECK-NEXT:    ret <vscale x 4 x i64> [[Z]]
+;
+  %z = call <vscale x 4 x i64> @llvm.cttz.nxv4i64(<vscale x 4 x i64> %x, i1 false)
+  ret <vscale x 4 x i64> %z
+}
+
+define <vscale x 4 x i64> @ctlz_nxv4i64(<vscale x 4 x i64> %x) {
+; CHECK-LABEL: @ctlz_nxv4i64(
+; CHECK-NEXT:    [[Z:%.*]] = call <vscale x 4 x i64> @llvm.ctlz.nxv4i64(<vscale x 4 x i64> [[X:%.*]], i1 false)
+; CHECK-NEXT:    ret <vscale x 4 x i64> [[Z]]
+;
+  %z = call <vscale x 4 x i64> @llvm.ctlz.nxv4i64(<vscale x 4 x i64> %x, i1 false)
+  ret <vscale x 4 x i64> %z
+}
+
+declare <vscale x 4 x i64> @llvm.cttz.nxv4i64(<vscale x 4 x i64>, i1)
+declare <vscale x 4 x i64> @llvm.ctlz.nxv4i64(<vscale x 4 x i64>, i1)
diff --git a/llvm/test/Transforms/CodeGenPrepare/RISCV/lit.local.cfg b/llvm/test/Transforms/CodeGenPrepare/RISCV/lit.local.cfg
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/CodeGenPrepare/RISCV/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'RISCV' in config.root.targets:
+    config.unsupported = True
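
Note on the fix (not part of the patch): on a scalable vector type such as <vscale x 4 x i64>, the old call Ty->getPrimitiveSizeInBits() returns a scalable TypeSize, and its implicit conversion to unsigned asserts before the Ty->isVectorTy() bail-out on the next line is ever reached. Ty->getScalarSizeInBits() instead returns the element width as a plain unsigned, so scalable vectors now fall through safely to the existing vector check and the pass simply declines to despeculate them. A minimal sketch of the fixed guard as a standalone helper follows; the helper name is hypothetical, while the Type and DataLayout calls are real LLVM API:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"

using namespace llvm;

// Mirrors the guard in despeculateCountZeros after this patch: reject
// vectors (fixed or scalable) and scalars wider than the largest legal
// integer type.
static bool isLegalScalarForCountZeros(Type *Ty, const DataLayout &DL) {
  // Safe on scalable vectors: returns the element width as a plain
  // unsigned rather than a (possibly scalable) TypeSize.
  unsigned SizeInBits = Ty->getScalarSizeInBits();
  return !Ty->isVectorTy() &&
         SizeInBits <= DL.getLargestLegalIntTypeSizeInBits();
}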