diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp @@ -1361,6 +1361,11 @@ return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC); } + if (match(Src, m_VScale(DL))) { + Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1)); + return replaceInstUsesWith(CI, VScale); + } + return nullptr; } @@ -1605,6 +1610,11 @@ return BinaryOperator::CreateAShr(A, NewShAmt); } + if (match(Src, m_VScale(DL))) { + Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1)); + return replaceInstUsesWith(CI, VScale); + } + return nullptr; } diff --git a/llvm/test/Transforms/InstCombine/vscale_sext_and_zext.ll b/llvm/test/Transforms/InstCombine/vscale_sext_and_zext.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/InstCombine/vscale_sext_and_zext.ll @@ -0,0 +1,50 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -instcombine -S | FileCheck %s + +define i64 @vscale_SExt_i32toi64() { +; CHECK-LABEL: @vscale_SExt_i32toi64( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: ret i64 [[TMP0]] +entry: + %0 = call i32 @llvm.vscale.i32() + %1 = sext i32 %0 to i64 + ret i64 %1 +} + +define i32 @vscale_SExt_i8toi32() { +; CHECK-LABEL: @vscale_SExt_i8toi32( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32() +; CHECK-NEXT: ret i32 [[TMP0]] +entry: + %0 = call i8 @llvm.vscale.i8() + %1 = sext i8 %0 to i32 + ret i32 %1 +} + +define i64 @vscale_ZExt_i32toi64() { +; CHECK-LABEL: @vscale_ZExt_i32toi64( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: ret i64 [[TMP0]] +entry: + %0 = call i32 @llvm.vscale.i32() + %1 = zext i32 %0 to i64 + ret i64 %1 +} + +define i64 @vscale_ZExt_i16toi64() { +; CHECK-LABEL: 
@vscale_ZExt_i16toi64( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: ret i64 [[TMP0]] +entry: + %0 = call i16 @llvm.vscale.i16() + %1 = zext i16 %0 to i64 + ret i64 %1 +} + +declare i8 @llvm.vscale.i8() +declare i16 @llvm.vscale.i16() +declare i32 @llvm.vscale.i32()