diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1361,6 +1361,25 @@
     return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC);
   }
 
+  // The vscale_range attribute bounds vscale. If the narrow llvm.vscale call
+  // is known to fit in its source type, the zero-extension is a no-op and we
+  // can rematerialize vscale directly at the destination type.
+  if (match(Src, m_VScale(DL))) {
+    if (CI.getFunction() &&
+        CI.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
+      unsigned MaxVScale = CI.getFunction()
+                               ->getFnAttribute(Attribute::VScaleRange)
+                               .getVScaleRangeArgs()
+                               .second;
+      unsigned TypeWidth = Src->getType()->getScalarSizeInBits();
+      // zext is exact iff MaxVScale < 2^TypeWidth; a max of 0 means unbounded.
+      if (MaxVScale > 0 && Log2_32(MaxVScale) < TypeWidth) {
+        Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
+        return replaceInstUsesWith(CI, VScale);
+      }
+    }
+  }
+
   return nullptr;
 }
 
@@ -1605,6 +1624,23 @@
     return BinaryOperator::CreateAShr(A, NewShAmt);
   }
 
+  // As for zext above, but sext also requires the sign bit of the narrow
+  // result to be clear, i.e. vscale < 2^(TypeWidth - 1).
+  if (match(Src, m_VScale(DL))) {
+    if (CI.getFunction() &&
+        CI.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
+      unsigned MaxVScale = CI.getFunction()
+                               ->getFnAttribute(Attribute::VScaleRange)
+                               .getVScaleRangeArgs()
+                               .second;
+      unsigned TypeWidth = Src->getType()->getScalarSizeInBits();
+      if (MaxVScale > 0 && Log2_32(MaxVScale) < (TypeWidth - 1)) {
+        Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
+        return replaceInstUsesWith(CI, VScale);
+      }
+    }
+  }
+
   return nullptr;
 }
 
diff --git a/llvm/test/Transforms/InstCombine/vscale_sext_and_zext.ll b/llvm/test/Transforms/InstCombine/vscale_sext_and_zext.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/vscale_sext_and_zext.ll
@@ -0,0 +1,102 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i64 @vscale_SExt_i32toi64() #0 {
+; CHECK-LABEL: @vscale_SExt_i32toi64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+entry:
+  %0 = call i32 @llvm.vscale.i32()
+  %1 = sext i32 %0 to i64
+  ret i64 %1
+}
+
+define i32 @vscale_SExt_i8toi32() #0 {
+; CHECK-LABEL: @vscale_SExt_i8toi32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
+entry:
+  %0 = call i8 @llvm.vscale.i8()
+  %1 = sext i8 %0 to i32
+  ret i32 %1
+}
+
+; Log2_32(192) == 7 is not strictly less than 8 - 1, so the sign bit of the
+; narrow result may be set and the sext must be preserved.
+define i32 @vscale_SExt_i8toi32_poison() vscale_range(0, 192) {
+; CHECK-LABEL: @vscale_SExt_i8toi32_poison(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i8 @llvm.vscale.i8()
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i8 [[TMP0]] to i32
+; CHECK-NEXT:    ret i32 [[TMP1]]
+;
+entry:
+  %0 = call i8 @llvm.vscale.i8()
+  %1 = sext i8 %0 to i32
+  ret i32 %1
+}
+
+define i64 @vscale_ZExt_i32toi64() #0 {
+; CHECK-LABEL: @vscale_ZExt_i32toi64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+entry:
+  %0 = call i32 @llvm.vscale.i32()
+  %1 = zext i32 %0 to i64
+  ret i64 %1
+}
+
+define i64 @vscale_ZExt_i1toi64() vscale_range(0, 1) {
+; CHECK-LABEL: @vscale_ZExt_i1toi64(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    ret i64 [[TMP0]]
+;
+entry:
+  %0 = call i1 @llvm.vscale.i1()
+  %1 = zext i1 %0 to i64
+  ret i64 %1
+}
+
+; Log2_32(1024) == 10 is not less than 8, so the i8 result may wrap and the
+; zext must be preserved.
+define i32 @vscale_ZExt_i8toi32_poison() vscale_range(0, 1024) {
+; CHECK-LABEL: @vscale_ZExt_i8toi32_poison(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i8 @llvm.vscale.i8()
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i8 [[TMP0]] to i32
+; CHECK-NEXT:    ret i32 [[TMP1]]
+;
+entry:
+  %0 = call i8 @llvm.vscale.i8()
+  %1 = zext i8 %0 to i32
+  ret i32 %1
+}
+
+; Without a vscale_range attribute the maximum vscale is unknown and the
+; fold must not fire.
+define i32 @vscale_ZExt_i16toi32_unknown() {
+; CHECK-LABEL: @vscale_ZExt_i16toi32_unknown(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i16 @llvm.vscale.i16()
+; CHECK-NEXT:    [[TMP1:%.*]] = zext i16 [[TMP0]] to i32
+; CHECK-NEXT:    ret i32 [[TMP1]]
+;
+entry:
+  %0 = call i16 @llvm.vscale.i16()
+  %1 = zext i16 %0 to i32
+  ret i32 %1
+}
+
+attributes #0 = { vscale_range(0, 16) }
+
+declare i1 @llvm.vscale.i1()
+declare i8 @llvm.vscale.i8()
+declare i16 @llvm.vscale.i16()
+declare i32 @llvm.vscale.i32()
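
Note on the shared legality condition: both hunks rely on the fact that, given vscale <= MaxVScale from vscale_range, a zext of an iW vscale is exact when MaxVScale < 2^W, and a sext additionally needs the sign bit clear, i.e. MaxVScale < 2^(W-1). The standalone C++ sketch below restates that guard outside of InstCombine; it is illustrative only and not part of the patch, and log2Floor / vscaleExtIsExact are hypothetical names that merely mirror llvm::Log2_32 and the check added above.

// Illustrative sketch only; names are hypothetical, not LLVM API.
#include <cassert>
#include <cstdint>

// floor(log2(X)) for X > 0, mirroring llvm::Log2_32.
static unsigned log2Floor(uint32_t X) {
  unsigned L = 0;
  while (X >>= 1)
    ++L;
  return L;
}

// zext of an i<TypeWidth> vscale is exact iff MaxVScale < 2^TypeWidth; sext
// additionally requires MaxVScale < 2^(TypeWidth - 1) so the sign bit is clear.
static bool vscaleExtIsExact(uint32_t MaxVScale, unsigned TypeWidth,
                             bool IsSigned) {
  if (MaxVScale == 0) // a max of 0 means the bound is unknown
    return false;
  unsigned Limit = IsSigned ? TypeWidth - 1 : TypeWidth;
  return log2Floor(MaxVScale) < Limit;
}

int main() {
  // vscale_range(0, 16): sext i32 -> i64 folds, log2(16) = 4 < 31.
  assert(vscaleExtIsExact(16, 32, /*IsSigned=*/true));
  // vscale_range(0, 192): sext i8 -> i32 must not fold, log2(192) = 7 !< 7.
  assert(!vscaleExtIsExact(192, 8, /*IsSigned=*/true));
  // vscale_range(0, 1024): zext i8 -> i32 must not fold, log2(1024) = 10 !< 8.
  assert(!vscaleExtIsExact(1024, 8, /*IsSigned=*/false));
  return 0;
}

Compiling and running this exercises the same boundary cases as the new tests: the 192 and 1024 cases sit exactly on the wrong side of the strict inequality, which is why those functions keep their sext/zext.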