diff --git a/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.cpp b/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.cpp --- a/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.cpp +++ b/clang-tools-extra/clang-tidy/ClangTidyDiagnosticConsumer.cpp @@ -79,16 +79,22 @@ return CharSourceRange::getCharRange(SourceRange.getBegin(), End); }; + // We are only interested in valid ranges. + auto ValidRanges = + llvm::make_filter_range(Ranges, [](const CharSourceRange &R) { + return R.getAsRange().isValid(); + }); + if (Level == DiagnosticsEngine::Note) { Error.Notes.push_back(TidyMessage); - for (const CharSourceRange &SourceRange : Ranges) + for (const CharSourceRange &SourceRange : ValidRanges) Error.Notes.back().Ranges.emplace_back(Loc.getManager(), ToCharRange(SourceRange)); return; } assert(Error.Message.Message.empty() && "Overwriting a diagnostic message"); Error.Message = TidyMessage; - for (const CharSourceRange &SourceRange : Ranges) + for (const CharSourceRange &SourceRange : ValidRanges) Error.Message.Ranges.emplace_back(Loc.getManager(), ToCharRange(SourceRange)); } diff --git a/clang-tools-extra/clang-tidy/altera/StructPackAlignCheck.cpp b/clang-tools-extra/clang-tidy/altera/StructPackAlignCheck.cpp --- a/clang-tools-extra/clang-tidy/altera/StructPackAlignCheck.cpp +++ b/clang-tools-extra/clang-tidy/altera/StructPackAlignCheck.cpp @@ -51,6 +51,10 @@ if (Struct->isTemplated()) return; + // Packing and alignment requirements for invalid decls are meaningless. + if (Struct->isInvalidDecl()) + return; + // Get sizing info for the struct. 
llvm::SmallVector, 10> FieldSizes; unsigned int TotalBitSize = 0; diff --git a/clang-tools-extra/clang-tidy/cppcoreguidelines/NarrowingConversionsCheck.cpp b/clang-tools-extra/clang-tidy/cppcoreguidelines/NarrowingConversionsCheck.cpp --- a/clang-tools-extra/clang-tidy/cppcoreguidelines/NarrowingConversionsCheck.cpp +++ b/clang-tools-extra/clang-tidy/cppcoreguidelines/NarrowingConversionsCheck.cpp @@ -58,6 +58,14 @@ Options.store(Opts, "PedanticMode", PedanticMode); } +AST_MATCHER(FieldDecl, hasIntBitwidth) { + assert(Node.isBitField()); + const ASTContext &Ctx = Node.getASTContext(); + unsigned IntBitWidth = Ctx.getIntWidth(Ctx.IntTy); + unsigned CurrentBitWidth = Node.getBitWidthValue(Ctx); + return IntBitWidth == CurrentBitWidth; +} + void NarrowingConversionsCheck::registerMatchers(MatchFinder *Finder) { // ceil() and floor() are guaranteed to return integers, even though the type // is not integral. @@ -83,6 +91,46 @@ binaryOperator(hasOperands(IsConversionFromIgnoredType, hasType(isInteger())))); + // Bitfields are special. Due to integral promotion [conv.prom/5] bitfield + // member access expressions are frequently wrapped by an implicit cast to + // `int` if that type can represent all the values of the bitfield. + // + // Consider these examples: + // struct SmallBitfield { unsigned int id : 4; }; + // x.id & 1; (case-1) + // x.id & 1u; (case-2) + // x.id << 1u; (case-3) + // (unsigned)x.id << 1; (case-4) + // + // Due to the promotion rules, we would get a warning for case-1. It's + // debatable how useful this is, but the user at least has a convenient way of + // //fixing// it by adding the `u` unsigned-suffix to the literal as + // demonstrated by case-2. However, this won't work for shift operators like + // the one in case-3. In case of a normal binary operator, both operands + // contribute to the result type. However, the type of the shift expression is + // the promoted type of the left operand. 
One could still suppress this + // superfluous warning by explicitly casting the bitfield member access as + // case-4 demonstrates, but why? The compiler already knew that the value from + // the member access should safely fit into an `int`, why do we have this + // warning in the first place? So, hereby we suppress this specific scenario. + // + // Note that the bitshift operation might invoke unspecified/undefined + // behavior, but that's another topic, this checker is about detecting + // conversion-related defects. + // + // Example AST for `x.id << 1`: + // BinaryOperator 'int' '<<' + // |-ImplicitCastExpr 'int' + // | `-ImplicitCastExpr 'unsigned int' + // | `-MemberExpr 'unsigned int' lvalue bitfield .id + // | `-DeclRefExpr 'SmallBitfield' lvalue ParmVar 'x' 'SmallBitfield' + // `-IntegerLiteral 'int' 1 + const auto ImplicitIntWidenedBitfieldValue = implicitCastExpr( + hasCastKind(CK_IntegralCast), hasType(asString("int")), + has(castExpr(hasCastKind(CK_LValueToRValue), + has(ignoringParens(memberExpr(hasDeclaration( + fieldDecl(isBitField(), unless(hasIntBitwidth()))))))))); + // Casts: // i = 0.5; // void f(int); f(0.5); @@ -100,7 +148,8 @@ IgnoreConversionFromTypes.empty() ? 
castExpr() : castExpr(unless(hasSourceExpression( - IsIgnoredTypeTwoLevelsDeep)))) + IsIgnoredTypeTwoLevelsDeep))), + unless(ImplicitIntWidenedBitfieldValue)) .bind("cast")), this); diff --git a/clang-tools-extra/clang-tidy/cppcoreguidelines/VirtualClassDestructorCheck.cpp b/clang-tools-extra/clang-tidy/cppcoreguidelines/VirtualClassDestructorCheck.cpp --- a/clang-tools-extra/clang-tidy/cppcoreguidelines/VirtualClassDestructorCheck.cpp +++ b/clang-tools-extra/clang-tidy/cppcoreguidelines/VirtualClassDestructorCheck.cpp @@ -46,9 +46,12 @@ this); } -static CharSourceRange +static Optional getVirtualKeywordRange(const CXXDestructorDecl &Destructor, const SourceManager &SM, const LangOptions &LangOpts) { + if (Destructor.getLocation().isMacroID()) + return None; + SourceLocation VirtualBeginLoc = Destructor.getBeginLoc(); SourceLocation VirtualEndLoc = VirtualBeginLoc.getLocWithOffset( Lexer::MeasureTokenLength(VirtualBeginLoc, SM, LangOpts)); @@ -190,8 +193,10 @@ Fix = FixItHint::CreateInsertion(Destructor->getLocation(), "virtual "); } else if (Destructor->getAccess() == AccessSpecifier::AS_protected) { ProtectedAndVirtual = true; - Fix = FixItHint::CreateRemoval(getVirtualKeywordRange( - *Destructor, *Result.SourceManager, Result.Context->getLangOpts())); + if (const auto MaybeRange = + getVirtualKeywordRange(*Destructor, *Result.SourceManager, + Result.Context->getLangOpts())) + Fix = FixItHint::CreateRemoval(*MaybeRange); } } else { Fix = generateUserDeclaredDestructor(*MatchedClassOrStruct, diff --git a/clang-tools-extra/test/clang-tidy/checkers/altera-struct-pack-align-invalid-decl-no-crash.cpp b/clang-tools-extra/test/clang-tidy/checkers/altera-struct-pack-align-invalid-decl-no-crash.cpp new file mode 100644 --- /dev/null +++ b/clang-tools-extra/test/clang-tidy/checkers/altera-struct-pack-align-invalid-decl-no-crash.cpp @@ -0,0 +1,6 @@ +// RUN: %check_clang_tidy -expect-clang-tidy-error %s altera-struct-pack-align %t -- -header-filter=.* + +struct Foo { + 
member; // no-crash +}; +// CHECK-MESSAGES: :[[@LINE-2]]:3: error: C++ requires a type specifier for all declarations [clang-diagnostic-error] diff --git a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines-narrowing-conversions-bitfields.cpp b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines-narrowing-conversions-bitfields.cpp new file mode 100644 --- /dev/null +++ b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines-narrowing-conversions-bitfields.cpp @@ -0,0 +1,203 @@ +// RUN: %check_clang_tidy %s cppcoreguidelines-narrowing-conversions %t \ +// RUN: -std=c++17 -- -target x86_64-unknown-linux + +#define CHAR_BITS 8 +static_assert(sizeof(unsigned int) == 32 / CHAR_BITS); + +template +struct is_same { + static constexpr bool value = false; +}; +template +struct is_same { + static constexpr bool value = true; +}; + +template +static constexpr bool is_same_v = is_same::value; + +struct NoBitfield { + unsigned int id; +}; +struct SmallBitfield { + unsigned int id : 4; +}; + +struct BigBitfield { + unsigned int id : 31; +}; +struct CompleteBitfield { + unsigned int id : 32; +}; + +int example_warning(unsigned x) { + // CHECK-MESSAGES: :[[@LINE+1]]:10: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined [cppcoreguidelines-narrowing-conversions] + return x; +} + +void test_binary_and(SmallBitfield x) { + static_assert(is_same_v); + static_assert(is_same_v); + + x.id & 1; + x.id & 1u; + + 1 & x.id; + 1u & x.id; +} + +void test_binary_or(SmallBitfield x) { + static_assert(is_same_v); + static_assert(is_same_v); + + x.id | 1; + x.id | 1u; + + 1 | x.id; + 1u | x.id; +} + +template +void take(T); + +void test_parameter_passing(NoBitfield x) { + take(x.id); + // CHECK-MESSAGES: :[[@LINE-1]]:14: warning: narrowing conversion from 'unsigned int' to signed type 'char' is implementation-defined + take(x.id); + // CHECK-MESSAGES: :[[@LINE-1]]:15: warning: narrowing conversion from 'unsigned int' to signed 
type 'short' is implementation-defined + take(x.id); + take(x.id); + // CHECK-MESSAGES: :[[@LINE-1]]:13: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined + take(x.id); + take(x.id); +} + +void test_parameter_passing(SmallBitfield x) { + take(x.id); + // CHECK-MESSAGES: :[[@LINE-1]]:14: warning: narrowing conversion from 'unsigned int' to signed type 'char' is implementation-defined + take(x.id); + // CHECK-MESSAGES: :[[@LINE-1]]:15: warning: narrowing conversion from 'unsigned int' to signed type 'short' is implementation-defined + take(x.id); + take(x.id); // no-warning + take(x.id); + take(x.id); +} + +void test_parameter_passing(BigBitfield x) { + take(x.id); + // CHECK-MESSAGES: :[[@LINE-1]]:14: warning: narrowing conversion from 'unsigned int' to signed type 'char' is implementation-defined + take(x.id); + // CHECK-MESSAGES: :[[@LINE-1]]:15: warning: narrowing conversion from 'unsigned int' to signed type 'short' is implementation-defined + take(x.id); + take(x.id); // no-warning + take(x.id); + take(x.id); +} + +void test_parameter_passing(CompleteBitfield x) { + take(x.id); + // CHECK-MESSAGES: :[[@LINE-1]]:14: warning: narrowing conversion from 'unsigned int' to signed type 'char' is implementation-defined + take(x.id); + // CHECK-MESSAGES: :[[@LINE-1]]:15: warning: narrowing conversion from 'unsigned int' to signed type 'short' is implementation-defined + take(x.id); + take(x.id); + // CHECK-MESSAGES: :[[@LINE-1]]:13: warning: narrowing conversion from 'unsigned int' to signed type 'int' is implementation-defined + take(x.id); + take(x.id); +} + +void test(NoBitfield x) { + static_assert(is_same_v); + static_assert(is_same_v); + static_assert(is_same_v); + static_assert(is_same_v); + + x.id << 1; + x.id << 1u; + x.id >> 1; + x.id >> 1u; + x.id + 1; + x.id + 1u; + + 1 << x.id; + 1u << x.id; + 1 >> x.id; + 1u >> x.id; + 1 + x.id; + 1u + x.id; +} + +void test(SmallBitfield x) { + static_assert(is_same_v); + 
static_assert(is_same_v); + + x.id << 1; + x.id << 1u; + x.id >> 1; + x.id >> 1u; + + x.id + 1; + x.id + 1u; + + 1 << x.id; + 1u << x.id; + 1 >> x.id; + 1u >> x.id; + + 1 + x.id; + 1u + x.id; +} + +void test(BigBitfield x) { + static_assert(is_same_v); + static_assert(is_same_v); + + x.id << 1; + x.id << 1u; + x.id >> 1; + x.id >> 1u; + + x.id + 1; + x.id + 1u; + + 1 << x.id; + 1u << x.id; + 1 >> x.id; + 1u >> x.id; + + 1 + x.id; + 1u + x.id; +} + +void test(CompleteBitfield x) { + static_assert(is_same_v); + static_assert(is_same_v); + + x.id << 1; + x.id << 1u; + x.id >> 1; + x.id >> 1u; + + x.id + 1; + x.id + 1u; + + 1 << x.id; + 1u << x.id; + 1 >> x.id; + 1u >> x.id; + + 1 + x.id; + 1u + x.id; +} + +void test_parens(SmallBitfield x) { + static_assert(is_same_v); + static_assert(is_same_v); + x.id << (2); + ((x.id)) << (2); + + static_assert(is_same_v); + static_assert(is_same_v); + (2) << x.id; + (2) << ((x.id)); +} diff --git a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines-virtual-class-destructor.cpp b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines-virtual-class-destructor.cpp --- a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines-virtual-class-destructor.cpp +++ b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines-virtual-class-destructor.cpp @@ -270,3 +270,53 @@ DerivedFromTemplateNonVirtualBaseStruct2Typedef InstantiationWithPublicNonVirtualBaseStruct2; } // namespace Bugzilla_51912 + +namespace macro_tests { +#define CONCAT(x, y) x##y + +// CHECK-MESSAGES: :[[@LINE+2]]:7: warning: destructor of 'FooBar1' is protected and virtual [cppcoreguidelines-virtual-class-destructor] +// CHECK-MESSAGES: :[[@LINE+1]]:7: note: make it protected and non-virtual +class FooBar1 { +protected: + CONCAT(vir, tual) CONCAT(~Foo, Bar1()); // no-fixit +}; + +// CHECK-MESSAGES: :[[@LINE+2]]:7: warning: destructor of 'FooBar2' is protected and virtual [cppcoreguidelines-virtual-class-destructor] +// CHECK-MESSAGES: 
:[[@LINE+1]]:7: note: make it protected and non-virtual +class FooBar2 { +protected: + virtual CONCAT(~Foo, Bar2()); // FIXME: We should have a fixit for this. +}; + +// CHECK-MESSAGES: :[[@LINE+6]]:7: warning: destructor of 'FooBar3' is protected and virtual [cppcoreguidelines-virtual-class-destructor] +// CHECK-MESSAGES: :[[@LINE+5]]:7: note: make it protected and non-virtual +// CHECK-FIXES: class FooBar3 { +// CHECK-FIXES-NEXT: protected: +// CHECK-FIXES-NEXT: ~FooBar3(); +// CHECK-FIXES-NEXT: }; +class FooBar3 { +protected: + CONCAT(vir, tual) ~FooBar3(); +}; + +// CHECK-MESSAGES: :[[@LINE+6]]:7: warning: destructor of 'FooBar4' is protected and virtual [cppcoreguidelines-virtual-class-destructor] +// CHECK-MESSAGES: :[[@LINE+5]]:7: note: make it protected and non-virtual +// CHECK-FIXES: class FooBar4 { +// CHECK-FIXES-NEXT: protected: +// CHECK-FIXES-NEXT: ~CONCAT(Foo, Bar4()); +// CHECK-FIXES-NEXT: }; +class FooBar4 { +protected: + CONCAT(vir, tual) ~CONCAT(Foo, Bar4()); +}; + +// CHECK-MESSAGES: :[[@LINE+3]]:7: warning: destructor of 'FooBar5' is protected and virtual [cppcoreguidelines-virtual-class-destructor] +// CHECK-MESSAGES: :[[@LINE+2]]:7: note: make it protected and non-virtual +#define XMACRO(COLUMN1, COLUMN2) COLUMN1 COLUMN2 +class FooBar5 { +protected: + XMACRO(CONCAT(vir, tual), ~CONCAT(Foo, Bar5());) // no-crash, no-fixit +}; +#undef XMACRO +#undef CONCAT +} // namespace macro_tests diff --git a/clang-tools-extra/test/clang-tidy/infrastructure/export-diagnostics.cpp b/clang-tools-extra/test/clang-tidy/infrastructure/export-diagnostics.cpp --- a/clang-tools-extra/test/clang-tidy/infrastructure/export-diagnostics.cpp +++ b/clang-tools-extra/test/clang-tidy/infrastructure/export-diagnostics.cpp @@ -7,6 +7,11 @@ int a[-1]; int b[0]; +void test(x); +struct Foo { + member; +}; + // CHECK-MESSAGES: -input.cpp:2:1: warning: no previous prototype for function 'ff' [clang-diagnostic-missing-prototypes] // CHECK-MESSAGES: -input.cpp:1:19: note: expanded 
from macro 'X' // CHECK-MESSAGES: {{^}}note: expanded from here{{$}} @@ -14,6 +19,8 @@ // CHECK-MESSAGES: -input.cpp:1:14: note: expanded from macro 'X' // CHECK-MESSAGES: -input.cpp:3:7: error: 'a' declared as an array with a negative size [clang-diagnostic-error] // CHECK-MESSAGES: -input.cpp:4:7: warning: zero size arrays are an extension [clang-diagnostic-zero-length-array] +// CHECK-MESSAGES: -input.cpp:6:11: error: unknown type name 'x' [clang-diagnostic-error] +// CHECK-MESSAGES: -input.cpp:8:3: error: C++ requires a type specifier for all declarations [clang-diagnostic-error] // CHECK-YAML: --- // CHECK-YAML-NEXT: MainSourceFile: '{{.*}}-input.cpp' @@ -71,4 +78,20 @@ // CHECK-YAML-NEXT: Length: 1 // CHECK-YAML-NEXT: Level: Warning // CHECK-YAML-NEXT: BuildDirectory: '{{.*}}' +// CHECK-YAML-NEXT: - DiagnosticName: clang-diagnostic-error +// CHECK-YAML-NEXT: DiagnosticMessage: +// CHECK-YAML-NEXT: Message: 'unknown type name ''x''' +// CHECK-YAML-NEXT: FilePath: '{{.*}}-input.cpp' +// CHECK-YAML-NEXT: FileOffset: 67 +// CHECK-YAML-NEXT: Replacements: [] +// CHECK-YAML-NEXT: Level: Error +// CHECK-YAML-NEXT: BuildDirectory: '{{.*}}' +// CHECK-YAML-NEXT: - DiagnosticName: clang-diagnostic-error +// CHECK-YAML-NEXT: DiagnosticMessage: +// CHECK-YAML-NEXT: Message: 'C++ requires a type specifier for all declarations' +// CHECK-YAML-NEXT: FilePath: '{{.*}}-input.cpp' +// CHECK-YAML-NEXT: FileOffset: 86 +// CHECK-YAML-NEXT: Replacements: [] +// CHECK-YAML-NEXT: Level: Error +// CHECK-YAML-NEXT: BuildDirectory: '{{.*}}' // CHECK-YAML-NEXT: ... 
diff --git a/clang-tools-extra/unittests/clang-tidy/ClangTidyDiagnosticConsumerTest.cpp b/clang-tools-extra/unittests/clang-tidy/ClangTidyDiagnosticConsumerTest.cpp --- a/clang-tools-extra/unittests/clang-tidy/ClangTidyDiagnosticConsumerTest.cpp +++ b/clang-tools-extra/unittests/clang-tidy/ClangTidyDiagnosticConsumerTest.cpp @@ -37,6 +37,33 @@ } }; +class InvalidRangeTestCheck : public ClangTidyCheck { +public: + InvalidRangeTestCheck(StringRef Name, ClangTidyContext *Context) + : ClangTidyCheck(Name, Context) {} + void registerMatchers(ast_matchers::MatchFinder *Finder) override { + Finder->addMatcher(ast_matchers::varDecl().bind("var"), this); + } + void check(const ast_matchers::MatchFinder::MatchResult &Result) override { + const auto *Var = Result.Nodes.getNodeAs("var"); + SourceLocation ValidBeginLoc = Var->getBeginLoc(); + SourceLocation ValidEndLoc = Var->getEndLoc(); + SourceLocation InvalidLoc; + ASSERT_TRUE(ValidBeginLoc.isValid()); + ASSERT_TRUE(ValidEndLoc.isValid()); + ASSERT_TRUE(InvalidLoc.isInvalid()); + + diag(ValidBeginLoc, "valid->valid") + << SourceRange(ValidBeginLoc, ValidEndLoc); + diag(ValidBeginLoc, "valid->invalid") + << SourceRange(ValidBeginLoc, InvalidLoc); + diag(ValidBeginLoc, "invalid->valid") + << SourceRange(InvalidLoc, ValidEndLoc); + diag(ValidBeginLoc, "invalid->invalid") + << SourceRange(InvalidLoc, InvalidLoc); + } +}; + } // namespace TEST(ClangTidyDiagnosticConsumer, SortsErrors) { @@ -66,6 +93,24 @@ EXPECT_EQ(7ul, Errors[0].Message.Ranges[0].Length); } +TEST(ClangTidyDiagnosticConsumer, InvalidSourceLocationRangesIgnored) { + std::vector Errors; + runCheckOnCode("int x;", &Errors); + EXPECT_EQ(4ul, Errors.size()); + + EXPECT_EQ("invalid->invalid", Errors[0].Message.Message); + EXPECT_TRUE(Errors[0].Message.Ranges.empty()); + + EXPECT_EQ("invalid->valid", Errors[1].Message.Message); + EXPECT_TRUE(Errors[1].Message.Ranges.empty()); + + EXPECT_EQ("valid->invalid", Errors[2].Message.Message); + 
EXPECT_TRUE(Errors[2].Message.Ranges.empty()); + + EXPECT_EQ("valid->valid", Errors[3].Message.Message); + EXPECT_EQ(1ul, Errors[3].Message.Ranges.size()); +} + } // namespace test } // namespace tidy } // namespace clang diff --git a/clang/docs/JSONCompilationDatabase.rst b/clang/docs/JSONCompilationDatabase.rst --- a/clang/docs/JSONCompilationDatabase.rst +++ b/clang/docs/JSONCompilationDatabase.rst @@ -36,6 +36,12 @@ For projects on Linux, there is an alternative to intercept compiler calls with a tool called `Bear `_. +`Bazel `_ can export a compilation database via +`this extractor extension +`_. +Bazel is otherwise resistant to Bear and other compiler-intercept +techniques. + Clang's tooling interface supports reading compilation databases; see the :doc:`LibTooling documentation `. libclang and its python bindings also support this (since clang 3.2); see diff --git a/clang/docs/analyzer/checkers.rst b/clang/docs/analyzer/checkers.rst --- a/clang/docs/analyzer/checkers.rst +++ b/clang/docs/analyzer/checkers.rst @@ -2317,8 +2317,15 @@ alpha.security.taint.TaintPropagation (C, C++) """""""""""""""""""""""""""""""""""""""""""""" -Generate taint information used by other checkers. -A data is tainted when it comes from an unreliable source. + +Taint analysis identifies untrusted sources of information (taint sources), rules as to how the untrusted data flows along the execution path (propagation rules), and points of execution where the use of tainted data is risky (taints sinks). +The most notable examples of taint sources are: + + - network originating data + - environment variables + - database originating data + +``GenericTaintChecker`` is the main implementation checker for this rule, and it generates taint information used by other checkers. .. code-block:: c @@ -2344,6 +2351,25 @@ // warn: untrusted data as buffer size } +There are built-in sources, propagations and sinks defined in code inside ``GenericTaintChecker``. 
+These operations are handled even if no external taint configuration is provided. + +Default sources defined by ``GenericTaintChecker``: +``fdopen``, ``fopen``, ``freopen``, ``getch``, ``getchar``, ``getchar_unlocked``, ``gets``, ``scanf``, ``socket``, ``wgetch`` + +Default propagations defined by ``GenericTaintChecker``: +``atoi``, ``atol``, ``atoll``, ``fgetc``, ``fgetln``, ``fgets``, ``fscanf``, ``sscanf``, ``getc``, ``getc_unlocked``, ``getdelim``, ``getline``, ``getw``, ``pread``, ``read``, ``strchr``, ``strrchr``, ``tolower``, ``toupper`` + +Default sinks defined in ``GenericTaintChecker``: +``printf``, ``setproctitle``, ``system``, ``popen``, ``execl``, ``execle``, ``execlp``, ``execv``, ``execvp``, ``execvP``, ``execve``, ``dlopen``, ``memcpy``, ``memmove``, ``strncpy``, ``strndup``, ``malloc``, ``calloc``, ``alloca``, ``memccpy``, ``realloc``, ``bcopy`` + +The user can configure taint sources, sinks, and propagation rules by providing a configuration file via checker option ``alpha.security.taint.TaintPropagation:Config``. + +External taint configuration is in `YAML `_ format. The taint-related options defined in the config file extend but do not override the built-in sources, rules, sinks. +The format of the external taint configuration file is not stable, and could change without any notice even in a non-backward compatible way. + +For a more detailed description of configuration options, please see the :doc:`user-docs/TaintAnalysisConfiguration`. For an example see :ref:`clangsa-taint-configuration-example`. 
+ alpha.unix ^^^^^^^^^^^ diff --git a/clang/docs/analyzer/user-docs.rst b/clang/docs/analyzer/user-docs.rst --- a/clang/docs/analyzer/user-docs.rst +++ b/clang/docs/analyzer/user-docs.rst @@ -7,3 +7,4 @@ :maxdepth: 2 user-docs/CrossTranslationUnit + user-docs/TaintAnalysisConfiguration diff --git a/clang/docs/analyzer/user-docs/TaintAnalysisConfiguration.rst b/clang/docs/analyzer/user-docs/TaintAnalysisConfiguration.rst new file mode 100644 --- /dev/null +++ b/clang/docs/analyzer/user-docs/TaintAnalysisConfiguration.rst @@ -0,0 +1,170 @@ +============================ +Taint Analysis Configuration +============================ + +The Clang Static Analyzer uses taint analysis to detect security-related issues in code. +The backbone of taint analysis in the Clang SA is the `GenericTaintChecker`, which the user can access via the :ref:`alpha-security-taint-TaintPropagation` checker alias and this checker has a default taint-related configuration. +The built-in default settings are defined in code, and they are always in effect once the checker is enabled, either directly or via the alias. +The checker also provides a configuration interface for extending the default settings by providing a configuration file in `YAML `_ format. +This documentation describes the syntax of the configuration file and gives the informal semantics of the configuration options. + +.. contents:: + :local: + +.. _clangsa-taint-configuration-overview: + +Overview +________ + +Taint analysis works by checking for the occurrence of special operations during the symbolic execution of the program. +Taint analysis defines sources, sinks, and propagation rules. It identifies errors by detecting a flow of information that originates from a taint source, reaches a taint sink, and propagates through the program paths via propagation rules. 
+A source, sink, or an operation that propagates taint is mainly domain-specific knowledge, but there are some built-in defaults provided by :ref:`alpha-security-taint-TaintPropagation`. +It is possible to express that a statement sanitizes tainted values by providing a ``Filters`` section in the external configuration (see :ref:`clangsa-taint-configuration-example` and :ref:`clangsa-taint-filter-details`). +There are no default filters defined in the built-in settings. +The checker's documentation also specifies how to provide a custom taint configuration with command-line options. + +.. _clangsa-taint-configuration-example: + +Example configuration file +__________________________ + +.. code-block:: yaml + + # The entries that specify arguments use 0-based indexing when specifying + # input arguments, and -1 is used to denote the return value. + + Filters: + # Filter functions + # Taint is sanitized when tainted variables are passed as arguments to filters. + + # Filter function + # void cleanse_first_arg(int* arg) + # + # Result example: + # int x; // x is tainted + # cleanse_first_arg(&x); // x is not tainted after the call + - Name: cleanse_first_arg + Args: [0] + + Propagations: + # Source functions + # The omission of SrcArgs key indicates unconditional taint propagation, + # which is conceptually what a source does. + + # Source function + # size_t fread(void *ptr, size_t size, size_t nmemb, FILE * stream) + # + # Result example: + # FILE* f = fopen("file.txt"); + # char buf[1024]; + # size_t read = fread(buf, sizeof(buf[0]), sizeof(buf)/sizeof(buf[0]), f); + # // both read and buf are tainted + - Name: fread + DstArgs: [0, -1] + + # Propagation functions + # The presence of SrcArgs key indicates conditional taint propagation, + # which is conceptually what a propagator does. 
+ + # Propagation function + # char *dirname(char *path) + # + # Result example: + # char* path = read_path(); + # char* dir = dirname(path); + # // dir is tainted if path was tainted + - Name: dirname + SrcArgs: [0] + DstArgs: [-1] + + Sinks: + # Sink functions + # If taint reaches any of the arguments specified, a warning is emitted. + + # Sink function + # int system(const char* command) + # + # Result example: + # const char* command = read_command(); + # system(command); // emit diagnostic if command is tainted + - Name: system + Args: [0] + +In the example file above, the entries under the `Propagations` key implement the conceptual sources and propagations, and sinks have their dedicated `Sinks` key. +The user can define operations (function calls) where the tainted values should be cleansed by listing entries under the `Filters` key. +Filters model the sanitization of values done by the programmer, and providing these is key to avoiding false-positive findings. + +Configuration file syntax and semantics +_______________________________________ + +The configuration file should have valid `YAML `_ syntax. + +The configuration file can have the following top-level keys: + - Filters + - Propagations + - Sinks + +Under the `Filters` key, the user can specify a list of operations that remove taint (see :ref:`clangsa-taint-filter-details` for details). + +Under the `Propagations` key, the user can specify a list of operations that introduce and propagate taint (see :ref:`clangsa-taint-propagation-details` for details). +The user can mark taint propagations with a `SrcArgs` key in a `Propagations` entry, while sources have none. +The lack of the `SrcArgs` key means unconditional propagation, which is how sources are modeled. +The semantics of propagations are such that if any of the source arguments are tainted (specified by indexes in `SrcArgs`) then all of the destination arguments (specified by indexes in `DstArgs`) also become tainted. 
+ +Under the `Sinks` key, the user can specify a list of operations where the checker should emit a bug report if tainted data reaches it (see :ref:`clangsa-taint-sink-details` for details). + +.. _clangsa-taint-filter-details: + +Filter syntax and semantics +########################### + +An entry under `Filters` is a `YAML `_ object with the following mandatory keys: + - `Name` is a string that specifies the name of a function. + Encountering this function during symbolic execution, the checker will sanitize taint from the memory region referred to by the given arguments or return a sanitized value. + - `Args` is a list of numbers in the range of ``[-1..int_max]``. + It indicates the indexes of arguments in the function call. + The number ``-1`` signifies the return value; other numbers identify call arguments. + The values of these arguments are considered clean after the function call. + +The following keys are optional: + - `Scope` is a string that specifies the prefix of the function's name in its fully qualified name. This option restricts the set of matching function calls. It can encode not only namespaces but struct/class names as well to match member functions. + + .. _clangsa-taint-propagation-details: + +Propagation syntax and semantics +################################ + +An entry under `Propagations` is a `YAML `_ object with the following mandatory keys: + - `Name` is a string that specifies the name of a function. + Encountering this function during symbolic execution, the checker will propagate taint from one or more arguments to other arguments and possibly the return value. + It helps model the taint-related behavior of functions that are not analyzable otherwise. + +The following keys are optional: + - `Scope` is a string that specifies the prefix of the function's name in its fully qualified name. This option restricts the set of matching function calls. 
+ - `SrcArgs` is a list of numbers in the range of ``[0..int_max]`` that indicates the indexes of arguments in the function call. + Taint-propagation considers the values of these arguments during the evaluation of the function call. + If any `SrcArgs` arguments are tainted, the checker will consider all `DstArgs` arguments tainted after the call. + - `DstArgs` is a list of numbers in the range of ``[-1..int_max]`` that indicates the indexes of arguments in the function call. + The number ``-1`` specifies the return value of the function. + If any `SrcArgs` arguments are tainted, the checker will consider all `DstArgs` arguments tainted after the call. + - `VariadicType` is a string that can be one of ``None``, ``Dst``, ``Src``. + It is used in conjunction with `VariadicIndex` to specify arguments inside a variadic argument. + The value of ``Src`` will treat every call site argument that is part of a variadic argument list as a source concerning propagation rules (as if specified by `SrcArgs`). + The value of ``Dst`` will treat every call site argument that is part of a variadic argument list as a destination concerning propagation rules. + The value of ``None`` will not consider the arguments that are part of a variadic argument list (this option is redundant but can be used to temporarily switch off handling of a particular variadic argument option without removing the VariadicIndex key). + - `VariadicIndex` is a number in the range of ``[0..int_max]``. It indicates the starting index of the variadic argument in the signature of the function. + + +.. _clangsa-taint-sink-details: + +Sink syntax and semantics +######################### + +An entry under `Sinks` is a `YAML `_ object with the following mandatory keys: + - `Name` is a string that specifies the name of a function. + Encountering this function during symbolic execution, the checker will emit a taint-related diagnostic if any of the arguments specified with `Args` are tainted at the call site. 
+ - `Args` is a list of numbers in the range of ``[0..int_max]`` that indicates the indexes of arguments in the function call. + The checker reports an error if any of the specified arguments are tainted. + +The following keys are optional: + - `Scope` is a string that specifies the prefix of the function's name in its fully qualified name. This option restricts the set of matching function calls. diff --git a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp --- a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp +++ b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp @@ -225,7 +225,7 @@ bool success = true; // Enable NEON by default. Features.push_back("+neon"); - llvm::StringRef WaMArch = ""; + llvm::StringRef WaMArch; if (ForAS) for (const auto *A : Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) @@ -235,7 +235,7 @@ // Call getAArch64ArchFeaturesFromMarch only if "-Wa,-march=" or // "-Xassembler -march" is detected. Otherwise it may return false // and causes Clang to error out. - if (WaMArch.size()) + if (!WaMArch.empty()) success = getAArch64ArchFeaturesFromMarch(D, WaMArch, Args, Features); else if ((A = Args.getLastArg(options::OPT_march_EQ))) success = getAArch64ArchFeaturesFromMarch(D, A->getValue(), Args, Features); @@ -259,8 +259,15 @@ success = getAArch64MicroArchFeaturesFromMcpu( D, getAArch64TargetCPU(Args, Triple, A), Args, Features); - if (!success) - D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args); + if (!success) { + auto Diag = D.Diag(diag::err_drv_clang_unsupported); + // If "-Wa,-march=" is used, 'WaMArch' will contain the argument's value, + // while 'A' is uninitialized. Only dereference 'A' in the other case. 
+ if (!WaMArch.empty()) + Diag << "-march=" + WaMArch.str(); + else + Diag << A->getAsString(Args); + } if (Args.getLastArg(options::OPT_mgeneral_regs_only)) { Features.push_back("-fp-armv8"); diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp --- a/clang/lib/Driver/ToolChains/Linux.cpp +++ b/clang/lib/Driver/ToolChains/Linux.cpp @@ -421,6 +421,9 @@ (Triple.getEnvironment() == llvm::Triple::MuslEABIHF || tools::arm::getARMFloatABI(*this, Args) == tools::arm::FloatABI::Hard)) ArchName += "hf"; + if (Arch == llvm::Triple::ppc && + Triple.getSubArch() == llvm::Triple::PPCSubArch_spe) + ArchName = "powerpc-sf"; return "/lib/ld-musl-" + ArchName + ".so.1"; } diff --git a/clang/lib/Sema/AnalysisBasedWarnings.cpp b/clang/lib/Sema/AnalysisBasedWarnings.cpp --- a/clang/lib/Sema/AnalysisBasedWarnings.cpp +++ b/clang/lib/Sema/AnalysisBasedWarnings.cpp @@ -464,7 +464,7 @@ // No more CFGElements in the block? if (ri == re) { const Stmt *Term = B.getTerminatorStmt(); - if (Term && isa(Term)) { + if (Term && (isa(Term) || isa(Term))) { HasAbnormalEdge = true; continue; } diff --git a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp --- a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp +++ b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp @@ -591,16 +591,24 @@ // - Main source file: run both path-sensitive and non-path-sensitive checks. // - Header files: run non-path-sensitive checks only. // - System headers: don't run any checks. - SourceManager &SM = Ctx->getSourceManager(); - const Stmt *Body = D->getBody(); - SourceLocation SL = Body ? 
Body->getBeginLoc() : D->getLocation(); - SL = SM.getExpansionLoc(SL); - - if (!Opts->AnalyzeAll && !Mgr->isInCodeFile(SL)) { - if (SL.isInvalid() || SM.isInSystemHeader(SL)) - return AM_None; + if (Opts->AnalyzeAll) + return Mode; + + const SourceManager &SM = Ctx->getSourceManager(); + + const SourceLocation Loc = [&SM](Decl *D) -> SourceLocation { + const Stmt *Body = D->getBody(); + SourceLocation SL = Body ? Body->getBeginLoc() : D->getLocation(); + return SM.getExpansionLoc(SL); + }(D); + + // Ignore system headers. + if (Loc.isInvalid() || SM.isInSystemHeader(Loc)) + return AM_None; + + // Disable path sensitive analysis in user-headers. + if (!Mgr->isInCodeFile(Loc)) return Mode & ~AM_Path; - } return Mode; } diff --git a/clang/test/Driver/aarch64-target-as-march.s b/clang/test/Driver/aarch64-target-as-march.s --- a/clang/test/Driver/aarch64-target-as-march.s +++ b/clang/test/Driver/aarch64-target-as-march.s @@ -44,3 +44,12 @@ // TARGET-FEATURE-3-NOT: "-target-feature" "+v8.4a" // TARGET-FEATURE-4: "-target-feature" "+v8.4a" // TARGET-FEATURE-4-NOT: "-target-feature" "+v8.3a" + +// Invalid -march settings +// RUN: %clang --target=aarch64-linux-gnueabi -### -c -Wa,-march=all %s 2>&1 | \ +// RUN: FileCheck --check-prefix=INVALID-ARCH-1 %s +// RUN: %clang --target=aarch64-linux-gnueabi -### -c -Wa,-march=foobar %s 2>&1 | \ +// RUN: FileCheck --check-prefix=INVALID-ARCH-2 %s + +// INVALID-ARCH-1: error: the clang compiler does not support '-march=all' +// INVALID-ARCH-2: error: the clang compiler does not support '-march=foobar' diff --git a/clang/test/Driver/linux-ld.c b/clang/test/Driver/linux-ld.c --- a/clang/test/Driver/linux-ld.c +++ b/clang/test/Driver/linux-ld.c @@ -1718,6 +1718,9 @@ // RUN: --target=powerpc64-pc-linux-musl \ // RUN: | FileCheck --check-prefix=CHECK-MUSL-PPC64 %s // RUN: %clang %s -### -o %t.o 2>&1 \ +// RUN: --target=powerpcspe-pc-linux-musl \ +// RUN: | FileCheck --check-prefix=CHECK-MUSL-PPCSPE %s +// RUN: %clang %s -### -o %t.o 2>&1 
\ // RUN: --target=thumb-pc-linux-musleabi \ // RUN: | FileCheck --check-prefix=CHECK-MUSL-ARM %s // RUN: %clang %s -### -o %t.o 2>&1 \ @@ -1767,6 +1770,7 @@ // CHECK-MUSL-MIPS64EL: "-dynamic-linker" "/lib/ld-musl-mips64el.so.1" // CHECK-MUSL-PPC: "-dynamic-linker" "/lib/ld-musl-powerpc.so.1" // CHECK-MUSL-PPC64: "-dynamic-linker" "/lib/ld-musl-powerpc64.so.1" +// CHECK-MUSL-PPCSPE: "-dynamic-linker" "/lib/ld-musl-powerpc-sf.so.1" // CHECK-MUSL-ARM: "-dynamic-linker" "/lib/ld-musl-arm.so.1" // CHECK-MUSL-ARMHF: "-dynamic-linker" "/lib/ld-musl-armhf.so.1" // CHECK-MUSL-ARMEB: "-dynamic-linker" "/lib/ld-musl-armeb.so.1" diff --git a/clang/test/SemaObjC/return-noreturn.m b/clang/test/SemaObjC/return-noreturn.m new file mode 100644 --- /dev/null +++ b/clang/test/SemaObjC/return-noreturn.m @@ -0,0 +1,13 @@ +// RUN: %clang_cc1 %s -fsyntax-only -fobjc-exceptions -verify -Wreturn-type -Wmissing-noreturn + +id f(id self) { +} // expected-warning {{non-void function does not return a value}} + +id f2(id self) { + @try { + @throw (id)0; + } @catch (id) { + } + return (id)0; +} + diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h --- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h @@ -25,7 +25,9 @@ public: constexpr StackStore() = default; - using Id = uptr; + using Id = u32; // Enough for 2^32 * sizeof(uptr) bytes of traces. + static_assert(u64(kBlockCount) * kBlockSizeFrames == 1ull << (sizeof(Id) * 8), + ""); Id Store(const StackTrace &trace); StackTrace Load(Id id) const; @@ -42,7 +44,19 @@ return frame_idx % kBlockSizeFrames; } - uptr *Alloc(uptr count); + static constexpr uptr IdToOffset(Id id) { + CHECK_NE(id, 0); + return id - 1; // Avoid zero as id. + } + + static constexpr uptr OffsetToId(Id id) { + // This maps UINT32_MAX to 0 and it will be retrieved as an empty stack. 
+ // But this is not a problem as we will not be able to store anything after + // that anyway. + return id + 1; // Avoid zero as id. + } + + uptr *Alloc(uptr count, uptr *idx); // Total number of allocated frames. atomic_uintptr_t total_frames_ = {}; @@ -53,9 +67,9 @@ StaticSpinMutex mtx_; // Protects alloc of new blocks. uptr *Create(); - uptr *Get() const; public: + uptr *Get() const; uptr *GetOrCreate(); void TestOnlyUnmap(); }; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp --- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp @@ -37,16 +37,23 @@ if (!trace.size && !trace.tag) return 0; StackTraceHeader h(trace); - uptr *stack_trace = Alloc(h.size + 1); + uptr idx; + uptr *stack_trace = Alloc(h.size + 1, &idx); *stack_trace = h.ToUptr(); internal_memcpy(stack_trace + 1, trace.trace, h.size * sizeof(uptr)); - return reinterpret_cast(stack_trace); + return OffsetToId(idx); } StackTrace StackStore::Load(Id id) const { if (!id) return {}; - const uptr *stack_trace = reinterpret_cast(id); + uptr idx = IdToOffset(id); + uptr block_idx = GetBlockIdx(idx); + CHECK_LT(block_idx, ARRAY_SIZE(blocks_)); + const uptr *stack_trace = blocks_[block_idx].Get(); + if (!stack_trace) + return {}; + stack_trace += GetInBlockIdx(idx); StackTraceHeader h(*stack_trace); return StackTrace(stack_trace + 1, h.size, h.tag); } @@ -57,7 +64,7 @@ sizeof(*this); } -uptr *StackStore::Alloc(uptr count) { +uptr *StackStore::Alloc(uptr count, uptr *idx) { for (;;) { // Optimisic lock-free allocation, essentially try to bump the // total_frames_. @@ -66,6 +73,7 @@ if (LIKELY(block_idx == GetBlockIdx(start + count - 1))) { // Fits into the a single block. 
CHECK_LT(block_idx, ARRAY_SIZE(blocks_)); + *idx = start; return blocks_[block_idx].GetOrCreate() + GetInBlockIdx(start); } diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp --- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp +++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp @@ -23,6 +23,7 @@ using hash_type = u64; hash_type stack_hash; u32 link; + StackStore::Id store_id; static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20; @@ -53,11 +54,6 @@ typedef StackDepotBase StackDepot; static StackDepot theDepot; -// Keep rarely accessed stack traces out of frequently access nodes to improve -// caching efficiency. -static TwoLevelMap - storeIds; // Keep mutable data out of frequently access nodes to improve caching // efficiency. static TwoLevelMap> and \p sourceBox a fir.box. +/// \p destBox Fortran descriptor may be modified if destBox is an allocatable +/// according to Fortran allocatable assignment rules, otherwise it is not +/// modified. +void genAssign(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value destBox, mlir::Value sourceBox); + +} // namespace fir::runtime +#endif // FORTRAN_OPTIMIZER_BUILDER_RUNTIME_ASSIGN_H diff --git a/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h b/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h new file mode 100644 --- /dev/null +++ b/flang/include/flang/Optimizer/Builder/Runtime/RTBuilder.h @@ -0,0 +1,416 @@ +//===-- RTBuilder.h ---------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file defines some C++17 template classes that are used to convert the +/// signatures of plain old C functions into a model that can be used to +/// generate MLIR calls to those functions. This can be used to autogenerate +/// tables at compiler compile-time to call runtime support code. +/// +//===----------------------------------------------------------------------===// + +#ifndef FORTRAN_OPTIMIZER_BUILDER_RUNTIME_RTBUILDER_H +#define FORTRAN_OPTIMIZER_BUILDER_RUNTIME_RTBUILDER_H + +#include "flang/Common/Fortran.h" +#include "flang/Common/uint128.h" +#include "flang/Optimizer/Builder/FIRBuilder.h" +#include "flang/Optimizer/Dialect/FIRType.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/MLIRContext.h" +#include "llvm/ADT/SmallVector.h" +#include + +// Incomplete type indicating C99 complex ABI in interfaces. Beware, _Complex +// and std::complex are layout compatible, but not compatible in all ABI call +// interfaces (e.g. X86 32 bits). _Complex is not standard C++, so do not use +// it here. +struct c_float_complex_t; +struct c_double_complex_t; + +namespace Fortran::runtime { +class Descriptor; +} + +namespace fir::runtime { + +using TypeBuilderFunc = mlir::Type (*)(mlir::MLIRContext *); +using FuncTypeBuilderFunc = mlir::FunctionType (*)(mlir::MLIRContext *); + +//===----------------------------------------------------------------------===// +// Type builder models +//===----------------------------------------------------------------------===// + +// TODO: all usages of sizeof in this file assume build == host == target. +// This will need to be re-visited for cross compilation. + +/// Return a function that returns the type signature model for the type `T` +/// when provided an MLIRContext*. 
This allows one to translate C(++) function +/// signatures from runtime header files to MLIR signatures into a static table +/// at compile-time. +/// +/// For example, when `T` is `int`, return a function that returns the MLIR +/// standard type `i32` when `sizeof(int)` is 4. +template +static constexpr TypeBuilderFunc getModel(); +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return mlir::IntegerType::get(context, 8 * sizeof(short int)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return mlir::IntegerType::get(context, 8 * sizeof(int)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + TypeBuilderFunc f{getModel()}; + return fir::ReferenceType::get(f(context)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return fir::ReferenceType::get(mlir::IntegerType::get(context, 8)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return getModel(); +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return fir::ReferenceType::get(mlir::IntegerType::get(context, 16)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return fir::ReferenceType::get(mlir::IntegerType::get(context, 32)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return mlir::IntegerType::get(context, 8 * sizeof(signed char)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return fir::LLVMPointerType::get(context, + mlir::IntegerType::get(context, 8)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext 
*context) -> mlir::Type { + return fir::ReferenceType::get( + fir::LLVMPointerType::get(context, mlir::IntegerType::get(context, 8))); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return mlir::IntegerType::get(context, 8 * sizeof(long)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + TypeBuilderFunc f{getModel()}; + return fir::ReferenceType::get(f(context)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return getModel(); +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return mlir::IntegerType::get(context, 8 * sizeof(long long)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return mlir::IntegerType::get(context, + 8 * sizeof(Fortran::common::int128_t)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + TypeBuilderFunc f{getModel()}; + return fir::ReferenceType::get(f(context)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return getModel(); +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return mlir::IntegerType::get(context, 8 * sizeof(unsigned long)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return mlir::IntegerType::get(context, 8 * sizeof(unsigned long long)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return mlir::FloatType::getF64(context); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + TypeBuilderFunc f{getModel()}; + return fir::ReferenceType::get(f(context)); + }; +} +template <> 
+constexpr TypeBuilderFunc getModel() { + return getModel(); +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return mlir::FloatType::getF32(context); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + TypeBuilderFunc f{getModel()}; + return fir::ReferenceType::get(f(context)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return getModel(); +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return mlir::IntegerType::get(context, 1); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + TypeBuilderFunc f{getModel()}; + return fir::ReferenceType::get(f(context)); + }; +} +template <> +constexpr TypeBuilderFunc getModel &>() { + return [](mlir::MLIRContext *context) -> mlir::Type { + auto ty = mlir::ComplexType::get(mlir::FloatType::getF32(context)); + return fir::ReferenceType::get(ty); + }; +} +template <> +constexpr TypeBuilderFunc getModel &>() { + return [](mlir::MLIRContext *context) -> mlir::Type { + auto ty = mlir::ComplexType::get(mlir::FloatType::getF64(context)); + return fir::ReferenceType::get(ty); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return fir::ComplexType::get(context, sizeof(float)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return fir::ComplexType::get(context, sizeof(double)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return fir::BoxType::get(mlir::NoneType::get(context)); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return fir::ReferenceType::get( + 
fir::BoxType::get(mlir::NoneType::get(context))); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return getModel(); +} +template <> +constexpr TypeBuilderFunc getModel() { + return getModel(); +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return mlir::IntegerType::get(context, + sizeof(Fortran::common::TypeCategory) * 8); + }; +} +template <> +constexpr TypeBuilderFunc getModel() { + return [](mlir::MLIRContext *context) -> mlir::Type { + return mlir::NoneType::get(context); + }; +} + +template +struct RuntimeTableKey; +template +struct RuntimeTableKey { + static constexpr FuncTypeBuilderFunc getTypeModel() { + return [](mlir::MLIRContext *ctxt) { + TypeBuilderFunc ret = getModel(); + std::array args = {getModel()...}; + mlir::Type retTy = ret(ctxt); + llvm::SmallVector argTys; + for (auto f : args) + argTys.push_back(f(ctxt)); + return mlir::FunctionType::get(ctxt, argTys, {retTy}); + }; + } +}; + +//===----------------------------------------------------------------------===// +// Runtime table building (constexpr folded) +//===----------------------------------------------------------------------===// + +template +using RuntimeIdentifier = std::integer_sequence; + +namespace details { +template +static constexpr std::integer_sequence +concat(std::integer_sequence, std::integer_sequence) { + return {}; +} +template +static constexpr auto concat(std::integer_sequence, + std::integer_sequence, Cs...) 
{ + return concat(std::integer_sequence{}, Cs{}...); +} +template +static constexpr std::integer_sequence concat(std::integer_sequence) { + return {}; +} +template +static constexpr auto filterZero(std::integer_sequence) { + if constexpr (a != 0) { + return std::integer_sequence{}; + } else { + return std::integer_sequence{}; + } +} +template +static constexpr auto filter(std::integer_sequence) { + if constexpr (sizeof...(b) > 0) { + return details::concat(filterZero(std::integer_sequence{})...); + } else { + return std::integer_sequence{}; + } +} +} // namespace details + +template +struct RuntimeTableEntry; +template +struct RuntimeTableEntry, RuntimeIdentifier> { + static constexpr FuncTypeBuilderFunc getTypeModel() { + return RuntimeTableKey::getTypeModel(); + } + static constexpr const char name[sizeof...(Cs) + 1] = {Cs..., '\0'}; +}; + +#undef E +#define E(L, I) (I < sizeof(L) / sizeof(*L) ? L[I] : 0) +#define QuoteKey(X) #X +#define ExpandAndQuoteKey(X) QuoteKey(X) +#define MacroExpandKey(X) \ + E(X, 0), E(X, 1), E(X, 2), E(X, 3), E(X, 4), E(X, 5), E(X, 6), E(X, 7), \ + E(X, 8), E(X, 9), E(X, 10), E(X, 11), E(X, 12), E(X, 13), E(X, 14), \ + E(X, 15), E(X, 16), E(X, 17), E(X, 18), E(X, 19), E(X, 20), E(X, 21), \ + E(X, 22), E(X, 23), E(X, 24), E(X, 25), E(X, 26), E(X, 27), E(X, 28), \ + E(X, 29), E(X, 30), E(X, 31), E(X, 32), E(X, 33), E(X, 34), E(X, 35), \ + E(X, 36), E(X, 37), E(X, 38), E(X, 39), E(X, 40), E(X, 41), E(X, 42), \ + E(X, 43), E(X, 44), E(X, 45), E(X, 46), E(X, 47), E(X, 48), E(X, 49) +#define ExpandKey(X) MacroExpandKey(QuoteKey(X)) +#define FullSeq(X) std::integer_sequence +#define AsSequence(X) decltype(fir::runtime::details::filter(FullSeq(X){})) +#define mkKey(X) \ + fir::runtime::RuntimeTableEntry, \ + AsSequence(X)> +#define mkRTKey(X) mkKey(RTNAME(X)) + +/// Get (or generate) the MLIR FuncOp for a given runtime function. 
Its template +/// argument is intended to be of the form: +/// Clients should add "using namespace Fortran::runtime" +/// in order to use this function. +template +static mlir::FuncOp getRuntimeFunc(mlir::Location loc, + fir::FirOpBuilder &builder) { + auto name = RuntimeEntry::name; + auto func = builder.getNamedFunction(name); + if (func) + return func; + auto funTy = RuntimeEntry::getTypeModel()(builder.getContext()); + func = builder.createFunction(loc, name, funTy); + func->setAttr("fir.runtime", builder.getUnitAttr()); + return func; +} + +namespace helper { +template +void createArguments(llvm::SmallVectorImpl &result, + fir::FirOpBuilder &builder, mlir::Location loc, + mlir::FunctionType fTy, A arg) { + result.emplace_back(builder.createConvert(loc, fTy.getInput(N), arg)); +} + +template +void createArguments(llvm::SmallVectorImpl &result, + fir::FirOpBuilder &builder, mlir::Location loc, + mlir::FunctionType fTy, A arg, As... args) { + result.emplace_back(builder.createConvert(loc, fTy.getInput(N), arg)); + createArguments(result, builder, loc, fTy, args...); +} +} // namespace helper + +/// Create a SmallVector of arguments for a runtime call. +template +llvm::SmallVector +createArguments(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::FunctionType fTy, As... args) { + llvm::SmallVector result; + helper::createArguments<0>(result, builder, loc, fTy, args...); + return result; +} + +} // namespace fir::runtime + +#endif // FORTRAN_OPTIMIZER_BUILDER_RUNTIME_RTBUILDER_H diff --git a/flang/include/flang/Optimizer/Builder/Runtime/Transformational.h b/flang/include/flang/Optimizer/Builder/Runtime/Transformational.h new file mode 100644 --- /dev/null +++ b/flang/include/flang/Optimizer/Builder/Runtime/Transformational.h @@ -0,0 +1,63 @@ +//===-- Transformational.h --------------------------------------*- C++ -*-===// +// Generate transformational intrinsic runtime API calls. 
+// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef FORTRAN_OPTIMIZER_BUILDER_RUNTIME_TRANSFORMATIONAL_H +#define FORTRAN_OPTIMIZER_BUILDER_RUNTIME_TRANSFORMATIONAL_H + +#include "mlir/Dialect/StandardOps/IR/Ops.h" + +namespace fir { +class ExtendedValue; +class FirOpBuilder; +} // namespace fir + +namespace fir::runtime { + +void genCshift(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value resultBox, mlir::Value arrayBox, + mlir::Value shiftBox, mlir::Value dimBox); + +void genCshiftVector(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value resultBox, mlir::Value arrayBox, + mlir::Value shiftBox); + +void genEoshift(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value resultBox, mlir::Value arrayBox, + mlir::Value shiftBox, mlir::Value boundBox, mlir::Value dimBox); + +void genEoshiftVector(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value resultBox, mlir::Value arrayBox, + mlir::Value shiftBox, mlir::Value boundBox); + +void genMatmul(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value matrixABox, mlir::Value matrixBBox, + mlir::Value resultBox); + +void genPack(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value resultBox, mlir::Value arrayBox, mlir::Value maskBox, + mlir::Value vectorBox); + +void genReshape(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value resultBox, mlir::Value sourceBox, + mlir::Value shapeBox, mlir::Value padBox, mlir::Value orderBox); + +void genSpread(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value resultBox, mlir::Value sourceBox, mlir::Value dim, + mlir::Value ncopies); + +void genTranspose(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value resultBox, mlir::Value sourceBox); + +void 
genUnpack(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value resultBox, mlir::Value vectorBox, + mlir::Value maskBox, mlir::Value fieldBox); + +} // namespace fir::runtime + +#endif // FORTRAN_OPTIMIZER_BUILDER_RUNTIME_TRANSFORMATIONAL_H diff --git a/flang/include/flang/Optimizer/Transforms/Passes.h b/flang/include/flang/Optimizer/Transforms/Passes.h --- a/flang/include/flang/Optimizer/Transforms/Passes.h +++ b/flang/include/flang/Optimizer/Transforms/Passes.h @@ -31,6 +31,7 @@ std::unique_ptr createFirToCfgPass(); std::unique_ptr createCharacterConversionPass(); std::unique_ptr createExternalNameConversionPass(); +std::unique_ptr createMemDataFlowOptPass(); std::unique_ptr createPromoteToAffinePass(); /// Support for inlining on FIR. diff --git a/flang/include/flang/Optimizer/Transforms/Passes.td b/flang/include/flang/Optimizer/Transforms/Passes.td --- a/flang/include/flang/Optimizer/Transforms/Passes.td +++ b/flang/include/flang/Optimizer/Transforms/Passes.td @@ -120,4 +120,17 @@ let constructor = "::fir::createExternalNameConversionPass()"; } +def MemRefDataFlowOpt : FunctionPass<"fir-memref-dataflow-opt"> { + let summary = + "Perform store/load forwarding and potentially removing dead stores."; + let description = [{ + This pass performs store to load forwarding to eliminate memory accesses and + potentially the entire allocation if all the accesses are forwarded. 
+ }]; + let constructor = "::fir::createMemDataFlowOptPass()"; + let dependentDialects = [ + "fir::FIROpsDialect", "mlir::StandardOpsDialect" + ]; +} + #endif // FLANG_OPTIMIZER_TRANSFORMS_PASSES diff --git a/flang/lib/Evaluate/check-expression.cpp b/flang/lib/Evaluate/check-expression.cpp --- a/flang/lib/Evaluate/check-expression.cpp +++ b/flang/lib/Evaluate/check-expression.cpp @@ -661,10 +661,15 @@ return true; } else if (semantics::IsPointer(ultimate)) { return false; + } else if (semantics::IsAllocatable(ultimate)) { + // TODO: this could be merged with the case below if + // details->IsAssumedShape() did not return true for allocatables. Current + // ArraySpec building in semantics does not allow making a difference + // between some_assumed_shape(:) and some_allocatable(:). Both + // isDeferredShape() and isAssumedShape() are true in each case. + return true; } else if (const auto *details{ ultimate.detailsIf()}) { - // N.B. ALLOCATABLEs are deferred shape, not assumed, and - // are obviously contiguous. return !details->IsAssumedShape() && !details->IsAssumedRank(); } else if (auto assoc{Base::operator()(ultimate)}) { return assoc; diff --git a/flang/lib/Optimizer/Builder/CMakeLists.txt b/flang/lib/Optimizer/Builder/CMakeLists.txt --- a/flang/lib/Optimizer/Builder/CMakeLists.txt +++ b/flang/lib/Optimizer/Builder/CMakeLists.txt @@ -7,6 +7,8 @@ DoLoopHelper.cpp FIRBuilder.cpp MutableBox.cpp + Runtime/Assign.cpp + Runtime/Transformational.cpp DEPENDS FIRDialect diff --git a/flang/lib/Optimizer/Builder/Runtime/Assign.cpp b/flang/lib/Optimizer/Builder/Runtime/Assign.cpp new file mode 100644 --- /dev/null +++ b/flang/lib/Optimizer/Builder/Runtime/Assign.cpp @@ -0,0 +1,26 @@ +//===-- Assign.cpp -- generate assignment runtime API calls ---------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "flang/Optimizer/Builder/Runtime/Assign.h" +#include "flang/Optimizer/Builder/FIRBuilder.h" +#include "flang/Optimizer/Builder/Runtime/RTBuilder.h" +#include "flang/Runtime/assign.h" + +using namespace Fortran::runtime; + +void fir::runtime::genAssign(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value destBox, mlir::Value sourceBox) { + auto func = fir::runtime::getRuntimeFunc(loc, builder); + auto fTy = func.getType(); + auto sourceFile = fir::factory::locationToFilename(builder, loc); + auto sourceLine = + fir::factory::locationToLineNo(builder, loc, fTy.getInput(3)); + auto args = fir::runtime::createArguments(builder, loc, fTy, destBox, + sourceBox, sourceFile, sourceLine); + builder.create(loc, func, args); +} diff --git a/flang/lib/Optimizer/Builder/Runtime/Transformational.cpp b/flang/lib/Optimizer/Builder/Runtime/Transformational.cpp new file mode 100644 --- /dev/null +++ b/flang/lib/Optimizer/Builder/Runtime/Transformational.cpp @@ -0,0 +1,176 @@ +//===-- Transformational.cpp ------------------------------------*- C++ -*-===// +// Generate transformational intrinsic runtime API calls. +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "flang/Optimizer/Builder/Runtime/Transformational.h" +#include "flang/Lower/Todo.h" +#include "flang/Optimizer/Builder/BoxValue.h" +#include "flang/Optimizer/Builder/Character.h" +#include "flang/Optimizer/Builder/FIRBuilder.h" +#include "flang/Optimizer/Builder/Runtime/RTBuilder.h" +#include "flang/Runtime/matmul.h" +#include "flang/Runtime/transformational.h" +#include "mlir/Dialect/StandardOps/IR/Ops.h" + +using namespace Fortran::runtime; + +/// Generate call to Cshift intrinsic +void fir::runtime::genCshift(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value resultBox, mlir::Value arrayBox, + mlir::Value shiftBox, mlir::Value dimBox) { + auto cshiftFunc = fir::runtime::getRuntimeFunc(loc, builder); + auto fTy = cshiftFunc.getType(); + auto sourceFile = fir::factory::locationToFilename(builder, loc); + auto sourceLine = + fir::factory::locationToLineNo(builder, loc, fTy.getInput(5)); + auto args = + fir::runtime::createArguments(builder, loc, fTy, resultBox, arrayBox, + shiftBox, dimBox, sourceFile, sourceLine); + builder.create(loc, cshiftFunc, args); +} + +/// Generate call to the vector version of the Cshift intrinsic +void fir::runtime::genCshiftVector(fir::FirOpBuilder &builder, + mlir::Location loc, mlir::Value resultBox, + mlir::Value arrayBox, mlir::Value shiftBox) { + auto cshiftFunc = + fir::runtime::getRuntimeFunc(loc, builder); + auto fTy = cshiftFunc.getType(); + + auto sourceFile = fir::factory::locationToFilename(builder, loc); + auto sourceLine = + fir::factory::locationToLineNo(builder, loc, fTy.getInput(4)); + auto args = fir::runtime::createArguments( + builder, loc, fTy, resultBox, arrayBox, shiftBox, sourceFile, sourceLine); + builder.create(loc, cshiftFunc, args); +} + +/// Generate call to Eoshift intrinsic +void fir::runtime::genEoshift(fir::FirOpBuilder &builder, 
mlir::Location loc, + mlir::Value resultBox, mlir::Value arrayBox, + mlir::Value shiftBox, mlir::Value boundBox, + mlir::Value dimBox) { + auto eoshiftFunc = + fir::runtime::getRuntimeFunc(loc, builder); + auto fTy = eoshiftFunc.getType(); + auto sourceFile = fir::factory::locationToFilename(builder, loc); + auto sourceLine = + fir::factory::locationToLineNo(builder, loc, fTy.getInput(6)); + auto args = fir::runtime::createArguments(builder, loc, fTy, resultBox, + arrayBox, shiftBox, boundBox, + dimBox, sourceFile, sourceLine); + builder.create(loc, eoshiftFunc, args); +} + +/// Generate call to the vector version of the Eoshift intrinsic +void fir::runtime::genEoshiftVector(fir::FirOpBuilder &builder, + mlir::Location loc, mlir::Value resultBox, + mlir::Value arrayBox, mlir::Value shiftBox, + mlir::Value boundBox) { + auto eoshiftFunc = + fir::runtime::getRuntimeFunc(loc, builder); + auto fTy = eoshiftFunc.getType(); + + auto sourceFile = fir::factory::locationToFilename(builder, loc); + auto sourceLine = + fir::factory::locationToLineNo(builder, loc, fTy.getInput(5)); + + auto args = + fir::runtime::createArguments(builder, loc, fTy, resultBox, arrayBox, + shiftBox, boundBox, sourceFile, sourceLine); + builder.create(loc, eoshiftFunc, args); +} + +/// Generate call to Matmul intrinsic runtime routine. +void fir::runtime::genMatmul(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value resultBox, mlir::Value matrixABox, + mlir::Value matrixBBox) { + auto func = fir::runtime::getRuntimeFunc(loc, builder); + auto fTy = func.getType(); + auto sourceFile = fir::factory::locationToFilename(builder, loc); + auto sourceLine = + fir::factory::locationToLineNo(builder, loc, fTy.getInput(4)); + auto args = + fir::runtime::createArguments(builder, loc, fTy, resultBox, matrixABox, + matrixBBox, sourceFile, sourceLine); + builder.create(loc, func, args); +} + +/// Generate call to Pack intrinsic runtime routine. 
+void fir::runtime::genPack(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value resultBox, mlir::Value arrayBox, + mlir::Value maskBox, mlir::Value vectorBox) { + auto packFunc = fir::runtime::getRuntimeFunc(loc, builder); + auto fTy = packFunc.getType(); + auto sourceFile = fir::factory::locationToFilename(builder, loc); + auto sourceLine = + fir::factory::locationToLineNo(builder, loc, fTy.getInput(5)); + auto args = + fir::runtime::createArguments(builder, loc, fTy, resultBox, arrayBox, + maskBox, vectorBox, sourceFile, sourceLine); + builder.create(loc, packFunc, args); +} + +/// Generate call to Reshape intrinsic runtime routine. +void fir::runtime::genReshape(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value resultBox, mlir::Value sourceBox, + mlir::Value shapeBox, mlir::Value padBox, + mlir::Value orderBox) { + auto func = fir::runtime::getRuntimeFunc(loc, builder); + auto fTy = func.getType(); + auto sourceFile = fir::factory::locationToFilename(builder, loc); + auto sourceLine = + fir::factory::locationToLineNo(builder, loc, fTy.getInput(6)); + auto args = fir::runtime::createArguments(builder, loc, fTy, resultBox, + sourceBox, shapeBox, padBox, + orderBox, sourceFile, sourceLine); + builder.create(loc, func, args); +} + +/// Generate call to Spread intrinsic runtime routine. +void fir::runtime::genSpread(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value resultBox, mlir::Value sourceBox, + mlir::Value dim, mlir::Value ncopies) { + auto func = fir::runtime::getRuntimeFunc(loc, builder); + auto fTy = func.getType(); + auto sourceFile = fir::factory::locationToFilename(builder, loc); + auto sourceLine = + fir::factory::locationToLineNo(builder, loc, fTy.getInput(5)); + auto args = + fir::runtime::createArguments(builder, loc, fTy, resultBox, sourceBox, + dim, ncopies, sourceFile, sourceLine); + builder.create(loc, func, args); +} + +/// Generate call to Transpose intrinsic runtime routine. 
+void fir::runtime::genTranspose(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value resultBox, mlir::Value sourceBox) { + auto func = fir::runtime::getRuntimeFunc(loc, builder); + auto fTy = func.getType(); + auto sourceFile = fir::factory::locationToFilename(builder, loc); + auto sourceLine = + fir::factory::locationToLineNo(builder, loc, fTy.getInput(3)); + auto args = fir::runtime::createArguments(builder, loc, fTy, resultBox, + sourceBox, sourceFile, sourceLine); + builder.create(loc, func, args); +} + +/// Generate call to Unpack intrinsic runtime routine. +void fir::runtime::genUnpack(fir::FirOpBuilder &builder, mlir::Location loc, + mlir::Value resultBox, mlir::Value vectorBox, + mlir::Value maskBox, mlir::Value fieldBox) { + auto unpackFunc = fir::runtime::getRuntimeFunc(loc, builder); + auto fTy = unpackFunc.getType(); + auto sourceFile = fir::factory::locationToFilename(builder, loc); + auto sourceLine = + fir::factory::locationToLineNo(builder, loc, fTy.getInput(5)); + auto args = + fir::runtime::createArguments(builder, loc, fTy, resultBox, vectorBox, + maskBox, fieldBox, sourceFile, sourceLine); + builder.create(loc, unpackFunc, args); +} diff --git a/flang/lib/Optimizer/Transforms/CMakeLists.txt b/flang/lib/Optimizer/Transforms/CMakeLists.txt --- a/flang/lib/Optimizer/Transforms/CMakeLists.txt +++ b/flang/lib/Optimizer/Transforms/CMakeLists.txt @@ -5,6 +5,7 @@ CharacterConversion.cpp Inliner.cpp ExternalNameConversion.cpp + MemRefDataFlowOpt.cpp RewriteLoop.cpp DEPENDS diff --git a/flang/lib/Optimizer/Transforms/MemRefDataFlowOpt.cpp b/flang/lib/Optimizer/Transforms/MemRefDataFlowOpt.cpp new file mode 100644 --- /dev/null +++ b/flang/lib/Optimizer/Transforms/MemRefDataFlowOpt.cpp @@ -0,0 +1,130 @@ +//===- MemRefDataFlowOpt.cpp - Memory DataFlow Optimization pass ----------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "PassDetail.h" +#include "flang/Optimizer/Dialect/FIRDialect.h" +#include "flang/Optimizer/Dialect/FIROps.h" +#include "flang/Optimizer/Dialect/FIRType.h" +#include "flang/Optimizer/Transforms/Passes.h" +#include "mlir/Dialect/StandardOps/IR/Ops.h" +#include "mlir/IR/Dominance.h" +#include "mlir/IR/Operation.h" +#include "mlir/Transforms/Passes.h" +#include "llvm/ADT/Optional.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/SmallVector.h" + +#define DEBUG_TYPE "fir-memref-dataflow-opt" + +namespace { + +template +static std::vector getSpecificUsers(mlir::Value v) { + std::vector ops; + for (mlir::Operation *user : v.getUsers()) + if (auto op = dyn_cast(user)) + ops.push_back(op); + return ops; +} + +/// This is based on MLIR's MemRefDataFlowOpt which is specialized on AffineRead +/// and AffineWrite interface +template +class LoadStoreForwarding { +public: + LoadStoreForwarding(mlir::DominanceInfo *di) : domInfo(di) {} + + // FIXME: This algorithm has a bug. It ignores escaping references between a + // store and a load. 
+ llvm::Optional findStoreToForward(ReadOp loadOp, + std::vector &&storeOps) { + llvm::SmallVector candidateSet; + + for (auto storeOp : storeOps) + if (domInfo->dominates(storeOp, loadOp)) + candidateSet.push_back(storeOp); + + if (candidateSet.empty()) + return {}; + + llvm::Optional nearestStore; + for (auto candidate : candidateSet) { + auto nearerThan = [&](WriteOp otherStore) { + if (candidate == otherStore) + return false; + bool rv = domInfo->properlyDominates(candidate, otherStore); + if (rv) { + LLVM_DEBUG(llvm::dbgs() + << "candidate " << candidate << " is not the nearest to " + << loadOp << " because " << otherStore << " is closer\n"); + } + return rv; + }; + if (!llvm::any_of(candidateSet, nearerThan)) { + nearestStore = mlir::cast(candidate); + break; + } + } + if (!nearestStore) { + LLVM_DEBUG( + llvm::dbgs() + << "load " << loadOp << " has " << candidateSet.size() + << " store candidates, but this algorithm can't find a best.\n"); + } + return nearestStore; + } + + llvm::Optional findReadForWrite(WriteOp storeOp, + std::vector &&loadOps) { + for (auto &loadOp : loadOps) { + if (domInfo->dominates(storeOp, loadOp)) + return loadOp; + } + return {}; + } + +private: + mlir::DominanceInfo *domInfo; +}; + +class MemDataFlowOpt : public fir::MemRefDataFlowOptBase { +public: + void runOnFunction() override { + mlir::FuncOp f = getFunction(); + + auto *domInfo = &getAnalysis(); + LoadStoreForwarding lsf(domInfo); + f.walk([&](fir::LoadOp loadOp) { + auto maybeStore = lsf.findStoreToForward( + loadOp, getSpecificUsers(loadOp.memref())); + if (maybeStore) { + auto storeOp = maybeStore.getValue(); + LLVM_DEBUG(llvm::dbgs() << "FlangMemDataFlowOpt: In " << f.getName() + << " erasing load " << loadOp + << " with value from " << storeOp << '\n'); + loadOp.getResult().replaceAllUsesWith(storeOp.value()); + loadOp.erase(); + } + }); + f.walk([&](fir::AllocaOp alloca) { + for (auto &storeOp : getSpecificUsers(alloca.getResult())) { + if (!lsf.findReadForWrite( + 
storeOp, getSpecificUsers(storeOp.memref()))) { + LLVM_DEBUG(llvm::dbgs() << "FlangMemDataFlowOpt: In " << f.getName() + << " erasing store " << storeOp << '\n'); + storeOp.erase(); + } + } + }); + } +}; +} // namespace + +std::unique_ptr fir::createMemDataFlowOptPass() { + return std::make_unique(); +} diff --git a/flang/test/Evaluate/folding09.f90 b/flang/test/Evaluate/folding09.f90 --- a/flang/test/Evaluate/folding09.f90 +++ b/flang/test/Evaluate/folding09.f90 @@ -9,9 +9,10 @@ real, pointer, contiguous :: f(:) f => hosted end function - subroutine test(arr1, arr2, arr3, mat) + subroutine test(arr1, arr2, arr3, mat, alloc) real, intent(in) :: arr1(:), arr2(10), mat(10, 10) real, intent(in), contiguous :: arr3(:) + real, allocatable :: alloc(:) real :: scalar logical, parameter :: test_isc01 = is_contiguous(0) logical, parameter :: test_isc02 = is_contiguous(scalar) @@ -24,5 +25,6 @@ logical, parameter :: test_isc09 = is_contiguous(arr2(1:10:1)) logical, parameter :: test_isc10 = is_contiguous(arr3) logical, parameter :: test_isc11 = is_contiguous(f()) + logical, parameter :: test_isc12 = is_contiguous(alloc) end subroutine end module diff --git a/flang/test/Fir/memref-data-flow.fir b/flang/test/Fir/memref-data-flow.fir new file mode 100644 --- /dev/null +++ b/flang/test/Fir/memref-data-flow.fir @@ -0,0 +1,79 @@ +// RUN: fir-opt --split-input-file --fir-memref-dataflow-opt %s | FileCheck %s + +// Test that all load-store chains are removed + +func @load_store_chain_removal(%arg0: !fir.ref>, %arg1: !fir.ref>, %arg2: !fir.ref>) { + %c1_i64 = arith.constant 1 : i64 + %c60 = arith.constant 60 : index + %c0 = arith.constant 0 : index + %c1 = arith.constant 1 : index + %0 = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFf1dcEi"} + %1 = fir.alloca !fir.array<60xi32> {bindc_name = "t1", uniq_name = "_QFf1dcEt1"} + br ^bb1(%c1, %c60 : index, index) +^bb1(%2: index, %3: index): // 2 preds: ^bb0, ^bb2 + %4 = arith.cmpi sgt, %3, %c0 : index + cond_br %4, ^bb2, ^bb3 +^bb2: 
// pred: ^bb1 + %5 = fir.convert %2 : (index) -> i32 + fir.store %5 to %0 : !fir.ref + %6 = fir.load %0 : !fir.ref + %7 = fir.convert %6 : (i32) -> i64 + %8 = arith.subi %7, %c1_i64 : i64 + %9 = fir.coordinate_of %arg0, %8 : (!fir.ref>, i64) -> !fir.ref + %10 = fir.load %9 : !fir.ref + %11 = arith.addi %10, %10 : i32 + %12 = fir.coordinate_of %1, %8 : (!fir.ref>, i64) -> !fir.ref + fir.store %11 to %12 : !fir.ref + %13 = arith.addi %2, %c1 : index + %14 = arith.subi %3, %c1 : index + br ^bb1(%13, %14 : index, index) +^bb3: // pred: ^bb1 + %15 = fir.convert %2 : (index) -> i32 + fir.store %15 to %0 : !fir.ref + br ^bb4(%c1, %c60 : index, index) +^bb4(%16: index, %17: index): // 2 preds: ^bb3, ^bb5 + %18 = arith.cmpi sgt, %17, %c0 : index + cond_br %18, ^bb5, ^bb6 +^bb5: // pred: ^bb4 + %19 = fir.convert %16 : (index) -> i32 + fir.store %19 to %0 : !fir.ref + %20 = fir.load %0 : !fir.ref + %21 = fir.convert %20 : (i32) -> i64 + %22 = arith.subi %21, %c1_i64 : i64 + %23 = fir.coordinate_of %1, %22 : (!fir.ref>, i64) -> !fir.ref + %24 = fir.load %23 : !fir.ref + %25 = fir.coordinate_of %arg1, %22 : (!fir.ref>, i64) -> !fir.ref + %26 = fir.load %25 : !fir.ref + %27 = arith.muli %24, %26 : i32 + %28 = fir.coordinate_of %arg2, %22 : (!fir.ref>, i64) -> !fir.ref + fir.store %27 to %28 : !fir.ref + %29 = arith.addi %16, %c1 : index + %30 = arith.subi %17, %c1 : index + br ^bb4(%29, %30 : index, index) +^bb6: // pred: ^bb4 + %31 = fir.convert %16 : (index) -> i32 + fir.store %31 to %0 : !fir.ref + return +} + +// CHECK-LABEL: func @load_store_chain_removal +// CHECK-LABEL: ^bb1 +// CHECK-LABEL: ^bb2: +// Make sure the previous fir.store/fir.load pair have been elimated and we +// preserve the last pair of fir.load/fir.store. +// CHECK-COUNT-1: %{{.*}} = fir.load %{{.*}} : !fir.ref +// CHECK-COUNT-1: fir.store %{{.*}} to %{{.*}} : !fir.ref +// CHECK-LABEL: ^bb3: +// Make sure the fir.store has been removed. 
+// CHECK-NOT: fir.store %{{.*}} to %{{.*}} : !fir.ref +// CHECK-LABEL: ^bb5: +// CHECK: %{{.*}} = fir.convert %{{.*}} : (index) -> i32 +// Check that the fir.store/fir.load pair has been removed between the convert. +// CHECK-NOT: fir.store %{{.*}} to %{{.*}} : !fir.ref +// CHECK-NOT: %{{.*}} = fir.load %{{.*}} : !fir.ref +// CHECK: %{{.*}} = fir.convert %{{.*}} : (i32) -> i64 +// CHECK: %{{.*}} = fir.load %{{.*}} : !fir.ref +// CHECK: %{{.*}} = fir.load %{{.*}} : !fir.ref +// CHECK: fir.store %{{.*}} to %{{.*}} : !fir.ref +// CHECK-LABEL: ^bb6: +// CHECK-NOT: fir.store %{{.*}} to %{{.*}} : !fir.ref diff --git a/flang/unittests/Optimizer/Builder/Runtime/AssignTest.cpp b/flang/unittests/Optimizer/Builder/Runtime/AssignTest.cpp new file mode 100644 --- /dev/null +++ b/flang/unittests/Optimizer/Builder/Runtime/AssignTest.cpp @@ -0,0 +1,21 @@ +//===- AssignTest.cpp -- assignment runtime builder unit tests ------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "flang/Optimizer/Builder/Runtime/Assign.h" +#include "RuntimeCallTestBase.h" +#include "gtest/gtest.h" + +TEST_F(RuntimeCallTest, genDerivedTypeAssign) { + auto loc = firBuilder->getUnknownLoc(); + mlir::Type seqTy = + fir::SequenceType::get(fir::SequenceType::Shape(1, 10), i32Ty); + mlir::Value source = firBuilder->create(loc, seqTy); + mlir::Value dest = firBuilder->create(loc, seqTy); + fir::runtime::genAssign(*firBuilder, loc, dest, source); + checkCallOpFromResultBox(dest, "_FortranAAssign", 2); +} diff --git a/flang/unittests/Optimizer/Builder/Runtime/RuntimeCallTestBase.h b/flang/unittests/Optimizer/Builder/Runtime/RuntimeCallTestBase.h new file mode 100644 --- /dev/null +++ b/flang/unittests/Optimizer/Builder/Runtime/RuntimeCallTestBase.h @@ -0,0 +1,118 @@ +//===- RuntimeCallTestBase.cpp -- Base for runtime call generation tests --===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef FORTRAN_OPTIMIZER_BUILDER_RUNTIME_RUNTIMECALLTESTBASE_H +#define FORTRAN_OPTIMIZER_BUILDER_RUNTIME_RUNTIMECALLTESTBASE_H + +#include "gtest/gtest.h" +#include "flang/Optimizer/Builder/FIRBuilder.h" +#include "flang/Optimizer/Support/InitFIR.h" +#include "flang/Optimizer/Support/KindMapping.h" + +struct RuntimeCallTest : public testing::Test { +public: + void SetUp() override { + mlir::OpBuilder builder(&context); + auto loc = builder.getUnknownLoc(); + + // Set up a Module with a dummy function operation inside. + // Set the insertion point in the function entry block. 
+ mlir::ModuleOp mod = builder.create(loc); + mlir::FuncOp func = mlir::FuncOp::create(loc, "runtime_unit_tests_func", + builder.getFunctionType(llvm::None, llvm::None)); + auto *entryBlock = func.addEntryBlock(); + mod.push_back(mod); + builder.setInsertionPointToStart(entryBlock); + + fir::support::loadDialects(context); + kindMap = std::make_unique(&context); + firBuilder = std::make_unique(mod, *kindMap); + + i8Ty = firBuilder->getI8Type(); + i16Ty = firBuilder->getIntegerType(16); + i32Ty = firBuilder->getI32Type(); + i64Ty = firBuilder->getI64Type(); + i128Ty = firBuilder->getIntegerType(128); + + f32Ty = firBuilder->getF32Type(); + f64Ty = firBuilder->getF64Type(); + f80Ty = firBuilder->getF80Type(); + f128Ty = firBuilder->getF128Type(); + + c4Ty = fir::ComplexType::get(firBuilder->getContext(), 4); + c8Ty = fir::ComplexType::get(firBuilder->getContext(), 8); + c10Ty = fir::ComplexType::get(firBuilder->getContext(), 10); + c16Ty = fir::ComplexType::get(firBuilder->getContext(), 16); + } + + mlir::MLIRContext context; + std::unique_ptr kindMap; + std::unique_ptr firBuilder; + + // Commonly used types + mlir::Type i8Ty; + mlir::Type i16Ty; + mlir::Type i32Ty; + mlir::Type i64Ty; + mlir::Type i128Ty; + mlir::Type f32Ty; + mlir::Type f64Ty; + mlir::Type f80Ty; + mlir::Type f128Ty; + mlir::Type c4Ty; + mlir::Type c8Ty; + mlir::Type c10Ty; + mlir::Type c16Ty; +}; + +/// Check that the \p op is a `fir::CallOp` operation and its name matches +/// \p fctName and the number of arguments is equal to \p nbArgs. +/// Most runtime calls have two additional location arguments added. These are +/// added in this check when \p addLocArgs is true. 
+static void checkCallOp(mlir::Operation *op, llvm::StringRef fctName, + unsigned nbArgs, bool addLocArgs = true) { + EXPECT_TRUE(mlir::isa(*op)); + auto callOp = mlir::dyn_cast(*op); + EXPECT_TRUE(callOp.callee().hasValue()); + mlir::SymbolRefAttr callee = *callOp.callee(); + EXPECT_EQ(fctName, callee.getRootReference().getValue()); + // sourceFile and sourceLine are added arguments. + if (addLocArgs) + nbArgs += 2; + EXPECT_EQ(nbArgs, callOp.args().size()); +} + +/// Check the call operation from the \p result value. In some cases the +/// value is directly used in the call and sometimes there is an indirection +/// through a `fir.convert` operation. Once the `fir.call` operation is +/// retrieved the check is made by `checkCallOp`. +/// +/// Directly used in `fir.call`. +/// ``` +/// %result = arith.constant 1 : i32 +/// %0 = fir.call @foo(%result) : (i32) -> i1 +/// ``` +/// +/// Value used in `fir.call` through `fir.convert` indirection. +/// ``` +/// %result = arith.constant 1 : i32 +/// %arg = fir.convert %result : (i32) -> i16 +/// %0 = fir.call @foo(%arg) : (i16) -> i1 +/// ``` +static void checkCallOpFromResultBox(mlir::Value result, + llvm::StringRef fctName, unsigned nbArgs, bool addLocArgs = true) { + EXPECT_TRUE(result.hasOneUse()); + const auto &u = result.user_begin(); + if (mlir::isa(*u)) + return checkCallOp(*u, fctName, nbArgs, addLocArgs); + auto convOp = mlir::dyn_cast(*u); + EXPECT_NE(nullptr, convOp); + checkCallOpFromResultBox(convOp.getResult(), fctName, nbArgs, addLocArgs); +} + +#endif // FORTRAN_OPTIMIZER_BUILDER_RUNTIME_RUNTIMECALLTESTBASE_H diff --git a/flang/unittests/Optimizer/Builder/Runtime/TransformationalTest.cpp b/flang/unittests/Optimizer/Builder/Runtime/TransformationalTest.cpp new file mode 100644 --- /dev/null +++ b/flang/unittests/Optimizer/Builder/Runtime/TransformationalTest.cpp @@ -0,0 +1,129 @@ +//===- TransformationalTest.cpp -- Transformational intrinsic generation --===// +// +// Part of the LLVM Project, under the 
Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "flang/Optimizer/Builder/Runtime/Transformational.h" +#include "RuntimeCallTestBase.h" +#include "gtest/gtest.h" + +TEST_F(RuntimeCallTest, genCshiftTest) { + auto loc = firBuilder->getUnknownLoc(); + mlir::Type seqTy = + fir::SequenceType::get(fir::SequenceType::Shape(1, 10), i32Ty); + mlir::Value result = firBuilder->create(loc, seqTy); + mlir::Value array = firBuilder->create(loc, seqTy); + mlir::Value shift = firBuilder->create(loc, seqTy); + mlir::Value dim = firBuilder->create(loc, seqTy); + fir::runtime::genCshift(*firBuilder, loc, result, array, shift, dim); + checkCallOpFromResultBox(result, "_FortranACshift", 4); +} + +TEST_F(RuntimeCallTest, genCshiftVectorTest) { + auto loc = firBuilder->getUnknownLoc(); + mlir::Type seqTy = + fir::SequenceType::get(fir::SequenceType::Shape(1, 10), i32Ty); + mlir::Value result = firBuilder->create(loc, seqTy); + mlir::Value array = firBuilder->create(loc, seqTy); + mlir::Value shift = firBuilder->create(loc, seqTy); + fir::runtime::genCshiftVector(*firBuilder, loc, result, array, shift); + checkCallOpFromResultBox(result, "_FortranACshiftVector", 3); +} + +TEST_F(RuntimeCallTest, genEoshiftTest) { + auto loc = firBuilder->getUnknownLoc(); + mlir::Type seqTy = + fir::SequenceType::get(fir::SequenceType::Shape(1, 10), i32Ty); + mlir::Value result = firBuilder->create(loc, seqTy); + mlir::Value array = firBuilder->create(loc, seqTy); + mlir::Value shift = firBuilder->create(loc, seqTy); + mlir::Value bound = firBuilder->create(loc, seqTy); + mlir::Value dim = firBuilder->create(loc, seqTy); + fir::runtime::genEoshift(*firBuilder, loc, result, array, shift, bound, dim); + checkCallOpFromResultBox(result, "_FortranAEoshift", 5); +} + +TEST_F(RuntimeCallTest, 
genEoshiftVectorTest) { + auto loc = firBuilder->getUnknownLoc(); + mlir::Type seqTy = + fir::SequenceType::get(fir::SequenceType::Shape(1, 10), i32Ty); + mlir::Value result = firBuilder->create(loc, seqTy); + mlir::Value array = firBuilder->create(loc, seqTy); + mlir::Value shift = firBuilder->create(loc, seqTy); + mlir::Value bound = firBuilder->create(loc, seqTy); + fir::runtime::genEoshiftVector(*firBuilder, loc, result, array, shift, bound); + checkCallOpFromResultBox(result, "_FortranAEoshiftVector", 4); +} + +TEST_F(RuntimeCallTest, genMatmulTest) { + auto loc = firBuilder->getUnknownLoc(); + mlir::Type seqTy = + fir::SequenceType::get(fir::SequenceType::Shape(1, 10), i32Ty); + mlir::Value result = firBuilder->create(loc, seqTy); + mlir::Value matrixA = firBuilder->create(loc, seqTy); + mlir::Value matrixB = firBuilder->create(loc, seqTy); + fir::runtime::genMatmul(*firBuilder, loc, matrixA, matrixB, result); + checkCallOpFromResultBox(result, "_FortranAMatmul", 3); +} + +TEST_F(RuntimeCallTest, genPackTest) { + auto loc = firBuilder->getUnknownLoc(); + mlir::Type seqTy = + fir::SequenceType::get(fir::SequenceType::Shape(1, 10), i32Ty); + mlir::Value result = firBuilder->create(loc, seqTy); + mlir::Value array = firBuilder->create(loc, seqTy); + mlir::Value mask = firBuilder->create(loc, seqTy); + mlir::Value vector = firBuilder->create(loc, seqTy); + fir::runtime::genPack(*firBuilder, loc, result, array, mask, vector); + checkCallOpFromResultBox(result, "_FortranAPack", 4); +} + +TEST_F(RuntimeCallTest, genReshapeTest) { + auto loc = firBuilder->getUnknownLoc(); + mlir::Type seqTy = + fir::SequenceType::get(fir::SequenceType::Shape(1, 10), i32Ty); + mlir::Value result = firBuilder->create(loc, seqTy); + mlir::Value source = firBuilder->create(loc, seqTy); + mlir::Value shape = firBuilder->create(loc, seqTy); + mlir::Value pad = firBuilder->create(loc, seqTy); + mlir::Value order = firBuilder->create(loc, seqTy); + fir::runtime::genReshape(*firBuilder, loc, 
result, source, shape, pad, order); + checkCallOpFromResultBox(result, "_FortranAReshape", 5); +} + +TEST_F(RuntimeCallTest, genSpreadTest) { + auto loc = firBuilder->getUnknownLoc(); + mlir::Type seqTy = + fir::SequenceType::get(fir::SequenceType::Shape(1, 10), i32Ty); + mlir::Value result = firBuilder->create(loc, seqTy); + mlir::Value source = firBuilder->create(loc, seqTy); + mlir::Value dim = firBuilder->create(loc, seqTy); + mlir::Value ncopies = firBuilder->create(loc, seqTy); + fir::runtime::genSpread(*firBuilder, loc, result, source, dim, ncopies); + checkCallOpFromResultBox(result, "_FortranASpread", 4); +} + +TEST_F(RuntimeCallTest, genTransposeTest) { + auto loc = firBuilder->getUnknownLoc(); + mlir::Type seqTy = + fir::SequenceType::get(fir::SequenceType::Shape(1, 10), i32Ty); + mlir::Value result = firBuilder->create(loc, seqTy); + mlir::Value source = firBuilder->create(loc, seqTy); + fir::runtime::genTranspose(*firBuilder, loc, result, source); + checkCallOpFromResultBox(result, "_FortranATranspose", 2); +} + +TEST_F(RuntimeCallTest, genUnpack) { + auto loc = firBuilder->getUnknownLoc(); + mlir::Type seqTy = + fir::SequenceType::get(fir::SequenceType::Shape(1, 10), i32Ty); + mlir::Value result = firBuilder->create(loc, seqTy); + mlir::Value vector = firBuilder->create(loc, seqTy); + mlir::Value mask = firBuilder->create(loc, seqTy); + mlir::Value field = firBuilder->create(loc, seqTy); + fir::runtime::genUnpack(*firBuilder, loc, result, vector, mask, field); + checkCallOpFromResultBox(result, "_FortranAUnpack", 4); +} diff --git a/flang/unittests/Optimizer/CMakeLists.txt b/flang/unittests/Optimizer/CMakeLists.txt --- a/flang/unittests/Optimizer/CMakeLists.txt +++ b/flang/unittests/Optimizer/CMakeLists.txt @@ -13,6 +13,8 @@ Builder/ComplexTest.cpp Builder/DoLoopHelperTest.cpp Builder/FIRBuilderTest.cpp + Builder/Runtime/AssignTest.cpp + Builder/Runtime/TransformationalTest.cpp FIRContextTest.cpp InternalNamesTest.cpp KindMappingTest.cpp diff --git 
a/libcxx/CREDITS.TXT b/libcxx/CREDITS.TXT --- a/libcxx/CREDITS.TXT +++ b/libcxx/CREDITS.TXT @@ -149,6 +149,10 @@ E: klaas at klaasgaaf dot nl D: Minor bug fix. +N: Mark de Wever +E: koraq at xs4all dot nl +D: Format library support. + N: Zhang Xiongpang E: zhangxiongpang@gmail.com D: Minor patches and bug fixes. diff --git a/libcxx/benchmarks/format.bench.cpp b/libcxx/benchmarks/format.bench.cpp new file mode 100644 --- /dev/null +++ b/libcxx/benchmarks/format.bench.cpp @@ -0,0 +1,36 @@ +//===----------------------------------------------------------------------===// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include + +#include + +#include "benchmark/benchmark.h" +#include "make_string.h" + +#define CSTR(S) MAKE_CSTRING(CharT, S) + +template +static void BM_format_string(benchmark::State& state) { + size_t size = state.range(0); + std::basic_string str(size, CharT('*')); + + while (state.KeepRunningBatch(str.size())) + benchmark::DoNotOptimize(std::format(CSTR("{}"), str)); + + state.SetBytesProcessed(state.iterations() * size * sizeof(CharT)); +} +BENCHMARK_TEMPLATE(BM_format_string, char)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_string, wchar_t)->RangeMultiplier(2)->Range(1, 1 << 20); + +int main(int argc, char** argv) { + benchmark::Initialize(&argc, argv); + if (benchmark::ReportUnrecognizedArguments(argc, argv)) + return 1; + + benchmark::RunSpecifiedBenchmarks(); +} diff --git a/libcxx/benchmarks/format_to.bench.cpp b/libcxx/benchmarks/format_to.bench.cpp new file mode 100644 --- /dev/null +++ b/libcxx/benchmarks/format_to.bench.cpp @@ -0,0 +1,107 @@ +//===----------------------------------------------------------------------===// +// Part of the LLVM Project, under the Apache 
License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "benchmark/benchmark.h" +#include "make_string.h" + +#define CSTR(S) MAKE_CSTRING(CharT, S) + +/*** Back inserter ***/ + +template +static void BM_format_to_string_back_inserter(benchmark::State& state) { + using CharT = typename Container::value_type; + size_t size = state.range(0); + auto str = std::basic_string(size, CharT('*')); + + for (auto _ : state) { + Container output; + benchmark::DoNotOptimize(std::format_to(std::back_inserter(output), CSTR("{}"), str)); + } + state.SetBytesProcessed(state.iterations() * size * sizeof(CharT)); +} + +/*** Begin ***/ + +template +static void BM_format_to_string_begin(benchmark::State& state) { + using CharT = typename Container::value_type; + size_t size = state.range(0); + auto str = std::basic_string(size, CharT('*')); + + Container output(size, CharT('-')); + for (auto _ : state) + benchmark::DoNotOptimize(std::format_to(std::begin(output), CSTR("{}"), str)); + + state.SetBytesProcessed(state.iterations() * size * sizeof(CharT)); +} + +/*** Pointer ***/ + +template +static void BM_format_to_string_span(benchmark::State& state) { + size_t size = state.range(0); + auto str = std::basic_string(size, CharT('*')); + + auto buffer = std::basic_string(size, CharT('-')); + std::span output{buffer}; + for (auto _ : state) + benchmark::DoNotOptimize(std::format_to(std::begin(output), CSTR("{}"), str)); + + state.SetBytesProcessed(state.iterations() * size * sizeof(CharT)); +} + +template +static void BM_format_to_string_pointer(benchmark::State& state) { + size_t size = state.range(0); + auto str = std::basic_string(size, CharT('*')); + + auto buffer = std::basic_string(size, CharT('-')); + 
CharT* output = buffer.data(); + for (auto _ : state) + benchmark::DoNotOptimize(std::format_to(output, CSTR("{}"), str)); + + state.SetBytesProcessed(state.iterations() * size * sizeof(CharT)); +} + +/*** Main ***/ + +BENCHMARK_TEMPLATE(BM_format_to_string_back_inserter, std::string)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_string_back_inserter, std::vector)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_string_back_inserter, std::list)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_string_begin, std::string)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_string_begin, std::vector)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_string_begin, std::list)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_string_span, char)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_string_pointer, char)->RangeMultiplier(2)->Range(1, 1 << 20); + +BENCHMARK_TEMPLATE(BM_format_to_string_back_inserter, std::wstring)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_string_back_inserter, std::vector)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_string_back_inserter, std::list)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_string_begin, std::wstring)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_string_begin, std::vector)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_string_begin, std::list)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_string_span, wchar_t)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_string_pointer, wchar_t)->RangeMultiplier(2)->Range(1, 1 << 20); + +int main(int argc, char** argv) { + benchmark::Initialize(&argc, argv); + if (benchmark::ReportUnrecognizedArguments(argc, argv)) + return 1; + + 
benchmark::RunSpecifiedBenchmarks(); +} diff --git a/libcxx/benchmarks/format_to_n.bench.cpp b/libcxx/benchmarks/format_to_n.bench.cpp new file mode 100644 --- /dev/null +++ b/libcxx/benchmarks/format_to_n.bench.cpp @@ -0,0 +1,107 @@ +//===----------------------------------------------------------------------===// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "benchmark/benchmark.h" +#include "make_string.h" + +#define CSTR(S) MAKE_CSTRING(CharT, S) + +/*** Back inserter ***/ + +template +static void BM_format_to_n_string_back_inserter(benchmark::State& state) { + using CharT = typename Container::value_type; + size_t size = state.range(0); + auto str = std::basic_string(2 * size, CharT('*')); + + for (auto _ : state) { + Container output; + benchmark::DoNotOptimize(std::format_to_n(std::back_inserter(output), size, CSTR("{}"), str)); + } + state.SetBytesProcessed(state.iterations() * size * sizeof(CharT)); +} + +/*** Begin ***/ + +template +static void BM_format_to_n_string_begin(benchmark::State& state) { + using CharT = typename Container::value_type; + size_t size = state.range(0); + auto str = std::basic_string(2 * size, CharT('*')); + + Container output(size, CharT('-')); + for (auto _ : state) + benchmark::DoNotOptimize(std::format_to_n(std::begin(output), size, CSTR("{}"), str)); + + state.SetBytesProcessed(state.iterations() * size * sizeof(CharT)); +} + +/*** Pointer ***/ + +template +static void BM_format_to_n_string_span(benchmark::State& state) { + size_t size = state.range(0); + auto str = std::basic_string(2 * size, CharT('*')); + + auto buffer = std::basic_string(size, CharT('-')); + std::span output{buffer}; + for 
(auto _ : state) + benchmark::DoNotOptimize(std::format_to_n(std::begin(output), size, CSTR("{}"), str)); + + state.SetBytesProcessed(state.iterations() * size * sizeof(CharT)); +} + +template +static void BM_format_to_n_string_pointer(benchmark::State& state) { + size_t size = state.range(0); + auto str = std::basic_string(2 * size, CharT('*')); + + auto buffer = std::basic_string(size, CharT('-')); + CharT* output = buffer.data(); + for (auto _ : state) + benchmark::DoNotOptimize(std::format_to_n(output, size, CSTR("{}"), str)); + + state.SetBytesProcessed(state.iterations() * size * sizeof(CharT)); +} + +/*** Main ***/ + +BENCHMARK_TEMPLATE(BM_format_to_n_string_back_inserter, std::string)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_n_string_back_inserter, std::vector)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_n_string_back_inserter, std::list)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_n_string_begin, std::string)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_n_string_begin, std::vector)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_n_string_begin, std::list)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_n_string_span, char)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_n_string_pointer, char)->RangeMultiplier(2)->Range(1, 1 << 20); + +BENCHMARK_TEMPLATE(BM_format_to_n_string_back_inserter, std::wstring)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_n_string_back_inserter, std::vector)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_n_string_back_inserter, std::list)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_n_string_begin, std::wstring)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_n_string_begin, std::vector)->RangeMultiplier(2)->Range(1, 1 << 20); 
+BENCHMARK_TEMPLATE(BM_format_to_n_string_begin, std::list)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_n_string_span, wchar_t)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_format_to_n_string_pointer, wchar_t)->RangeMultiplier(2)->Range(1, 1 << 20); + +int main(int argc, char** argv) { + benchmark::Initialize(&argc, argv); + if (benchmark::ReportUnrecognizedArguments(argc, argv)) + return 1; + + benchmark::RunSpecifiedBenchmarks(); +} diff --git a/libcxx/benchmarks/formatted_size.bench.cpp b/libcxx/benchmarks/formatted_size.bench.cpp new file mode 100644 --- /dev/null +++ b/libcxx/benchmarks/formatted_size.bench.cpp @@ -0,0 +1,36 @@ +//===----------------------------------------------------------------------===// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include + +#include + +#include "benchmark/benchmark.h" +#include "make_string.h" + +#define CSTR(S) MAKE_CSTRING(CharT, S) + +template +static void BM_formatted_size_string(benchmark::State& state) { + size_t size = state.range(0); + std::basic_string str(size, CharT('*')); + + while (state.KeepRunningBatch(str.size())) + benchmark::DoNotOptimize(std::formatted_size(CSTR("{}"), str)); + + state.SetBytesProcessed(state.iterations() * size * sizeof(CharT)); +} +BENCHMARK_TEMPLATE(BM_formatted_size_string, char)->RangeMultiplier(2)->Range(1, 1 << 20); +BENCHMARK_TEMPLATE(BM_formatted_size_string, wchar_t)->RangeMultiplier(2)->Range(1, 1 << 20); + +int main(int argc, char** argv) { + benchmark::Initialize(&argc, argv); + if (benchmark::ReportUnrecognizedArguments(argc, argv)) + return 1; + + benchmark::RunSpecifiedBenchmarks(); +} diff --git a/libcxx/include/type_traits b/libcxx/include/type_traits --- 
a/libcxx/include/type_traits +++ b/libcxx/include/type_traits @@ -1416,9 +1416,7 @@ // is_signed -// Before Clang 10, __is_signed didn't work for floating-point types or enums. -#if __has_keyword(__is_signed) && \ - !(defined(_LIBCPP_CLANG_VER) && _LIBCPP_CLANG_VER < 1000) +#if __has_keyword(__is_signed) template struct _LIBCPP_TEMPLATE_VIS is_signed : _BoolConstant<__is_signed(_Tp)> { }; diff --git a/lld/ELF/Arch/ARM.cpp b/lld/ELF/Arch/ARM.cpp --- a/lld/ELF/Arch/ARM.cpp +++ b/lld/ELF/Arch/ARM.cpp @@ -140,7 +140,16 @@ case R_ARM_THM_MOVT_PREL: return R_PC; case R_ARM_ALU_PC_G0: + case R_ARM_ALU_PC_G0_NC: + case R_ARM_ALU_PC_G1: + case R_ARM_ALU_PC_G1_NC: + case R_ARM_ALU_PC_G2: case R_ARM_LDR_PC_G0: + case R_ARM_LDR_PC_G1: + case R_ARM_LDR_PC_G2: + case R_ARM_LDRS_PC_G0: + case R_ARM_LDRS_PC_G1: + case R_ARM_LDRS_PC_G2: case R_ARM_THM_ALU_PREL_11_0: case R_ARM_THM_PC8: case R_ARM_THM_PC12: @@ -411,56 +420,83 @@ } } -// Utility functions taken from ARMAddressingModes.h, only changes are LLD -// coding style. - // Rotate a 32-bit unsigned value right by a specified amt of bits. static uint32_t rotr32(uint32_t val, uint32_t amt) { assert(amt < 32 && "Invalid rotate amount"); return (val >> amt) | (val << ((32 - amt) & 31)); } -// Rotate a 32-bit unsigned value left by a specified amt of bits. -static uint32_t rotl32(uint32_t val, uint32_t amt) { - assert(amt < 32 && "Invalid rotate amount"); - return (val << amt) | (val >> ((32 - amt) & 31)); +static std::pair getRemAndLZForGroup(unsigned group, + uint32_t val) { + uint32_t rem, lz; + do { + lz = llvm::countLeadingZeros(val) & ~1; + rem = val; + if (lz == 32) // implies rem == 0 + break; + val &= 0xffffff >> lz; + } while (group--); + return {rem, lz}; } -// Try to encode a 32-bit unsigned immediate imm with an immediate shifter -// operand, this form is an 8-bit immediate rotated right by an even number of -// bits. We compute the rotate amount to use. 
If this immediate value cannot be -// handled with a single shifter-op, determine a good rotate amount that will -// take a maximal chunk of bits out of the immediate. -static uint32_t getSOImmValRotate(uint32_t imm) { - // 8-bit (or less) immediates are trivially shifter_operands with a rotate - // of zero. - if ((imm & ~255U) == 0) - return 0; - - // Use CTZ to compute the rotate amount. - unsigned tz = llvm::countTrailingZeros(imm); - - // Rotate amount must be even. Something like 0x200 must be rotated 8 bits, - // not 9. - unsigned rotAmt = tz & ~1; - - // If we can handle this spread, return it. - if ((rotr32(imm, rotAmt) & ~255U) == 0) - return (32 - rotAmt) & 31; // HW rotates right, not left. +static void encodeAluGroup(uint8_t *loc, const Relocation &rel, uint64_t val, + int group, bool check) { + // ADD/SUB (immediate) add = bit23, sub = bit22 + // immediate field carries a 12-bit modified immediate, made up of a 4-bit + // even rotate right and an 8-bit immediate. + uint32_t opcode = 0x00800000; + if (val >> 63) { + opcode = 0x00400000; + val = -val; + } + uint32_t imm, lz; + std::tie(imm, lz) = getRemAndLZForGroup(group, val); + uint32_t rot = 0; + if (lz < 24) { + imm = rotr32(imm, 24 - lz); + rot = (lz + 8) << 7; + } + if (check && imm > 0xff) + error(getErrorLocation(loc) + "unencodeable immediate " + Twine(val).str() + + " for relocation " + toString(rel.type)); + write32le(loc, (read32le(loc) & 0xff3ff000) | opcode | rot | (imm & 0xff)); +} - // For values like 0xF000000F, we should ignore the low 6 bits, then - // retry the hunt. - if (imm & 63U) { - unsigned tz2 = countTrailingZeros(imm & ~63U); - unsigned rotAmt2 = tz2 & ~1; - if ((rotr32(imm, rotAmt2) & ~255U) == 0) - return (32 - rotAmt2) & 31; // HW rotates right, not left.
+static void encodeLdrGroup(uint8_t *loc, const Relocation &rel, uint64_t val, + int group) { + // R_ARM_LDR_PC_Gn is S + A - P, we have ((S + A) | T) - P, if S is a + // function then addr is 0 (modulo 2) and Pa is 0 (modulo 4) so we can clear + // bottom bit to recover S + A - P. + if (rel.sym->isFunc()) + val &= ~0x1; + // LDR (literal) u = bit23 + uint32_t opcode = 0x00800000; + if (val >> 63) { + opcode = 0x0; + val = -val; } + uint32_t imm = getRemAndLZForGroup(group, val).first; + checkUInt(loc, imm, 12, rel); + write32le(loc, (read32le(loc) & 0xff7ff000) | opcode | imm); +} - // Otherwise, we have no way to cover this span of bits with a single - // shifter_op immediate. Return a chunk of bits that will be useful to - // handle. - return (32 - rotAmt) & 31; // HW rotates right, not left. +static void encodeLdrsGroup(uint8_t *loc, const Relocation &rel, uint64_t val, + int group) { + // R_ARM_LDRS_PC_Gn is S + A - P, we have ((S + A) | T) - P, if S is a + // function then addr is 0 (modulo 2) and Pa is 0 (modulo 4) so we can clear + // bottom bit to recover S + A - P. + if (rel.sym->isFunc()) + val &= ~0x1; + // LDRD/LDRH/LDRSB/LDRSH (literal) u = bit23 + uint32_t opcode = 0x00800000; + if (val >> 63) { + opcode = 0x0; + val = -val; + } + uint32_t imm = getRemAndLZForGroup(group, val).first; + checkUInt(loc, imm, 8, rel); + write32le(loc, (read32le(loc) & 0xff7ff0f0) | opcode | ((imm & 0xf0) << 4) | + (imm & 0xf)); } void ARM::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const { @@ -633,45 +669,39 @@ ((val << 4) & 0x7000) | // imm3 (val & 0x00ff)); // imm8 break; - case R_ARM_ALU_PC_G0: { - // ADR (literal) add = bit23, sub = bit22 - // literal is a 12-bit modified immediate, made up of a 4-bit even rotate - // right and an 8-bit immediate. The code-sequence here is derived from - // ARMAddressingModes.h in llvm/Target/ARM/MCTargetDesc. In our case we - // want to give an error if we cannot encode the constant. 
- uint32_t opcode = 0x00800000; - if (val >> 63) { - opcode = 0x00400000; - val = ~val + 1; - } - if ((val & ~255U) != 0) { - uint32_t rotAmt = getSOImmValRotate(val); - // Error if we cannot encode this with a single shift - if (rotr32(~255U, rotAmt) & val) - error(getErrorLocation(loc) + "unencodeable immediate " + - Twine(val).str() + " for relocation " + toString(rel.type)); - val = rotl32(val, rotAmt) | ((rotAmt >> 1) << 8); - } - write32le(loc, (read32le(loc) & 0xff0ff000) | opcode | val); + case R_ARM_ALU_PC_G0: + encodeAluGroup(loc, rel, val, 0, true); break; - } - case R_ARM_LDR_PC_G0: { - // R_ARM_LDR_PC_G0 is S + A - P, we have ((S + A) | T) - P, if S is a - // function then addr is 0 (modulo 2) and Pa is 0 (modulo 4) so we can clear - // bottom bit to recover S + A - P. - if (rel.sym->isFunc()) - val &= ~0x1; - // LDR (literal) u = bit23 - int64_t imm = val; - uint32_t u = 0x00800000; - if (imm < 0) { - imm = -imm; - u = 0; - } - checkUInt(loc, imm, 12, rel); - write32le(loc, (read32le(loc) & 0xff7ff000) | u | imm); + case R_ARM_ALU_PC_G0_NC: + encodeAluGroup(loc, rel, val, 0, false); + break; + case R_ARM_ALU_PC_G1: + encodeAluGroup(loc, rel, val, 1, true); + break; + case R_ARM_ALU_PC_G1_NC: + encodeAluGroup(loc, rel, val, 1, false); + break; + case R_ARM_ALU_PC_G2: + encodeAluGroup(loc, rel, val, 2, true); + break; + case R_ARM_LDR_PC_G0: + encodeLdrGroup(loc, rel, val, 0); + break; + case R_ARM_LDR_PC_G1: + encodeLdrGroup(loc, rel, val, 1); + break; + case R_ARM_LDR_PC_G2: + encodeLdrGroup(loc, rel, val, 2); + break; + case R_ARM_LDRS_PC_G0: + encodeLdrsGroup(loc, rel, val, 0); + break; + case R_ARM_LDRS_PC_G1: + encodeLdrsGroup(loc, rel, val, 1); + break; + case R_ARM_LDRS_PC_G2: + encodeLdrsGroup(loc, rel, val, 2); break; - } case R_ARM_THM_ALU_PREL_11_0: { // ADR encoding T2 (sub), T3 (add) i:imm3:imm8 int64_t imm = val; @@ -816,7 +846,11 @@ ((lo & 0x7000) >> 4) | // imm3 (lo & 0x00ff)); // imm8 } - case R_ARM_ALU_PC_G0: { + case R_ARM_ALU_PC_G0: 
+ case R_ARM_ALU_PC_G0_NC: + case R_ARM_ALU_PC_G1: + case R_ARM_ALU_PC_G1_NC: + case R_ARM_ALU_PC_G2: { // 12-bit immediate is a modified immediate made up of a 4-bit even // right rotation and 8-bit constant. After the rotation the value // is zero-extended. When bit 23 is set the instruction is an add, when @@ -825,13 +859,25 @@ uint32_t val = rotr32(instr & 0xff, ((instr & 0xf00) >> 8) * 2); return (instr & 0x00400000) ? -val : val; } - case R_ARM_LDR_PC_G0: { + case R_ARM_LDR_PC_G0: + case R_ARM_LDR_PC_G1: + case R_ARM_LDR_PC_G2: { // ADR (literal) add = bit23, sub = bit22 // LDR (literal) u = bit23 unsigned imm12 bool u = read32le(buf) & 0x00800000; uint32_t imm12 = read32le(buf) & 0xfff; return u ? imm12 : -imm12; } + case R_ARM_LDRS_PC_G0: + case R_ARM_LDRS_PC_G1: + case R_ARM_LDRS_PC_G2: { + // LDRD/LDRH/LDRSB/LDRSH (literal) u = bit23 unsigned imm8 + uint32_t opcode = read32le(buf); + bool u = opcode & 0x00800000; + uint32_t imm4l = opcode & 0xf; + uint32_t imm4h = (opcode & 0xf00) >> 4; + return u ? (imm4h | imm4l) : -(imm4h | imm4l); + } case R_ARM_THM_ALU_PREL_11_0: { // Thumb2 ADR, which is an alias for a sub or add instruction with an // unsigned immediate. diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp --- a/lld/ELF/Driver.cpp +++ b/lld/ELF/Driver.cpp @@ -2281,7 +2281,6 @@ // Create elfHeader early. We need a dummy section in // addReservedSymbols to mark the created symbols as not absolute. Out::elfHeader = make("", 0, SHF_ALLOC); - Out::elfHeader->size = sizeof(typename ELFT::Ehdr); std::vector wrapped = addWrappedSymbols(args); diff --git a/lld/ELF/InputSection.h b/lld/ELF/InputSection.h --- a/lld/ELF/InputSection.h +++ b/lld/ELF/InputSection.h @@ -78,7 +78,7 @@ // These corresponds to the fields in Elf_Shdr. 
uint32_t alignment; uint64_t flags; - uint64_t entsize; + uint32_t entsize; uint32_t type; uint32_t link; uint32_t info; @@ -99,9 +99,9 @@ void markDead() { partition = 0; } protected: - SectionBase(Kind sectionKind, StringRef name, uint64_t flags, - uint64_t entsize, uint64_t alignment, uint32_t type, - uint32_t info, uint32_t link) + constexpr SectionBase(Kind sectionKind, StringRef name, uint64_t flags, + uint32_t entsize, uint32_t alignment, uint32_t type, + uint32_t info, uint32_t link) : name(name), repl(this), sectionKind(sectionKind), bss(false), keepUnique(false), partition(0), alignment(alignment), flags(flags), entsize(entsize), type(type), link(link), info(info) {} @@ -121,14 +121,14 @@ static bool classof(const SectionBase *s) { return s->kind() != Output; } - // Section index of the relocation section if exists. - uint32_t relSecIdx = 0; - // The file which contains this section. Its dynamic type is always // ObjFile, but in order to avoid ELFT, we use InputFile as // its static type. InputFile *file; + // Section index of the relocation section if exists. + uint32_t relSecIdx = 0; + template ObjFile *getFile() const { return cast_or_null>(file); } @@ -352,8 +352,6 @@ // beginning of the output section. template void writeTo(uint8_t *buf); - uint64_t getOffset(uint64_t offset) const { return outSecOff + offset; } - OutputSection *getParent() const; // This variable has two usages. Initially, it represents an index in the diff --git a/lld/ELF/InputSection.cpp b/lld/ELF/InputSection.cpp --- a/lld/ELF/InputSection.cpp +++ b/lld/ELF/InputSection.cpp @@ -187,7 +187,7 @@ } case Regular: case Synthetic: - return cast(this)->getOffset(offset); + return cast(this)->outSecOff + offset; case EHFrame: // The file crtbeginT.o has relocations pointing to the start of an empty // .eh_frame that is known to be the first in the link. 
It does that to @@ -196,7 +196,7 @@ case Merge: const MergeInputSection *ms = cast(this); if (InputSection *isec = ms->getParent()) - return isec->getOffset(ms->getParentOffset(offset)); + return isec->outSecOff + ms->getParentOffset(offset); return ms->getParentOffset(offset); } llvm_unreachable("invalid section kind"); diff --git a/lld/ELF/LinkerScript.h b/lld/ELF/LinkerScript.h --- a/lld/ELF/LinkerScript.h +++ b/lld/ELF/LinkerScript.h @@ -292,10 +292,6 @@ std::pair findMemoryRegion(OutputSection *sec, MemoryRegion *hint); - void switchTo(OutputSection *sec); - uint64_t advance(uint64_t size, unsigned align); - void output(InputSection *sec); - void assignOffsets(OutputSection *sec); // Ctx captures the local AddressState and makes it accessible diff --git a/lld/ELF/LinkerScript.cpp b/lld/ELF/LinkerScript.cpp --- a/lld/ELF/LinkerScript.cpp +++ b/lld/ELF/LinkerScript.cpp @@ -53,12 +53,6 @@ return name.startswith(prefix) || name == prefix.drop_back(); } -static uint64_t getOutputSectionVA(SectionBase *sec) { - OutputSection *os = sec->getOutputSection(); - assert(os && "input section has no output section assigned"); - return os ? os->addr : 0; -} - static StringRef getOutputSectionName(const InputSectionBase *s) { if (config->relocatable) return s->name; @@ -118,15 +112,13 @@ uint64_t ExprValue::getValue() const { if (sec) - return alignTo(sec->getOffset(val) + getOutputSectionVA(sec), + return alignTo(sec->getOutputSection()->addr + sec->getOffset(val), alignment); return alignTo(val, alignment); } uint64_t ExprValue::getSecAddr() const { - if (sec) - return sec->getOffset(0) + getOutputSectionVA(sec); - return 0; + return sec ? 
sec->getOutputSection()->addr + sec->getOffset(0) : 0; } uint64_t ExprValue::getSectionOffset() const { @@ -908,38 +900,6 @@ } } -uint64_t LinkerScript::advance(uint64_t size, unsigned alignment) { - dot = alignTo(dot, alignment) + size; - return dot; -} - -void LinkerScript::output(InputSection *s) { - assert(ctx->outSec == s->getParent()); - uint64_t before = advance(0, 1); - uint64_t pos = advance(s->getSize(), s->alignment); - s->outSecOff = pos - s->getSize() - ctx->outSec->addr; - - // Update output section size after adding each section. This is so that - // SIZEOF works correctly in the case below: - // .foo { *(.aaa) a = SIZEOF(.foo); *(.bbb) } - expandOutputSection(pos - before); -} - -void LinkerScript::switchTo(OutputSection *sec) { - ctx->outSec = sec; - - uint64_t pos = advance(0, 1); - if (sec->addrExpr && script->hasSectionsCommand) { - // The alignment is ignored. - ctx->outSec->addr = pos; - } else { - // ctx->outSec->alignment is the max of ALIGN and the maximum of input - // section alignments. - ctx->outSec->addr = advance(0, ctx->outSec->alignment); - expandMemoryRegions(ctx->outSec->addr - pos); - } -} - // This function searches for a memory region to place the given output // section in. If found, a pointer to the appropriate memory region is // returned in the first member of the pair. Otherwise, a nullptr is returned. @@ -1028,7 +988,18 @@ sec->name); } - switchTo(sec); + ctx->outSec = sec; + if (sec->addrExpr && script->hasSectionsCommand) { + // The alignment is ignored. + sec->addr = dot; + } else { + // sec->alignment is the max of ALIGN and the maximum of input + // section alignments. + const uint64_t pos = dot; + dot = alignTo(dot, sec->alignment); + sec->addr = dot; + expandMemoryRegions(dot - pos); + } // ctx->lmaOffset is LMA minus VMA. 
If LMA is explicitly specified via AT() or // AT>, recompute ctx->lmaOffset; otherwise, if both previous/current LMA @@ -1048,7 +1019,7 @@ } // Propagate ctx->lmaOffset to the first "non-header" section. - if (PhdrEntry *l = ctx->outSec->ptLoad) + if (PhdrEntry *l = sec->ptLoad) if (sec == findFirstSection(l)) l->lmaOffset = ctx->lmaOffset; @@ -1070,7 +1041,7 @@ // Handle BYTE(), SHORT(), LONG(), or QUAD(). if (auto *data = dyn_cast(cmd)) { - data->offset = dot - ctx->outSec->addr; + data->offset = dot - sec->addr; dot += data->size; expandOutputSection(data->size); continue; @@ -1079,8 +1050,18 @@ // Handle a single input section description command. // It calculates and assigns the offsets for each section and also // updates the output section size. - for (InputSection *sec : cast(cmd)->sections) - output(sec); + for (InputSection *isec : cast(cmd)->sections) { + assert(isec->getParent() == sec); + const uint64_t pos = dot; + dot = alignTo(dot, isec->alignment); + isec->outSecOff = dot - sec->addr; + dot += isec->getSize(); + + // Update output section size after adding each section. 
This is so that + // SIZEOF works correctly in the case below: + // .foo { *(.aaa) a = SIZEOF(.foo); *(.bbb) } + expandOutputSection(dot - pos); + } } // Non-SHF_ALLOC sections do not affect the addresses of other OutputSections @@ -1327,10 +1308,10 @@ dot += getHeaderSize(); } - auto deleter = std::make_unique(); - ctx = deleter.get(); + AddressState state; + ctx = &state; errorOnMissingSection = true; - switchTo(aether); + ctx->outSec = aether; SymbolAssignmentMap oldValues = getSymbolAssignmentValues(sectionCommands); for (SectionCommand *cmd : sectionCommands) { diff --git a/lld/ELF/MapFile.cpp b/lld/ELF/MapFile.cpp --- a/lld/ELF/MapFile.cpp +++ b/lld/ELF/MapFile.cpp @@ -187,7 +187,7 @@ continue; } - writeHeader(os, isec->getVA(0), osec->getLMA() + isec->getOffset(0), + writeHeader(os, isec->getVA(), osec->getLMA() + isec->outSecOff, isec->getSize(), isec->alignment); os << indent8 << toString(isec) << '\n'; for (Symbol *sym : sectionSyms[isec]) diff --git a/lld/ELF/Options.td b/lld/ELF/Options.td --- a/lld/ELF/Options.td +++ b/lld/ELF/Options.td @@ -304,8 +304,8 @@ def o: JoinedOrSeparate<["-"], "o">, MetaVarName<"">, HelpText<"Path to file to write output">; -def oformat: Separate<["--"], "oformat">, MetaVarName<"">, - HelpText<"Specify the binary format for the output object file">; +defm oformat: EEq<"oformat", "Specify the binary format for the output object file">, + MetaVarName<"[elf,binary]">; def omagic: FF<"omagic">, MetaVarName<"">, HelpText<"Set the text and data sections to be readable and writable, do not page align sections, link against static libraries">; diff --git a/lld/ELF/OutputSections.h b/lld/ELF/OutputSections.h --- a/lld/ELF/OutputSections.h +++ b/lld/ELF/OutputSections.h @@ -128,7 +128,6 @@ // until Writer is initialized. 
struct Out { static uint8_t *bufferStart; - static uint8_t first; static PhdrEntry *tlsPhdr; static OutputSection *elfHeader; static OutputSection *programHeaders; diff --git a/lld/ELF/OutputSections.cpp b/lld/ELF/OutputSections.cpp --- a/lld/ELF/OutputSections.cpp +++ b/lld/ELF/OutputSections.cpp @@ -33,7 +33,6 @@ using namespace lld::elf; uint8_t *Out::bufferStart; -uint8_t Out::first; PhdrEntry *Out::tlsPhdr; OutputSection *Out::elfHeader; OutputSection *Out::programHeaders; diff --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp --- a/lld/ELF/Writer.cpp +++ b/lld/ELF/Writer.cpp @@ -281,7 +281,10 @@ template void elf::createSyntheticSections() { // Initialize all pointers with NULL. This is needed because // you can call lld::elf::main more than once as a library. - memset(&Out::first, 0, sizeof(Out)); + Out::tlsPhdr = nullptr; + Out::preinitArray = nullptr; + Out::initArray = nullptr; + Out::finiArray = nullptr; // Add the .interp section first because it is not a SyntheticSection. // The removeUnusedSyntheticSections() function relies on the @@ -1833,36 +1836,30 @@ }) .base(); - DenseSet isdSet; - // Mark unused synthetic sections for deletion - auto end = std::stable_partition( - start, inputSections.end(), [&](InputSectionBase *s) { - SyntheticSection *ss = dyn_cast(s); - OutputSection *os = ss->getParent(); - if (!os || ss->isNeeded()) - return true; - - // If we reach here, then ss is an unused synthetic section and we want - // to remove it from the corresponding input section description, and - // orphanSections. 
- for (SectionCommand *b : os->commands) - if (auto *isd = dyn_cast(b)) - isdSet.insert(isd); - - llvm::erase_if( - script->orphanSections, - [=](const InputSectionBase *isec) { return isec == ss; }); - - return false; + // Remove unused synthetic sections from inputSections; + DenseSet unused; + auto end = + std::remove_if(start, inputSections.end(), [&](InputSectionBase *s) { + auto *sec = cast(s); + if (sec->getParent() && sec->isNeeded()) + return false; + unused.insert(sec); + return true; }); - - DenseSet unused(end, inputSections.end()); - for (auto *isd : isdSet) - llvm::erase_if(isd->sections, - [=](InputSection *isec) { return unused.count(isec); }); - - // Erase unused synthetic sections. inputSections.erase(end, inputSections.end()); + + // Remove unused synthetic sections from the corresponding input section + // description and orphanSections. + for (auto *sec : unused) + if (OutputSection *osec = cast(sec)->getParent()) + for (SectionCommand *cmd : osec->commands) + if (auto *isd = dyn_cast(cmd)) + llvm::erase_if(isd->sections, [&](InputSection *isec) { + return unused.count(isec); + }); + llvm::erase_if(script->orphanSections, [&](const InputSectionBase *sec) { + return unused.count(sec); + }); } // Create output section objects and add them to OutputSections. @@ -2028,11 +2025,14 @@ sortSections(); - // Now that we have the final list, create a list of all the - // OutputSections for convenience. + // Create a list of OutputSections, assign sectionIndex, and populate + // in.shStrTab. for (SectionCommand *cmd : script->sectionCommands) - if (auto *sec = dyn_cast(cmd)) - outputSections.push_back(sec); + if (auto *osec = dyn_cast(cmd)) { + outputSections.push_back(osec); + osec->sectionIndex = outputSections.size(); + osec->shName = in.shStrTab->addString(osec->name); + } // Prefer command line supplied address over other constraints. for (OutputSection *sec : outputSections) { @@ -2054,12 +2054,7 @@ // to 1 to make __ehdr_start defined. 
The section number is not // particularly relevant. Out::elfHeader->sectionIndex = 1; - - for (size_t i = 0, e = outputSections.size(); i != e; ++i) { - OutputSection *sec = outputSections[i]; - sec->sectionIndex = i + 1; - sec->shName = in.shStrTab->addString(sec->name); - } + Out::elfHeader->size = sizeof(typename ELFT::Ehdr); // Binary and relocatable output does not have PHDRS. // The headers have to be created before finalize as that can influence the @@ -2549,17 +2544,6 @@ return first->offset + os->addr - first->addr; } -// Set an in-file position to a given section and returns the end position of -// the section. -static uint64_t setFileOffset(OutputSection *os, uint64_t off) { - off = computeFileOffset(os, off); - os->offset = off; - - if (os->type == SHT_NOBITS) - return off; - return off + os->size; -} - template void Writer::assignFileOffsetsBinary() { // Compute the minimum LMA of all non-empty non-NOBITS sections as minAddr. auto needsOffset = [](OutputSection &sec) { @@ -2587,9 +2571,8 @@ // Assign file offsets to output sections. template void Writer::assignFileOffsets() { - uint64_t off = 0; - off = setFileOffset(Out::elfHeader, off); - off = setFileOffset(Out::programHeaders, off); + Out::programHeaders->offset = Out::elfHeader->size; + uint64_t off = Out::elfHeader->size + Out::programHeaders->size; PhdrEntry *lastRX = nullptr; for (Partition &part : partitions) @@ -2602,18 +2585,23 @@ for (OutputSection *sec : outputSections) { if (!(sec->flags & SHF_ALLOC)) continue; - off = setFileOffset(sec, off); + off = computeFileOffset(sec, off); + sec->offset = off; + if (sec->type != SHT_NOBITS) + off += sec->size; // If this is a last section of the last executable segment and that // segment is the last loadable segment, align the offset of the // following section to avoid loading non-segments parts of the file. 
if (config->zSeparate != SeparateSegmentKind::None && lastRX && lastRX->lastSec == sec) - off = alignTo(off, config->commonPageSize); + off = alignTo(off, config->maxPageSize); } - for (OutputSection *sec : outputSections) - if (!(sec->flags & SHF_ALLOC)) - off = setFileOffset(sec, off); + for (OutputSection *osec : outputSections) + if (!(osec->flags & SHF_ALLOC)) { + osec->offset = alignTo(off, osec->alignment); + off = osec->offset + osec->size; + } sectionHeaderOff = alignTo(off, config->wordsize); fileSize = sectionHeaderOff + (outputSections.size() + 1) * sizeof(Elf_Shdr); @@ -2887,9 +2875,9 @@ for (PhdrEntry *p : part.phdrs) if (p->p_type == PT_LOAD && (p->p_flags & PF_X)) fillTrap(Out::bufferStart + alignDown(p->firstSec->offset + p->p_filesz, - config->commonPageSize), + config->maxPageSize), Out::bufferStart + alignTo(p->firstSec->offset + p->p_filesz, - config->commonPageSize)); + config->maxPageSize)); // Round up the file size of the last segment to the page boundary iff it is // an executable segment to ensure that other tools don't accidentally @@ -2901,7 +2889,7 @@ if (last && (last->p_flags & PF_X)) last->p_memsz = last->p_filesz = - alignTo(last->p_filesz, config->commonPageSize); + alignTo(last->p_filesz, config->maxPageSize); } } diff --git a/lld/test/ELF/arm-adr-err-long.s b/lld/test/ELF/arm-adr-err-long.s new file mode 100644 --- /dev/null +++ b/lld/test/ELF/arm-adr-err-long.s @@ -0,0 +1,57 @@ +// REQUIRES: arm +// RUN: split-file %s %t +// RUN: llvm-mc --triple=armv7a-none-eabi --arm-add-build-attributes -filetype=obj -o %t.o %t/asm +// RUN: not ld.lld --script %t/lds %t.o -o /dev/null 2>&1 | FileCheck %s + +//--- lds +SECTIONS { + .text.0 0x0100000 : AT(0x0100000) { *(.text.0) } + .text.1 0x0800000 : AT(0x0800000) { *(.text.1) } + .text.2 0xf0f0000 : AT(0xf0f0000) { *(.text.2) } +} + +//--- asm +/// This is a variant of arm-adr-long.s with some _NC relocs changed into their +/// checking counterparts, to verify that out-of-range references 
are caught. + + .section .text.0, "ax", %progbits +dat1: + .word 0 + + .section .text.1, "ax", %progbits + .global _start + .type _start, %function +_start: + .inst 0xe24f0008 // sub r0, pc, #8 + .inst 0xe2400004 // sub r0, r0, #4 + .reloc 0, R_ARM_ALU_PC_G0, dat1 +// CHECK: {{.*}}.s.tmp.o:(.text.1+0x0): unencodeable immediate 7340040 for relocation R_ARM_ALU_PC_G0 + .reloc 4, R_ARM_ALU_PC_G1, dat1 + + .inst 0xe24f1008 // sub r1, pc, #8 + .inst 0xe2411004 // sub r1, r1, #4 + .inst 0xe2411000 // sub r1, r1, #0 + .reloc 8, R_ARM_ALU_PC_G0_NC, dat2 + .reloc 12, R_ARM_ALU_PC_G1, dat2 +// CHECK: {{.*}}.s.tmp.o:(.text.1+0xc): unencodeable immediate 244252656 for relocation R_ARM_ALU_PC_G1 + .reloc 16, R_ARM_ALU_PC_G2, dat2 + + .inst 0xe24f0008 // sub r0, pc, #8 + .inst 0xe2400004 // sub r0, r0, #4 + .inst 0xe2400000 // sub r0, r0, #0 + .reloc 20, R_ARM_ALU_PC_G0, dat1 +// CHECK: {{.*}}.s.tmp.o:(.text.1+0x14): unencodeable immediate 7340060 for relocation R_ARM_ALU_PC_G0 + .reloc 24, R_ARM_ALU_PC_G1, dat1 + .reloc 28, R_ARM_ALU_PC_G2, dat1 + + .inst 0xe24f0008 // sub r0, pc, #8 + .inst 0xe2400004 // sub r0, r0, #4 + .inst 0xe1c000d0 // ldrd r0, r1, [r0, #0] + .reloc 32, R_ARM_ALU_PC_G0_NC, dat2 + .reloc 36, R_ARM_ALU_PC_G1_NC, dat2 +// CHECK: {{.*}}.s.tmp.o:(.text.1+0x28): relocation R_ARM_LDRS_PC_G2 out of range: 4056 is not in [0, 255] + .reloc 40, R_ARM_LDRS_PC_G2, dat2 + + .section .text.2, "ax", %progbits +dat2: + .word 0 diff --git a/lld/test/ELF/arm-adr-err.s b/lld/test/ELF/arm-adr-err.s --- a/lld/test/ELF/arm-adr-err.s +++ b/lld/test/ELF/arm-adr-err.s @@ -23,6 +23,12 @@ .inst 0xe24f1008 .reloc 4, R_ARM_ALU_PC_G0, unaligned + .balign 512 +/// ldrd r0, r1, _start +// CHECK: {{.*}}.s.tmp.o:(.os1+0x200): relocation R_ARM_LDRS_PC_G0 out of range: 512 is not in [0, 255]; references _start + .reloc ., R_ARM_LDRS_PC_G0, _start + .inst 0xe14f00d0 + .section .os2, "ax", %progbits .balign 1024 .thumb_func diff --git a/lld/test/ELF/arm-adr-long.s b/lld/test/ELF/arm-adr-long.s 
--- a/lld/test/ELF/arm-adr-long.s +++ b/lld/test/ELF/arm-adr-long.s @@ -1,15 +1,20 @@ // REQUIRES: arm -// RUN: llvm-mc --triple=armv7a-none-eabi --arm-add-build-attributes -filetype=obj -o %t.o %s -// RUN: echo "SECTIONS { \ -// RUN: .text.0 0x10000000 : AT(0x10000000) { *(.text.0) } \ -// RUN: .text.1 0x80000000 : AT(0x80000000) { *(.text.1) } \ -// RUN: .text.2 0xf0000010 : AT(0xf0000010) { *(.text.2) } \ -// RUN: } " > %t.script -// RUN: ld.lld --script %t.script %t.o -o %t -// RUN: llvm-objdump -d --no-show-raw-insn --triple=armv7a-none-eabi %t | FileCheck %s - -/// Test the long range encoding of R_ARM_ALU_PC_G0. We can encode an 8-bit +// RUN: split-file %s %t +// RUN: llvm-mc --triple=armv7a-none-eabi --arm-add-build-attributes -filetype=obj -o %t.o %t/asm +// RUN: ld.lld --script %t/lds %t.o -o %t2 +// RUN: llvm-objdump -d --no-show-raw-insn --triple=armv7a-none-eabi %t2 | FileCheck %s + +/// Test the long range encoding of R_ARM_ALU_PC_Gx sequences. We can encode an 8-bit /// immediate rotated right by an even 4-bit field. 
+ +//--- lds +SECTIONS { + .text.0 0x0100000 : AT(0x0100000) { *(.text.0) } + .text.1 0x0800000 : AT(0x0800000) { *(.text.1) } + .text.2 0xf0f0000 : AT(0xf0f0000) { *(.text.2) } +} + +//--- asm .section .text.0, "ax", %progbits dat1: .word 0 @@ -18,25 +23,81 @@ .global _start .type _start, %function _start: -/// adr.w r0, dat1 - .inst 0xe24f0008 - .reloc 0, R_ARM_ALU_PC_G0, dat1 -/// adr.w r1, dat2 - .inst 0xe24f1008 - .reloc 4, R_ARM_ALU_PC_G0, dat2 + .inst 0xe24f0008 // sub r0, pc, #8 + .inst 0xe2400004 // sub r0, r0, #4 + .reloc 0, R_ARM_ALU_PC_G0_NC, dat1 + .reloc 4, R_ARM_ALU_PC_G1, dat1 + + .inst 0xe24f1008 // sub r1, pc, #8 + .inst 0xe2411004 // sub r1, r1, #4 + .inst 0xe2411000 // sub r1, r1, #0 + .reloc 8, R_ARM_ALU_PC_G0_NC, dat2 + .reloc 12, R_ARM_ALU_PC_G1_NC, dat2 + .reloc 16, R_ARM_ALU_PC_G2, dat2 + + .inst 0xe24f0008 // sub r0, pc, #8 + .inst 0xe2400004 // sub r0, r0, #4 + .inst 0xe2400000 // sub r0, r0, #0 + .reloc 20, R_ARM_ALU_PC_G0_NC, dat1 + .reloc 24, R_ARM_ALU_PC_G1_NC, dat1 + .reloc 28, R_ARM_ALU_PC_G2, dat1 + + .inst 0xe24f0008 // sub r0, pc, #8 + .inst 0xe2400004 // sub r0, r0, #4 + .inst 0xe5900000 // ldr r0, [r0, #0] + .reloc 32, R_ARM_ALU_PC_G0_NC, dat2 + .reloc 36, R_ARM_ALU_PC_G1_NC, dat2 + .reloc 40, R_ARM_LDR_PC_G2, dat2 + + .inst 0xe24f0008 // sub r0, pc, #8 + .inst 0xe5100004 // ldr r0, [r0, #-4] + .reloc 44, R_ARM_ALU_PC_G0_NC, dat1 + .reloc 48, R_ARM_LDR_PC_G1, dat1 + + .inst 0xe24f0008 // sub r0, pc, #8 + .inst 0xe2400004 // sub r0, r0, #4 + .inst 0xe5900000 // ldr r0, [r0, #0] + .reloc 52, R_ARM_ALU_PC_G0_NC, dat1 + .reloc 56, R_ARM_ALU_PC_G1_NC, dat1 + .reloc 60, R_ARM_LDR_PC_G2, dat1 + + .inst 0xe24f0008 // sub r0, pc, #8 + .inst 0xe14000d4 // ldrd r0, [r0, #-4] + .reloc 64, R_ARM_ALU_PC_G0_NC, dat1 + .reloc 68, R_ARM_LDRS_PC_G1, dat1 .section .text.2, "ax", %progbits dat2: .word 0 -// CHECK: 10000000 : -// CHECK-NEXT: 10000000: andeq r0, r0, r0 +// CHECK: 00100000 : +// CHECK-NEXT: 100000: andeq r0, r0, r0 + +// CHECK: 
00800000 <_start>: +// CHECK-NEXT: 800000: sub r0, pc, #112, #16 +// CHECK-NEXT: 800004: sub r0, r0, #8 + +// CHECK-NEXT: 800008: add r1, pc, #232, #12 +// CHECK-NEXT: 80000c: add r1, r1, #978944 +// CHECK-NEXT: 800010: add r1, r1, #4080 + +// CHECK-NEXT: 800014: sub r0, pc, #112, #16 +// CHECK-NEXT: 800018: sub r0, r0, #28 +// CHECK-NEXT: 80001c: sub r0, r0, #0 + +// CHECK-NEXT: 800020: add r0, pc, #232, #12 +// CHECK-NEXT: 800024: add r0, r0, #978944 +// CHECK-NEXT: 800028: ldr r0, [r0, #4056] + +// CHECK-NEXT: 80002c: sub r0, pc, #112, #16 +// CHECK-NEXT: 800030: ldr r0, [r0, #-52] + +// CHECK-NEXT: 800034: sub r0, pc, #112, #16 +// CHECK-NEXT: 800038: sub r0, r0, #60 +// CHECK-NEXT: 80003c: ldr r0, [r0, #-0] -// CHECK: 80000000 <_start>: -/// 0x80000000 + 0x8 - 0x70000008 = 0x10000000 -// CHECK-NEXT: 80000000: sub r0, pc, #1879048200 -/// 0x80000004 + 0x8 + 0x70000004 = 0xf0000010 -// CHECK-NEXT: 80000004: add r1, pc, #1879048196 +// CHECK-NEXT: 800040: sub r0, pc, #112, #16 +// CHECK-NEXT: 800044: ldrd r0, r1, [r0, #-72] -// CHECK: f0000010 : -// CHECK-NEXT: f0000010: andeq r0, r0, r0 +// CHECK: 0f0f0000 : +// CHECK-NEXT: f0f0000: andeq r0, r0, r0 diff --git a/lld/test/ELF/arm-adr.s b/lld/test/ELF/arm-adr.s --- a/lld/test/ELF/arm-adr.s +++ b/lld/test/ELF/arm-adr.s @@ -87,9 +87,9 @@ /// 0x20804 + 0x8 - 0x3f8 = 0x11414 = dat2 // CHECK-NEXT: 20804: sub r0, pc, #1016 /// 0x20808 + 0x8 + 0x400 = 0x11c10 = dat3 -// CHECK-NEXT: 20808: add r0, pc, #1024 +// CHECK-NEXT: 20808: add r0, pc, #64, #28 /// 0x2080c + 0x8 + 0x400 = 0x11c14 = dat4 -// CHECK-NEXT: 2080c: add r0, pc, #1024 +// CHECK-NEXT: 2080c: add r0, pc, #64, #28 // CHECK: 00020c10 : // CHECK-NEXT: 20c10: andeq r0, r0, r0 diff --git a/lld/test/ELF/fill-trap-ppc.s b/lld/test/ELF/fill-trap-ppc.s --- a/lld/test/ELF/fill-trap-ppc.s +++ b/lld/test/ELF/fill-trap-ppc.s @@ -2,29 +2,29 @@ # RUN: llvm-mc -filetype=obj -triple=powerpc64le-linux %s -o %t.o # RUN: ld.lld %t.o -z separate-code -o %t.ppc64le -# RUN: 
llvm-readobj -l %t.ppc64le | FileCheck %s -# RUN: od -Ax -t x1 -N16 -j0x10ff0 %t.ppc64le | FileCheck %s -check-prefix=LE +# RUN: llvm-readelf -Sl %t.ppc64le | FileCheck %s +# RUN: od -Ax -t x1 -N16 -j0x1fff0 %t.ppc64le | FileCheck %s -check-prefix=LE # RUN: llvm-mc -filetype=obj -triple=powerpc64-linux %s -o %t.o # RUN: ld.lld %t.o -z separate-code -o %t.ppc64 -# RUN: llvm-readobj -l %t.ppc64 | FileCheck %s -# RUN: od -Ax -t x1 -N16 -j0x10ff0 %t.ppc64 | FileCheck %s -check-prefix=BE +# RUN: llvm-readelf -Sl %t.ppc64 | FileCheck %s +# RUN: od -Ax -t x1 -N16 -j0x1fff0 %t.ppc64 | FileCheck %s -check-prefix=BE -# CHECK: ProgramHeader { -# CHECK: Type: PT_LOAD -# CHECK: Offset: 0x10000{{$}} -# CHECK-NEXT: VirtualAddress: -# CHECK-NEXT: PhysicalAddress: -# CHECK-NEXT: FileSize: 4096 -# CHECK-NEXT: MemSize: -# CHECK-NEXT: Flags [ -# CHECK-NEXT: PF_R -# CHECK-NEXT: PF_X -# CHECK-NEXT: ] +# CHECK: [Nr] Name Type Address Off Size ES Flg Lk Inf Al +# CHECK-NEXT: [ 0] NULL 0000000000000000 000000 000000 00 0 0 0 +# CHECK-NEXT: [ 1] .text PROGBITS 0000000010010000 010000 000004 00 AX 0 0 4 +## TODO Remove empty .branch_lt +# CHECK-NEXT: [ 2] .branch_lt PROGBITS 0000000010020000 020000 000000 00 WA 0 0 8 +# CHECK-NEXT: [ 3] .comment PROGBITS 0000000000000000 020000 000008 01 MS 0 0 1 + +# CHECK: Type Offset VirtAddr PhysAddr FileSiz MemSiz Flg Align +# CHECK-NEXT: PHDR 0x000040 0x0000000010000040 0x0000000010000040 0x000118 0x000118 R 0x8 +# CHECK-NEXT: LOAD 0x000000 0x0000000010000000 0x0000000010000000 0x000158 0x000158 R 0x10000 +# CHECK-NEXT: LOAD 0x010000 0x0000000010010000 0x0000000010010000 0x010000 0x010000 R E 0x10000 ## Check that executable page is filled with traps at its end. 
-# LE: 010ff0 08 00 e0 7f 08 00 e0 7f 08 00 e0 7f 08 00 e0 7f -# BE: 010ff0 7f e0 00 08 7f e0 00 08 7f e0 00 08 7f e0 00 08 +# LE: 01fff0 08 00 e0 7f 08 00 e0 7f 08 00 e0 7f 08 00 e0 7f +# BE: 01fff0 7f e0 00 08 7f e0 00 08 7f e0 00 08 7f e0 00 08 .globl _start _start: diff --git a/lld/test/ELF/fill-trap.s b/lld/test/ELF/fill-trap.s --- a/lld/test/ELF/fill-trap.s +++ b/lld/test/ELF/fill-trap.s @@ -4,20 +4,22 @@ ## -z noseparate-code is the default: text segment is not tail padded. # RUN: ld.lld %t.o -o %t # RUN: llvm-readobj -l %t | FileCheck %s --check-prefixes=CHECK,NOPAD -# RUN: ld.lld %t.o -z noseparate-code -o %t +# RUN: ld.lld %t.o -z noseparate-code -z common-page-size=512 -o %t # RUN: llvm-readobj -l %t | FileCheck %s --check-prefixes=CHECK,NOPAD ## -z separate-code pads the tail of text segment with traps. -# RUN: ld.lld %t.o -z separate-code -o %t +## Make common-page-size smaller than max-page-size. +## Check that we use max-page-size instead of common-page-size for padding. +# RUN: ld.lld %t.o -z separate-code -z common-page-size=512 -o %t # RUN: llvm-readobj -l %t | FileCheck %s --check-prefixes=CHECK,PAD # RUN: od -Ax -x -N16 -j0x1ff0 %t | FileCheck %s --check-prefix=FILL ## -z separate-loadable-segments pads all segments, including the text segment. 
-# RUN: ld.lld %t.o -z separate-loadable-segments -o %t +# RUN: ld.lld %t.o -z separate-loadable-segments -z common-page-size=512 -o %t # RUN: llvm-readobj -l %t | FileCheck %s --check-prefixes=CHECK,PAD # RUN: od -Ax -x -N16 -j0x1ff0 %t | FileCheck %s --check-prefix=FILL -# RUN: ld.lld %t.o -z separate-code -z noseparate-code -o %t +# RUN: ld.lld %t.o -z separate-code -z noseparate-code -z common-page-size=512 -o %t # RUN: llvm-readobj -l %t | FileCheck %s --check-prefixes=CHECK,NOPAD # CHECK: ProgramHeader { diff --git a/lld/test/ELF/oformat-binary.s b/lld/test/ELF/oformat-binary.s --- a/lld/test/ELF/oformat-binary.s +++ b/lld/test/ELF/oformat-binary.s @@ -1,7 +1,7 @@ # REQUIRES: x86 # RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-linux %s -o %t -# RUN: ld.lld -o %t.out %t --oformat binary +# RUN: ld.lld -o %t.out %t --oformat=binary # RUN: od -t x1 -v %t.out | FileCheck %s # CHECK: 0000000 90 11 22 # CHECK-NEXT: 0000003 @@ -30,7 +30,8 @@ # RUN: od -t x1 %tempty | FileCheck %s ## NOBITS sections are ignored as well. -# RUN: echo 'SECTIONS { .text : {*(.text .mysec*)} .data 0x100 (NOLOAD) : {BYTE(0)}}' > %tnobits.lds +## Also test that SIZEOF_HEADERS evaluates to 0. +# RUN: echo 'SECTIONS { .text : {. 
+= SIZEOF_HEADERS; *(.text .mysec*)} .data 0x100 (NOLOAD) : {BYTE(0)}}' > %tnobits.lds # RUN: ld.lld -T %tnobits.lds %t --oformat binary -o %tnobits # RUN: od -t x1 %tnobits | FileCheck %s @@ -45,7 +46,7 @@ # ERR: unknown --oformat value: foo # RUN: ld.lld -o /dev/null %t --oformat elf -# RUN: ld.lld -o /dev/null %t --oformat elf-foo +# RUN: ld.lld -o /dev/null %t --oformat=elf-foo .text .align 4 diff --git a/lldb/test/Shell/Process/Windows/lit.local.cfg b/lldb/test/Shell/Process/Windows/lit.local.cfg new file mode 100644 --- /dev/null +++ b/lldb/test/Shell/Process/Windows/lit.local.cfg @@ -0,0 +1,2 @@ +if 'system-windows' not in config.available_features: + config.unsupported = True diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h --- a/llvm/include/llvm/Analysis/ScalarEvolution.h +++ b/llvm/include/llvm/Analysis/ScalarEvolution.h @@ -1616,11 +1616,6 @@ /// SCEV+Loop pair. const SCEV *computeSCEVAtScope(const SCEV *S, const Loop *L); - /// This looks up computed SCEV values for all instructions that depend on - /// the given instruction and removes them from the ValueExprMap map if they - /// reference SymName. This is used during PHI resolution. - void forgetSymbolicName(Instruction *I, const SCEV *SymName); - /// Return the BackedgeTakenInfo for the given loop, lazily computing new /// values if the loop hasn't been analyzed yet. The returned result is /// guaranteed not to be predicated. @@ -1923,6 +1918,9 @@ /// Erase Value from ValueExprMap and ExprValueMap. void eraseValueFromMap(Value *V); + /// Insert V to S mapping into ValueExprMap and ExprValueMap. + void insertValueToMap(Value *V, const SCEV *S); + /// Return false iff given SCEV contains a SCEVUnknown with NULL value- /// pointer. 
bool checkValidity(const SCEV *S) const; diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -425,6 +425,12 @@ return true; } + /// Return true if the @llvm.get.active.lane.mask intrinsic should be expanded + /// using generic code in SelectionDAGBuilder. + virtual bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const { + return true; + } + /// Return true if it is profitable to convert a select of FP constants into /// a constant pool load whose address depends on the select condition. The /// parameter may be used to differentiate a select with FP compare from diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp --- a/llvm/lib/Analysis/ScalarEvolution.cpp +++ b/llvm/lib/Analysis/ScalarEvolution.cpp @@ -4094,6 +4094,18 @@ } } +void ScalarEvolution::insertValueToMap(Value *V, const SCEV *S) { + auto It = ValueExprMap.find_as(V); + if (It == ValueExprMap.end()) { + ValueExprMap.insert({SCEVCallbackVH(V, this), S}); + ExprValueMap[S].insert({V, nullptr}); + } else { + // A recursive query may have already computed the SCEV. It should have + // arrived at the same value. + assert(It->second == S); + } +} + /// Return an existing SCEV if it exists, otherwise analyze the expression and /// create a new one. 
const SCEV *ScalarEvolution::getSCEV(Value *V) { @@ -4134,10 +4146,9 @@ ValueExprMapType::iterator I = ValueExprMap.find_as(V); if (I != ValueExprMap.end()) { const SCEV *S = I->second; - if (checkValidity(S)) - return S; - eraseValueFromMap(V); - forgetMemoizedResults(S); + assert(checkValidity(S) && + "existing SCEV has not been properly invalidated"); + return S; } return nullptr; } @@ -4430,44 +4441,6 @@ } } -void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) { - SmallVector Worklist; - SmallPtrSet Visited; - SmallVector ToForget; - Visited.insert(PN); - Worklist.push_back(PN); - while (!Worklist.empty()) { - Instruction *I = Worklist.pop_back_val(); - - auto It = ValueExprMap.find_as(static_cast(I)); - if (It != ValueExprMap.end()) { - const SCEV *Old = It->second; - - // Short-circuit the def-use traversal if the symbolic name - // ceases to appear in expressions. - if (Old != SymName && !hasOperand(Old, SymName)) - continue; - - // SCEVUnknown for a PHI either means that it has an unrecognized - // structure, it's a PHI that's in the progress of being computed - // by createNodeForPHI, or it's a single-value PHI. In the first case, - // additional loop trip count information isn't going to change anything. - // In the second case, createNodeForPHI will perform the necessary - // updates on its own when it gets to that point. In the third, we do - // want to forget the SCEVUnknown. - if (!isa(I) || - !isa(Old) || - (I != PN && Old == SymName)) { - eraseValueFromMap(It->first); - ToForget.push_back(Old); - } - } - - PushDefUseChildren(I, Worklist, Visited); - } - forgetMemoizedResults(ToForget); -} - namespace { /// Takes SCEV S and Loop L. 
For each AddRec sub-expression, use its start @@ -5335,8 +5308,7 @@ const SCEV *StartVal = getSCEV(StartValueV); const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags); - - ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; + insertValueToMap(PN, PHISCEV); // We can add Flags to the post-inc expression only if we // know that it is *undefined behavior* for BEValueV to @@ -5389,7 +5361,7 @@ // Handle PHI node value symbolically. const SCEV *SymbolicName = getUnknown(PN); - ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName}); + insertValueToMap(PN, SymbolicName); // Using this symbolic name for the PHI, analyze the value coming around // the back-edge. @@ -5460,8 +5432,8 @@ // Okay, for the entire analysis of this edge we assumed the PHI // to be symbolic. We now need to go back and purge all of the // entries for the scalars that use the symbolic expression. - forgetSymbolicName(PN, SymbolicName); - ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV; + forgetMemoizedResults(SymbolicName); + insertValueToMap(PN, PHISCEV); // We can add Flags to the post-inc expression only if we // know that it is *undefined behavior* for BEValueV to @@ -5492,8 +5464,8 @@ // Okay, for the entire analysis of this edge we assumed the PHI // to be symbolic. We now need to go back and purge all of the // entries for the scalars that use the symbolic expression. - forgetSymbolicName(PN, SymbolicName); - ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted; + forgetMemoizedResults(SymbolicName); + insertValueToMap(PN, Shifted); return Shifted; } } @@ -7601,62 +7573,19 @@ // Now that we know more about the trip count for this loop, forget any // existing SCEV values for PHI nodes in this loop since they are only // conservative estimates made without the benefit of trip count - // information. This is similar to the code in forgetLoop, except that - // it handles SCEVUnknown PHI nodes specially. + // information. 
This invalidation is not necessary for correctness, and is + // only done to produce more precise results. if (Result.hasAnyInfo()) { - SmallVector Worklist; - SmallPtrSet Discovered; + // Invalidate any expression using an addrec in this loop. SmallVector ToForget; - PushLoopPHIs(L, Worklist, Discovered); - while (!Worklist.empty()) { - Instruction *I = Worklist.pop_back_val(); - - ValueExprMapType::iterator It = - ValueExprMap.find_as(static_cast(I)); - if (It != ValueExprMap.end()) { - const SCEV *Old = It->second; - - // SCEVUnknown for a PHI either means that it has an unrecognized - // structure, or it's a PHI that's in the progress of being computed - // by createNodeForPHI. In the former case, additional loop trip - // count information isn't going to change anything. In the later - // case, createNodeForPHI will perform the necessary updates on its - // own when it gets to that point. - if (!isa(I) || !isa(Old)) { - eraseValueFromMap(It->first); - ToForget.push_back(Old); - } - if (PHINode *PN = dyn_cast(I)) - ConstantEvolutionLoopExitValue.erase(PN); - } - - // Since we don't need to invalidate anything for correctness and we're - // only invalidating to make SCEV's results more precise, we get to stop - // early to avoid invalidating too much. This is especially important in - // cases like: - // - // %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node - // loop0: - // %pn0 = phi - // ... - // loop1: - // %pn1 = phi - // ... - // - // where both loop0 and loop1's backedge taken count uses the SCEV - // expression for %v. If we don't have the early stop below then in cases - // like the above, getBackedgeTakenInfo(loop1) will clear out the trip - // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip - // count for loop1, effectively nullifying SCEV's trip count cache. 
- for (auto *U : I->users()) - if (auto *I = dyn_cast(U)) { - auto *LoopForUser = LI.getLoopFor(I->getParent()); - if (LoopForUser && L->contains(LoopForUser) && - Discovered.insert(I).second) - Worklist.push_back(I); - } - } + auto LoopUsersIt = LoopUsers.find(L); + if (LoopUsersIt != LoopUsers.end()) + append_range(ToForget, LoopUsersIt->second); forgetMemoizedResults(ToForget); + + // Invalidate constant-evolved loop header phis. + for (PHINode &PN : L->getHeader()->phis()) + ConstantEvolutionLoopExitValue.erase(&PN); } // Re-lookup the insert position, since the call to @@ -12996,9 +12925,20 @@ BlockDispositions.erase(S); UnsignedRanges.erase(S); SignedRanges.erase(S); - ExprValueMap.erase(S); HasRecMap.erase(S); MinTrailingZerosCache.erase(S); + + auto ExprIt = ExprValueMap.find(S); + if (ExprIt != ExprValueMap.end()) { + for (auto &ValueAndOffset : ExprIt->second) { + if (ValueAndOffset.second == nullptr) { + auto ValueIt = ValueExprMap.find_as(ValueAndOffset.first); + if (ValueIt != ValueExprMap.end()) + ValueExprMap.erase(ValueIt); + } + } + ExprValueMap.erase(ExprIt); + } } void @@ -13103,13 +13043,40 @@ ValidLoops.insert(L); Worklist.append(L->begin(), L->end()); } - // Check for SCEV expressions referencing invalid/deleted loops. for (auto &KV : ValueExprMap) { - auto *AR = dyn_cast(KV.second); - if (!AR) - continue; - assert(ValidLoops.contains(AR->getLoop()) && - "AddRec references invalid loop"); + // Check for SCEV expressions referencing invalid/deleted loops. + if (auto *AR = dyn_cast(KV.second)) { + assert(ValidLoops.contains(AR->getLoop()) && + "AddRec references invalid loop"); + } + + // Check that the value is also part of the reverse map. 
+ auto It = ExprValueMap.find(KV.second); + if (It == ExprValueMap.end() || !It->second.contains({KV.first, nullptr})) { + dbgs() << "Value " << *KV.first + << " is in ValueExprMap but not in ExprValueMap\n"; + std::abort(); + } + } + + for (const auto &KV : ExprValueMap) { + for (const auto &ValueAndOffset : KV.second) { + if (ValueAndOffset.second != nullptr) + continue; + + auto It = ValueExprMap.find_as(ValueAndOffset.first); + if (It == ValueExprMap.end()) { + dbgs() << "Value " << *ValueAndOffset.first + << " is in ExprValueMap but not in ValueExprMap\n"; + std::abort(); + } + if (It->second != KV.first) { + dbgs() << "Value " << *ValueAndOffset.first + << " mapped to " << *It->second + << " rather than " << *KV.first << "\n"; + std::abort(); + } + } } // Verify intergity of SCEV users. diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h --- a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h @@ -191,8 +191,7 @@ /// variables. DIE &updateSubprogramScopeDIE(const DISubprogram *SP); - void constructScopeDIE(LexicalScope *Scope, - SmallVectorImpl &FinalChildren); + void constructScopeDIE(LexicalScope *Scope, DIE &ParentScopeDIE); /// A helper function to construct a RangeSpanList for a given /// lexical scope. @@ -220,11 +219,6 @@ /// Construct a DIE for the given DbgLabel. DIE *constructLabelDIE(DbgLabel &DL, const LexicalScope &Scope); - /// A helper function to create children of a Scope DIE. - DIE *createScopeChildrenDIE(LexicalScope *Scope, - SmallVectorImpl &Children, - bool *HasNonScopeChildren = nullptr); - void createBaseTypeDIEs(); /// Construct a DIE for this subprogram scope. 
diff --git a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp --- a/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp @@ -521,8 +521,8 @@ } // Construct a DIE for this scope. -void DwarfCompileUnit::constructScopeDIE( - LexicalScope *Scope, SmallVectorImpl &FinalChildren) { +void DwarfCompileUnit::constructScopeDIE(LexicalScope *Scope, + DIE &ParentScopeDIE) { if (!Scope || !Scope->getScopeNode()) return; @@ -533,46 +533,27 @@ "constructSubprogramScopeDIE for non-inlined " "subprograms"); - SmallVector Children; - - // We try to create the scope DIE first, then the children DIEs. This will - // avoid creating un-used children then removing them later when we find out - // the scope DIE is null. - DIE *ScopeDIE; + // Emit inlined subprograms. if (Scope->getParent() && isa(DS)) { - ScopeDIE = constructInlinedScopeDIE(Scope); + DIE *ScopeDIE = constructInlinedScopeDIE(Scope); if (!ScopeDIE) return; - // We create children when the scope DIE is not null. - createScopeChildrenDIE(Scope, Children); - } else { - // Early exit when we know the scope DIE is going to be null. - if (DD->isLexicalScopeDIENull(Scope)) - return; - - bool HasNonScopeChildren = false; - // We create children here when we know the scope DIE is not going to be - // null and the children will be added to the scope DIE. - createScopeChildrenDIE(Scope, Children, &HasNonScopeChildren); - - // If there are only other scopes as children, put them directly in the - // parent instead, as this scope would serve no purpose. 
- if (!HasNonScopeChildren) { - FinalChildren.insert(FinalChildren.end(), - std::make_move_iterator(Children.begin()), - std::make_move_iterator(Children.end())); - return; - } - ScopeDIE = constructLexicalScopeDIE(Scope); - assert(ScopeDIE && "Scope DIE should not be null."); + ParentScopeDIE.addChild(ScopeDIE); + createAndAddScopeChildren(Scope, *ScopeDIE); + return; } - // Add children - for (auto &I : Children) - ScopeDIE->addChild(std::move(I)); + // Early exit when we know the scope DIE is going to be null. + if (DD->isLexicalScopeDIENull(Scope)) + return; + + // Emit lexical blocks. + DIE *ScopeDIE = constructLexicalScopeDIE(Scope); + assert(ScopeDIE && "Scope DIE should not be null."); - FinalChildren.push_back(std::move(ScopeDIE)); + ParentScopeDIE.addChild(ScopeDIE); + createAndAddScopeChildren(Scope, *ScopeDIE); } void DwarfCompileUnit::addScopeRangeList(DIE &ScopeDIE, @@ -1013,42 +994,6 @@ return Result; } -DIE *DwarfCompileUnit::createScopeChildrenDIE(LexicalScope *Scope, - SmallVectorImpl &Children, - bool *HasNonScopeChildren) { - assert(Children.empty()); - DIE *ObjectPointer = nullptr; - - // Emit function arguments (order is significant). - auto Vars = DU->getScopeVariables().lookup(Scope); - for (auto &DV : Vars.Args) - Children.push_back(constructVariableDIE(*DV.second, *Scope, ObjectPointer)); - - // Emit local variables. - auto Locals = sortLocalVars(Vars.Locals); - for (DbgVariable *DV : Locals) - Children.push_back(constructVariableDIE(*DV, *Scope, ObjectPointer)); - - // Skip imported directives in gmlt-like data. - if (!includeMinimalInlineScopes()) { - // There is no need to emit empty lexical block DIE. 
- for (const auto *IE : ImportedEntities[Scope->getScopeNode()]) - Children.push_back( - constructImportedEntityDIE(cast(IE))); - } - - if (HasNonScopeChildren) - *HasNonScopeChildren = !Children.empty(); - - for (DbgLabel *DL : DU->getScopeLabels().lookup(Scope)) - Children.push_back(constructLabelDIE(*DL, *Scope)); - - for (LexicalScope *LS : Scope->getChildren()) - constructScopeDIE(LS, Children); - - return ObjectPointer; -} - DIE &DwarfCompileUnit::constructSubprogramScopeDIE(const DISubprogram *Sub, LexicalScope *Scope) { DIE &ScopeDIE = updateSubprogramScopeDIE(Sub); @@ -1079,13 +1024,48 @@ DIE *DwarfCompileUnit::createAndAddScopeChildren(LexicalScope *Scope, DIE &ScopeDIE) { - // We create children when the scope DIE is not null. - SmallVector Children; - DIE *ObjectPointer = createScopeChildrenDIE(Scope, Children); + DIE *ObjectPointer = nullptr; + + // Emit function arguments (order is significant). + auto Vars = DU->getScopeVariables().lookup(Scope); + for (auto &DV : Vars.Args) + ScopeDIE.addChild(constructVariableDIE(*DV.second, *Scope, ObjectPointer)); + + // Emit local variables. + auto Locals = sortLocalVars(Vars.Locals); + for (DbgVariable *DV : Locals) + ScopeDIE.addChild(constructVariableDIE(*DV, *Scope, ObjectPointer)); + + // Emit imported entities (skipped in gmlt-like data). + if (!includeMinimalInlineScopes()) { + for (const auto *IE : ImportedEntities[Scope->getScopeNode()]) + ScopeDIE.addChild(constructImportedEntityDIE(cast(IE))); + } + + // Emit labels. + for (DbgLabel *DL : DU->getScopeLabels().lookup(Scope)) + ScopeDIE.addChild(constructLabelDIE(*DL, *Scope)); - // Add children - for (auto &I : Children) - ScopeDIE.addChild(std::move(I)); + // Emit inner lexical scopes. 
+ auto needToEmitLexicalScope = [this](LexicalScope *LS) { + if (isa(LS->getScopeNode())) + return true; + auto Vars = DU->getScopeVariables().lookup(LS); + if (!Vars.Args.empty() || !Vars.Locals.empty()) + return true; + if (!includeMinimalInlineScopes() && + !ImportedEntities[LS->getScopeNode()].empty()) + return true; + return false; + }; + for (LexicalScope *LS : Scope->getChildren()) { + // If the lexical block doesn't have non-scope children, skip + // its emission and put its children directly to the parent scope. + if (needToEmitLexicalScope(LS)) + constructScopeDIE(LS, ScopeDIE); + else + createAndAddScopeChildren(LS, ScopeDIE); + } return ObjectPointer; } diff --git a/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp b/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp --- a/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/EHStreamer.cpp @@ -384,8 +384,8 @@ SmallVector LandingPads; LandingPads.reserve(PadInfos.size()); - for (unsigned i = 0, N = PadInfos.size(); i != N; ++i) - LandingPads.push_back(&PadInfos[i]); + for (const LandingPadInfo &LPI : PadInfos) + LandingPads.push_back(&LPI); // Order landing pads lexicographically by type id. llvm::sort(LandingPads, [](const LandingPadInfo *L, const LandingPadInfo *R) { diff --git a/llvm/lib/CodeGen/BranchFolding.cpp b/llvm/lib/CodeGen/BranchFolding.cpp --- a/llvm/lib/CodeGen/BranchFolding.cpp +++ b/llvm/lib/CodeGen/BranchFolding.cpp @@ -1013,8 +1013,8 @@ // If this is a large problem, avoid visiting the same basic blocks // multiple times. if (MergePotentials.size() == TailMergeThreshold) - for (unsigned i = 0, e = MergePotentials.size(); i != e; ++i) - TriedMerging.insert(MergePotentials[i].getBlock()); + for (const MergePotentialsElt &Elt : MergePotentials) + TriedMerging.insert(Elt.getBlock()); // See if we can do any tail merging on those. 
if (MergePotentials.size() >= 2) diff --git a/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp b/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp --- a/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp +++ b/llvm/lib/CodeGen/CriticalAntiDepBreaker.cpp @@ -460,11 +460,10 @@ // Find the node at the bottom of the critical path. const SUnit *Max = nullptr; - for (unsigned i = 0, e = SUnits.size(); i != e; ++i) { - const SUnit *SU = &SUnits[i]; - MISUnitMap[SU->getInstr()] = SU; - if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency) - Max = SU; + for (const SUnit &SU : SUnits) { + MISUnitMap[SU.getInstr()] = &SU; + if (!Max || SU.getDepth() + SU.Latency > Max->getDepth() + Max->Latency) + Max = &SU; } assert(Max && "Failed to find bottom of the critical path"); diff --git a/llvm/lib/CodeGen/GlobalMerge.cpp b/llvm/lib/CodeGen/GlobalMerge.cpp --- a/llvm/lib/CodeGen/GlobalMerge.cpp +++ b/llvm/lib/CodeGen/GlobalMerge.cpp @@ -399,8 +399,7 @@ // having a single global, but is aggressive enough for any other case. 
if (GlobalMergeIgnoreSingleUse) { BitVector AllGlobals(Globals.size()); - for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) { - const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1]; + for (const UsedGlobalSet &UGS : llvm::reverse(UsedGlobalSets)) { if (UGS.UsageCount == 0) continue; if (UGS.Globals.count() > 1) @@ -418,8 +417,7 @@ BitVector PickedGlobals(Globals.size()); bool Changed = false; - for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) { - const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1]; + for (const UsedGlobalSet &UGS : llvm::reverse(UsedGlobalSets)) { if (UGS.UsageCount == 0) continue; if (PickedGlobals.anyCommon(UGS.Globals)) diff --git a/llvm/lib/CodeGen/InlineSpiller.cpp b/llvm/lib/CodeGen/InlineSpiller.cpp --- a/llvm/lib/CodeGen/InlineSpiller.cpp +++ b/llvm/lib/CodeGen/InlineSpiller.cpp @@ -581,11 +581,9 @@ if (!ParentVNI) { LLVM_DEBUG(dbgs() << "\tadding flags: "); - for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { - MachineOperand &MO = MI.getOperand(i); + for (MachineOperand &MO : MI.operands()) if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) MO.setIsUndef(); - } LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI); return true; } diff --git a/llvm/lib/CodeGen/LatencyPriorityQueue.cpp b/llvm/lib/CodeGen/LatencyPriorityQueue.cpp --- a/llvm/lib/CodeGen/LatencyPriorityQueue.cpp +++ b/llvm/lib/CodeGen/LatencyPriorityQueue.cpp @@ -73,11 +73,9 @@ // Look at all of the successors of this node. Count the number of nodes that // this node is the sole unscheduled node for. 
unsigned NumNodesBlocking = 0; - for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); - I != E; ++I) { - if (getSingleUnscheduledPred(I->getSUnit()) == SU) + for (const SDep &Succ : SU->Succs) + if (getSingleUnscheduledPred(Succ.getSUnit()) == SU) ++NumNodesBlocking; - } NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking; Queue.push_back(SU); diff --git a/llvm/lib/CodeGen/MachinePipeliner.cpp b/llvm/lib/CodeGen/MachinePipeliner.cpp --- a/llvm/lib/CodeGen/MachinePipeliner.cpp +++ b/llvm/lib/CodeGen/MachinePipeliner.cpp @@ -1455,17 +1455,15 @@ int asap = 0; int zeroLatencyDepth = 0; SUnit *SU = &SUnits[I]; - for (SUnit::const_pred_iterator IP = SU->Preds.begin(), - EP = SU->Preds.end(); - IP != EP; ++IP) { - SUnit *pred = IP->getSUnit(); - if (IP->getLatency() == 0) + for (const SDep &P : SU->Preds) { + SUnit *pred = P.getSUnit(); + if (P.getLatency() == 0) zeroLatencyDepth = std::max(zeroLatencyDepth, getZeroLatencyDepth(pred) + 1); - if (ignoreDependence(*IP, true)) + if (ignoreDependence(P, true)) continue; - asap = std::max(asap, (int)(getASAP(pred) + IP->getLatency() - - getDistance(pred, SU, *IP) * MII)); + asap = std::max(asap, (int)(getASAP(pred) + P.getLatency() - + getDistance(pred, SU, P) * MII)); } maxASAP = std::max(maxASAP, asap); ScheduleInfo[I].ASAP = asap; @@ -2546,8 +2544,7 @@ unsigned Pos = 0; for (std::deque::iterator I = Insts.begin(), E = Insts.end(); I != E; ++I, ++Pos) { - for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) { - MachineOperand &MO = MI->getOperand(i); + for (MachineOperand &MO : MI->operands()) { if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg())) continue; diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp --- a/llvm/lib/CodeGen/MachineVerifier.cpp +++ b/llvm/lib/CodeGen/MachineVerifier.cpp @@ -3057,9 +3057,9 @@ SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred); // Predecessor of landing pad live-out on last call. 
if (MFI->isEHPad()) { - for (auto I = Pred->rbegin(), E = Pred->rend(); I != E; ++I) { - if (I->isCall()) { - PEnd = Indexes->getInstructionIndex(*I).getBoundaryIndex(); + for (const MachineInstr &MI : llvm::reverse(*Pred)) { + if (MI.isCall()) { + PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex(); break; } } diff --git a/llvm/lib/CodeGen/ModuloSchedule.cpp b/llvm/lib/CodeGen/ModuloSchedule.cpp --- a/llvm/lib/CodeGen/ModuloSchedule.cpp +++ b/llvm/lib/CodeGen/ModuloSchedule.cpp @@ -73,8 +73,7 @@ // stage difference for each use. Keep the maximum value. for (MachineInstr *MI : Schedule.getInstructions()) { int DefStage = Schedule.getStage(MI); - for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) { - MachineOperand &Op = MI->getOperand(i); + for (const MachineOperand &Op : MI->operands()) { if (!Op.isReg() || !Op.isDef()) continue; @@ -1006,8 +1005,7 @@ unsigned CurStageNum, unsigned InstrStageNum, ValueMapTy *VRMap) { - for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) { - MachineOperand &MO = NewMI->getOperand(i); + for (MachineOperand &MO : NewMI->operands()) { if (!MO.isReg() || !Register::isVirtualRegister(MO.getReg())) continue; Register reg = MO.getReg(); diff --git a/llvm/lib/CodeGen/RegAllocFast.cpp b/llvm/lib/CodeGen/RegAllocFast.cpp --- a/llvm/lib/CodeGen/RegAllocFast.cpp +++ b/llvm/lib/CodeGen/RegAllocFast.cpp @@ -1258,8 +1258,7 @@ // Free registers occupied by defs. // Iterate operands in reverse order, so we see the implicit super register // defs first (we added them earlier in case of ). - for (unsigned I = MI.getNumOperands(); I-- > 0;) { - MachineOperand &MO = MI.getOperand(I); + for (MachineOperand &MO : llvm::reverse(MI.operands())) { if (!MO.isReg() || !MO.isDef()) continue; @@ -1362,8 +1361,7 @@ // Free early clobbers. 
if (HasEarlyClobber) { - for (unsigned I = MI.getNumOperands(); I-- > 0; ) { - MachineOperand &MO = MI.getOperand(I); + for (MachineOperand &MO : llvm::reverse(MI.operands())) { if (!MO.isReg() || !MO.isDef() || !MO.isEarlyClobber()) continue; // subreg defs don't free the full register. We left the subreg number @@ -1440,8 +1438,7 @@ MachineBasicBlock::instr_iterator BundledMI = MI.getIterator(); ++BundledMI; while (BundledMI->isBundledWithPred()) { - for (unsigned I = 0; I < BundledMI->getNumOperands(); ++I) { - MachineOperand &MO = BundledMI->getOperand(I); + for (MachineOperand &MO : BundledMI->operands()) { if (!MO.isReg()) continue; diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp --- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp @@ -706,8 +706,8 @@ #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void ScheduleDAGSDNodes::dumpSchedule() const { - for (unsigned i = 0, e = Sequence.size(); i != e; i++) { - if (SUnit *SU = Sequence[i]) + for (const SUnit *SU : Sequence) { + if (SU) dumpNode(*SU); else dbgs() << "**** NOOP ****\n"; diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -7105,10 +7105,16 @@ return; } case Intrinsic::get_active_lane_mask: { + EVT CCVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); SDValue Index = getValue(I.getOperand(0)); - SDValue TripCount = getValue(I.getOperand(1)); EVT ElementVT = Index.getValueType(); - EVT CCVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); + + if (!TLI.shouldExpandGetActiveLaneMask(CCVT, ElementVT)) { + visitTargetIntrinsic(I, Intrinsic); + return; + } + + SDValue TripCount = getValue(I.getOperand(1)); auto VecTy = CCVT.changeVectorElementType(ElementVT); SDValue 
VectorIndex, VectorTripCount; diff --git a/llvm/lib/CodeGen/StackSlotColoring.cpp b/llvm/lib/CodeGen/StackSlotColoring.cpp --- a/llvm/lib/CodeGen/StackSlotColoring.cpp +++ b/llvm/lib/CodeGen/StackSlotColoring.cpp @@ -159,8 +159,7 @@ // FIXME: Need the equivalent of MachineRegisterInfo for frameindex operands. for (MachineBasicBlock &MBB : MF) { for (MachineInstr &MI : MBB) { - for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { - MachineOperand &MO = MI.getOperand(i); + for (const MachineOperand &MO : MI.operands()) { if (!MO.isFI()) continue; int FI = MO.getIndex(); @@ -394,8 +393,7 @@ SmallVectorImpl &SlotMapping, MachineFunction &MF) { // Update the operands. - for (unsigned i = 0, ee = MI.getNumOperands(); i != ee; ++i) { - MachineOperand &MO = MI.getOperand(i); + for (MachineOperand &MO : MI.operands()) { if (!MO.isFI()) continue; int OldFI = MO.getIndex(); diff --git a/llvm/lib/ExecutionEngine/ExecutionEngine.cpp b/llvm/lib/ExecutionEngine/ExecutionEngine.cpp --- a/llvm/lib/ExecutionEngine/ExecutionEngine.cpp +++ b/llvm/lib/ExecutionEngine/ExecutionEngine.cpp @@ -1256,8 +1256,7 @@ // If there are multiple modules, map the non-canonical globals to their // canonical location. 
if (!NonCanonicalGlobals.empty()) { - for (unsigned i = 0, e = NonCanonicalGlobals.size(); i != e; ++i) { - const GlobalValue *GV = NonCanonicalGlobals[i]; + for (const GlobalValue *GV : NonCanonicalGlobals) { const GlobalValue *CGV = LinkedGlobalsMap[std::make_pair( std::string(GV->getName()), GV->getType())]; void *Ptr = getPointerToGlobalIfAvailable(CGV); diff --git a/llvm/lib/TableGen/TGLexer.cpp b/llvm/lib/TableGen/TGLexer.cpp --- a/llvm/lib/TableGen/TGLexer.cpp +++ b/llvm/lib/TableGen/TGLexer.cpp @@ -1017,12 +1017,10 @@ } bool TGLexer::prepIsProcessingEnabled() { - for (auto I = PrepIncludeStack.back()->rbegin(), - E = PrepIncludeStack.back()->rend(); - I != E; ++I) { - if (!I->IsDefined) + for (const PreprocessorControlDesc &I : + llvm::reverse(*PrepIncludeStack.back())) + if (!I.IsDefined) return false; - } return true; } diff --git a/llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp b/llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp --- a/llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp +++ b/llvm/lib/Target/AArch64/AArch64CondBrTuning.cpp @@ -88,12 +88,9 @@ // If this is already the flag setting version of the instruction (e.g., SUBS) // just make sure the implicit-def of NZCV isn't marked dead. 
if (IsFlagSetting) { - for (unsigned I = MI.getNumExplicitOperands(), E = MI.getNumOperands(); - I != E; ++I) { - MachineOperand &MO = MI.getOperand(I); + for (MachineOperand &MO : MI.implicit_operands()) if (MO.isReg() && MO.isDead() && MO.getReg() == AArch64::NZCV) MO.setIsDead(false); - } return &MI; } bool Is64Bit; @@ -104,8 +101,8 @@ MachineInstrBuilder MIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(NewOpc), NewDestReg); - for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) - MIB.add(MI.getOperand(I)); + for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) + MIB.add(MO); return MIB; } diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp --- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp +++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp @@ -102,9 +102,8 @@ static void transferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI, MachineInstrBuilder &DefMI) { const MCInstrDesc &Desc = OldMI.getDesc(); - for (unsigned i = Desc.getNumOperands(), e = OldMI.getNumOperands(); i != e; - ++i) { - const MachineOperand &MO = OldMI.getOperand(i); + for (const MachineOperand &MO : + llvm::drop_begin(OldMI.operands(), Desc.getNumOperands())) { assert(MO.isReg() && MO.getReg()); if (MO.isUse()) UseMI.add(MO); @@ -733,8 +732,9 @@ MOP.getReg(), /*Def=*/false, /*Implicit=*/true)); RegMaskStartIdx++; } - for (; RegMaskStartIdx < MI.getNumOperands(); ++RegMaskStartIdx) - OriginalCall->addOperand(MI.getOperand(RegMaskStartIdx)); + for (const MachineOperand &MO : + llvm::drop_begin(MI.operands(), RegMaskStartIdx)) + OriginalCall->addOperand(MO); auto *Marker = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXrs)) .addReg(AArch64::FP, RegState::Define) diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ 
b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -844,6 +844,8 @@ EVT getAsmOperandValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown = false) const override; + bool shouldExpandGetActiveLaneMask(EVT VT, EVT OpVT) const override; + private: /// Keep a pointer to the AArch64Subtarget around so that we can /// make the right decision when generating code for different targets. diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1503,6 +1503,24 @@ } } +bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT, + EVT OpVT) const { + // Only SVE has a 1:1 mapping from intrinsic -> instruction (whilelo). + if (!Subtarget->hasSVE()) + return true; + + // We can only support legal predicate result types. + if (ResVT != MVT::nxv2i1 && ResVT != MVT::nxv4i1 && ResVT != MVT::nxv8i1 && + ResVT != MVT::nxv16i1) + return true; + + // The whilelo instruction only works with i32 or i64 scalar inputs. 
+ if (OpVT != MVT::i32 && OpVT != MVT::i64) + return true; + + return false; +} + void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) { assert(VT.isFixedLengthVector() && "Expected fixed length vector type!"); @@ -4290,6 +4308,12 @@ return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); } + case Intrinsic::get_active_lane_mask: { + SDValue ID = + DAG.getTargetConstant(Intrinsic::aarch64_sve_whilelo, dl, MVT::i64); + return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(), ID, + Op.getOperand(1), Op.getOperand(2)); + } } } diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp --- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp +++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp @@ -6452,8 +6452,7 @@ MachineIRBuilder MIB(MI); // Go through each operand and ensure it has the same regbank. - for (unsigned OpIdx = 1; OpIdx < MI.getNumOperands(); ++OpIdx) { - MachineOperand &MO = MI.getOperand(OpIdx); + for (MachineOperand &MO : llvm::drop_begin(MI.operands())) { if (!MO.isReg()) continue; Register OpReg = MO.getReg(); @@ -6511,8 +6510,7 @@ // %endbb: // %dst:gpr(s16) = G_PHI %in1:gpr(s16), %bb1, %in2_copy:gpr(s16), %bb2 bool HasGPROp = false, HasFPROp = false; - for (unsigned OpIdx = 1; OpIdx < MI->getNumOperands(); ++OpIdx) { - const auto &MO = MI->getOperand(OpIdx); + for (const MachineOperand &MO : llvm::drop_begin(MI->operands())) { if (!MO.isReg()) continue; const LLT &Ty = MRI.getType(MO.getReg()); diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp --- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp +++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp @@ -3189,10 +3189,10 @@ const MachineInstr &MI) const { unsigned RegBank = AMDGPU::InvalidRegBankID; - for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { - 
if (!MI.getOperand(i).isReg()) + for (const MachineOperand &MO : MI.operands()) { + if (!MO.isReg()) continue; - Register Reg = MI.getOperand(i).getReg(); + Register Reg = MO.getReg(); if (const RegisterBank *Bank = getRegBank(Reg, MRI, *TRI)) { RegBank = regBankUnion(RegBank, Bank->getID()); if (RegBank == AMDGPU::VGPRRegBankID) @@ -3206,10 +3206,10 @@ bool AMDGPURegisterBankInfo::isSALUMapping(const MachineInstr &MI) const { const MachineFunction &MF = *MI.getParent()->getParent(); const MachineRegisterInfo &MRI = MF.getRegInfo(); - for (unsigned i = 0, e = MI.getNumOperands();i != e; ++i) { - if (!MI.getOperand(i).isReg()) + for (const MachineOperand &MO : MI.operands()) { + if (!MO.isReg()) continue; - Register Reg = MI.getOperand(i).getReg(); + Register Reg = MO.getReg(); if (const RegisterBank *Bank = getRegBank(Reg, MRI, *TRI)) { if (Bank->getID() != AMDGPU::SGPRRegBankID) return false; diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp --- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp @@ -285,9 +285,8 @@ NewMI = BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(R600::getLDSNoRetOp(MI.getOpcode()))); - for (unsigned i = 1, e = MI.getNumOperands(); i < e; ++i) { - NewMI.add(MI.getOperand(i)); - } + for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) + NewMI.add(MO); } else { return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB); } diff --git a/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp b/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp --- a/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp +++ b/llvm/lib/Target/AMDGPU/R600MachineScheduler.cpp @@ -124,11 +124,9 @@ DAG->dumpNode(*SU); } else { dbgs() << "NO NODE \n"; - for (unsigned i = 0; i < DAG->SUnits.size(); i++) { - const SUnit &S = DAG->SUnits[i]; + for (const SUnit &S : DAG->SUnits) if (!S.isScheduled) DAG->dumpNode(S); - } }); return SU; diff --git 
a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp --- a/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp +++ b/llvm/lib/Target/AMDGPU/SIFixSGPRCopies.cpp @@ -127,11 +127,11 @@ static bool hasVectorOperands(const MachineInstr &MI, const SIRegisterInfo *TRI) { const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); - for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { - if (!MI.getOperand(i).isReg() || !MI.getOperand(i).getReg().isVirtual()) + for (const MachineOperand &MO : MI.operands()) { + if (!MO.isReg() || !MO.getReg().isVirtual()) continue; - if (TRI->hasVectorRegisters(MRI.getRegClass(MI.getOperand(i).getReg()))) + if (TRI->hasVectorRegisters(MRI.getRegClass(MO.getReg()))) return true; } return false; diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp --- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -4295,8 +4295,8 @@ MachineInstrBuilder MIB; MIB = BuildMI(*BB, MI, DL, TII->get(AMDGPU::SI_CALL), ReturnAddrReg); - for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) - MIB.add(MI.getOperand(I)); + for (const MachineOperand &MO : MI.operands()) + MIB.add(MO); MIB.cloneMemRefs(MI); MI.eraseFromParent(); @@ -9813,10 +9813,9 @@ if (Subtarget->supportsMinMaxDenormModes() || denormalsEnabledForType(MRI.getType(Reg), MF)) return true; - for (unsigned I = 1, E = MI->getNumOperands(); I != E; ++I) { - if (!isCanonicalized(MI->getOperand(I).getReg(), MF, MaxDepth - 1)) + for (const MachineOperand &MO : llvm::drop_begin(MI->operands())) + if (!isCanonicalized(MO.getReg(), MF, MaxDepth - 1)) return false; - } return true; } default: diff --git a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp --- a/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp +++ b/llvm/lib/Target/ARM/ARMConstantIslandPass.cpp @@ -1219,9 +1219,9 @@ // Point the CPUser node to the replacement 
U.CPEMI = CPEs[i].CPEMI; // Change the CPI in the instruction operand to refer to the clone. - for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j) - if (UserMI->getOperand(j).isCPI()) { - UserMI->getOperand(j).setIndex(CPEs[i].CPI); + for (MachineOperand &MO : UserMI->operands()) + if (MO.isCPI()) { + MO.setIndex(CPEs[i].CPI); break; } // Adjust the refcount of the clone... @@ -1601,9 +1601,9 @@ BBUtils->adjustBBOffsetsAfter(&*--NewIsland->getIterator()); // Finally, change the CPI in the instruction operand to be ID. - for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i) - if (UserMI->getOperand(i).isCPI()) { - UserMI->getOperand(i).setIndex(ID); + for (MachineOperand &MO : UserMI->operands()) + if (MO.isCPI()) { + MO.setIndex(ID); break; } diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -14544,6 +14544,52 @@ return SDValue(); } +// Check that N is CMPZ(CSINC(0, 0, CC, X)), return X if valid. +static SDValue IsCMPZCSINC(SDNode *Cmp, ARMCC::CondCodes &CC) { + if (Cmp->getOpcode() != ARMISD::CMPZ || !isNullConstant(Cmp->getOperand(1))) + return SDValue(); + SDValue CSInc = Cmp->getOperand(0); + if (CSInc.getOpcode() != ARMISD::CSINC || + !isNullConstant(CSInc.getOperand(0)) || + !isNullConstant(CSInc.getOperand(1)) || !CSInc->hasOneUse()) + return SDValue(); + CC = (ARMCC::CondCodes)CSInc.getConstantOperandVal(2); + return CSInc.getOperand(3); +} + +static SDValue PerformCMPZCombine(SDNode *N, SelectionDAG &DAG) { + // Given CMPZ(CSINC(C, 0, 0, EQ), 0), we can just use C directly. 
As in + // t92: glue = ARMISD::CMPZ t74, 0 + // t93: i32 = ARMISD::CSINC 0, 0, 1, t92 + // t96: glue = ARMISD::CMPZ t93, 0 + // t114: i32 = ARMISD::CSINV 0, 0, 0, t96 + ARMCC::CondCodes Cond; + if (SDValue C = IsCMPZCSINC(N, Cond)) + if (Cond == ARMCC::EQ) + return C; + return SDValue(); +} + +static SDValue PerformCSETCombine(SDNode *N, SelectionDAG &DAG) { + // Fold away an unneccessary CMPZ/CSINC + // CSXYZ A, B, C1 (CMPZ (CSINC 0, 0, C2, D), 0) -> + // if C1==EQ -> CSXYZ A, B, C2, D + // if C1==NE -> CSXYZ A, B, NOT(C2), D + ARMCC::CondCodes Cond; + if (SDValue C = IsCMPZCSINC(N->getOperand(3).getNode(), Cond)) { + if (N->getConstantOperandVal(2) == ARMCC::EQ) + return DAG.getNode(N->getOpcode(), SDLoc(N), MVT::i32, N->getOperand(0), + N->getOperand(1), + DAG.getConstant(Cond, SDLoc(N), MVT::i32), C); + if (N->getConstantOperandVal(2) == ARMCC::NE) + return DAG.getNode( + N->getOpcode(), SDLoc(N), MVT::i32, N->getOperand(0), + N->getOperand(1), + DAG.getConstant(ARMCC::getOppositeCondition(Cond), SDLoc(N), MVT::i32), C); + } + return SDValue(); +} + /// PerformVMOVRRDCombine - Target-specific dag combine xforms for /// ARMISD::VMOVRRD. 
static SDValue PerformVMOVRRDCombine(SDNode *N, @@ -18314,6 +18360,12 @@ return PerformCMOVCombine(N, DCI.DAG); case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG); + case ARMISD::CMPZ: + return PerformCMPZCombine(N, DCI.DAG); + case ARMISD::CSINC: + case ARMISD::CSINV: + case ARMISD::CSNEG: + return PerformCSETCombine(N, DCI.DAG); case ISD::LOAD: return PerformLOADCombine(N, DCI, Subtarget); case ARMISD::VLD1DUP: diff --git a/llvm/lib/Target/ARM/ARMInstrThumb2.td b/llvm/lib/Target/ARM/ARMInstrThumb2.td --- a/llvm/lib/Target/ARM/ARMInstrThumb2.td +++ b/llvm/lib/Target/ARM/ARMInstrThumb2.td @@ -5603,6 +5603,15 @@ defm : CSPats; defm : CSPats; + def : T2Pat<(ARMcmov (i32 1), (i32 0), cmovpred:$imm), + (t2CSINC ZR, ZR, imm0_31:$imm)>; + def : T2Pat<(ARMcmov (i32 -1), (i32 0), cmovpred:$imm), + (t2CSINV ZR, ZR, imm0_31:$imm)>; + def : T2Pat<(ARMcmov (i32 0), (i32 1), cmovpred:$imm), + (t2CSINC ZR, ZR, (inv_cond_XFORM imm:$imm))>; + def : T2Pat<(ARMcmov (i32 0), (i32 -1), cmovpred:$imm), + (t2CSINV ZR, ZR, (inv_cond_XFORM imm:$imm))>; + multiclass ModifiedV8_1CSEL { def : T2Pat<(ARMcmov modvalue, GPRwithZR:$tval, cmovpred:$imm), (Insn GPRwithZR:$tval, GPRwithZR:$fval, imm0_31:$imm)>; diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp --- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -1326,8 +1326,8 @@ return false; bool HighRegsUsed = false; - for (unsigned i = 2, e = MI->getNumOperands(); i != e; ++i) - if (MI->getOperand(i).getReg() >= ARM::R8) { + for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2)) + if (MO.getReg() >= ARM::R8) { HighRegsUsed = true; break; } diff --git a/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp --- a/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp +++ b/llvm/lib/Target/Hexagon/HexagonCopyToCombine.cpp @@ -237,12 +237,9 @@ } static void 
removeKillInfo(MachineInstr &MI, unsigned RegNotKilled) { - for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) { - MachineOperand &Op = MI.getOperand(I); - if (!Op.isReg() || Op.getReg() != RegNotKilled || !Op.isKill()) - continue; - Op.setIsKill(false); - } + for (MachineOperand &Op : MI.operands()) + if (Op.isReg() && Op.getReg() == RegNotKilled && Op.isKill()) + Op.setIsKill(false); } /// Returns true if it is unsafe to move a copy instruction from \p UseReg to @@ -403,10 +400,7 @@ // Mark TFRs that feed a potential new value store as such. if (TII->mayBeNewStore(MI)) { // Look for uses of TFR instructions. - for (unsigned OpdIdx = 0, OpdE = MI.getNumOperands(); OpdIdx != OpdE; - ++OpdIdx) { - MachineOperand &Op = MI.getOperand(OpdIdx); - + for (const MachineOperand &Op : MI.operands()) { // Skip over anything except register uses. if (!Op.isReg() || !Op.isUse() || !Op.getReg()) continue; diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp --- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -1720,10 +1720,10 @@ LLVM_DEBUG({ dbgs() << "CS information: {"; - for (unsigned i = 0, n = CSI.size(); i < n; ++i) { - int FI = CSI[i].getFrameIdx(); + for (const CalleeSavedInfo &I : CSI) { + int FI = I.getFrameIdx(); int Off = MFI.getObjectOffset(FI); - dbgs() << ' ' << printReg(CSI[i].getReg(), TRI) << ":fi#" << FI << ":sp"; + dbgs() << ' ' << printReg(I.getReg(), TRI) << ":fi#" << FI << ":sp"; if (Off >= 0) dbgs() << '+'; dbgs() << Off; diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp --- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -442,8 +442,7 @@ CLI.IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, IsVarArg, IsStructRet, StructAttrFlag, Outs, OutVals, Ins, DAG); - for (unsigned i = 0, e = 
ArgLocs.size(); i != e; ++i) { - CCValAssign &VA = ArgLocs[i]; + for (const CCValAssign &VA : ArgLocs) { if (VA.isMemLoc()) { CLI.IsTailCall = false; break; diff --git a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp --- a/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -193,9 +193,7 @@ Defs.clear(); Uses.clear(); - for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { - const MachineOperand &MO = MI.getOperand(i); - + for (const MachineOperand &MO : MI.operands()) { if (!MO.isReg()) continue; @@ -1644,8 +1642,7 @@ bool SkipDead) const { const HexagonRegisterInfo &HRI = *Subtarget.getRegisterInfo(); - for (unsigned oper = 0; oper < MI.getNumOperands(); ++oper) { - MachineOperand MO = MI.getOperand(oper); + for (const MachineOperand &MO : MI.operands()) { if (MO.isReg()) { if (!MO.isDef()) continue; diff --git a/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp b/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp --- a/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp +++ b/llvm/lib/Target/Hexagon/HexagonMCInstLower.cpp @@ -109,8 +109,7 @@ assert(MCI->getOpcode() == static_cast(MI->getOpcode()) && "MCI opcode should have been set on construction"); - for (unsigned i = 0, e = MI->getNumOperands(); i < e; i++) { - const MachineOperand &MO = MI->getOperand(i); + for (const MachineOperand &MO : MI->operands()) { MCOperand MCO; bool MustExtend = MO.getTargetFlags() & HexagonII::HMOTF_ConstExtended; diff --git a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp --- a/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp +++ b/llvm/lib/Target/Hexagon/HexagonSubtarget.cpp @@ -305,8 +305,7 @@ VRegHoldingReg[MI->getOperand(0).getReg()] = MI->getOperand(1).getReg(); LastVRegUse.erase(MI->getOperand(1).getReg()); } else { - for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { - const MachineOperand &MO = MI->getOperand(i); + for (const 
MachineOperand &MO : MI->operands()) { if (!MO.isReg()) continue; if (MO.isUse() && !MI->isCopy() && diff --git a/llvm/lib/Target/Lanai/LanaiDelaySlotFiller.cpp b/llvm/lib/Target/Lanai/LanaiDelaySlotFiller.cpp --- a/llvm/lib/Target/Lanai/LanaiDelaySlotFiller.cpp +++ b/llvm/lib/Target/Lanai/LanaiDelaySlotFiller.cpp @@ -199,8 +199,7 @@ assert((!MI->isCall() && !MI->isReturn()) && "Cannot put calls or returns in delay slot."); - for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) { - const MachineOperand &MO = MI->getOperand(I); + for (const MachineOperand &MO : MI->operands()) { unsigned Reg; if (!MO.isReg() || !(Reg = MO.getReg())) diff --git a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp --- a/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp +++ b/llvm/lib/Target/Lanai/LanaiInstrInfo.cpp @@ -467,8 +467,7 @@ return nullptr; // Check if MI has any non-dead defs or physreg uses. This also detects // predicated instructions which will be reading SR. - for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) { - const MachineOperand &MO = MI->getOperand(i); + for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 1)) { // Reject frame index operands. 
if (MO.isFI() || MO.isCPI() || MO.isJTI()) return nullptr; diff --git a/llvm/lib/Target/Lanai/LanaiMCInstLower.cpp b/llvm/lib/Target/Lanai/LanaiMCInstLower.cpp --- a/llvm/lib/Target/Lanai/LanaiMCInstLower.cpp +++ b/llvm/lib/Target/Lanai/LanaiMCInstLower.cpp @@ -93,9 +93,7 @@ void LanaiMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const { OutMI.setOpcode(MI->getOpcode()); - for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) { - const MachineOperand &MO = MI->getOperand(I); - + for (const MachineOperand &MO : MI->operands()) { MCOperand MCOp; switch (MO.getType()) { case MachineOperand::MO_Register: diff --git a/llvm/lib/Target/MSP430/MSP430MCInstLower.cpp b/llvm/lib/Target/MSP430/MSP430MCInstLower.cpp --- a/llvm/lib/Target/MSP430/MSP430MCInstLower.cpp +++ b/llvm/lib/Target/MSP430/MSP430MCInstLower.cpp @@ -115,9 +115,7 @@ void MSP430MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const { OutMI.setOpcode(MI->getOpcode()); - for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { - const MachineOperand &MO = MI->getOperand(i); - + for (const MachineOperand &MO : MI->operands()) { MCOperand MCOp; switch (MO.getType()) { default: diff --git a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp --- a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp +++ b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp @@ -163,9 +163,8 @@ TargetMachine &TM, MCStreamer &OutStreamer, const MipsSubtarget &Subtarget) { - for (unsigned int I = MI.getDesc().getNumOperands(), E = MI.getNumOperands(); - I < E; ++I) { - MachineOperand MO = MI.getOperand(I); + for (const MachineOperand &MO : + llvm::drop_begin(MI.operands(), MI.getDesc().getNumOperands())) { if (MO.isMCSymbol() && (MO.getTargetFlags() & MipsII::MO_JALR)) { MCSymbol *Callee = MO.getMCSymbol(); if (Callee && !Callee->getName().empty()) { diff --git a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp --- 
a/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp +++ b/llvm/lib/Target/Mips/MipsConstantIslandPass.cpp @@ -730,8 +730,8 @@ continue; // Scan the instructions for constant pool operands. - for (unsigned op = 0, e = MI.getNumOperands(); op != e; ++op) - if (MI.getOperand(op).isCPI()) { + for (const MachineOperand &MO : MI.operands()) + if (MO.isCPI()) { // We found one. The addressing mode tells us the max displacement // from the PC that this instruction permits. @@ -759,7 +759,7 @@ break; } // Remember that this is a user of a CP entry. - unsigned CPI = MI.getOperand(op).getIndex(); + unsigned CPI = MO.getIndex(); MachineInstr *CPEMI = CPEMIs[CPI]; unsigned MaxOffs = ((1 << Bits)-1) * Scale; unsigned LongFormMaxOffs = ((1 << LongFormBits)-1) * LongFormScale; @@ -1066,9 +1066,9 @@ // Point the CPUser node to the replacement U.CPEMI = CPEs[i].CPEMI; // Change the CPI in the instruction operand to refer to the clone. - for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j) - if (UserMI->getOperand(j).isCPI()) { - UserMI->getOperand(j).setIndex(CPEs[i].CPI); + for (MachineOperand &MO : UserMI->operands()) + if (MO.isCPI()) { + MO.setIndex(CPEs[i].CPI); break; } // Adjust the refcount of the clone... @@ -1122,9 +1122,9 @@ // Point the CPUser node to the replacement U.CPEMI = CPEs[i].CPEMI; // Change the CPI in the instruction operand to refer to the clone. - for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j) - if (UserMI->getOperand(j).isCPI()) { - UserMI->getOperand(j).setIndex(CPEs[i].CPI); + for (MachineOperand &MO : UserMI->operands()) + if (MO.isCPI()) { + MO.setIndex(CPEs[i].CPI); break; } // Adjust the refcount of the clone... @@ -1392,9 +1392,9 @@ adjustBBOffsetsAfter(&*--NewIsland->getIterator()); // Finally, change the CPI in the instruction operand to be ID. 
- for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i) - if (UserMI->getOperand(i).isCPI()) { - UserMI->getOperand(i).setIndex(ID); + for (MachineOperand &MO : UserMI->operands()) + if (MO.isCPI()) { + MO.setIndex(ID); break; } diff --git a/llvm/lib/Target/Mips/MipsMCInstLower.cpp b/llvm/lib/Target/Mips/MipsMCInstLower.cpp --- a/llvm/lib/Target/Mips/MipsMCInstLower.cpp +++ b/llvm/lib/Target/Mips/MipsMCInstLower.cpp @@ -318,8 +318,7 @@ OutMI.setOpcode(MI->getOpcode()); - for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { - const MachineOperand &MO = MI->getOperand(i); + for (const MachineOperand &MO : MI->operands()) { MCOperand MCOp = LowerOperand(MO); if (MCOp.isValid()) diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp --- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp @@ -3581,8 +3581,8 @@ MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII->get(UsingMips32 ? Mips::LH : Mips::LH64), Rt); - for (unsigned i = 1; i < MI.getNumOperands(); i++) - MIB.add(MI.getOperand(i)); + for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) + MIB.add(MO); if(!UsingMips32) { Register Tmp = RegInfo.createVirtualRegister(&Mips::GPR32RegClass); diff --git a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.h b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.h --- a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.h +++ b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.h @@ -306,6 +306,11 @@ std::string getVirtualRegisterName(unsigned) const; const MCSymbol *getFunctionFrameSymbol() const override; + + // Make emitGlobalVariable() no-op for NVPTX. + // Global variables have been already emitted by the time the base AsmPrinter + // attempts to do so in doFinalization() (see NVPTXAsmPrinter::emitGlobals()). 
+ void emitGlobalVariable(const GlobalVariable *GV) override {} }; } // end namespace llvm diff --git a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp --- a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp +++ b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp @@ -887,33 +887,11 @@ GlobalsEmitted = true; } - // XXX Temproarily remove global variables so that doFinalization() will not - // emit them again (global variables are emitted at beginning). - - Module::GlobalListType &global_list = M.getGlobalList(); - int i, n = global_list.size(); - GlobalVariable **gv_array = new GlobalVariable *[n]; - - // first, back-up GlobalVariable in gv_array - i = 0; - for (Module::global_iterator I = global_list.begin(), E = global_list.end(); - I != E; ++I) - gv_array[i++] = &*I; - - // second, empty global_list - while (!global_list.empty()) - global_list.remove(global_list.begin()); - // call doFinalization bool ret = AsmPrinter::doFinalization(M); - // now we restore global variables - for (i = 0; i < n; i++) - global_list.insert(global_list.end(), gv_array[i]); - clearAnnotationCache(&M); - delete[] gv_array; // Close the last emitted section if (HasDebugInfo) { static_cast(OutStreamer->getTargetStreamer()) diff --git a/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp b/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp --- a/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp +++ b/llvm/lib/Target/PowerPC/PPCCTRLoops.cpp @@ -81,8 +81,7 @@ } static bool clobbersCTR(const MachineInstr &MI) { - for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { - const MachineOperand &MO = MI.getOperand(i); + for (const MachineOperand &MO : MI.operands()) { if (MO.isReg()) { if (MO.isDef() && (MO.getReg() == PPC::CTR || MO.getReg() == PPC::CTR8)) return true; diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp --- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp +++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp @@ -2330,8 +2330,7 @@ &PPC::CTRRCRegClass, 
&PPC::CTRRC8RegClass }; bool Found = false; - for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { - const MachineOperand &MO = MI.getOperand(i); + for (const MachineOperand &MO : MI.operands()) { for (unsigned c = 0; c < array_lengthof(RCs) && !Found; ++c) { const TargetRegisterClass *RC = RCs[c]; if (MO.isReg()) { diff --git a/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp b/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp --- a/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp +++ b/llvm/lib/Target/PowerPC/PPCMCInstLower.cpp @@ -152,9 +152,9 @@ AsmPrinter &AP) { OutMI.setOpcode(MI->getOpcode()); - for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { + for (const MachineOperand &MO : MI->operands()) { MCOperand MCOp; - if (LowerPPCMachineOperandToMCOperand(MI->getOperand(i), MCOp, AP)) + if (LowerPPCMachineOperandToMCOperand(MO, MCOp, AP)) OutMI.addOperand(MCOp); } } diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h @@ -63,6 +63,9 @@ const TargetRegisterClass * getLargestLegalSuperClass(const TargetRegisterClass *RC, const MachineFunction &) const override; + + void getOffsetOpcodes(const StackOffset &Offset, + SmallVectorImpl &Ops) const override; }; } diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.cpp @@ -20,6 +20,7 @@ #include "llvm/CodeGen/RegisterScavenging.h" #include "llvm/CodeGen/TargetFrameLowering.h" #include "llvm/CodeGen/TargetInstrInfo.h" +#include "llvm/IR/DebugInfoMetadata.h" #include "llvm/Support/ErrorHandling.h" #define GET_REGINFO_TARGET_DESC @@ -320,3 +321,30 @@ return &RISCV::VRRegClass; return RC; } + +void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset, + SmallVectorImpl &Ops) const { + // VLENB is the length of a vector register in 
bytes. We use + // to represent one vector register. The dwarf offset is + // VLENB * scalable_offset / 8. + assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset"); + + // Add fixed-sized offset using existing DIExpression interface. + DIExpression::appendOffset(Ops, Offset.getFixed()); + + unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true); + int64_t VLENBSized = Offset.getScalable() / 8; + if (VLENBSized > 0) { + Ops.push_back(dwarf::DW_OP_constu); + Ops.push_back(VLENBSized); + Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL}); + Ops.push_back(dwarf::DW_OP_mul); + Ops.push_back(dwarf::DW_OP_plus); + } else if (VLENBSized < 0) { + Ops.push_back(dwarf::DW_OP_constu); + Ops.push_back(-VLENBSized); + Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL}); + Ops.push_back(dwarf::DW_OP_mul); + Ops.push_back(dwarf::DW_OP_minus); + } +} diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td --- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.td +++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.td @@ -480,6 +480,8 @@ def VL : RISCVReg<0, "vl", ["vl"]>; def VXSAT : RISCVReg<0, "vxsat", ["vxsat"]>; def VXRM : RISCVReg<0, "vxrm", ["vxrm"]>; + def VLENB : RISCVReg<0, "vlenb", ["vlenb"]>, + DwarfRegNum<[!add(4096, SysRegVLENB.Encoding)]>; } foreach m = [1, 2, 4] in { diff --git a/llvm/lib/Target/RISCV/RISCVSystemOperands.td b/llvm/lib/Target/RISCV/RISCVSystemOperands.td --- a/llvm/lib/Target/RISCV/RISCVSystemOperands.td +++ b/llvm/lib/Target/RISCV/RISCVSystemOperands.td @@ -388,4 +388,4 @@ def : SysReg<"vcsr", 0x00F>; def : SysReg<"vl", 0xC20>; def : SysReg<"vtype", 0xC21>; -def : SysReg<"vlenb", 0xC22>; +def SysRegVLENB: SysReg<"vlenb", 0xC22>; diff --git a/llvm/lib/Target/Sparc/DelaySlotFiller.cpp b/llvm/lib/Target/Sparc/DelaySlotFiller.cpp --- a/llvm/lib/Target/Sparc/DelaySlotFiller.cpp +++ b/llvm/lib/Target/Sparc/DelaySlotFiller.cpp @@ -318,8 +318,7 @@ SmallSet& RegDefs, SmallSet& RegUses) { - for (unsigned i = 0, e = MI->getNumOperands(); 
i != e; ++i) { - const MachineOperand &MO = MI->getOperand(i); + for (const MachineOperand &MO : MI->operands()) { if (!MO.isReg()) continue; diff --git a/llvm/lib/Target/Sparc/SparcMCInstLower.cpp b/llvm/lib/Target/Sparc/SparcMCInstLower.cpp --- a/llvm/lib/Target/Sparc/SparcMCInstLower.cpp +++ b/llvm/lib/Target/Sparc/SparcMCInstLower.cpp @@ -97,8 +97,7 @@ OutMI.setOpcode(MI->getOpcode()); - for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { - const MachineOperand &MO = MI->getOperand(i); + for (const MachineOperand &MO : MI->operands()) { MCOperand MCOp = LowerOperand(MI, MO, AP); if (MCOp.isValid()) diff --git a/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp b/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp --- a/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp +++ b/llvm/lib/Target/SystemZ/SystemZElimCompare.cpp @@ -144,8 +144,7 @@ if (MI.isDebugInstr()) return Ref; - for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) { - const MachineOperand &MO = MI.getOperand(I); + for (const MachineOperand &MO : MI.operands()) { if (MO.isReg()) { if (Register MOReg = MO.getReg()) { if (TRI->regsOverlap(MOReg, Reg)) { diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp --- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp +++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp @@ -203,8 +203,8 @@ Size, MI.getOperand(1).isKill(), MI.getOperand(1).isUndef()); // Keep the remaining operands as-is. 
- for (unsigned I = 2; I < MI.getNumOperands(); ++I) - MIB.add(MI.getOperand(I)); + for (const MachineOperand &MO : llvm::drop_begin(MI.operands(), 2)) + MIB.add(MO); MI.eraseFromParent(); } diff --git a/llvm/lib/Target/SystemZ/SystemZMCInstLower.cpp b/llvm/lib/Target/SystemZ/SystemZMCInstLower.cpp --- a/llvm/lib/Target/SystemZ/SystemZMCInstLower.cpp +++ b/llvm/lib/Target/SystemZ/SystemZMCInstLower.cpp @@ -93,10 +93,8 @@ void SystemZMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const { OutMI.setOpcode(MI->getOpcode()); - for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) { - const MachineOperand &MO = MI->getOperand(I); + for (const MachineOperand &MO : MI->operands()) // Ignore all implicit register operands. if (!MO.isReg() || !MO.isImplicit()) OutMI.addOperand(lowerOperand(MO)); - } } diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp --- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp +++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp @@ -1452,9 +1452,7 @@ unsigned NumDefCFAOffsets = 0; int MinAbsOffset = std::numeric_limits::max(); - for (unsigned i = 0, e = Instrs.size(); i != e; ++i) { - const MCCFIInstruction &Inst = Instrs[i]; - + for (const MCCFIInstruction &Inst : Instrs) { switch (Inst.getOperation()) { default: // Any other CFI directives indicate a frame that we aren't prepared diff --git a/llvm/lib/Target/X86/X86ExpandPseudo.cpp b/llvm/lib/Target/X86/X86ExpandPseudo.cpp --- a/llvm/lib/Target/X86/X86ExpandPseudo.cpp +++ b/llvm/lib/Target/X86/X86ExpandPseudo.cpp @@ -209,10 +209,8 @@ llvm_unreachable("unexpected opcode"); OriginalCall = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc)).getInstr(); - unsigned OpStart = 1; bool RAXImplicitDead = false; - for (; OpStart < MI.getNumOperands(); ++OpStart) { - MachineOperand &Op = MI.getOperand(OpStart); + for (MachineOperand &Op : llvm::drop_begin(MI.operands())) { // RAX may be 'implicit dead', if there are 
no other users of the return // value. We introduce a new use, so change it to 'implicit def'. if (Op.isReg() && Op.isImplicit() && Op.isDead() && diff --git a/llvm/lib/Target/XCore/XCoreMCInstLower.cpp b/llvm/lib/Target/XCore/XCoreMCInstLower.cpp --- a/llvm/lib/Target/XCore/XCoreMCInstLower.cpp +++ b/llvm/lib/Target/XCore/XCoreMCInstLower.cpp @@ -103,8 +103,7 @@ void XCoreMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const { OutMI.setOpcode(MI->getOpcode()); - for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { - const MachineOperand &MO = MI->getOperand(i); + for (const MachineOperand &MO : MI->operands()) { MCOperand MCOp = LowerOperand(MO); if (MCOp.isValid()) diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp @@ -2585,7 +2585,8 @@ return replaceInstUsesWith(I, V); Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); - if (I.getType()->isIntOrIntVectorTy(1)) { + Type *Ty = I.getType(); + if (Ty->isIntOrIntVectorTy(1)) { if (auto *SI0 = dyn_cast(Op0)) { if (auto *I = foldAndOrOfSelectUsingImpliedCond(Op1, *SI0, /* IsAnd */ false)) @@ -2618,7 +2619,7 @@ // (X ^ C) | Y -> (X | Y) ^ C iff Y & C == 0 // The check for a 'not' op is for efficiency (if Y is known zero --> ~X). 
Value *Or = Builder.CreateOr(X, Y); - return BinaryOperator::CreateXor(Or, ConstantInt::get(I.getType(), *CV)); + return BinaryOperator::CreateXor(Or, ConstantInt::get(Ty, *CV)); } // (A & C) | (B & D) @@ -2651,14 +2652,14 @@ // iff (C0 & C1) == 0 and (X & ~C0) == 0 if (match(A, m_c_Or(m_Value(X), m_Specific(B))) && MaskedValueIsZero(X, ~*C0, 0, &I)) { - Constant *C01 = ConstantInt::get(I.getType(), *C0 | *C1); + Constant *C01 = ConstantInt::get(Ty, *C0 | *C1); return BinaryOperator::CreateAnd(A, C01); } // (A & C0) | ((X | A) & C1) --> (X | A) & (C0 | C1) // iff (C0 & C1) == 0 and (X & ~C1) == 0 if (match(B, m_c_Or(m_Value(X), m_Specific(A))) && MaskedValueIsZero(X, ~*C1, 0, &I)) { - Constant *C01 = ConstantInt::get(I.getType(), *C0 | *C1); + Constant *C01 = ConstantInt::get(Ty, *C0 | *C1); return BinaryOperator::CreateAnd(B, C01); } // ((X | C2) & C0) | ((X | C3) & C1) --> (X | C2 | C3) & (C0 | C1) @@ -2668,7 +2669,7 @@ match(B, m_Or(m_Specific(X), m_APInt(C3))) && (*C2 & ~*C0).isZero() && (*C3 & ~*C1).isZero()) { Value *Or = Builder.CreateOr(X, *C2 | *C3, "bitfield"); - Constant *C01 = ConstantInt::get(I.getType(), *C0 | *C1); + Constant *C01 = ConstantInt::get(Ty, *C0 | *C1); return BinaryOperator::CreateAnd(Or, C01); } } @@ -2814,10 +2815,10 @@ // canonicalization? if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) && A->getType()->isIntOrIntVectorTy(1)) - return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op1); + return SelectInst::Create(A, ConstantInt::getAllOnesValue(Ty), Op1); if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) && A->getType()->isIntOrIntVectorTy(1)) - return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op0); + return SelectInst::Create(A, ConstantInt::getAllOnesValue(Ty), Op0); // Note: If we've gotten to the point of visiting the outer OR, then the // inner one couldn't be simplified. If it was a constant, then it won't @@ -2849,7 +2850,6 @@ // or(ashr(subNSW(Y, X), ScalarSizeInBits(Y) - 1), X) --> X s> Y ? 
-1 : X. { Value *X, *Y; - Type *Ty = I.getType(); if (match(&I, m_c_Or(m_OneUse(m_AShr( m_NSWSub(m_Value(Y), m_Value(X)), m_SpecificInt(Ty->getScalarSizeInBits() - 1))), @@ -2899,7 +2899,6 @@ if (match(&I, m_c_Or(m_Add(m_Shl(m_One(), m_Value(X)), m_AllOnes()), m_Shl(m_One(), m_Deferred(X)))) && match(&I, m_c_Or(m_OneUse(m_Value()), m_Value()))) { - Type *Ty = X->getType(); Value *Sub = Builder.CreateSub( ConstantInt::get(Ty, Ty->getScalarSizeInBits() - 1), X); return BinaryOperator::CreateLShr(Constant::getAllOnesValue(Ty), Sub); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp --- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp @@ -14,6 +14,7 @@ #include "llvm/ADT/APSInt.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/Statistic.h" +#include "llvm/Analysis/CmpInstAnalysis.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/TargetLibraryInfo.h" @@ -1894,23 +1895,6 @@ return new ICmpInst(NewPred, X, SubOne(cast(Cmp.getOperand(1)))); } - // (X & C2) == 0 -> (trunc X) >= 0 - // (X & C2) != 0 -> (trunc X) < 0 - // iff C2 is a power of 2 and it masks the sign bit of a legal integer type. - const APInt *C2; - if (And->hasOneUse() && C.isZero() && match(Y, m_APInt(C2))) { - int32_t ExactLogBase2 = C2->exactLogBase2(); - if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) { - Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1); - if (auto *AndVTy = dyn_cast(And->getType())) - NTy = VectorType::get(NTy, AndVTy->getElementCount()); - Value *Trunc = Builder.CreateTrunc(X, NTy); - auto NewPred = - Pred == CmpInst::ICMP_EQ ? 
CmpInst::ICMP_SGE : CmpInst::ICMP_SLT; - return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy)); - } - } - return nullptr; } @@ -4615,7 +4599,7 @@ static Instruction *foldICmpWithTrunc(ICmpInst &ICmp, InstCombiner::BuilderTy &Builder) { - const ICmpInst::Predicate Pred = ICmp.getPredicate(); + ICmpInst::Predicate Pred = ICmp.getPredicate(); Value *Op0 = ICmp.getOperand(0), *Op1 = ICmp.getOperand(1); // Try to canonicalize trunc + compare-to-constant into a mask + cmp. @@ -4625,41 +4609,31 @@ if (!match(Op0, m_OneUse(m_Trunc(m_Value(X)))) || !match(Op1, m_APInt(C))) return nullptr; + // This matches patterns corresponding to tests of the signbit as well as: + // (trunc X) u< C --> (X & -C) == 0 (are all masked-high-bits clear?) + // (trunc X) u> C --> (X & ~C) != 0 (are any masked-high-bits set?) + APInt Mask; + if (decomposeBitTestICmp(Op0, Op1, Pred, X, Mask, true /* WithTrunc */)) { + Value *And = Builder.CreateAnd(X, Mask); + Constant *Zero = ConstantInt::getNullValue(X->getType()); + return new ICmpInst(Pred, And, Zero); + } + unsigned SrcBits = X->getType()->getScalarSizeInBits(); - if (Pred == ICmpInst::ICMP_ULT) { - if (C->isPowerOf2()) { - // If C is a power-of-2 (one set bit): - // (trunc X) u< C --> (X & -C) == 0 (are all masked-high-bits clear?) - Constant *MaskC = ConstantInt::get(X->getType(), (-*C).zext(SrcBits)); - Value *And = Builder.CreateAnd(X, MaskC); - Constant *Zero = ConstantInt::getNullValue(X->getType()); - return new ICmpInst(ICmpInst::ICMP_EQ, And, Zero); - } + if (Pred == ICmpInst::ICMP_ULT && C->isNegatedPowerOf2()) { // If C is a negative power-of-2 (high-bit mask): // (trunc X) u< C --> (X & C) != C (are any masked-high-bits clear?) 
- if (C->isNegatedPowerOf2()) { - Constant *MaskC = ConstantInt::get(X->getType(), C->zext(SrcBits)); - Value *And = Builder.CreateAnd(X, MaskC); - return new ICmpInst(ICmpInst::ICMP_NE, And, MaskC); - } + Constant *MaskC = ConstantInt::get(X->getType(), C->zext(SrcBits)); + Value *And = Builder.CreateAnd(X, MaskC); + return new ICmpInst(ICmpInst::ICMP_NE, And, MaskC); } - if (Pred == ICmpInst::ICMP_UGT) { - // If C is a low-bit-mask (C+1 is a power-of-2): - // (trunc X) u> C --> (X & ~C) != 0 (are any masked-high-bits set?) - if (C->isMask()) { - Constant *MaskC = ConstantInt::get(X->getType(), (~*C).zext(SrcBits)); - Value *And = Builder.CreateAnd(X, MaskC); - Constant *Zero = ConstantInt::getNullValue(X->getType()); - return new ICmpInst(ICmpInst::ICMP_NE, And, Zero); - } + if (Pred == ICmpInst::ICMP_UGT && (~*C).isPowerOf2()) { // If C is not-of-power-of-2 (one clear bit): // (trunc X) u> C --> (X & (C+1)) == C+1 (are all masked-high-bits set?) - if ((~*C).isPowerOf2()) { - Constant *MaskC = ConstantInt::get(X->getType(), (*C + 1).zext(SrcBits)); - Value *And = Builder.CreateAnd(X, MaskC); - return new ICmpInst(ICmpInst::ICMP_EQ, And, MaskC); - } + Constant *MaskC = ConstantInt::get(X->getType(), (*C + 1).zext(SrcBits)); + Value *And = Builder.CreateAnd(X, MaskC); + return new ICmpInst(ICmpInst::ICMP_EQ, And, MaskC); } return nullptr; diff --git a/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp b/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp --- a/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp +++ b/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp @@ -94,11 +94,9 @@ return false; } else if (const auto *CS = dyn_cast(Inst)) { // For calls, just check the arguments (and not the callee operand). 
- for (auto OI = CS->arg_begin(), OE = CS->arg_end(); OI != OE; ++OI) { - const Value *Op = *OI; + for (const Value *Op : CS->args()) if (IsPotentialRetainableObjPtr(Op, *PA.getAA()) && PA.related(Ptr, Op)) return true; - } return false; } else if (const StoreInst *SI = dyn_cast(Inst)) { // Special-case stores, because we don't care about the stored value, just diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp --- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp +++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp @@ -159,6 +159,16 @@ cl::desc("The maximum number of blocks to check when trying to prove that " "all paths to an exit go through a killing block (default = 50)")); +// This flags allows or disallows DSE to optimize MemorySSA during its +// traversal. Note that DSE optimizing MemorySSA may impact other passes +// downstream of the DSE invocation and can lead to issues not being +// reproducible in isolation (i.e. when MemorySSA is built from scratch). In +// those cases, the flag can be used to check if DSE's MemorySSA optimizations +// impact follow-up passes. +static cl::opt + OptimizeMemorySSA("dse-optimize-memoryssa", cl::init(false), cl::Hidden, + cl::desc("Allow DSE to optimize memory accesses")); + //===----------------------------------------------------------------------===// // Helper functions //===----------------------------------------------------------------------===// @@ -802,7 +812,7 @@ /// Keep track of instructions (partly) overlapping with killing MemoryDefs per /// basic block. - DenseMap IOLs; + MapVector IOLs; // Class contains self-reference, make sure it's not copied/moved. DSEState(const DSEState &) = delete; @@ -1273,6 +1283,15 @@ Instruction *KillingI = KillingDef->getMemoryInst(); LLVM_DEBUG(dbgs() << " trying to get dominating access\n"); + // Only optimize defining access of KillingDef when directly starting at its + // defining access. 
The defining access also must only access KillingLoc. At + // the moment we only support instructions with a single write location, so + // it should be sufficient to disable optimizations for instructions that + // also read from memory. + bool CanOptimize = OptimizeMemorySSA && + KillingDef->getDefiningAccess() == StartAccess && + !KillingI->mayReadFromMemory(); + // Find the next clobbering Mod access for DefLoc, starting at StartAccess. Optional CurrentLoc; for (;; Current = cast(Current)->getDefiningAccess()) { @@ -1314,8 +1333,10 @@ Instruction *CurrentI = CurrentDef->getMemoryInst(); if (canSkipDef(CurrentDef, !isInvisibleToCallerBeforeRet(KillingUndObj), - TLI)) + TLI)) { + CanOptimize = false; continue; + } // Before we try to remove anything, check for any extra throwing // instructions that block us from DSEing @@ -1352,8 +1373,10 @@ // If Current does not have an analyzable write location or is not // removable, skip it. CurrentLoc = getLocForWriteEx(CurrentI); - if (!CurrentLoc || !isRemovable(CurrentI)) + if (!CurrentLoc || !isRemovable(CurrentI)) { + CanOptimize = false; continue; + } // AliasAnalysis does not account for loops. Limit elimination to // candidates for which we can guarantee they always store to the same @@ -1361,6 +1384,7 @@ if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) { LLVM_DEBUG(dbgs() << " ... not guaranteed loop independent\n"); WalkerStepLimit -= 1; + CanOptimize = false; continue; } @@ -1368,13 +1392,29 @@ // If the killing def is a memory terminator (e.g. lifetime.end), check // the next candidate if the current Current does not write the same // underlying object as the terminator. 
- if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) + if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) { + CanOptimize = false; continue; + } } else { int64_t KillingOffset = 0; int64_t DeadOffset = 0; auto OR = isOverwrite(KillingI, CurrentI, KillingLoc, *CurrentLoc, KillingOffset, DeadOffset); + if (CanOptimize) { + // CurrentDef is the earliest write clobber of KillingDef. Use it as + // optimized access. Do not optimize if CurrentDef is already the + // defining access of KillingDef. + if (CurrentDef != KillingDef->getDefiningAccess() && + (OR == OW_Complete || OR == OW_MaybePartial)) + KillingDef->setOptimized(CurrentDef); + + // Once a may-aliasing def is encountered do not set an optimized + // access. + if (OR != OW_None) + CanOptimize = false; + } + // If Current does not write to the same object as KillingDef, check // the next candidate. if (OR == OW_Unknown || OR == OW_None) diff --git a/llvm/lib/Transforms/Scalar/Reassociate.cpp b/llvm/lib/Transforms/Scalar/Reassociate.cpp --- a/llvm/lib/Transforms/Scalar/Reassociate.cpp +++ b/llvm/lib/Transforms/Scalar/Reassociate.cpp @@ -2313,11 +2313,8 @@ MadeChange |= LinearizeExprTree(I, Tree); SmallVector Ops; Ops.reserve(Tree.size()); - for (unsigned i = 0, e = Tree.size(); i != e; ++i) { - RepeatedValue E = Tree[i]; - Ops.append(E.second.getZExtValue(), - ValueEntry(getRank(E.first), E.first)); - } + for (const RepeatedValue &E : Tree) + Ops.append(E.second.getZExtValue(), ValueEntry(getRank(E.first), E.first)); LLVM_DEBUG(dbgs() << "RAIn:\t"; PrintOps(I, Ops); dbgs() << '\n'); diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -473,10 +473,6 @@ /// handle the more complex control flow around the loops. virtual BasicBlock *createVectorizedLoopSkeleton(); - /// Widen a single instruction within the innermost loop. 
- void widenInstruction(Instruction &I, VPWidenRecipe *WidenRec, - VPTransformState &State); - /// Widen a single call instruction within the innermost loop. void widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands, VPTransformState &State); @@ -496,13 +492,6 @@ /// new unrolled loop, where UF is the unroll factor. using VectorParts = SmallVector; - /// Vectorize a single GetElementPtrInst based on information gathered and - /// decisions taken during planning. - void widenGEP(GetElementPtrInst *GEP, VPWidenGEPRecipe *WidenGEPRec, - VPUser &Indices, unsigned UF, ElementCount VF, - bool IsPtrLoopInvariant, SmallBitVector &IsIndexLoopInvariant, - VPTransformState &State); - /// Vectorize a single first-order recurrence or pointer induction PHINode in /// a block. This method handles the induction variable canonicalization. It /// supports both VF = 1 for unrolled loops and arbitrary length vectors. @@ -567,6 +556,17 @@ /// element. virtual Value *getBroadcastInstrs(Value *V); + /// Add metadata from one instruction to another. + /// + /// This includes both the original MDs from \p From and additional ones (\see + /// addNewMetadata). Use this for *newly created* instructions in the vector + /// loop. + void addMetadata(Instruction *To, Instruction *From); + + /// Similar to the previous function but it adds the metadata to a + /// vector of instructions. + void addMetadata(ArrayRef To, Instruction *From); + protected: friend class LoopVectorizationPlanner; @@ -742,17 +742,6 @@ /// vector loop. void addNewMetadata(Instruction *To, const Instruction *Orig); - /// Add metadata from one instruction to another. - /// - /// This includes both the original MDs from \p From and additional ones (\see - /// addNewMetadata). Use this for *newly created* instructions in the vector - /// loop. - void addMetadata(Instruction *To, Instruction *From); - - /// Similar to the previous function but it adds the metadata to a - /// vector of instructions. 
- void addMetadata(ArrayRef To, Instruction *From); - /// Collect poison-generating recipes that may generate a poison value that is /// used after vectorization, even when their operands are not poison. Those /// recipes meet the following conditions: @@ -1915,9 +1904,11 @@ /// Collect the instructions that are scalar after vectorization. An /// instruction is scalar if it is known to be uniform or will be scalarized - /// during vectorization. Non-uniform scalarized instructions will be - /// represented by VF values in the vectorized loop, each corresponding to an - /// iteration of the original scalar loop. + /// during vectorization. collectLoopScalars should only add non-uniform nodes + /// to the list if they are used by a load/store instruction that is marked as + /// CM_Scalarize. Non-uniform scalarized instructions will be represented by + /// VF values in the vectorized loop, each corresponding to an iteration of + /// the original scalar loop. void collectLoopScalars(ElementCount VF); /// Keeps cost model vectorization decision and cost for instructions. @@ -4723,86 +4714,6 @@ return Cost->useOrderedReductions(RdxDesc); } -void InnerLoopVectorizer::widenGEP(GetElementPtrInst *GEP, - VPWidenGEPRecipe *WidenGEPRec, - VPUser &Operands, unsigned UF, - ElementCount VF, bool IsPtrLoopInvariant, - SmallBitVector &IsIndexLoopInvariant, - VPTransformState &State) { - // Construct a vector GEP by widening the operands of the scalar GEP as - // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP - // results in a vector of pointers when at least one operand of the GEP - // is vector-typed. Thus, to keep the representation compact, we only use - // vector-typed operands for loop-varying values. 
- - if (VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { - // If we are vectorizing, but the GEP has only loop-invariant operands, - // the GEP we build (by only using vector-typed operands for - // loop-varying values) would be a scalar pointer. Thus, to ensure we - // produce a vector of pointers, we need to either arbitrarily pick an - // operand to broadcast, or broadcast a clone of the original GEP. - // Here, we broadcast a clone of the original. - // - // TODO: If at some point we decide to scalarize instructions having - // loop-invariant operands, this special case will no longer be - // required. We would add the scalarization decision to - // collectLoopScalars() and teach getVectorValue() to broadcast - // the lane-zero scalar value. - auto *Clone = Builder.Insert(GEP->clone()); - for (unsigned Part = 0; Part < UF; ++Part) { - Value *EntryPart = Builder.CreateVectorSplat(VF, Clone); - State.set(WidenGEPRec, EntryPart, Part); - addMetadata(EntryPart, GEP); - } - } else { - // If the GEP has at least one loop-varying operand, we are sure to - // produce a vector of pointers. But if we are only unrolling, we want - // to produce a scalar GEP for each unroll part. Thus, the GEP we - // produce with the code below will be scalar (if VF == 1) or vector - // (otherwise). Note that for the unroll-only case, we still maintain - // values in the vector mapping with initVector, as we do for other - // instructions. - for (unsigned Part = 0; Part < UF; ++Part) { - // The pointer operand of the new GEP. If it's loop-invariant, we - // won't broadcast it. - auto *Ptr = IsPtrLoopInvariant - ? State.get(Operands.getOperand(0), VPIteration(0, 0)) - : State.get(Operands.getOperand(0), Part); - - // Collect all the indices for the new GEP. If any index is - // loop-invariant, we won't broadcast it. 
- SmallVector Indices; - for (unsigned I = 1, E = Operands.getNumOperands(); I < E; I++) { - VPValue *Operand = Operands.getOperand(I); - if (IsIndexLoopInvariant[I - 1]) - Indices.push_back(State.get(Operand, VPIteration(0, 0))); - else - Indices.push_back(State.get(Operand, Part)); - } - - // If the GEP instruction is vectorized and was in a basic block that - // needed predication, we can't propagate the poison-generating 'inbounds' - // flag. The control flow has been linearized and the GEP is no longer - // guarded by the predicate, which could make the 'inbounds' properties to - // no longer hold. - bool IsInBounds = GEP->isInBounds() && - State.MayGeneratePoisonRecipes.count(WidenGEPRec) == 0; - - // Create the new GEP. Note that this GEP may be a scalar if VF == 1, - // but it should be a vector, otherwise. - auto *NewGEP = - IsInBounds - ? Builder.CreateInBoundsGEP(GEP->getSourceElementType(), Ptr, - Indices) - : Builder.CreateGEP(GEP->getSourceElementType(), Ptr, Indices); - assert((VF.isScalar() || NewGEP->getType()->isVectorTy()) && - "NewGEP is not a pointer vector"); - State.set(WidenGEPRec, NewGEP, Part); - addMetadata(NewGEP, GEP); - } - } -} - void InnerLoopVectorizer::widenPHIInstruction(Instruction *PN, VPWidenPHIRecipe *PhiR, VPTransformState &State) { @@ -4862,38 +4773,14 @@ // iteration. If the instruction is uniform, we only need to generate the // first lane. Otherwise, we generate all VF values. bool IsUniform = Cost->isUniformAfterVectorization(P, State.VF); - unsigned Lanes = IsUniform ? 1 : State.VF.getKnownMinValue(); - - bool NeedsVectorIndex = !IsUniform && VF.isScalable(); - Value *UnitStepVec = nullptr, *PtrIndSplat = nullptr; - if (NeedsVectorIndex) { - Type *VecIVTy = VectorType::get(PtrInd->getType(), VF); - UnitStepVec = Builder.CreateStepVector(VecIVTy); - PtrIndSplat = Builder.CreateVectorSplat(VF, PtrInd); - } + assert((IsUniform || !State.VF.isScalable()) && + "Cannot scalarize a scalable VF"); + unsigned Lanes = IsUniform ? 
1 : State.VF.getFixedValue(); for (unsigned Part = 0; Part < UF; ++Part) { Value *PartStart = createStepForVF(Builder, PtrInd->getType(), VF, Part); - if (NeedsVectorIndex) { - // Here we cache the whole vector, which means we can support the - // extraction of any lane. However, in some cases the extractelement - // instruction that is generated for scalar uses of this vector (e.g. - // a load instruction) is not folded away. Therefore we still - // calculate values for the first n lanes to avoid redundant moves - // (when extracting the 0th element) and to produce scalar code (i.e. - // additional add/gep instructions instead of expensive extractelement - // instructions) when extracting higher-order elements. - Value *PartStartSplat = Builder.CreateVectorSplat(VF, PartStart); - Value *Indices = Builder.CreateAdd(PartStartSplat, UnitStepVec); - Value *GlobalIndices = Builder.CreateAdd(PtrIndSplat, Indices); - Value *SclrGep = - emitTransformedIndex(Builder, GlobalIndices, PSE.getSE(), DL, II); - SclrGep->setName("next.gep"); - State.set(PhiR, SclrGep, Part); - } - for (unsigned Lane = 0; Lane < Lanes; ++Lane) { Value *Idx = Builder.CreateAdd( PartStart, ConstantInt::get(PtrInd->getType(), Lane)); @@ -4975,123 +4862,6 @@ return !CInt || CInt->isZero(); } -void InnerLoopVectorizer::widenInstruction(Instruction &I, - VPWidenRecipe *WidenRec, - VPTransformState &State) { - switch (I.getOpcode()) { - case Instruction::Call: - case Instruction::Br: - case Instruction::PHI: - case Instruction::GetElementPtr: - case Instruction::Select: - llvm_unreachable("This instruction is handled by a different recipe."); - case Instruction::UDiv: - case Instruction::SDiv: - case Instruction::SRem: - case Instruction::URem: - case Instruction::Add: - case Instruction::FAdd: - case Instruction::Sub: - case Instruction::FSub: - case Instruction::FNeg: - case Instruction::Mul: - case Instruction::FMul: - case Instruction::FDiv: - case Instruction::FRem: - case Instruction::Shl: - case 
Instruction::LShr: - case Instruction::AShr: - case Instruction::And: - case Instruction::Or: - case Instruction::Xor: { - // Just widen unops and binops. - setDebugLocFromInst(&I); - - for (unsigned Part = 0; Part < UF; ++Part) { - SmallVector Ops; - for (VPValue *VPOp : WidenRec->operands()) - Ops.push_back(State.get(VPOp, Part)); - - Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); - - if (auto *VecOp = dyn_cast(V)) { - VecOp->copyIRFlags(&I); - - // If the instruction is vectorized and was in a basic block that needed - // predication, we can't propagate poison-generating flags (nuw/nsw, - // exact, etc.). The control flow has been linearized and the - // instruction is no longer guarded by the predicate, which could make - // the flag properties to no longer hold. - if (State.MayGeneratePoisonRecipes.count(WidenRec) > 0) - VecOp->dropPoisonGeneratingFlags(); - } - - // Use this vector value for all users of the original instruction. - State.set(WidenRec, V, Part); - addMetadata(V, &I); - } - - break; - } - case Instruction::ICmp: - case Instruction::FCmp: { - // Widen compares. Generate vector compares. - bool FCmp = (I.getOpcode() == Instruction::FCmp); - auto *Cmp = cast(&I); - setDebugLocFromInst(Cmp); - for (unsigned Part = 0; Part < UF; ++Part) { - Value *A = State.get(WidenRec->getOperand(0), Part); - Value *B = State.get(WidenRec->getOperand(1), Part); - Value *C = nullptr; - if (FCmp) { - // Propagate fast math flags. 
- IRBuilder<>::FastMathFlagGuard FMFG(Builder); - Builder.setFastMathFlags(Cmp->getFastMathFlags()); - C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); - } else { - C = Builder.CreateICmp(Cmp->getPredicate(), A, B); - } - State.set(WidenRec, C, Part); - addMetadata(C, &I); - } - - break; - } - - case Instruction::ZExt: - case Instruction::SExt: - case Instruction::FPToUI: - case Instruction::FPToSI: - case Instruction::FPExt: - case Instruction::PtrToInt: - case Instruction::IntToPtr: - case Instruction::SIToFP: - case Instruction::UIToFP: - case Instruction::Trunc: - case Instruction::FPTrunc: - case Instruction::BitCast: { - auto *CI = cast(&I); - setDebugLocFromInst(CI); - - /// Vectorize casts. - Type *DestTy = - (VF.isScalar()) ? CI->getType() : VectorType::get(CI->getType(), VF); - - for (unsigned Part = 0; Part < UF; ++Part) { - Value *A = State.get(WidenRec->getOperand(0), Part); - Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); - State.set(WidenRec, Cast, Part); - addMetadata(Cast, &I); - } - break; - } - default: - // This instruction is not vectorized by simple widening. - LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); - llvm_unreachable("Unhandled instruction!"); - } // end of switch. -} - void InnerLoopVectorizer::widenCallInstruction(CallInst &I, VPValue *Def, VPUser &ArgOperands, VPTransformState &State) { @@ -5229,38 +4999,11 @@ !TheLoop->isLoopInvariant(V); }; - auto isScalarPtrInduction = [&](Instruction *MemAccess, Value *Ptr) { - if (!isa(Ptr) || - !Legal->getInductionVars().count(cast(Ptr))) - return false; - auto &Induction = Legal->getInductionVars()[cast(Ptr)]; - if (Induction.getKind() != InductionDescriptor::IK_PtrInduction) - return false; - return isScalarUse(MemAccess, Ptr); - }; - - // A helper that evaluates a memory access's use of a pointer. If the - // pointer is actually the pointer induction of a loop, it is being - // inserted into Worklist. 
If the use will be a scalar use, and the - // pointer is only used by memory accesses, we place the pointer in - // ScalarPtrs. Otherwise, the pointer is placed in PossibleNonScalarPtrs. + // A helper that evaluates a memory access's use of a pointer. If the use will + // be a scalar use and the pointer is only used by memory accesses, we place + // the pointer in ScalarPtrs. Otherwise, the pointer is placed in + // PossibleNonScalarPtrs. auto evaluatePtrUse = [&](Instruction *MemAccess, Value *Ptr) { - if (isScalarPtrInduction(MemAccess, Ptr)) { - Worklist.insert(cast(Ptr)); - LLVM_DEBUG(dbgs() << "LV: Found new scalar instruction: " << *Ptr - << "\n"); - - Instruction *Update = cast( - cast(Ptr)->getIncomingValueForBlock(Latch)); - - // If there is more than one user of Update (Ptr), we shouldn't assume it - // will be scalar after vectorisation as other users of the instruction - // may require widening. Otherwise, add it to ScalarPtrs. - if (Update->hasOneUse() && cast(*Update->user_begin()) == Ptr) { - ScalarPtrs.insert(Update); - return; - } - } // We only care about bitcast and getelementptr instructions contained in // the loop. if (!isLoopVaryingBitCastOrGEP(Ptr)) @@ -5352,11 +5095,22 @@ if (Ind == Legal->getPrimaryInduction() && foldTailByMasking()) continue; + // Returns true if \p Indvar is a pointer induction that is used directly by + // load/store instruction \p I. + auto IsDirectLoadStoreFromPtrIndvar = [&](Instruction *Indvar, + Instruction *I) { + return Induction.second.getKind() == + InductionDescriptor::IK_PtrInduction && + (isa(I) || isa(I)) && + Indvar == getLoadStorePointerOperand(I) && isScalarUse(I, Indvar); + }; + // Determine if all users of the induction variable are scalar after // vectorization. 
auto ScalarInd = llvm::all_of(Ind->users(), [&](User *U) -> bool { auto *I = cast(U); - return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I); + return I == IndUpdate || !TheLoop->contains(I) || Worklist.count(I) || + IsDirectLoadStoreFromPtrIndvar(Ind, I); }); if (!ScalarInd) continue; @@ -5366,7 +5120,8 @@ auto ScalarIndUpdate = llvm::all_of(IndUpdate->users(), [&](User *U) -> bool { auto *I = cast(U); - return I == Ind || !TheLoop->contains(I) || Worklist.count(I); + return I == Ind || !TheLoop->contains(I) || Worklist.count(I) || + IsDirectLoadStoreFromPtrIndvar(IndUpdate, I); }); if (!ScalarIndUpdate) continue; @@ -9908,13 +9663,197 @@ } void VPWidenRecipe::execute(VPTransformState &State) { - State.ILV->widenInstruction(*getUnderlyingInstr(), this, State); + auto &I = *cast(getUnderlyingValue()); + auto &Builder = State.Builder; + switch (I.getOpcode()) { + case Instruction::Call: + case Instruction::Br: + case Instruction::PHI: + case Instruction::GetElementPtr: + case Instruction::Select: + llvm_unreachable("This instruction is handled by a different recipe."); + case Instruction::UDiv: + case Instruction::SDiv: + case Instruction::SRem: + case Instruction::URem: + case Instruction::Add: + case Instruction::FAdd: + case Instruction::Sub: + case Instruction::FSub: + case Instruction::FNeg: + case Instruction::Mul: + case Instruction::FMul: + case Instruction::FDiv: + case Instruction::FRem: + case Instruction::Shl: + case Instruction::LShr: + case Instruction::AShr: + case Instruction::And: + case Instruction::Or: + case Instruction::Xor: { + // Just widen unops and binops. 
+ State.ILV->setDebugLocFromInst(&I); + + for (unsigned Part = 0; Part < State.UF; ++Part) { + SmallVector Ops; + for (VPValue *VPOp : operands()) + Ops.push_back(State.get(VPOp, Part)); + + Value *V = Builder.CreateNAryOp(I.getOpcode(), Ops); + + if (auto *VecOp = dyn_cast(V)) { + VecOp->copyIRFlags(&I); + + // If the instruction is vectorized and was in a basic block that needed + // predication, we can't propagate poison-generating flags (nuw/nsw, + // exact, etc.). The control flow has been linearized and the + // instruction is no longer guarded by the predicate, which could make + // the flag properties to no longer hold. + if (State.MayGeneratePoisonRecipes.count(this) > 0) + VecOp->dropPoisonGeneratingFlags(); + } + + // Use this vector value for all users of the original instruction. + State.set(this, V, Part); + State.ILV->addMetadata(V, &I); + } + + break; + } + case Instruction::ICmp: + case Instruction::FCmp: { + // Widen compares. Generate vector compares. + bool FCmp = (I.getOpcode() == Instruction::FCmp); + auto *Cmp = cast(&I); + State.ILV->setDebugLocFromInst(Cmp); + for (unsigned Part = 0; Part < State.UF; ++Part) { + Value *A = State.get(getOperand(0), Part); + Value *B = State.get(getOperand(1), Part); + Value *C = nullptr; + if (FCmp) { + // Propagate fast math flags. 
+ IRBuilder<>::FastMathFlagGuard FMFG(Builder); + Builder.setFastMathFlags(Cmp->getFastMathFlags()); + C = Builder.CreateFCmp(Cmp->getPredicate(), A, B); + } else { + C = Builder.CreateICmp(Cmp->getPredicate(), A, B); + } + State.set(this, C, Part); + State.ILV->addMetadata(C, &I); + } + + break; + } + + case Instruction::ZExt: + case Instruction::SExt: + case Instruction::FPToUI: + case Instruction::FPToSI: + case Instruction::FPExt: + case Instruction::PtrToInt: + case Instruction::IntToPtr: + case Instruction::SIToFP: + case Instruction::UIToFP: + case Instruction::Trunc: + case Instruction::FPTrunc: + case Instruction::BitCast: { + auto *CI = cast(&I); + State.ILV->setDebugLocFromInst(CI); + + /// Vectorize casts. + Type *DestTy = (State.VF.isScalar()) + ? CI->getType() + : VectorType::get(CI->getType(), State.VF); + + for (unsigned Part = 0; Part < State.UF; ++Part) { + Value *A = State.get(getOperand(0), Part); + Value *Cast = Builder.CreateCast(CI->getOpcode(), A, DestTy); + State.set(this, Cast, Part); + State.ILV->addMetadata(Cast, &I); + } + break; + } + default: + // This instruction is not vectorized by simple widening. + LLVM_DEBUG(dbgs() << "LV: Found an unhandled instruction: " << I); + llvm_unreachable("Unhandled instruction!"); + } // end of switch. } void VPWidenGEPRecipe::execute(VPTransformState &State) { - State.ILV->widenGEP(cast(getUnderlyingInstr()), this, - *this, State.UF, State.VF, IsPtrLoopInvariant, - IsIndexLoopInvariant, State); + auto *GEP = cast(getUnderlyingInstr()); + // Construct a vector GEP by widening the operands of the scalar GEP as + // necessary. We mark the vector GEP 'inbounds' if appropriate. A GEP + // results in a vector of pointers when at least one operand of the GEP + // is vector-typed. Thus, to keep the representation compact, we only use + // vector-typed operands for loop-varying values. 
+ + if (State.VF.isVector() && IsPtrLoopInvariant && IsIndexLoopInvariant.all()) { + // If we are vectorizing, but the GEP has only loop-invariant operands, + // the GEP we build (by only using vector-typed operands for + // loop-varying values) would be a scalar pointer. Thus, to ensure we + // produce a vector of pointers, we need to either arbitrarily pick an + // operand to broadcast, or broadcast a clone of the original GEP. + // Here, we broadcast a clone of the original. + // + // TODO: If at some point we decide to scalarize instructions having + // loop-invariant operands, this special case will no longer be + // required. We would add the scalarization decision to + // collectLoopScalars() and teach getVectorValue() to broadcast + // the lane-zero scalar value. + auto *Clone = State.Builder.Insert(GEP->clone()); + for (unsigned Part = 0; Part < State.UF; ++Part) { + Value *EntryPart = State.Builder.CreateVectorSplat(State.VF, Clone); + State.set(this, EntryPart, Part); + State.ILV->addMetadata(EntryPart, GEP); + } + } else { + // If the GEP has at least one loop-varying operand, we are sure to + // produce a vector of pointers. But if we are only unrolling, we want + // to produce a scalar GEP for each unroll part. Thus, the GEP we + // produce with the code below will be scalar (if VF == 1) or vector + // (otherwise). Note that for the unroll-only case, we still maintain + // values in the vector mapping with initVector, as we do for other + // instructions. + for (unsigned Part = 0; Part < State.UF; ++Part) { + // The pointer operand of the new GEP. If it's loop-invariant, we + // won't broadcast it. + auto *Ptr = IsPtrLoopInvariant + ? State.get(getOperand(0), VPIteration(0, 0)) + : State.get(getOperand(0), Part); + + // Collect all the indices for the new GEP. If any index is + // loop-invariant, we won't broadcast it. 
+ SmallVector Indices; + for (unsigned I = 1, E = getNumOperands(); I < E; I++) { + VPValue *Operand = getOperand(I); + if (IsIndexLoopInvariant[I - 1]) + Indices.push_back(State.get(Operand, VPIteration(0, 0))); + else + Indices.push_back(State.get(Operand, Part)); + } + + // If the GEP instruction is vectorized and was in a basic block that + // needed predication, we can't propagate the poison-generating 'inbounds' + // flag. The control flow has been linearized and the GEP is no longer + // guarded by the predicate, which could make the 'inbounds' properties to + // no longer hold. + bool IsInBounds = + GEP->isInBounds() && State.MayGeneratePoisonRecipes.count(this) == 0; + + // Create the new GEP. Note that this GEP may be a scalar if VF == 1, + // but it should be a vector, otherwise. + auto *NewGEP = IsInBounds + ? State.Builder.CreateInBoundsGEP( + GEP->getSourceElementType(), Ptr, Indices) + : State.Builder.CreateGEP(GEP->getSourceElementType(), + Ptr, Indices); + assert((State.VF.isScalar() || NewGEP->getType()->isVectorTy()) && + "NewGEP is not a pointer vector"); + State.set(this, NewGEP, Part); + State.ILV->addMetadata(NewGEP, GEP); + } + } } void VPWidenIntOrFpInductionRecipe::execute(VPTransformState &State) { diff --git a/llvm/test/Analysis/ScalarEvolution/becount-invalidation.ll b/llvm/test/Analysis/ScalarEvolution/becount-invalidation.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Analysis/ScalarEvolution/becount-invalidation.ll @@ -0,0 +1,76 @@ +; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py +; RUN: opt -disable-output "-passes=print" < %s 2>&1 | FileCheck %s + +; This tests an invalidation issue during BECount calculation. %ptr1.dummy has +; the same SCEV as %ptr1.next, but we should not remove a symbolic name +; placeholder for %ptr1.next when invalidating SCEVs after BECount calculation. 
+ +define void @test(i64* %arg) { +; CHECK-LABEL: 'test' +; CHECK-NEXT: Classifying expressions for: @test +; CHECK-NEXT: %ptr1 = phi i64* [ %ptr1.next, %loop.latch ], [ null, %entry ] +; CHECK-NEXT: --> %ptr1 U: full-set S: full-set Exits: <> LoopDispositions: { %loop.header: Variant, %loop2.header: Invariant } +; CHECK-NEXT: %ptr2 = phi i64* [ %ptr2.next, %loop.latch ], [ null, %entry ] +; CHECK-NEXT: --> %ptr2 U: full-set S: full-set Exits: <> LoopDispositions: { %loop.header: Variant, %loop2.header: Invariant } +; CHECK-NEXT: %ptr1.next = phi i64* [ %ptr2, %loop.header ], [ %ptr1.next.next, %loop2.latch ] +; CHECK-NEXT: --> {%ptr2,+,8}<%loop2.header> U: full-set S: full-set Exits: <> LoopDispositions: { %loop2.header: Computable, %loop.header: Variant } +; CHECK-NEXT: %iv = phi i64 [ 0, %loop.header ], [ %iv.next, %loop2.latch ] +; CHECK-NEXT: --> {0,+,1}<%loop2.header> U: [0,1) S: [0,1) Exits: <> LoopDispositions: { %loop2.header: Computable, %loop.header: Variant } +; CHECK-NEXT: %ptr1.dummy = getelementptr inbounds i64, i64* %ptr1.next, i64 0 +; CHECK-NEXT: --> {%ptr2,+,8}<%loop2.header> U: full-set S: full-set Exits: <> LoopDispositions: { %loop2.header: Computable, %loop.header: Variant } +; CHECK-NEXT: %val = load i64, i64* %ptr1.dummy, align 8 +; CHECK-NEXT: --> %val U: full-set S: full-set Exits: <> LoopDispositions: { %loop2.header: Variant, %loop.header: Variant } +; CHECK-NEXT: %ptr1.next.next = getelementptr inbounds i64, i64* %ptr1.next, i64 1 +; CHECK-NEXT: --> {(8 + %ptr2),+,8}<%loop2.header> U: full-set S: full-set Exits: <> LoopDispositions: { %loop2.header: Computable, %loop.header: Variant } +; CHECK-NEXT: %iv.next = add i64 %iv, 1 +; CHECK-NEXT: --> {1,+,1}<%loop2.header> U: [1,2) S: [1,2) Exits: <> LoopDispositions: { %loop2.header: Computable, %loop.header: Variant } +; CHECK-NEXT: %ptr2.next = phi i64* [ %ptr1, %if ], [ %arg, %else ] +; CHECK-NEXT: --> %ptr2.next U: full-set S: full-set Exits: <> LoopDispositions: { %loop.header: Variant, 
%loop2.header: Invariant } +; CHECK-NEXT: Determining loop execution counts for: @test +; CHECK-NEXT: Loop %loop2.header: Unpredictable backedge-taken count. +; CHECK-NEXT: exit count for loop2.header: ***COULDNOTCOMPUTE*** +; CHECK-NEXT: exit count for loop2.latch: false +; CHECK-NEXT: Loop %loop2.header: max backedge-taken count is false +; CHECK-NEXT: Loop %loop2.header: Unpredictable predicated backedge-taken count. +; CHECK-NEXT: Loop %loop.header: Unpredictable backedge-taken count. +; CHECK-NEXT: Loop %loop.header: Unpredictable max backedge-taken count. +; CHECK-NEXT: Loop %loop.header: Unpredictable predicated backedge-taken count. +; +entry: + br label %loop.header + +loop.header: + %ptr1 = phi i64* [ %ptr1.next, %loop.latch ], [ null, %entry ] + %ptr2 = phi i64* [ %ptr2.next, %loop.latch ], [ null, %entry ] + br label %loop2.header + +loop2.header: + %ptr1.next = phi i64* [ %ptr2, %loop.header ], [ %ptr1.next.next, %loop2.latch ] + %iv = phi i64 [ 0, %loop.header ], [ %iv.next, %loop2.latch ] + %ptr1.dummy = getelementptr inbounds i64, i64* %ptr1.next, i64 0 + %val = load i64, i64* %ptr1.dummy, align 8 + %cmp = icmp ne i64 %val, 0 + br i1 %cmp, label %loop2.exit, label %loop2.latch + +loop2.latch: + %ptr1.next.next = getelementptr inbounds i64, i64* %ptr1.next, i64 1 + %iv.next = add i64 %iv, 1 + br i1 true, label %return, label %loop2.header + +loop2.exit: + %cmp2 = icmp sgt i64 %iv, 0 + br i1 %cmp2, label %if, label %else + +if: + br label %loop.latch + +else: + br label %loop.latch + +loop.latch: + %ptr2.next = phi i64* [ %ptr1, %if ], [ %arg, %else ] + br label %loop.header + +return: + ret void +} diff --git a/llvm/test/CodeGen/AArch64/active_lane_mask.ll b/llvm/test/CodeGen/AArch64/active_lane_mask.ll --- a/llvm/test/CodeGen/AArch64/active_lane_mask.ll +++ b/llvm/test/CodeGen/AArch64/active_lane_mask.ll @@ -4,43 +4,7 @@ define @lane_mask_nxv16i1_i32(i32 %index, i32 %TC) { ; CHECK-LABEL: lane_mask_nxv16i1_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: str 
x29, [sp, #-16]! // 8-byte Folded Spill -; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG -; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: index z0.s, #0, #1 -; CHECK-NEXT: mov z2.s, w0 -; CHECK-NEXT: mov z1.d, z0.d -; CHECK-NEXT: ptrue p0.s -; CHECK-NEXT: incw z1.s -; CHECK-NEXT: add z3.s, z2.s, z0.s -; CHECK-NEXT: incw z0.s, all, mul #2 -; CHECK-NEXT: add z4.s, z2.s, z1.s -; CHECK-NEXT: incw z1.s, all, mul #2 -; CHECK-NEXT: cmphi p1.s, p0/z, z2.s, z3.s -; CHECK-NEXT: add z0.s, z2.s, z0.s -; CHECK-NEXT: cmphi p2.s, p0/z, z2.s, z4.s -; CHECK-NEXT: add z1.s, z2.s, z1.s -; CHECK-NEXT: uzp1 p1.h, p1.h, p2.h -; CHECK-NEXT: cmphi p2.s, p0/z, z2.s, z0.s -; CHECK-NEXT: cmphi p3.s, p0/z, z2.s, z1.s -; CHECK-NEXT: mov z2.s, w1 -; CHECK-NEXT: uzp1 p2.h, p2.h, p3.h -; CHECK-NEXT: cmphi p3.s, p0/z, z2.s, z4.s -; CHECK-NEXT: cmphi p4.s, p0/z, z2.s, z3.s -; CHECK-NEXT: uzp1 p1.b, p1.b, p2.b -; CHECK-NEXT: uzp1 p2.h, p4.h, p3.h -; CHECK-NEXT: cmphi p3.s, p0/z, z2.s, z0.s -; CHECK-NEXT: cmphi p0.s, p0/z, z2.s, z1.s -; CHECK-NEXT: ptrue p4.b -; CHECK-NEXT: uzp1 p0.h, p3.h, p0.h -; CHECK-NEXT: not p1.b, p4/z, p1.b -; CHECK-NEXT: uzp1 p0.b, p2.b, p0.b -; CHECK-NEXT: and p0.b, p4/z, p1.b, p0.b -; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: addvl sp, sp, #1 -; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: whilelo p0.b, w0, w1 ; CHECK-NEXT: ret %active.lane.mask = call @llvm.get.active.lane.mask.nxv16i1.i32(i32 %index, i32 %TC) ret %active.lane.mask @@ -49,23 +13,7 @@ define @lane_mask_nxv8i1_i32(i32 %index, i32 %TC) { ; CHECK-LABEL: lane_mask_nxv8i1_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: index z0.s, #0, #1 -; CHECK-NEXT: mov z2.s, w0 -; CHECK-NEXT: mov z1.d, z0.d -; CHECK-NEXT: add z0.s, z2.s, z0.s -; CHECK-NEXT: incw z1.s -; CHECK-NEXT: ptrue p0.s -; 
CHECK-NEXT: add z1.s, z2.s, z1.s -; CHECK-NEXT: cmphi p2.s, p0/z, z2.s, z0.s -; CHECK-NEXT: cmphi p3.s, p0/z, z2.s, z1.s -; CHECK-NEXT: mov z2.s, w1 -; CHECK-NEXT: ptrue p1.h -; CHECK-NEXT: uzp1 p2.h, p2.h, p3.h -; CHECK-NEXT: cmphi p3.s, p0/z, z2.s, z1.s -; CHECK-NEXT: cmphi p0.s, p0/z, z2.s, z0.s -; CHECK-NEXT: not p2.b, p1/z, p2.b -; CHECK-NEXT: uzp1 p0.h, p0.h, p3.h -; CHECK-NEXT: and p0.b, p1/z, p2.b, p0.b +; CHECK-NEXT: whilelo p0.h, w0, w1 ; CHECK-NEXT: ret %active.lane.mask = call @llvm.get.active.lane.mask.nxv8i1.i32(i32 %index, i32 %TC) ret %active.lane.mask @@ -74,14 +22,7 @@ define @lane_mask_nxv4i1_i32(i32 %index, i32 %TC) { ; CHECK-LABEL: lane_mask_nxv4i1_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: ptrue p0.s -; CHECK-NEXT: index z0.s, w0, #1 -; CHECK-NEXT: mov z1.s, w0 -; CHECK-NEXT: mov z2.s, w1 -; CHECK-NEXT: cmphi p1.s, p0/z, z1.s, z0.s -; CHECK-NEXT: cmphi p2.s, p0/z, z2.s, z0.s -; CHECK-NEXT: not p1.b, p0/z, p1.b -; CHECK-NEXT: and p0.b, p0/z, p1.b, p2.b +; CHECK-NEXT: whilelo p0.s, w0, w1 ; CHECK-NEXT: ret %active.lane.mask = call @llvm.get.active.lane.mask.nxv4i1.i32(i32 %index, i32 %TC) ret %active.lane.mask @@ -90,21 +31,7 @@ define @lane_mask_nxv2i1_i32(i32 %index, i32 %TC) { ; CHECK-LABEL: lane_mask_nxv2i1_i32: ; CHECK: // %bb.0: -; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0 -; CHECK-NEXT: mov z1.d, x0 -; CHECK-NEXT: index z0.d, #0, #1 -; CHECK-NEXT: and z1.d, z1.d, #0xffffffff -; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1 -; CHECK-NEXT: mov z2.d, x1 -; CHECK-NEXT: adr z0.d, [z1.d, z0.d, uxtw] -; CHECK-NEXT: ptrue p0.d -; CHECK-NEXT: mov z1.d, z0.d -; CHECK-NEXT: and z2.d, z2.d, #0xffffffff -; CHECK-NEXT: and z1.d, z1.d, #0xffffffff -; CHECK-NEXT: cmpne p1.d, p0/z, z1.d, z0.d -; CHECK-NEXT: cmphi p2.d, p0/z, z2.d, z1.d -; CHECK-NEXT: not p1.b, p0/z, p1.b -; CHECK-NEXT: and p0.b, p0/z, p1.b, p2.b +; CHECK-NEXT: whilelo p0.d, w0, w1 ; CHECK-NEXT: ret %active.lane.mask = call @llvm.get.active.lane.mask.nxv2i1.i32(i32 %index, i32 %TC) ret 
%active.lane.mask @@ -113,73 +40,7 @@ define @lane_mask_nxv16i1_i64(i64 %index, i64 %TC) { ; CHECK-LABEL: lane_mask_nxv16i1_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill -; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: str p6, [sp, #5, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: str p5, [sp, #6, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG -; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: index z0.d, #0, #1 -; CHECK-NEXT: mov z3.d, x0 -; CHECK-NEXT: mov z1.d, z0.d -; CHECK-NEXT: mov z2.d, z0.d -; CHECK-NEXT: incd z1.d -; CHECK-NEXT: incd z2.d, all, mul #2 -; CHECK-NEXT: mov z5.d, z1.d -; CHECK-NEXT: ptrue p0.d -; CHECK-NEXT: incd z5.d, all, mul #2 -; CHECK-NEXT: add z4.d, z3.d, z0.d -; CHECK-NEXT: add z6.d, z3.d, z1.d -; CHECK-NEXT: add z7.d, z3.d, z2.d -; CHECK-NEXT: add z24.d, z3.d, z5.d -; CHECK-NEXT: incd z0.d, all, mul #4 -; CHECK-NEXT: cmphi p1.d, p0/z, z3.d, z4.d -; CHECK-NEXT: incd z1.d, all, mul #4 -; CHECK-NEXT: cmphi p2.d, p0/z, z3.d, z6.d -; CHECK-NEXT: cmphi p3.d, p0/z, z3.d, z7.d -; CHECK-NEXT: cmphi p4.d, p0/z, z3.d, z24.d -; CHECK-NEXT: incd z2.d, all, mul #4 -; CHECK-NEXT: incd z5.d, all, mul #4 -; CHECK-NEXT: add z0.d, z3.d, z0.d -; CHECK-NEXT: uzp1 p1.s, p1.s, p2.s -; CHECK-NEXT: uzp1 p2.s, p3.s, p4.s -; CHECK-NEXT: add z1.d, z3.d, z1.d -; CHECK-NEXT: add z2.d, z3.d, z2.d -; CHECK-NEXT: add z5.d, z3.d, z5.d -; CHECK-NEXT: uzp1 p1.h, p1.h, p2.h -; CHECK-NEXT: cmphi p2.d, p0/z, z3.d, z0.d -; CHECK-NEXT: cmphi p3.d, p0/z, z3.d, z1.d -; CHECK-NEXT: cmphi p4.d, p0/z, z3.d, z2.d -; CHECK-NEXT: cmphi p5.d, p0/z, z3.d, z5.d -; CHECK-NEXT: uzp1 p2.s, p2.s, p3.s -; CHECK-NEXT: uzp1 p3.s, p4.s, p5.s -; CHECK-NEXT: mov z3.d, x1 -; CHECK-NEXT: uzp1 p2.h, p2.h, p3.h -; CHECK-NEXT: cmphi p3.d, p0/z, z3.d, z6.d -; CHECK-NEXT: cmphi p4.d, p0/z, 
z3.d, z4.d -; CHECK-NEXT: uzp1 p1.b, p1.b, p2.b -; CHECK-NEXT: uzp1 p2.s, p4.s, p3.s -; CHECK-NEXT: cmphi p3.d, p0/z, z3.d, z7.d -; CHECK-NEXT: cmphi p4.d, p0/z, z3.d, z24.d -; CHECK-NEXT: cmphi p5.d, p0/z, z3.d, z0.d -; CHECK-NEXT: cmphi p6.d, p0/z, z3.d, z1.d -; CHECK-NEXT: uzp1 p3.s, p3.s, p4.s -; CHECK-NEXT: uzp1 p4.s, p5.s, p6.s -; CHECK-NEXT: cmphi p5.d, p0/z, z3.d, z2.d -; CHECK-NEXT: cmphi p0.d, p0/z, z3.d, z5.d -; CHECK-NEXT: uzp1 p0.s, p5.s, p0.s -; CHECK-NEXT: ptrue p5.b -; CHECK-NEXT: uzp1 p2.h, p2.h, p3.h -; CHECK-NEXT: uzp1 p0.h, p4.h, p0.h -; CHECK-NEXT: not p1.b, p5/z, p1.b -; CHECK-NEXT: uzp1 p0.b, p2.b, p0.b -; CHECK-NEXT: and p0.b, p5/z, p1.b, p0.b -; CHECK-NEXT: ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: addvl sp, sp, #1 -; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: whilelo p0.b, x0, x1 ; CHECK-NEXT: ret %active.lane.mask = call @llvm.get.active.lane.mask.nxv16i1.i64(i64 %index, i64 %TC) ret %active.lane.mask @@ -188,43 +49,7 @@ define @lane_mask_nxv8i1_i64(i64 %index, i64 %TC) { ; CHECK-LABEL: lane_mask_nxv8i1_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: str x29, [sp, #-16]! 
// 8-byte Folded Spill -; CHECK-NEXT: addvl sp, sp, #-1 -; CHECK-NEXT: str p4, [sp, #7, mul vl] // 2-byte Folded Spill -; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG -; CHECK-NEXT: .cfi_offset w29, -16 -; CHECK-NEXT: index z0.d, #0, #1 -; CHECK-NEXT: mov z2.d, x0 -; CHECK-NEXT: mov z1.d, z0.d -; CHECK-NEXT: ptrue p0.d -; CHECK-NEXT: incd z1.d -; CHECK-NEXT: add z3.d, z2.d, z0.d -; CHECK-NEXT: incd z0.d, all, mul #2 -; CHECK-NEXT: add z4.d, z2.d, z1.d -; CHECK-NEXT: incd z1.d, all, mul #2 -; CHECK-NEXT: cmphi p1.d, p0/z, z2.d, z3.d -; CHECK-NEXT: add z0.d, z2.d, z0.d -; CHECK-NEXT: cmphi p2.d, p0/z, z2.d, z4.d -; CHECK-NEXT: add z1.d, z2.d, z1.d -; CHECK-NEXT: uzp1 p1.s, p1.s, p2.s -; CHECK-NEXT: cmphi p2.d, p0/z, z2.d, z0.d -; CHECK-NEXT: cmphi p3.d, p0/z, z2.d, z1.d -; CHECK-NEXT: mov z2.d, x1 -; CHECK-NEXT: uzp1 p2.s, p2.s, p3.s -; CHECK-NEXT: cmphi p3.d, p0/z, z2.d, z4.d -; CHECK-NEXT: cmphi p4.d, p0/z, z2.d, z3.d -; CHECK-NEXT: uzp1 p1.h, p1.h, p2.h -; CHECK-NEXT: uzp1 p2.s, p4.s, p3.s -; CHECK-NEXT: cmphi p3.d, p0/z, z2.d, z0.d -; CHECK-NEXT: cmphi p0.d, p0/z, z2.d, z1.d -; CHECK-NEXT: ptrue p4.h -; CHECK-NEXT: uzp1 p0.s, p3.s, p0.s -; CHECK-NEXT: not p1.b, p4/z, p1.b -; CHECK-NEXT: uzp1 p0.h, p2.h, p0.h -; CHECK-NEXT: and p0.b, p4/z, p1.b, p0.b -; CHECK-NEXT: ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload -; CHECK-NEXT: addvl sp, sp, #1 -; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload +; CHECK-NEXT: whilelo p0.h, x0, x1 ; CHECK-NEXT: ret %active.lane.mask = call @llvm.get.active.lane.mask.nxv8i1.i64(i64 %index, i64 %TC) ret %active.lane.mask @@ -233,23 +58,7 @@ define @lane_mask_nxv4i1_i64(i64 %index, i64 %TC) { ; CHECK-LABEL: lane_mask_nxv4i1_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: index z0.d, #0, #1 -; CHECK-NEXT: mov z2.d, x0 -; CHECK-NEXT: mov z1.d, z0.d -; CHECK-NEXT: add z0.d, z2.d, z0.d -; CHECK-NEXT: incd z1.d -; CHECK-NEXT: ptrue p0.d -; CHECK-NEXT: add z1.d, 
z2.d, z1.d -; CHECK-NEXT: cmphi p2.d, p0/z, z2.d, z0.d -; CHECK-NEXT: cmphi p3.d, p0/z, z2.d, z1.d -; CHECK-NEXT: mov z2.d, x1 -; CHECK-NEXT: ptrue p1.s -; CHECK-NEXT: uzp1 p2.s, p2.s, p3.s -; CHECK-NEXT: cmphi p3.d, p0/z, z2.d, z1.d -; CHECK-NEXT: cmphi p0.d, p0/z, z2.d, z0.d -; CHECK-NEXT: not p2.b, p1/z, p2.b -; CHECK-NEXT: uzp1 p0.s, p0.s, p3.s -; CHECK-NEXT: and p0.b, p1/z, p2.b, p0.b +; CHECK-NEXT: whilelo p0.s, x0, x1 ; CHECK-NEXT: ret %active.lane.mask = call @llvm.get.active.lane.mask.nxv4i1.i64(i64 %index, i64 %TC) ret %active.lane.mask @@ -258,14 +67,7 @@ define @lane_mask_nxv2i1_i64(i64 %index, i64 %TC) { ; CHECK-LABEL: lane_mask_nxv2i1_i64: ; CHECK: // %bb.0: -; CHECK-NEXT: ptrue p0.d -; CHECK-NEXT: index z0.d, x0, #1 -; CHECK-NEXT: mov z1.d, x0 -; CHECK-NEXT: mov z2.d, x1 -; CHECK-NEXT: cmphi p1.d, p0/z, z1.d, z0.d -; CHECK-NEXT: cmphi p2.d, p0/z, z2.d, z0.d -; CHECK-NEXT: not p1.b, p0/z, p1.b -; CHECK-NEXT: and p0.b, p0/z, p1.b, p2.b +; CHECK-NEXT: whilelo p0.d, x0, x1 ; CHECK-NEXT: ret %active.lane.mask = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 %index, i64 %TC) ret %active.lane.mask diff --git a/llvm/test/CodeGen/ARM/fpclamptosat.ll b/llvm/test/CodeGen/ARM/fpclamptosat.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/ARM/fpclamptosat.ll @@ -0,0 +1,5857 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv6m-none-eabi -float-abi=soft %s -o - | FileCheck %s --check-prefixes=SOFT +; RUN: llc -mtriple=thumbv7m-none-eabihf -mattr=+vfp2sp %s -o - | FileCheck %s --check-prefixes=VFP,VFP2 +; RUN: llc -mtriple=thumbv8.1m.main-eabihf -mattr=+fullfp16,+fp64 %s -o - | FileCheck %s --check-prefixes=VFP,FULL + +; i32 saturate + +define i32 @stest_f64i32(double %x) { +; SOFT-LABEL: stest_f64i32: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, r5, r7, lr} +; SOFT-NEXT: push {r4, r5, r7, lr} +; SOFT-NEXT: bl __aeabi_d2lz +; SOFT-NEXT: movs r2, #1 +; SOFT-NEXT: movs r3, #0 +; SOFT-NEXT: 
ldr r4, .LCPI0_0 +; SOFT-NEXT: subs r5, r0, r4 +; SOFT-NEXT: mov r5, r1 +; SOFT-NEXT: sbcs r5, r3 +; SOFT-NEXT: mov r5, r2 +; SOFT-NEXT: bge .LBB0_7 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: beq .LBB0_8 +; SOFT-NEXT: .LBB0_2: @ %entry +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: bne .LBB0_4 +; SOFT-NEXT: .LBB0_3: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: .LBB0_4: @ %entry +; SOFT-NEXT: mvns r3, r3 +; SOFT-NEXT: lsls r2, r2, #31 +; SOFT-NEXT: subs r4, r2, r0 +; SOFT-NEXT: sbcs r3, r1 +; SOFT-NEXT: blt .LBB0_6 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: mov r0, r2 +; SOFT-NEXT: .LBB0_6: @ %entry +; SOFT-NEXT: pop {r4, r5, r7, pc} +; SOFT-NEXT: .LBB0_7: @ %entry +; SOFT-NEXT: mov r5, r3 +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: bne .LBB0_2 +; SOFT-NEXT: .LBB0_8: @ %entry +; SOFT-NEXT: mov r1, r3 +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: beq .LBB0_3 +; SOFT-NEXT: b .LBB0_4 +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.9: +; SOFT-NEXT: .LCPI0_0: +; SOFT-NEXT: .long 2147483647 @ 0x7fffffff +; +; VFP2-LABEL: stest_f64i32: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, r1, d0 +; VFP2-NEXT: bl __aeabi_d2lz +; VFP2-NEXT: mvn r2, #-2147483648 +; VFP2-NEXT: subs r3, r0, r2 +; VFP2-NEXT: mov.w r12, #0 +; VFP2-NEXT: sbcs r3, r1, #0 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt.w r12, #1 +; VFP2-NEXT: cmp.w r12, #0 +; VFP2-NEXT: itt ne +; VFP2-NEXT: movne r12, r1 +; VFP2-NEXT: movne r2, r0 +; VFP2-NEXT: mov.w r0, #-1 +; VFP2-NEXT: rsbs.w r1, r2, #-2147483648 +; VFP2-NEXT: sbcs.w r0, r0, r12 +; VFP2-NEXT: it ge +; VFP2-NEXT: movge.w r2, #-2147483648 +; VFP2-NEXT: mov r0, r2 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: stest_f64i32: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: vmov r0, r1, d0 +; FULL-NEXT: bl __aeabi_d2lz +; FULL-NEXT: mvn r2, #-2147483648 +; FULL-NEXT: subs r3, r0, r2 +; FULL-NEXT: sbcs r3, r1, #0 +; FULL-NEXT: cset r3, lt +; 
FULL-NEXT: cmp r3, #0 +; FULL-NEXT: csel r0, r0, r2, ne +; FULL-NEXT: csel r1, r1, r3, ne +; FULL-NEXT: rsbs.w r3, r0, #-2147483648 +; FULL-NEXT: mov.w r2, #-1 +; FULL-NEXT: sbcs.w r1, r2, r1 +; FULL-NEXT: it ge +; FULL-NEXT: movge.w r0, #-2147483648 +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptosi double %x to i64 + %0 = icmp slt i64 %conv, 2147483647 + %spec.store.select = select i1 %0, i64 %conv, i64 2147483647 + %1 = icmp sgt i64 %spec.store.select, -2147483648 + %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 -2147483648 + %conv6 = trunc i64 %spec.store.select7 to i32 + ret i32 %conv6 +} + +define i32 @utest_f64i32(double %x) { +; SOFT-LABEL: utest_f64i32: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_d2ulz +; SOFT-NEXT: movs r2, #0 +; SOFT-NEXT: adds r3, r0, #1 +; SOFT-NEXT: sbcs r1, r2 +; SOFT-NEXT: blo .LBB1_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mvns r0, r2 +; SOFT-NEXT: .LBB1_2: @ %entry +; SOFT-NEXT: pop {r7, pc} +; +; VFP-LABEL: utest_f64i32: +; VFP: @ %bb.0: @ %entry +; VFP-NEXT: .save {r7, lr} +; VFP-NEXT: push {r7, lr} +; VFP-NEXT: vmov r0, r1, d0 +; VFP-NEXT: bl __aeabi_d2ulz +; VFP-NEXT: subs.w r2, r0, #-1 +; VFP-NEXT: sbcs r1, r1, #0 +; VFP-NEXT: it hs +; VFP-NEXT: movhs.w r0, #-1 +; VFP-NEXT: pop {r7, pc} +entry: + %conv = fptoui double %x to i64 + %0 = icmp ult i64 %conv, 4294967295 + %spec.store.select = select i1 %0, i64 %conv, i64 4294967295 + %conv6 = trunc i64 %spec.store.select to i32 + ret i32 %conv6 +} + +define i32 @ustest_f64i32(double %x) { +; SOFT-LABEL: ustest_f64i32: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, lr} +; SOFT-NEXT: push {r4, lr} +; SOFT-NEXT: bl __aeabi_d2lz +; SOFT-NEXT: movs r2, #1 +; SOFT-NEXT: movs r3, #0 +; SOFT-NEXT: adds r4, r0, #1 +; SOFT-NEXT: mov r4, r1 +; SOFT-NEXT: sbcs r4, r3 +; SOFT-NEXT: mov r4, r2 +; SOFT-NEXT: bge .LBB2_7 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB2_8 
+; SOFT-NEXT: .LBB2_2: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB2_4 +; SOFT-NEXT: .LBB2_3: @ %entry +; SOFT-NEXT: mvns r0, r3 +; SOFT-NEXT: .LBB2_4: @ %entry +; SOFT-NEXT: rsbs r4, r0, #0 +; SOFT-NEXT: mov r4, r3 +; SOFT-NEXT: sbcs r4, r1 +; SOFT-NEXT: bge .LBB2_9 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB2_10 +; SOFT-NEXT: .LBB2_6: @ %entry +; SOFT-NEXT: pop {r4, pc} +; SOFT-NEXT: .LBB2_7: @ %entry +; SOFT-NEXT: mov r4, r3 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB2_2 +; SOFT-NEXT: .LBB2_8: @ %entry +; SOFT-NEXT: mov r1, r3 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB2_3 +; SOFT-NEXT: b .LBB2_4 +; SOFT-NEXT: .LBB2_9: @ %entry +; SOFT-NEXT: mov r2, r3 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bne .LBB2_6 +; SOFT-NEXT: .LBB2_10: @ %entry +; SOFT-NEXT: mov r0, r2 +; SOFT-NEXT: pop {r4, pc} +; +; VFP2-LABEL: ustest_f64i32: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, r1, d0 +; VFP2-NEXT: bl __aeabi_d2lz +; VFP2-NEXT: subs.w r3, r0, #-1 +; VFP2-NEXT: mov.w r2, #0 +; VFP2-NEXT: sbcs r3, r1, #0 +; VFP2-NEXT: mov.w r3, #0 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r3, #1 +; VFP2-NEXT: cmp r3, #0 +; VFP2-NEXT: ite ne +; VFP2-NEXT: movne r3, r1 +; VFP2-NEXT: moveq.w r0, #-1 +; VFP2-NEXT: rsbs r1, r0, #0 +; VFP2-NEXT: sbcs.w r1, r2, r3 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r2, #1 +; VFP2-NEXT: cmp r2, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r2 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: ustest_f64i32: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: vmov r0, r1, d0 +; FULL-NEXT: bl __aeabi_d2lz +; FULL-NEXT: subs.w r2, r0, #-1 +; FULL-NEXT: sbcs r2, r1, #0 +; FULL-NEXT: cset r2, lt +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: it eq +; FULL-NEXT: moveq.w r0, #-1 +; FULL-NEXT: csel r1, r1, r2, ne +; FULL-NEXT: rsbs r3, r0, #0 +; FULL-NEXT: mov.w r2, #0 +; FULL-NEXT: sbcs.w r1, r2, r1 +; FULL-NEXT: cset r1, lt 
+; FULL-NEXT: cmp r1, #0 +; FULL-NEXT: csel r0, r0, r1, ne +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptosi double %x to i64 + %0 = icmp slt i64 %conv, 4294967295 + %spec.store.select = select i1 %0, i64 %conv, i64 4294967295 + %1 = icmp sgt i64 %spec.store.select, 0 + %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 0 + %conv6 = trunc i64 %spec.store.select7 to i32 + ret i32 %conv6 +} + +define i32 @stest_f32i32(float %x) { +; SOFT-LABEL: stest_f32i32: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, r5, r7, lr} +; SOFT-NEXT: push {r4, r5, r7, lr} +; SOFT-NEXT: bl __aeabi_f2lz +; SOFT-NEXT: movs r2, #1 +; SOFT-NEXT: movs r3, #0 +; SOFT-NEXT: ldr r4, .LCPI3_0 +; SOFT-NEXT: subs r5, r0, r4 +; SOFT-NEXT: mov r5, r1 +; SOFT-NEXT: sbcs r5, r3 +; SOFT-NEXT: mov r5, r2 +; SOFT-NEXT: bge .LBB3_7 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: beq .LBB3_8 +; SOFT-NEXT: .LBB3_2: @ %entry +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: bne .LBB3_4 +; SOFT-NEXT: .LBB3_3: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: .LBB3_4: @ %entry +; SOFT-NEXT: mvns r3, r3 +; SOFT-NEXT: lsls r2, r2, #31 +; SOFT-NEXT: subs r4, r2, r0 +; SOFT-NEXT: sbcs r3, r1 +; SOFT-NEXT: blt .LBB3_6 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: mov r0, r2 +; SOFT-NEXT: .LBB3_6: @ %entry +; SOFT-NEXT: pop {r4, r5, r7, pc} +; SOFT-NEXT: .LBB3_7: @ %entry +; SOFT-NEXT: mov r5, r3 +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: bne .LBB3_2 +; SOFT-NEXT: .LBB3_8: @ %entry +; SOFT-NEXT: mov r1, r3 +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: beq .LBB3_3 +; SOFT-NEXT: b .LBB3_4 +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.9: +; SOFT-NEXT: .LCPI3_0: +; SOFT-NEXT: .long 2147483647 @ 0x7fffffff +; +; VFP2-LABEL: stest_f32i32: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_f2lz +; VFP2-NEXT: mvn r2, #-2147483648 +; VFP2-NEXT: subs r3, r0, r2 +; VFP2-NEXT: mov.w r12, #0 +; VFP2-NEXT: sbcs r3, r1, #0 +; 
VFP2-NEXT: it lt +; VFP2-NEXT: movlt.w r12, #1 +; VFP2-NEXT: cmp.w r12, #0 +; VFP2-NEXT: itt ne +; VFP2-NEXT: movne r12, r1 +; VFP2-NEXT: movne r2, r0 +; VFP2-NEXT: mov.w r0, #-1 +; VFP2-NEXT: rsbs.w r1, r2, #-2147483648 +; VFP2-NEXT: sbcs.w r0, r0, r12 +; VFP2-NEXT: it ge +; VFP2-NEXT: movge.w r2, #-2147483648 +; VFP2-NEXT: mov r0, r2 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: stest_f32i32: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: bl __aeabi_f2lz +; FULL-NEXT: mvn r2, #-2147483648 +; FULL-NEXT: subs r3, r0, r2 +; FULL-NEXT: sbcs r3, r1, #0 +; FULL-NEXT: cset r3, lt +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: csel r0, r0, r2, ne +; FULL-NEXT: csel r1, r1, r3, ne +; FULL-NEXT: rsbs.w r3, r0, #-2147483648 +; FULL-NEXT: mov.w r2, #-1 +; FULL-NEXT: sbcs.w r1, r2, r1 +; FULL-NEXT: it ge +; FULL-NEXT: movge.w r0, #-2147483648 +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptosi float %x to i64 + %0 = icmp slt i64 %conv, 2147483647 + %spec.store.select = select i1 %0, i64 %conv, i64 2147483647 + %1 = icmp sgt i64 %spec.store.select, -2147483648 + %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 -2147483648 + %conv6 = trunc i64 %spec.store.select7 to i32 + ret i32 %conv6 +} + +define i32 @utest_f32i32(float %x) { +; SOFT-LABEL: utest_f32i32: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_f2ulz +; SOFT-NEXT: movs r2, #0 +; SOFT-NEXT: adds r3, r0, #1 +; SOFT-NEXT: sbcs r1, r2 +; SOFT-NEXT: blo .LBB4_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mvns r0, r2 +; SOFT-NEXT: .LBB4_2: @ %entry +; SOFT-NEXT: pop {r7, pc} +; +; VFP-LABEL: utest_f32i32: +; VFP: @ %bb.0: @ %entry +; VFP-NEXT: .save {r7, lr} +; VFP-NEXT: push {r7, lr} +; VFP-NEXT: vmov r0, s0 +; VFP-NEXT: bl __aeabi_f2ulz +; VFP-NEXT: subs.w r2, r0, #-1 +; VFP-NEXT: sbcs r1, r1, #0 +; VFP-NEXT: it hs +; VFP-NEXT: movhs.w r0, #-1 +; VFP-NEXT: pop {r7, pc} +entry: + 
%conv = fptoui float %x to i64 + %0 = icmp ult i64 %conv, 4294967295 + %spec.store.select = select i1 %0, i64 %conv, i64 4294967295 + %conv6 = trunc i64 %spec.store.select to i32 + ret i32 %conv6 +} + +define i32 @ustest_f32i32(float %x) { +; SOFT-LABEL: ustest_f32i32: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, lr} +; SOFT-NEXT: push {r4, lr} +; SOFT-NEXT: bl __aeabi_f2lz +; SOFT-NEXT: movs r2, #1 +; SOFT-NEXT: movs r3, #0 +; SOFT-NEXT: adds r4, r0, #1 +; SOFT-NEXT: mov r4, r1 +; SOFT-NEXT: sbcs r4, r3 +; SOFT-NEXT: mov r4, r2 +; SOFT-NEXT: bge .LBB5_7 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB5_8 +; SOFT-NEXT: .LBB5_2: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB5_4 +; SOFT-NEXT: .LBB5_3: @ %entry +; SOFT-NEXT: mvns r0, r3 +; SOFT-NEXT: .LBB5_4: @ %entry +; SOFT-NEXT: rsbs r4, r0, #0 +; SOFT-NEXT: mov r4, r3 +; SOFT-NEXT: sbcs r4, r1 +; SOFT-NEXT: bge .LBB5_9 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB5_10 +; SOFT-NEXT: .LBB5_6: @ %entry +; SOFT-NEXT: pop {r4, pc} +; SOFT-NEXT: .LBB5_7: @ %entry +; SOFT-NEXT: mov r4, r3 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB5_2 +; SOFT-NEXT: .LBB5_8: @ %entry +; SOFT-NEXT: mov r1, r3 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB5_3 +; SOFT-NEXT: b .LBB5_4 +; SOFT-NEXT: .LBB5_9: @ %entry +; SOFT-NEXT: mov r2, r3 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bne .LBB5_6 +; SOFT-NEXT: .LBB5_10: @ %entry +; SOFT-NEXT: mov r0, r2 +; SOFT-NEXT: pop {r4, pc} +; +; VFP2-LABEL: ustest_f32i32: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_f2lz +; VFP2-NEXT: subs.w r3, r0, #-1 +; VFP2-NEXT: mov.w r2, #0 +; VFP2-NEXT: sbcs r3, r1, #0 +; VFP2-NEXT: mov.w r3, #0 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r3, #1 +; VFP2-NEXT: cmp r3, #0 +; VFP2-NEXT: ite ne +; VFP2-NEXT: movne r3, r1 +; VFP2-NEXT: moveq.w r0, #-1 +; VFP2-NEXT: rsbs r1, r0, #0 +; VFP2-NEXT: sbcs.w 
r1, r2, r3 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r2, #1 +; VFP2-NEXT: cmp r2, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r2 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: ustest_f32i32: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: bl __aeabi_f2lz +; FULL-NEXT: subs.w r2, r0, #-1 +; FULL-NEXT: sbcs r2, r1, #0 +; FULL-NEXT: cset r2, lt +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: it eq +; FULL-NEXT: moveq.w r0, #-1 +; FULL-NEXT: csel r1, r1, r2, ne +; FULL-NEXT: rsbs r3, r0, #0 +; FULL-NEXT: mov.w r2, #0 +; FULL-NEXT: sbcs.w r1, r2, r1 +; FULL-NEXT: cset r1, lt +; FULL-NEXT: cmp r1, #0 +; FULL-NEXT: csel r0, r0, r1, ne +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptosi float %x to i64 + %0 = icmp slt i64 %conv, 4294967295 + %spec.store.select = select i1 %0, i64 %conv, i64 4294967295 + %1 = icmp sgt i64 %spec.store.select, 0 + %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 0 + %conv6 = trunc i64 %spec.store.select7 to i32 + ret i32 %conv6 +} + +define i32 @stest_f16i32(half %x) { +; SOFT-LABEL: stest_f16i32: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, r5, r7, lr} +; SOFT-NEXT: push {r4, r5, r7, lr} +; SOFT-NEXT: uxth r0, r0 +; SOFT-NEXT: bl __aeabi_h2f +; SOFT-NEXT: bl __aeabi_f2lz +; SOFT-NEXT: movs r2, #1 +; SOFT-NEXT: movs r3, #0 +; SOFT-NEXT: ldr r4, .LCPI6_0 +; SOFT-NEXT: subs r5, r0, r4 +; SOFT-NEXT: mov r5, r1 +; SOFT-NEXT: sbcs r5, r3 +; SOFT-NEXT: mov r5, r2 +; SOFT-NEXT: bge .LBB6_7 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: beq .LBB6_8 +; SOFT-NEXT: .LBB6_2: @ %entry +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: bne .LBB6_4 +; SOFT-NEXT: .LBB6_3: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: .LBB6_4: @ %entry +; SOFT-NEXT: mvns r3, r3 +; SOFT-NEXT: lsls r2, r2, #31 +; SOFT-NEXT: subs r4, r2, r0 +; SOFT-NEXT: sbcs r3, r1 +; SOFT-NEXT: blt .LBB6_6 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: mov r0, r2 +; SOFT-NEXT: .LBB6_6: @ %entry 
+; SOFT-NEXT: pop {r4, r5, r7, pc} +; SOFT-NEXT: .LBB6_7: @ %entry +; SOFT-NEXT: mov r5, r3 +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: bne .LBB6_2 +; SOFT-NEXT: .LBB6_8: @ %entry +; SOFT-NEXT: mov r1, r3 +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: beq .LBB6_3 +; SOFT-NEXT: b .LBB6_4 +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.9: +; SOFT-NEXT: .LCPI6_0: +; SOFT-NEXT: .long 2147483647 @ 0x7fffffff +; +; VFP2-LABEL: stest_f16i32: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_h2f +; VFP2-NEXT: bl __aeabi_f2lz +; VFP2-NEXT: mvn r2, #-2147483648 +; VFP2-NEXT: subs r3, r0, r2 +; VFP2-NEXT: mov.w r12, #0 +; VFP2-NEXT: sbcs r3, r1, #0 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt.w r12, #1 +; VFP2-NEXT: cmp.w r12, #0 +; VFP2-NEXT: itt ne +; VFP2-NEXT: movne r12, r1 +; VFP2-NEXT: movne r2, r0 +; VFP2-NEXT: mov.w r0, #-1 +; VFP2-NEXT: rsbs.w r1, r2, #-2147483648 +; VFP2-NEXT: sbcs.w r0, r0, r12 +; VFP2-NEXT: it ge +; VFP2-NEXT: movge.w r2, #-2147483648 +; VFP2-NEXT: mov r0, r2 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: stest_f16i32: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: vmov.f16 r0, s0 +; FULL-NEXT: vmov s0, r0 +; FULL-NEXT: bl __fixhfdi +; FULL-NEXT: mvn r2, #-2147483648 +; FULL-NEXT: subs r3, r0, r2 +; FULL-NEXT: sbcs r3, r1, #0 +; FULL-NEXT: cset r3, lt +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: csel r0, r0, r2, ne +; FULL-NEXT: csel r1, r1, r3, ne +; FULL-NEXT: rsbs.w r3, r0, #-2147483648 +; FULL-NEXT: mov.w r2, #-1 +; FULL-NEXT: sbcs.w r1, r2, r1 +; FULL-NEXT: it ge +; FULL-NEXT: movge.w r0, #-2147483648 +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptosi half %x to i64 + %0 = icmp slt i64 %conv, 2147483647 + %spec.store.select = select i1 %0, i64 %conv, i64 2147483647 + %1 = icmp sgt i64 %spec.store.select, -2147483648 + %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 -2147483648 + %conv6 = trunc i64 %spec.store.select7 to 
i32 + ret i32 %conv6 +} + +define i32 @utesth_f16i32(half %x) { +; SOFT-LABEL: utesth_f16i32: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: uxth r0, r0 +; SOFT-NEXT: bl __aeabi_h2f +; SOFT-NEXT: bl __aeabi_f2ulz +; SOFT-NEXT: movs r2, #0 +; SOFT-NEXT: adds r3, r0, #1 +; SOFT-NEXT: sbcs r1, r2 +; SOFT-NEXT: blo .LBB7_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mvns r0, r2 +; SOFT-NEXT: .LBB7_2: @ %entry +; SOFT-NEXT: pop {r7, pc} +; +; VFP2-LABEL: utesth_f16i32: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_h2f +; VFP2-NEXT: bl __aeabi_f2ulz +; VFP2-NEXT: subs.w r2, r0, #-1 +; VFP2-NEXT: sbcs r1, r1, #0 +; VFP2-NEXT: it hs +; VFP2-NEXT: movhs.w r0, #-1 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: utesth_f16i32: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: vmov.f16 r0, s0 +; FULL-NEXT: vmov s0, r0 +; FULL-NEXT: bl __fixunshfdi +; FULL-NEXT: subs.w r2, r0, #-1 +; FULL-NEXT: sbcs r1, r1, #0 +; FULL-NEXT: it hs +; FULL-NEXT: movhs.w r0, #-1 +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptoui half %x to i64 + %0 = icmp ult i64 %conv, 4294967295 + %spec.store.select = select i1 %0, i64 %conv, i64 4294967295 + %conv6 = trunc i64 %spec.store.select to i32 + ret i32 %conv6 +} + +define i32 @ustest_f16i32(half %x) { +; SOFT-LABEL: ustest_f16i32: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, lr} +; SOFT-NEXT: push {r4, lr} +; SOFT-NEXT: uxth r0, r0 +; SOFT-NEXT: bl __aeabi_h2f +; SOFT-NEXT: bl __aeabi_f2lz +; SOFT-NEXT: movs r2, #1 +; SOFT-NEXT: movs r3, #0 +; SOFT-NEXT: adds r4, r0, #1 +; SOFT-NEXT: mov r4, r1 +; SOFT-NEXT: sbcs r4, r3 +; SOFT-NEXT: mov r4, r2 +; SOFT-NEXT: bge .LBB8_7 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB8_8 +; SOFT-NEXT: .LBB8_2: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB8_4 +; SOFT-NEXT: .LBB8_3: @ 
%entry +; SOFT-NEXT: mvns r0, r3 +; SOFT-NEXT: .LBB8_4: @ %entry +; SOFT-NEXT: rsbs r4, r0, #0 +; SOFT-NEXT: mov r4, r3 +; SOFT-NEXT: sbcs r4, r1 +; SOFT-NEXT: bge .LBB8_9 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB8_10 +; SOFT-NEXT: .LBB8_6: @ %entry +; SOFT-NEXT: pop {r4, pc} +; SOFT-NEXT: .LBB8_7: @ %entry +; SOFT-NEXT: mov r4, r3 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB8_2 +; SOFT-NEXT: .LBB8_8: @ %entry +; SOFT-NEXT: mov r1, r3 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB8_3 +; SOFT-NEXT: b .LBB8_4 +; SOFT-NEXT: .LBB8_9: @ %entry +; SOFT-NEXT: mov r2, r3 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bne .LBB8_6 +; SOFT-NEXT: .LBB8_10: @ %entry +; SOFT-NEXT: mov r0, r2 +; SOFT-NEXT: pop {r4, pc} +; +; VFP2-LABEL: ustest_f16i32: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_h2f +; VFP2-NEXT: bl __aeabi_f2lz +; VFP2-NEXT: subs.w r3, r0, #-1 +; VFP2-NEXT: mov.w r2, #0 +; VFP2-NEXT: sbcs r3, r1, #0 +; VFP2-NEXT: mov.w r3, #0 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r3, #1 +; VFP2-NEXT: cmp r3, #0 +; VFP2-NEXT: ite ne +; VFP2-NEXT: movne r3, r1 +; VFP2-NEXT: moveq.w r0, #-1 +; VFP2-NEXT: rsbs r1, r0, #0 +; VFP2-NEXT: sbcs.w r1, r2, r3 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r2, #1 +; VFP2-NEXT: cmp r2, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r2 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: ustest_f16i32: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: vmov.f16 r0, s0 +; FULL-NEXT: vmov s0, r0 +; FULL-NEXT: bl __fixhfdi +; FULL-NEXT: subs.w r2, r0, #-1 +; FULL-NEXT: sbcs r2, r1, #0 +; FULL-NEXT: cset r2, lt +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: it eq +; FULL-NEXT: moveq.w r0, #-1 +; FULL-NEXT: csel r1, r1, r2, ne +; FULL-NEXT: rsbs r3, r0, #0 +; FULL-NEXT: mov.w r2, #0 +; FULL-NEXT: sbcs.w r1, r2, r1 +; FULL-NEXT: cset r1, lt +; FULL-NEXT: cmp r1, #0 +; FULL-NEXT: csel r0, r0, r1, ne +; 
FULL-NEXT: pop {r7, pc} +entry: + %conv = fptosi half %x to i64 + %0 = icmp slt i64 %conv, 4294967295 + %spec.store.select = select i1 %0, i64 %conv, i64 4294967295 + %1 = icmp sgt i64 %spec.store.select, 0 + %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 0 + %conv6 = trunc i64 %spec.store.select7 to i32 + ret i32 %conv6 +} + +; i16 saturate + +define i16 @stest_f64i16(double %x) { +; SOFT-LABEL: stest_f64i16: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_d2iz +; SOFT-NEXT: ldr r1, .LCPI9_0 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: blt .LBB9_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB9_2: @ %entry +; SOFT-NEXT: ldr r1, .LCPI9_1 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: bgt .LBB9_4 +; SOFT-NEXT: @ %bb.3: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB9_4: @ %entry +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.5: +; SOFT-NEXT: .LCPI9_0: +; SOFT-NEXT: .long 32767 @ 0x7fff +; SOFT-NEXT: .LCPI9_1: +; SOFT-NEXT: .long 4294934528 @ 0xffff8000 +; +; VFP2-LABEL: stest_f64i16: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, r1, d0 +; VFP2-NEXT: bl __aeabi_d2iz +; VFP2-NEXT: ssat r0, #16, r0 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: stest_f64i16: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: vcvt.s32.f64 s0, d0 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: ssat r0, #16, r0 +; FULL-NEXT: bx lr +entry: + %conv = fptosi double %x to i32 + %0 = icmp slt i32 %conv, 32767 + %spec.store.select = select i1 %0, i32 %conv, i32 32767 + %1 = icmp sgt i32 %spec.store.select, -32768 + %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 -32768 + %conv6 = trunc i32 %spec.store.select7 to i16 + ret i16 %conv6 +} + +define i16 @utest_f64i16(double %x) { +; SOFT-LABEL: utest_f64i16: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_d2uiz 
+; SOFT-NEXT: ldr r1, .LCPI10_0 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: blo .LBB10_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB10_2: @ %entry +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.3: +; SOFT-NEXT: .LCPI10_0: +; SOFT-NEXT: .long 65535 @ 0xffff +; +; VFP2-LABEL: utest_f64i16: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, r1, d0 +; VFP2-NEXT: bl __aeabi_d2uiz +; VFP2-NEXT: movw r1, #65535 +; VFP2-NEXT: cmp r0, r1 +; VFP2-NEXT: it hs +; VFP2-NEXT: movhs r0, r1 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: utest_f64i16: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: vcvt.u32.f64 s0, d0 +; FULL-NEXT: movw r1, #65535 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: cmp r0, r1 +; FULL-NEXT: csel r0, r0, r1, lo +; FULL-NEXT: bx lr +entry: + %conv = fptoui double %x to i32 + %0 = icmp ult i32 %conv, 65535 + %spec.store.select = select i1 %0, i32 %conv, i32 65535 + %conv6 = trunc i32 %spec.store.select to i16 + ret i16 %conv6 +} + +define i16 @ustest_f64i16(double %x) { +; SOFT-LABEL: ustest_f64i16: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_d2iz +; SOFT-NEXT: ldr r1, .LCPI11_0 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: blt .LBB11_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB11_2: @ %entry +; SOFT-NEXT: asrs r1, r0, #31 +; SOFT-NEXT: bics r0, r1 +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.3: +; SOFT-NEXT: .LCPI11_0: +; SOFT-NEXT: .long 65535 @ 0xffff +; +; VFP2-LABEL: ustest_f64i16: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, r1, d0 +; VFP2-NEXT: bl __aeabi_d2iz +; VFP2-NEXT: usat r0, #16, r0 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: ustest_f64i16: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: vcvt.s32.f64 s0, d0 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: usat r0, #16, r0 +; FULL-NEXT: bx 
lr +entry: + %conv = fptosi double %x to i32 + %0 = icmp slt i32 %conv, 65535 + %spec.store.select = select i1 %0, i32 %conv, i32 65535 + %1 = icmp sgt i32 %spec.store.select, 0 + %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 0 + %conv6 = trunc i32 %spec.store.select7 to i16 + ret i16 %conv6 +} + +define i16 @stest_f32i16(float %x) { +; SOFT-LABEL: stest_f32i16: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_f2iz +; SOFT-NEXT: ldr r1, .LCPI12_0 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: blt .LBB12_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB12_2: @ %entry +; SOFT-NEXT: ldr r1, .LCPI12_1 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: bgt .LBB12_4 +; SOFT-NEXT: @ %bb.3: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB12_4: @ %entry +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.5: +; SOFT-NEXT: .LCPI12_0: +; SOFT-NEXT: .long 32767 @ 0x7fff +; SOFT-NEXT: .LCPI12_1: +; SOFT-NEXT: .long 4294934528 @ 0xffff8000 +; +; VFP-LABEL: stest_f32i16: +; VFP: @ %bb.0: @ %entry +; VFP-NEXT: vcvt.s32.f32 s0, s0 +; VFP-NEXT: vmov r0, s0 +; VFP-NEXT: ssat r0, #16, r0 +; VFP-NEXT: bx lr +entry: + %conv = fptosi float %x to i32 + %0 = icmp slt i32 %conv, 32767 + %spec.store.select = select i1 %0, i32 %conv, i32 32767 + %1 = icmp sgt i32 %spec.store.select, -32768 + %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 -32768 + %conv6 = trunc i32 %spec.store.select7 to i16 + ret i16 %conv6 +} + +define i16 @utest_f32i16(float %x) { +; SOFT-LABEL: utest_f32i16: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_f2uiz +; SOFT-NEXT: ldr r1, .LCPI13_0 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: blo .LBB13_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB13_2: @ %entry +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.3: +; SOFT-NEXT: .LCPI13_0: +; 
SOFT-NEXT: .long 65535 @ 0xffff +; +; VFP2-LABEL: utest_f32i16: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: vcvt.u32.f32 s0, s0 +; VFP2-NEXT: movw r0, #65535 +; VFP2-NEXT: vmov r1, s0 +; VFP2-NEXT: cmp r1, r0 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r0, r1 +; VFP2-NEXT: bx lr +; +; FULL-LABEL: utest_f32i16: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: vcvt.u32.f32 s0, s0 +; FULL-NEXT: movw r1, #65535 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: cmp r0, r1 +; FULL-NEXT: csel r0, r0, r1, lo +; FULL-NEXT: bx lr +entry: + %conv = fptoui float %x to i32 + %0 = icmp ult i32 %conv, 65535 + %spec.store.select = select i1 %0, i32 %conv, i32 65535 + %conv6 = trunc i32 %spec.store.select to i16 + ret i16 %conv6 +} + +define i16 @ustest_f32i16(float %x) { +; SOFT-LABEL: ustest_f32i16: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_f2iz +; SOFT-NEXT: ldr r1, .LCPI14_0 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: blt .LBB14_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB14_2: @ %entry +; SOFT-NEXT: asrs r1, r0, #31 +; SOFT-NEXT: bics r0, r1 +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.3: +; SOFT-NEXT: .LCPI14_0: +; SOFT-NEXT: .long 65535 @ 0xffff +; +; VFP-LABEL: ustest_f32i16: +; VFP: @ %bb.0: @ %entry +; VFP-NEXT: vcvt.s32.f32 s0, s0 +; VFP-NEXT: vmov r0, s0 +; VFP-NEXT: usat r0, #16, r0 +; VFP-NEXT: bx lr +entry: + %conv = fptosi float %x to i32 + %0 = icmp slt i32 %conv, 65535 + %spec.store.select = select i1 %0, i32 %conv, i32 65535 + %1 = icmp sgt i32 %spec.store.select, 0 + %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 0 + %conv6 = trunc i32 %spec.store.select7 to i16 + ret i16 %conv6 +} + +define i16 @stest_f16i16(half %x) { +; SOFT-LABEL: stest_f16i16: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: uxth r0, r0 +; SOFT-NEXT: bl __aeabi_h2f +; SOFT-NEXT: bl __aeabi_f2iz +; SOFT-NEXT: ldr r1, .LCPI15_0 +; 
SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: blt .LBB15_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB15_2: @ %entry +; SOFT-NEXT: ldr r1, .LCPI15_1 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: bgt .LBB15_4 +; SOFT-NEXT: @ %bb.3: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB15_4: @ %entry +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.5: +; SOFT-NEXT: .LCPI15_0: +; SOFT-NEXT: .long 32767 @ 0x7fff +; SOFT-NEXT: .LCPI15_1: +; SOFT-NEXT: .long 4294934528 @ 0xffff8000 +; +; VFP2-LABEL: stest_f16i16: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_h2f +; VFP2-NEXT: vmov s0, r0 +; VFP2-NEXT: vcvt.s32.f32 s0, s0 +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: ssat r0, #16, r0 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: stest_f16i16: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: vcvt.s32.f16 s0, s0 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: ssat r0, #16, r0 +; FULL-NEXT: bx lr +entry: + %conv = fptosi half %x to i32 + %0 = icmp slt i32 %conv, 32767 + %spec.store.select = select i1 %0, i32 %conv, i32 32767 + %1 = icmp sgt i32 %spec.store.select, -32768 + %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 -32768 + %conv6 = trunc i32 %spec.store.select7 to i16 + ret i16 %conv6 +} + +define i16 @utesth_f16i16(half %x) { +; SOFT-LABEL: utesth_f16i16: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: uxth r0, r0 +; SOFT-NEXT: bl __aeabi_h2f +; SOFT-NEXT: bl __aeabi_f2uiz +; SOFT-NEXT: ldr r1, .LCPI16_0 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: blo .LBB16_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB16_2: @ %entry +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.3: +; SOFT-NEXT: .LCPI16_0: +; SOFT-NEXT: .long 65535 @ 0xffff +; +; VFP2-LABEL: utesth_f16i16: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; 
VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_h2f +; VFP2-NEXT: vmov s0, r0 +; VFP2-NEXT: movw r0, #65535 +; VFP2-NEXT: vcvt.u32.f32 s0, s0 +; VFP2-NEXT: vmov r1, s0 +; VFP2-NEXT: cmp r1, r0 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r0, r1 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: utesth_f16i16: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: vcvt.u32.f16 s0, s0 +; FULL-NEXT: movw r1, #65535 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: cmp r0, r1 +; FULL-NEXT: csel r0, r0, r1, lo +; FULL-NEXT: bx lr +entry: + %conv = fptoui half %x to i32 + %0 = icmp ult i32 %conv, 65535 + %spec.store.select = select i1 %0, i32 %conv, i32 65535 + %conv6 = trunc i32 %spec.store.select to i16 + ret i16 %conv6 +} + +define i16 @ustest_f16i16(half %x) { +; SOFT-LABEL: ustest_f16i16: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: uxth r0, r0 +; SOFT-NEXT: bl __aeabi_h2f +; SOFT-NEXT: bl __aeabi_f2iz +; SOFT-NEXT: ldr r1, .LCPI17_0 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: blt .LBB17_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB17_2: @ %entry +; SOFT-NEXT: asrs r1, r0, #31 +; SOFT-NEXT: bics r0, r1 +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.3: +; SOFT-NEXT: .LCPI17_0: +; SOFT-NEXT: .long 65535 @ 0xffff +; +; VFP2-LABEL: ustest_f16i16: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_h2f +; VFP2-NEXT: vmov s0, r0 +; VFP2-NEXT: vcvt.s32.f32 s0, s0 +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: usat r0, #16, r0 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: ustest_f16i16: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: vcvt.s32.f16 s0, s0 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: usat r0, #16, r0 +; FULL-NEXT: bx lr +entry: + %conv = fptosi half %x to i32 + %0 = icmp slt i32 %conv, 65535 + %spec.store.select = select i1 %0, i32 %conv, i32 65535 + %1 = icmp sgt i32 %spec.store.select, 0 + %spec.store.select7 = select i1 %1, 
i32 %spec.store.select, i32 0 + %conv6 = trunc i32 %spec.store.select7 to i16 + ret i16 %conv6 +} + +; i64 saturate + +define i64 @stest_f64i64(double %x) { +; SOFT-LABEL: stest_f64i64: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, r5, r6, r7, lr} +; SOFT-NEXT: push {r4, r5, r6, r7, lr} +; SOFT-NEXT: .pad #12 +; SOFT-NEXT: sub sp, #12 +; SOFT-NEXT: bl __fixdfti +; SOFT-NEXT: mov r6, r0 +; SOFT-NEXT: movs r4, #1 +; SOFT-NEXT: movs r5, #0 +; SOFT-NEXT: ldr r0, .LCPI18_0 +; SOFT-NEXT: adds r7, r6, #1 +; SOFT-NEXT: mov r7, r1 +; SOFT-NEXT: sbcs r7, r0 +; SOFT-NEXT: mov r7, r2 +; SOFT-NEXT: sbcs r7, r5 +; SOFT-NEXT: mov r7, r3 +; SOFT-NEXT: sbcs r7, r5 +; SOFT-NEXT: mov r7, r4 +; SOFT-NEXT: bge .LBB18_13 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: beq .LBB18_14 +; SOFT-NEXT: .LBB18_2: @ %entry +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bne .LBB18_4 +; SOFT-NEXT: .LBB18_3: @ %entry +; SOFT-NEXT: mov r2, r5 +; SOFT-NEXT: .LBB18_4: @ %entry +; SOFT-NEXT: str r2, [sp, #8] @ 4-byte Spill +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bne .LBB18_6 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: mov r1, r0 +; SOFT-NEXT: .LBB18_6: @ %entry +; SOFT-NEXT: str r3, [sp, #4] @ 4-byte Spill +; SOFT-NEXT: mvns r0, r5 +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bne .LBB18_8 +; SOFT-NEXT: @ %bb.7: @ %entry +; SOFT-NEXT: mov r6, r0 +; SOFT-NEXT: .LBB18_8: @ %entry +; SOFT-NEXT: lsls r3, r4, #31 +; SOFT-NEXT: rsbs r7, r6, #0 +; SOFT-NEXT: mov r7, r3 +; SOFT-NEXT: sbcs r7, r1 +; SOFT-NEXT: mov r7, r0 +; SOFT-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; SOFT-NEXT: sbcs r7, r2 +; SOFT-NEXT: ldr r2, [sp, #4] @ 4-byte Reload +; SOFT-NEXT: sbcs r0, r2 +; SOFT-NEXT: bge .LBB18_15 +; SOFT-NEXT: @ %bb.9: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB18_16 +; SOFT-NEXT: .LBB18_10: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB18_12 +; SOFT-NEXT: .LBB18_11: @ %entry +; SOFT-NEXT: mov r1, r3 +; SOFT-NEXT: .LBB18_12: @ %entry +; SOFT-NEXT: mov r0, r6 +; SOFT-NEXT: add 
sp, #12 +; SOFT-NEXT: pop {r4, r5, r6, r7, pc} +; SOFT-NEXT: .LBB18_13: @ %entry +; SOFT-NEXT: mov r7, r5 +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bne .LBB18_2 +; SOFT-NEXT: .LBB18_14: @ %entry +; SOFT-NEXT: mov r3, r5 +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: beq .LBB18_3 +; SOFT-NEXT: b .LBB18_4 +; SOFT-NEXT: .LBB18_15: @ %entry +; SOFT-NEXT: mov r4, r5 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB18_10 +; SOFT-NEXT: .LBB18_16: @ %entry +; SOFT-NEXT: mov r6, r5 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB18_11 +; SOFT-NEXT: b .LBB18_12 +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.17: +; SOFT-NEXT: .LCPI18_0: +; SOFT-NEXT: .long 2147483647 @ 0x7fffffff +; +; VFP2-LABEL: stest_f64i64: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r4, r5, r7, lr} +; VFP2-NEXT: push {r4, r5, r7, lr} +; VFP2-NEXT: bl __fixdfti +; VFP2-NEXT: subs.w r4, r0, #-1 +; VFP2-NEXT: mov r12, r1 +; VFP2-NEXT: mvn r1, #-2147483648 +; VFP2-NEXT: sbcs.w r4, r12, r1 +; VFP2-NEXT: sbcs r4, r2, #0 +; VFP2-NEXT: mov.w lr, #0 +; VFP2-NEXT: sbcs r4, r3, #0 +; VFP2-NEXT: mov.w r4, #0 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r4, #1 +; VFP2-NEXT: cmp r4, #0 +; VFP2-NEXT: itee eq +; VFP2-NEXT: moveq r3, r4 +; VFP2-NEXT: movne r4, r2 +; VFP2-NEXT: movne r1, r12 +; VFP2-NEXT: mov.w r2, #-1 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r2 +; VFP2-NEXT: rsbs r5, r0, #0 +; VFP2-NEXT: mov.w r12, #-2147483648 +; VFP2-NEXT: sbcs.w r5, r12, r1 +; VFP2-NEXT: sbcs.w r4, r2, r4 +; VFP2-NEXT: sbcs r2, r3 +; VFP2-NEXT: mov.w r2, #0 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r2, #1 +; VFP2-NEXT: cmp r2, #0 +; VFP2-NEXT: itt eq +; VFP2-NEXT: moveq r0, lr +; VFP2-NEXT: moveq r1, r12 +; VFP2-NEXT: pop {r4, r5, r7, pc} +; +; FULL-LABEL: stest_f64i64: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r4, r5, r7, lr} +; FULL-NEXT: push {r4, r5, r7, lr} +; FULL-NEXT: bl __fixdfti +; FULL-NEXT: subs.w lr, r0, #-1 +; FULL-NEXT: mvn r12, #-2147483648 +; FULL-NEXT: sbcs.w lr, r1, r12 +; FULL-NEXT: sbcs lr, r2, #0 +; FULL-NEXT: sbcs lr, r3, 
#0 +; FULL-NEXT: cset lr, lt +; FULL-NEXT: cmp.w lr, #0 +; FULL-NEXT: csel r5, r3, lr, ne +; FULL-NEXT: mov.w r3, #-1 +; FULL-NEXT: csel r0, r0, r3, ne +; FULL-NEXT: csel r1, r1, r12, ne +; FULL-NEXT: csel r2, r2, lr, ne +; FULL-NEXT: rsbs r4, r0, #0 +; FULL-NEXT: mov.w r12, #-2147483648 +; FULL-NEXT: sbcs.w r4, r12, r1 +; FULL-NEXT: sbcs.w r2, r3, r2 +; FULL-NEXT: sbcs.w r2, r3, r5 +; FULL-NEXT: cset r2, lt +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: it eq +; FULL-NEXT: moveq r0, #0 +; FULL-NEXT: csel r1, r1, r12, ne +; FULL-NEXT: pop {r4, r5, r7, pc} +entry: + %conv = fptosi double %x to i128 + %0 = icmp slt i128 %conv, 9223372036854775807 + %spec.store.select = select i1 %0, i128 %conv, i128 9223372036854775807 + %1 = icmp sgt i128 %spec.store.select, -9223372036854775808 + %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 -9223372036854775808 + %conv6 = trunc i128 %spec.store.select7 to i64 + ret i64 %conv6 +} + +define i64 @utest_f64i64(double %x) { +; SOFT-LABEL: utest_f64i64: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, lr} +; SOFT-NEXT: push {r4, lr} +; SOFT-NEXT: bl __fixunsdfti +; SOFT-NEXT: movs r4, #0 +; SOFT-NEXT: subs r2, r2, #1 +; SOFT-NEXT: sbcs r3, r4 +; SOFT-NEXT: blo .LBB19_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r2, r4 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB19_3 +; SOFT-NEXT: b .LBB19_4 +; SOFT-NEXT: .LBB19_2: +; SOFT-NEXT: movs r2, #1 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bne .LBB19_4 +; SOFT-NEXT: .LBB19_3: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: .LBB19_4: @ %entry +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB19_6 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: pop {r4, pc} +; SOFT-NEXT: .LBB19_6: @ %entry +; SOFT-NEXT: mov r1, r4 +; SOFT-NEXT: pop {r4, pc} +; +; VFP2-LABEL: utest_f64i64: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: bl __fixunsdfti +; VFP2-NEXT: subs r2, #1 +; VFP2-NEXT: mov.w r12, #0 +; VFP2-NEXT: sbcs r2, r3, #0 +; VFP2-NEXT: 
mov.w r2, #0 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r2, #1 +; VFP2-NEXT: cmp r2, #0 +; VFP2-NEXT: itt eq +; VFP2-NEXT: moveq r0, r12 +; VFP2-NEXT: moveq r1, r12 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: utest_f64i64: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: bl __fixunsdfti +; FULL-NEXT: subs r2, #1 +; FULL-NEXT: sbcs r2, r3, #0 +; FULL-NEXT: mov.w r3, #0 +; FULL-NEXT: cset r2, lo +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r0, r0, r3, ne +; FULL-NEXT: csel r1, r1, r3, ne +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptoui double %x to i128 + %0 = icmp ult i128 %conv, 18446744073709551616 + %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 + %conv6 = trunc i128 %spec.store.select to i64 + ret i64 %conv6 +} + +define i64 @ustest_f64i64(double %x) { +; SOFT-LABEL: ustest_f64i64: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, r5, r6, lr} +; SOFT-NEXT: push {r4, r5, r6, lr} +; SOFT-NEXT: bl __fixdfti +; SOFT-NEXT: movs r4, #1 +; SOFT-NEXT: movs r5, #0 +; SOFT-NEXT: subs r6, r2, #1 +; SOFT-NEXT: mov r6, r3 +; SOFT-NEXT: sbcs r6, r5 +; SOFT-NEXT: mov r6, r4 +; SOFT-NEXT: bge .LBB20_10 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: beq .LBB20_11 +; SOFT-NEXT: .LBB20_2: @ %entry +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: beq .LBB20_12 +; SOFT-NEXT: .LBB20_3: @ %entry +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: beq .LBB20_13 +; SOFT-NEXT: .LBB20_4: @ %entry +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: bne .LBB20_6 +; SOFT-NEXT: .LBB20_5: @ %entry +; SOFT-NEXT: mov r0, r5 +; SOFT-NEXT: .LBB20_6: @ %entry +; SOFT-NEXT: rsbs r6, r0, #0 +; SOFT-NEXT: mov r6, r5 +; SOFT-NEXT: sbcs r6, r1 +; SOFT-NEXT: mov r6, r5 +; SOFT-NEXT: sbcs r6, r2 +; SOFT-NEXT: mov r2, r5 +; SOFT-NEXT: sbcs r2, r3 +; SOFT-NEXT: bge .LBB20_14 +; SOFT-NEXT: @ %bb.7: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB20_15 +; SOFT-NEXT: .LBB20_8: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB20_16 
+; SOFT-NEXT: .LBB20_9: @ %entry +; SOFT-NEXT: pop {r4, r5, r6, pc} +; SOFT-NEXT: .LBB20_10: @ %entry +; SOFT-NEXT: mov r6, r5 +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: bne .LBB20_2 +; SOFT-NEXT: .LBB20_11: @ %entry +; SOFT-NEXT: mov r3, r5 +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: bne .LBB20_3 +; SOFT-NEXT: .LBB20_12: @ %entry +; SOFT-NEXT: mov r2, r4 +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: bne .LBB20_4 +; SOFT-NEXT: .LBB20_13: @ %entry +; SOFT-NEXT: mov r1, r5 +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: beq .LBB20_5 +; SOFT-NEXT: b .LBB20_6 +; SOFT-NEXT: .LBB20_14: @ %entry +; SOFT-NEXT: mov r4, r5 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB20_8 +; SOFT-NEXT: .LBB20_15: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB20_9 +; SOFT-NEXT: .LBB20_16: @ %entry +; SOFT-NEXT: mov r1, r4 +; SOFT-NEXT: pop {r4, r5, r6, pc} +; +; VFP2-LABEL: ustest_f64i64: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: bl __fixdfti +; VFP2-NEXT: subs.w lr, r2, #1 +; VFP2-NEXT: mov.w r12, #0 +; VFP2-NEXT: sbcs lr, r3, #0 +; VFP2-NEXT: mov.w lr, #0 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt.w lr, #1 +; VFP2-NEXT: cmp.w lr, #0 +; VFP2-NEXT: itttt eq +; VFP2-NEXT: moveq r3, r12 +; VFP2-NEXT: moveq r2, #1 +; VFP2-NEXT: moveq r1, r12 +; VFP2-NEXT: moveq r0, r12 +; VFP2-NEXT: rsbs.w lr, r0, #0 +; VFP2-NEXT: sbcs.w lr, r12, r1 +; VFP2-NEXT: sbcs.w r2, r12, r2 +; VFP2-NEXT: sbcs.w r2, r12, r3 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt.w r12, #1 +; VFP2-NEXT: cmp.w r12, #0 +; VFP2-NEXT: itt eq +; VFP2-NEXT: moveq r0, r12 +; VFP2-NEXT: moveq r1, r12 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: ustest_f64i64: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: bl __fixdfti +; FULL-NEXT: subs.w r12, r2, #1 +; FULL-NEXT: mov.w lr, #0 +; FULL-NEXT: sbcs r12, r3, #0 +; FULL-NEXT: cset r12, lt +; FULL-NEXT: cmp.w r12, #0 +; FULL-NEXT: it eq +; FULL-NEXT: moveq r2, #1 +; FULL-NEXT: csel r0, 
r0, lr, ne +; FULL-NEXT: csel r12, r3, lr, ne +; FULL-NEXT: csel r1, r1, lr, ne +; FULL-NEXT: rsbs r3, r0, #0 +; FULL-NEXT: sbcs.w r3, lr, r1 +; FULL-NEXT: sbcs.w r2, lr, r2 +; FULL-NEXT: sbcs.w r2, lr, r12 +; FULL-NEXT: cset r2, lt +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r0, r0, r2, ne +; FULL-NEXT: csel r1, r1, r2, ne +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptosi double %x to i128 + %0 = icmp slt i128 %conv, 18446744073709551616 + %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 + %1 = icmp sgt i128 %spec.store.select, 0 + %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 0 + %conv6 = trunc i128 %spec.store.select7 to i64 + ret i64 %conv6 +} + +define i64 @stest_f32i64(float %x) { +; SOFT-LABEL: stest_f32i64: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, r5, r6, r7, lr} +; SOFT-NEXT: push {r4, r5, r6, r7, lr} +; SOFT-NEXT: .pad #12 +; SOFT-NEXT: sub sp, #12 +; SOFT-NEXT: bl __fixsfti +; SOFT-NEXT: mov r6, r0 +; SOFT-NEXT: movs r4, #1 +; SOFT-NEXT: movs r5, #0 +; SOFT-NEXT: ldr r0, .LCPI21_0 +; SOFT-NEXT: adds r7, r6, #1 +; SOFT-NEXT: mov r7, r1 +; SOFT-NEXT: sbcs r7, r0 +; SOFT-NEXT: mov r7, r2 +; SOFT-NEXT: sbcs r7, r5 +; SOFT-NEXT: mov r7, r3 +; SOFT-NEXT: sbcs r7, r5 +; SOFT-NEXT: mov r7, r4 +; SOFT-NEXT: bge .LBB21_13 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: beq .LBB21_14 +; SOFT-NEXT: .LBB21_2: @ %entry +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bne .LBB21_4 +; SOFT-NEXT: .LBB21_3: @ %entry +; SOFT-NEXT: mov r2, r5 +; SOFT-NEXT: .LBB21_4: @ %entry +; SOFT-NEXT: str r2, [sp, #8] @ 4-byte Spill +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bne .LBB21_6 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: mov r1, r0 +; SOFT-NEXT: .LBB21_6: @ %entry +; SOFT-NEXT: str r3, [sp, #4] @ 4-byte Spill +; SOFT-NEXT: mvns r0, r5 +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bne .LBB21_8 +; SOFT-NEXT: @ %bb.7: @ %entry +; SOFT-NEXT: mov r6, r0 +; SOFT-NEXT: .LBB21_8: @ %entry +; SOFT-NEXT: lsls r3, r4, #31 +; 
SOFT-NEXT: rsbs r7, r6, #0 +; SOFT-NEXT: mov r7, r3 +; SOFT-NEXT: sbcs r7, r1 +; SOFT-NEXT: mov r7, r0 +; SOFT-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; SOFT-NEXT: sbcs r7, r2 +; SOFT-NEXT: ldr r2, [sp, #4] @ 4-byte Reload +; SOFT-NEXT: sbcs r0, r2 +; SOFT-NEXT: bge .LBB21_15 +; SOFT-NEXT: @ %bb.9: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB21_16 +; SOFT-NEXT: .LBB21_10: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB21_12 +; SOFT-NEXT: .LBB21_11: @ %entry +; SOFT-NEXT: mov r1, r3 +; SOFT-NEXT: .LBB21_12: @ %entry +; SOFT-NEXT: mov r0, r6 +; SOFT-NEXT: add sp, #12 +; SOFT-NEXT: pop {r4, r5, r6, r7, pc} +; SOFT-NEXT: .LBB21_13: @ %entry +; SOFT-NEXT: mov r7, r5 +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bne .LBB21_2 +; SOFT-NEXT: .LBB21_14: @ %entry +; SOFT-NEXT: mov r3, r5 +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: beq .LBB21_3 +; SOFT-NEXT: b .LBB21_4 +; SOFT-NEXT: .LBB21_15: @ %entry +; SOFT-NEXT: mov r4, r5 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB21_10 +; SOFT-NEXT: .LBB21_16: @ %entry +; SOFT-NEXT: mov r6, r5 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB21_11 +; SOFT-NEXT: b .LBB21_12 +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.17: +; SOFT-NEXT: .LCPI21_0: +; SOFT-NEXT: .long 2147483647 @ 0x7fffffff +; +; VFP2-LABEL: stest_f32i64: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r4, r5, r7, lr} +; VFP2-NEXT: push {r4, r5, r7, lr} +; VFP2-NEXT: bl __fixsfti +; VFP2-NEXT: subs.w r4, r0, #-1 +; VFP2-NEXT: mov r12, r1 +; VFP2-NEXT: mvn r1, #-2147483648 +; VFP2-NEXT: sbcs.w r4, r12, r1 +; VFP2-NEXT: sbcs r4, r2, #0 +; VFP2-NEXT: mov.w lr, #0 +; VFP2-NEXT: sbcs r4, r3, #0 +; VFP2-NEXT: mov.w r4, #0 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r4, #1 +; VFP2-NEXT: cmp r4, #0 +; VFP2-NEXT: itee eq +; VFP2-NEXT: moveq r3, r4 +; VFP2-NEXT: movne r4, r2 +; VFP2-NEXT: movne r1, r12 +; VFP2-NEXT: mov.w r2, #-1 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r2 +; VFP2-NEXT: rsbs r5, r0, #0 +; VFP2-NEXT: mov.w r12, #-2147483648 +; VFP2-NEXT: sbcs.w r5, r12, r1 
+; VFP2-NEXT: sbcs.w r4, r2, r4 +; VFP2-NEXT: sbcs r2, r3 +; VFP2-NEXT: mov.w r2, #0 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r2, #1 +; VFP2-NEXT: cmp r2, #0 +; VFP2-NEXT: itt eq +; VFP2-NEXT: moveq r0, lr +; VFP2-NEXT: moveq r1, r12 +; VFP2-NEXT: pop {r4, r5, r7, pc} +; +; FULL-LABEL: stest_f32i64: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r4, r5, r7, lr} +; FULL-NEXT: push {r4, r5, r7, lr} +; FULL-NEXT: bl __fixsfti +; FULL-NEXT: subs.w lr, r0, #-1 +; FULL-NEXT: mvn r12, #-2147483648 +; FULL-NEXT: sbcs.w lr, r1, r12 +; FULL-NEXT: sbcs lr, r2, #0 +; FULL-NEXT: sbcs lr, r3, #0 +; FULL-NEXT: cset lr, lt +; FULL-NEXT: cmp.w lr, #0 +; FULL-NEXT: csel r5, r3, lr, ne +; FULL-NEXT: mov.w r3, #-1 +; FULL-NEXT: csel r0, r0, r3, ne +; FULL-NEXT: csel r1, r1, r12, ne +; FULL-NEXT: csel r2, r2, lr, ne +; FULL-NEXT: rsbs r4, r0, #0 +; FULL-NEXT: mov.w r12, #-2147483648 +; FULL-NEXT: sbcs.w r4, r12, r1 +; FULL-NEXT: sbcs.w r2, r3, r2 +; FULL-NEXT: sbcs.w r2, r3, r5 +; FULL-NEXT: cset r2, lt +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: it eq +; FULL-NEXT: moveq r0, #0 +; FULL-NEXT: csel r1, r1, r12, ne +; FULL-NEXT: pop {r4, r5, r7, pc} +entry: + %conv = fptosi float %x to i128 + %0 = icmp slt i128 %conv, 9223372036854775807 + %spec.store.select = select i1 %0, i128 %conv, i128 9223372036854775807 + %1 = icmp sgt i128 %spec.store.select, -9223372036854775808 + %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 -9223372036854775808 + %conv6 = trunc i128 %spec.store.select7 to i64 + ret i64 %conv6 +} + +define i64 @utest_f32i64(float %x) { +; SOFT-LABEL: utest_f32i64: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, lr} +; SOFT-NEXT: push {r4, lr} +; SOFT-NEXT: bl __fixunssfti +; SOFT-NEXT: movs r4, #0 +; SOFT-NEXT: subs r2, r2, #1 +; SOFT-NEXT: sbcs r3, r4 +; SOFT-NEXT: blo .LBB22_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r2, r4 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB22_3 +; SOFT-NEXT: b .LBB22_4 +; SOFT-NEXT: .LBB22_2: +; SOFT-NEXT: movs r2, #1 
+; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bne .LBB22_4 +; SOFT-NEXT: .LBB22_3: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: .LBB22_4: @ %entry +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB22_6 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: pop {r4, pc} +; SOFT-NEXT: .LBB22_6: @ %entry +; SOFT-NEXT: mov r1, r4 +; SOFT-NEXT: pop {r4, pc} +; +; VFP2-LABEL: utest_f32i64: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: bl __fixunssfti +; VFP2-NEXT: subs r2, #1 +; VFP2-NEXT: mov.w r12, #0 +; VFP2-NEXT: sbcs r2, r3, #0 +; VFP2-NEXT: mov.w r2, #0 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r2, #1 +; VFP2-NEXT: cmp r2, #0 +; VFP2-NEXT: itt eq +; VFP2-NEXT: moveq r0, r12 +; VFP2-NEXT: moveq r1, r12 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: utest_f32i64: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: bl __fixunssfti +; FULL-NEXT: subs r2, #1 +; FULL-NEXT: sbcs r2, r3, #0 +; FULL-NEXT: mov.w r3, #0 +; FULL-NEXT: cset r2, lo +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r0, r0, r3, ne +; FULL-NEXT: csel r1, r1, r3, ne +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptoui float %x to i128 + %0 = icmp ult i128 %conv, 18446744073709551616 + %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 + %conv6 = trunc i128 %spec.store.select to i64 + ret i64 %conv6 +} + +define i64 @ustest_f32i64(float %x) { +; SOFT-LABEL: ustest_f32i64: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, r5, r6, lr} +; SOFT-NEXT: push {r4, r5, r6, lr} +; SOFT-NEXT: bl __fixsfti +; SOFT-NEXT: movs r4, #1 +; SOFT-NEXT: movs r5, #0 +; SOFT-NEXT: subs r6, r2, #1 +; SOFT-NEXT: mov r6, r3 +; SOFT-NEXT: sbcs r6, r5 +; SOFT-NEXT: mov r6, r4 +; SOFT-NEXT: bge .LBB23_10 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: beq .LBB23_11 +; SOFT-NEXT: .LBB23_2: @ %entry +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: beq .LBB23_12 +; SOFT-NEXT: .LBB23_3: @ %entry +; SOFT-NEXT: cmp r6, #0 +; 
SOFT-NEXT: beq .LBB23_13 +; SOFT-NEXT: .LBB23_4: @ %entry +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: bne .LBB23_6 +; SOFT-NEXT: .LBB23_5: @ %entry +; SOFT-NEXT: mov r0, r5 +; SOFT-NEXT: .LBB23_6: @ %entry +; SOFT-NEXT: rsbs r6, r0, #0 +; SOFT-NEXT: mov r6, r5 +; SOFT-NEXT: sbcs r6, r1 +; SOFT-NEXT: mov r6, r5 +; SOFT-NEXT: sbcs r6, r2 +; SOFT-NEXT: mov r2, r5 +; SOFT-NEXT: sbcs r2, r3 +; SOFT-NEXT: bge .LBB23_14 +; SOFT-NEXT: @ %bb.7: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB23_15 +; SOFT-NEXT: .LBB23_8: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB23_16 +; SOFT-NEXT: .LBB23_9: @ %entry +; SOFT-NEXT: pop {r4, r5, r6, pc} +; SOFT-NEXT: .LBB23_10: @ %entry +; SOFT-NEXT: mov r6, r5 +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: bne .LBB23_2 +; SOFT-NEXT: .LBB23_11: @ %entry +; SOFT-NEXT: mov r3, r5 +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: bne .LBB23_3 +; SOFT-NEXT: .LBB23_12: @ %entry +; SOFT-NEXT: mov r2, r4 +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: bne .LBB23_4 +; SOFT-NEXT: .LBB23_13: @ %entry +; SOFT-NEXT: mov r1, r5 +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: beq .LBB23_5 +; SOFT-NEXT: b .LBB23_6 +; SOFT-NEXT: .LBB23_14: @ %entry +; SOFT-NEXT: mov r4, r5 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB23_8 +; SOFT-NEXT: .LBB23_15: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB23_9 +; SOFT-NEXT: .LBB23_16: @ %entry +; SOFT-NEXT: mov r1, r4 +; SOFT-NEXT: pop {r4, r5, r6, pc} +; +; VFP2-LABEL: ustest_f32i64: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: bl __fixsfti +; VFP2-NEXT: subs.w lr, r2, #1 +; VFP2-NEXT: mov.w r12, #0 +; VFP2-NEXT: sbcs lr, r3, #0 +; VFP2-NEXT: mov.w lr, #0 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt.w lr, #1 +; VFP2-NEXT: cmp.w lr, #0 +; VFP2-NEXT: itttt eq +; VFP2-NEXT: moveq r3, r12 +; VFP2-NEXT: moveq r2, #1 +; VFP2-NEXT: moveq r1, r12 +; VFP2-NEXT: moveq r0, r12 +; VFP2-NEXT: rsbs.w lr, r0, #0 +; VFP2-NEXT: sbcs.w lr, r12, r1 +; VFP2-NEXT: sbcs.w r2, 
r12, r2 +; VFP2-NEXT: sbcs.w r2, r12, r3 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt.w r12, #1 +; VFP2-NEXT: cmp.w r12, #0 +; VFP2-NEXT: itt eq +; VFP2-NEXT: moveq r0, r12 +; VFP2-NEXT: moveq r1, r12 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: ustest_f32i64: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: bl __fixsfti +; FULL-NEXT: subs.w r12, r2, #1 +; FULL-NEXT: mov.w lr, #0 +; FULL-NEXT: sbcs r12, r3, #0 +; FULL-NEXT: cset r12, lt +; FULL-NEXT: cmp.w r12, #0 +; FULL-NEXT: it eq +; FULL-NEXT: moveq r2, #1 +; FULL-NEXT: csel r0, r0, lr, ne +; FULL-NEXT: csel r12, r3, lr, ne +; FULL-NEXT: csel r1, r1, lr, ne +; FULL-NEXT: rsbs r3, r0, #0 +; FULL-NEXT: sbcs.w r3, lr, r1 +; FULL-NEXT: sbcs.w r2, lr, r2 +; FULL-NEXT: sbcs.w r2, lr, r12 +; FULL-NEXT: cset r2, lt +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r0, r0, r2, ne +; FULL-NEXT: csel r1, r1, r2, ne +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptosi float %x to i128 + %0 = icmp slt i128 %conv, 18446744073709551616 + %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 + %1 = icmp sgt i128 %spec.store.select, 0 + %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 0 + %conv6 = trunc i128 %spec.store.select7 to i64 + ret i64 %conv6 +} + +define i64 @stest_f16i64(half %x) { +; SOFT-LABEL: stest_f16i64: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, r5, r6, r7, lr} +; SOFT-NEXT: push {r4, r5, r6, r7, lr} +; SOFT-NEXT: .pad #12 +; SOFT-NEXT: sub sp, #12 +; SOFT-NEXT: uxth r0, r0 +; SOFT-NEXT: bl __aeabi_h2f +; SOFT-NEXT: bl __fixsfti +; SOFT-NEXT: mov r6, r0 +; SOFT-NEXT: movs r4, #1 +; SOFT-NEXT: movs r5, #0 +; SOFT-NEXT: ldr r0, .LCPI24_0 +; SOFT-NEXT: adds r7, r6, #1 +; SOFT-NEXT: mov r7, r1 +; SOFT-NEXT: sbcs r7, r0 +; SOFT-NEXT: mov r7, r2 +; SOFT-NEXT: sbcs r7, r5 +; SOFT-NEXT: mov r7, r3 +; SOFT-NEXT: sbcs r7, r5 +; SOFT-NEXT: mov r7, r4 +; SOFT-NEXT: bge .LBB24_13 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: cmp r7, #0 +; 
SOFT-NEXT: beq .LBB24_14 +; SOFT-NEXT: .LBB24_2: @ %entry +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bne .LBB24_4 +; SOFT-NEXT: .LBB24_3: @ %entry +; SOFT-NEXT: mov r2, r5 +; SOFT-NEXT: .LBB24_4: @ %entry +; SOFT-NEXT: str r2, [sp, #8] @ 4-byte Spill +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bne .LBB24_6 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: mov r1, r0 +; SOFT-NEXT: .LBB24_6: @ %entry +; SOFT-NEXT: str r3, [sp, #4] @ 4-byte Spill +; SOFT-NEXT: mvns r0, r5 +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bne .LBB24_8 +; SOFT-NEXT: @ %bb.7: @ %entry +; SOFT-NEXT: mov r6, r0 +; SOFT-NEXT: .LBB24_8: @ %entry +; SOFT-NEXT: lsls r3, r4, #31 +; SOFT-NEXT: rsbs r7, r6, #0 +; SOFT-NEXT: mov r7, r3 +; SOFT-NEXT: sbcs r7, r1 +; SOFT-NEXT: mov r7, r0 +; SOFT-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; SOFT-NEXT: sbcs r7, r2 +; SOFT-NEXT: ldr r2, [sp, #4] @ 4-byte Reload +; SOFT-NEXT: sbcs r0, r2 +; SOFT-NEXT: bge .LBB24_15 +; SOFT-NEXT: @ %bb.9: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB24_16 +; SOFT-NEXT: .LBB24_10: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB24_12 +; SOFT-NEXT: .LBB24_11: @ %entry +; SOFT-NEXT: mov r1, r3 +; SOFT-NEXT: .LBB24_12: @ %entry +; SOFT-NEXT: mov r0, r6 +; SOFT-NEXT: add sp, #12 +; SOFT-NEXT: pop {r4, r5, r6, r7, pc} +; SOFT-NEXT: .LBB24_13: @ %entry +; SOFT-NEXT: mov r7, r5 +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bne .LBB24_2 +; SOFT-NEXT: .LBB24_14: @ %entry +; SOFT-NEXT: mov r3, r5 +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: beq .LBB24_3 +; SOFT-NEXT: b .LBB24_4 +; SOFT-NEXT: .LBB24_15: @ %entry +; SOFT-NEXT: mov r4, r5 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB24_10 +; SOFT-NEXT: .LBB24_16: @ %entry +; SOFT-NEXT: mov r6, r5 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB24_11 +; SOFT-NEXT: b .LBB24_12 +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.17: +; SOFT-NEXT: .LCPI24_0: +; SOFT-NEXT: .long 2147483647 @ 0x7fffffff +; +; VFP2-LABEL: stest_f16i64: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r4, r5, r7, lr} +; VFP2-NEXT: push 
{r4, r5, r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_h2f +; VFP2-NEXT: vmov s0, r0 +; VFP2-NEXT: bl __fixsfti +; VFP2-NEXT: subs.w r4, r0, #-1 +; VFP2-NEXT: mov r12, r1 +; VFP2-NEXT: mvn r1, #-2147483648 +; VFP2-NEXT: sbcs.w r4, r12, r1 +; VFP2-NEXT: sbcs r4, r2, #0 +; VFP2-NEXT: mov.w lr, #0 +; VFP2-NEXT: sbcs r4, r3, #0 +; VFP2-NEXT: mov.w r4, #0 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r4, #1 +; VFP2-NEXT: cmp r4, #0 +; VFP2-NEXT: itee eq +; VFP2-NEXT: moveq r3, r4 +; VFP2-NEXT: movne r4, r2 +; VFP2-NEXT: movne r1, r12 +; VFP2-NEXT: mov.w r2, #-1 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r2 +; VFP2-NEXT: rsbs r5, r0, #0 +; VFP2-NEXT: mov.w r12, #-2147483648 +; VFP2-NEXT: sbcs.w r5, r12, r1 +; VFP2-NEXT: sbcs.w r4, r2, r4 +; VFP2-NEXT: sbcs r2, r3 +; VFP2-NEXT: mov.w r2, #0 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r2, #1 +; VFP2-NEXT: cmp r2, #0 +; VFP2-NEXT: itt eq +; VFP2-NEXT: moveq r0, lr +; VFP2-NEXT: moveq r1, r12 +; VFP2-NEXT: pop {r4, r5, r7, pc} +; +; FULL-LABEL: stest_f16i64: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r4, r5, r7, lr} +; FULL-NEXT: push {r4, r5, r7, lr} +; FULL-NEXT: vmov.f16 r0, s0 +; FULL-NEXT: vmov s0, r0 +; FULL-NEXT: bl __fixhfti +; FULL-NEXT: subs.w lr, r0, #-1 +; FULL-NEXT: mvn r12, #-2147483648 +; FULL-NEXT: sbcs.w lr, r1, r12 +; FULL-NEXT: sbcs lr, r2, #0 +; FULL-NEXT: sbcs lr, r3, #0 +; FULL-NEXT: cset lr, lt +; FULL-NEXT: cmp.w lr, #0 +; FULL-NEXT: csel r5, r3, lr, ne +; FULL-NEXT: mov.w r3, #-1 +; FULL-NEXT: csel r0, r0, r3, ne +; FULL-NEXT: csel r1, r1, r12, ne +; FULL-NEXT: csel r2, r2, lr, ne +; FULL-NEXT: rsbs r4, r0, #0 +; FULL-NEXT: mov.w r12, #-2147483648 +; FULL-NEXT: sbcs.w r4, r12, r1 +; FULL-NEXT: sbcs.w r2, r3, r2 +; FULL-NEXT: sbcs.w r2, r3, r5 +; FULL-NEXT: cset r2, lt +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: it eq +; FULL-NEXT: moveq r0, #0 +; FULL-NEXT: csel r1, r1, r12, ne +; FULL-NEXT: pop {r4, r5, r7, pc} +entry: + %conv = fptosi half %x to i128 + %0 = icmp slt i128 %conv, 
9223372036854775807 + %spec.store.select = select i1 %0, i128 %conv, i128 9223372036854775807 + %1 = icmp sgt i128 %spec.store.select, -9223372036854775808 + %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 -9223372036854775808 + %conv6 = trunc i128 %spec.store.select7 to i64 + ret i64 %conv6 +} + +define i64 @utesth_f16i64(half %x) { +; SOFT-LABEL: utesth_f16i64: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, lr} +; SOFT-NEXT: push {r4, lr} +; SOFT-NEXT: uxth r0, r0 +; SOFT-NEXT: bl __aeabi_h2f +; SOFT-NEXT: bl __fixunssfti +; SOFT-NEXT: movs r4, #0 +; SOFT-NEXT: subs r2, r2, #1 +; SOFT-NEXT: sbcs r3, r4 +; SOFT-NEXT: blo .LBB25_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r2, r4 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB25_3 +; SOFT-NEXT: b .LBB25_4 +; SOFT-NEXT: .LBB25_2: +; SOFT-NEXT: movs r2, #1 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bne .LBB25_4 +; SOFT-NEXT: .LBB25_3: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: .LBB25_4: @ %entry +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB25_6 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: pop {r4, pc} +; SOFT-NEXT: .LBB25_6: @ %entry +; SOFT-NEXT: mov r1, r4 +; SOFT-NEXT: pop {r4, pc} +; +; VFP2-LABEL: utesth_f16i64: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_h2f +; VFP2-NEXT: vmov s0, r0 +; VFP2-NEXT: bl __fixunssfti +; VFP2-NEXT: subs r2, #1 +; VFP2-NEXT: mov.w r12, #0 +; VFP2-NEXT: sbcs r2, r3, #0 +; VFP2-NEXT: mov.w r2, #0 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r2, #1 +; VFP2-NEXT: cmp r2, #0 +; VFP2-NEXT: itt eq +; VFP2-NEXT: moveq r0, r12 +; VFP2-NEXT: moveq r1, r12 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: utesth_f16i64: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: vmov.f16 r0, s0 +; FULL-NEXT: vmov s0, r0 +; FULL-NEXT: bl __fixunshfti +; FULL-NEXT: subs r2, #1 +; FULL-NEXT: sbcs r2, r3, #0 +; FULL-NEXT: mov.w r3, #0 +; FULL-NEXT: 
cset r2, lo +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r0, r0, r3, ne +; FULL-NEXT: csel r1, r1, r3, ne +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptoui half %x to i128 + %0 = icmp ult i128 %conv, 18446744073709551616 + %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 + %conv6 = trunc i128 %spec.store.select to i64 + ret i64 %conv6 +} + +define i64 @ustest_f16i64(half %x) { +; SOFT-LABEL: ustest_f16i64: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, r5, r6, lr} +; SOFT-NEXT: push {r4, r5, r6, lr} +; SOFT-NEXT: uxth r0, r0 +; SOFT-NEXT: bl __aeabi_h2f +; SOFT-NEXT: bl __fixsfti +; SOFT-NEXT: movs r4, #1 +; SOFT-NEXT: movs r5, #0 +; SOFT-NEXT: subs r6, r2, #1 +; SOFT-NEXT: mov r6, r3 +; SOFT-NEXT: sbcs r6, r5 +; SOFT-NEXT: mov r6, r4 +; SOFT-NEXT: bge .LBB26_10 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: beq .LBB26_11 +; SOFT-NEXT: .LBB26_2: @ %entry +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: beq .LBB26_12 +; SOFT-NEXT: .LBB26_3: @ %entry +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: beq .LBB26_13 +; SOFT-NEXT: .LBB26_4: @ %entry +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: bne .LBB26_6 +; SOFT-NEXT: .LBB26_5: @ %entry +; SOFT-NEXT: mov r0, r5 +; SOFT-NEXT: .LBB26_6: @ %entry +; SOFT-NEXT: rsbs r6, r0, #0 +; SOFT-NEXT: mov r6, r5 +; SOFT-NEXT: sbcs r6, r1 +; SOFT-NEXT: mov r6, r5 +; SOFT-NEXT: sbcs r6, r2 +; SOFT-NEXT: mov r2, r5 +; SOFT-NEXT: sbcs r2, r3 +; SOFT-NEXT: bge .LBB26_14 +; SOFT-NEXT: @ %bb.7: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB26_15 +; SOFT-NEXT: .LBB26_8: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB26_16 +; SOFT-NEXT: .LBB26_9: @ %entry +; SOFT-NEXT: pop {r4, r5, r6, pc} +; SOFT-NEXT: .LBB26_10: @ %entry +; SOFT-NEXT: mov r6, r5 +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: bne .LBB26_2 +; SOFT-NEXT: .LBB26_11: @ %entry +; SOFT-NEXT: mov r3, r5 +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: bne .LBB26_3 +; SOFT-NEXT: .LBB26_12: @ %entry +; SOFT-NEXT: mov r2, r4 +; SOFT-NEXT: cmp r6, #0 +; 
SOFT-NEXT: bne .LBB26_4 +; SOFT-NEXT: .LBB26_13: @ %entry +; SOFT-NEXT: mov r1, r5 +; SOFT-NEXT: cmp r6, #0 +; SOFT-NEXT: beq .LBB26_5 +; SOFT-NEXT: b .LBB26_6 +; SOFT-NEXT: .LBB26_14: @ %entry +; SOFT-NEXT: mov r4, r5 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB26_8 +; SOFT-NEXT: .LBB26_15: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB26_9 +; SOFT-NEXT: .LBB26_16: @ %entry +; SOFT-NEXT: mov r1, r4 +; SOFT-NEXT: pop {r4, r5, r6, pc} +; +; VFP2-LABEL: ustest_f16i64: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_h2f +; VFP2-NEXT: vmov s0, r0 +; VFP2-NEXT: bl __fixsfti +; VFP2-NEXT: subs.w lr, r2, #1 +; VFP2-NEXT: mov.w r12, #0 +; VFP2-NEXT: sbcs lr, r3, #0 +; VFP2-NEXT: mov.w lr, #0 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt.w lr, #1 +; VFP2-NEXT: cmp.w lr, #0 +; VFP2-NEXT: itttt eq +; VFP2-NEXT: moveq r3, r12 +; VFP2-NEXT: moveq r2, #1 +; VFP2-NEXT: moveq r1, r12 +; VFP2-NEXT: moveq r0, r12 +; VFP2-NEXT: rsbs.w lr, r0, #0 +; VFP2-NEXT: sbcs.w lr, r12, r1 +; VFP2-NEXT: sbcs.w r2, r12, r2 +; VFP2-NEXT: sbcs.w r2, r12, r3 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt.w r12, #1 +; VFP2-NEXT: cmp.w r12, #0 +; VFP2-NEXT: itt eq +; VFP2-NEXT: moveq r0, r12 +; VFP2-NEXT: moveq r1, r12 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: ustest_f16i64: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: vmov.f16 r0, s0 +; FULL-NEXT: vmov s0, r0 +; FULL-NEXT: bl __fixhfti +; FULL-NEXT: subs.w r12, r2, #1 +; FULL-NEXT: mov.w lr, #0 +; FULL-NEXT: sbcs r12, r3, #0 +; FULL-NEXT: cset r12, lt +; FULL-NEXT: cmp.w r12, #0 +; FULL-NEXT: it eq +; FULL-NEXT: moveq r2, #1 +; FULL-NEXT: csel r0, r0, lr, ne +; FULL-NEXT: csel r12, r3, lr, ne +; FULL-NEXT: csel r1, r1, lr, ne +; FULL-NEXT: rsbs r3, r0, #0 +; FULL-NEXT: sbcs.w r3, lr, r1 +; FULL-NEXT: sbcs.w r2, lr, r2 +; FULL-NEXT: sbcs.w r2, lr, r12 +; FULL-NEXT: cset r2, lt +; 
FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r0, r0, r2, ne +; FULL-NEXT: csel r1, r1, r2, ne +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptosi half %x to i128 + %0 = icmp slt i128 %conv, 18446744073709551616 + %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 + %1 = icmp sgt i128 %spec.store.select, 0 + %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 0 + %conv6 = trunc i128 %spec.store.select7 to i64 + ret i64 %conv6 +} + + + + +; i32 saturate + +define i32 @stest_f64i32_mm(double %x) { +; SOFT-LABEL: stest_f64i32_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_d2lz +; SOFT-NEXT: mov r2, r0 +; SOFT-NEXT: ldr r3, .LCPI27_0 +; SOFT-NEXT: cmp r0, r3 +; SOFT-NEXT: bhs .LBB27_9 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: bpl .LBB27_10 +; SOFT-NEXT: .LBB27_2: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: bne .LBB27_11 +; SOFT-NEXT: .LBB27_3: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: bmi .LBB27_5 +; SOFT-NEXT: .LBB27_4: @ %entry +; SOFT-NEXT: movs r1, #0 +; SOFT-NEXT: .LBB27_5: @ %entry +; SOFT-NEXT: movs r2, #1 +; SOFT-NEXT: lsls r3, r2, #31 +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: mov r2, r0 +; SOFT-NEXT: blt .LBB27_12 +; SOFT-NEXT: @ %bb.6: @ %entry +; SOFT-NEXT: cmp r0, r3 +; SOFT-NEXT: bls .LBB27_13 +; SOFT-NEXT: .LBB27_7: @ %entry +; SOFT-NEXT: adds r1, r1, #1 +; SOFT-NEXT: bne .LBB27_14 +; SOFT-NEXT: .LBB27_8: @ %entry +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .LBB27_9: @ %entry +; SOFT-NEXT: mov r0, r3 +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: bmi .LBB27_2 +; SOFT-NEXT: .LBB27_10: @ %entry +; SOFT-NEXT: mov r2, r3 +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: beq .LBB27_3 +; SOFT-NEXT: .LBB27_11: @ %entry +; SOFT-NEXT: mov r0, r2 +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: bpl .LBB27_4 +; SOFT-NEXT: b .LBB27_5 +; SOFT-NEXT: .LBB27_12: @ %entry +; SOFT-NEXT: mov r2, r3 +; SOFT-NEXT: cmp r0, r3 +; SOFT-NEXT: bhi .LBB27_7 +; SOFT-NEXT: .LBB27_13: 
@ %entry +; SOFT-NEXT: mov r0, r3 +; SOFT-NEXT: adds r1, r1, #1 +; SOFT-NEXT: beq .LBB27_8 +; SOFT-NEXT: .LBB27_14: @ %entry +; SOFT-NEXT: mov r0, r2 +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.15: +; SOFT-NEXT: .LCPI27_0: +; SOFT-NEXT: .long 2147483647 @ 0x7fffffff +; +; VFP2-LABEL: stest_f64i32_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, r1, d0 +; VFP2-NEXT: bl __aeabi_d2lz +; VFP2-NEXT: mvn r2, #-2147483648 +; VFP2-NEXT: cmp r0, r2 +; VFP2-NEXT: mvn r3, #-2147483648 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r3, r0 +; VFP2-NEXT: cmp r1, #0 +; VFP2-NEXT: it mi +; VFP2-NEXT: movmi r2, r0 +; VFP2-NEXT: mov.w r0, #-2147483648 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r2, r3 +; VFP2-NEXT: it pl +; VFP2-NEXT: movpl r1, #0 +; VFP2-NEXT: cmp.w r1, #-1 +; VFP2-NEXT: mov.w r3, #-2147483648 +; VFP2-NEXT: it gt +; VFP2-NEXT: movgt r3, r2 +; VFP2-NEXT: cmp.w r2, #-2147483648 +; VFP2-NEXT: it ls +; VFP2-NEXT: movls r2, r0 +; VFP2-NEXT: adds r0, r1, #1 +; VFP2-NEXT: it ne +; VFP2-NEXT: movne r2, r3 +; VFP2-NEXT: mov r0, r2 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: stest_f64i32_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: vmov r0, r1, d0 +; FULL-NEXT: bl __aeabi_d2lz +; FULL-NEXT: mvn r2, #-2147483648 +; FULL-NEXT: cmp r0, r2 +; FULL-NEXT: csel r3, r0, r2, lo +; FULL-NEXT: cmp r1, #0 +; FULL-NEXT: csel r0, r0, r2, mi +; FULL-NEXT: it pl +; FULL-NEXT: movpl r1, #0 +; FULL-NEXT: csel r0, r3, r0, eq +; FULL-NEXT: mov.w r2, #-2147483648 +; FULL-NEXT: cmp.w r1, #-1 +; FULL-NEXT: csel r3, r0, r2, gt +; FULL-NEXT: cmp.w r0, #-2147483648 +; FULL-NEXT: csel r0, r0, r2, hi +; FULL-NEXT: adds r1, #1 +; FULL-NEXT: csel r0, r0, r3, eq +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptosi double %x to i64 + %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 2147483647) + %spec.store.select7 = call i64 @llvm.smax.i64(i64 
%spec.store.select, i64 -2147483648) + %conv6 = trunc i64 %spec.store.select7 to i32 + ret i32 %conv6 +} + +define i32 @utest_f64i32_mm(double %x) { +; SOFT-LABEL: utest_f64i32_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_d2ulz +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: beq .LBB28_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: movs r0, #0 +; SOFT-NEXT: mvns r0, r0 +; SOFT-NEXT: .LBB28_2: @ %entry +; SOFT-NEXT: pop {r7, pc} +; +; VFP-LABEL: utest_f64i32_mm: +; VFP: @ %bb.0: @ %entry +; VFP-NEXT: .save {r7, lr} +; VFP-NEXT: push {r7, lr} +; VFP-NEXT: vmov r0, r1, d0 +; VFP-NEXT: bl __aeabi_d2ulz +; VFP-NEXT: cmp r1, #0 +; VFP-NEXT: it ne +; VFP-NEXT: movne.w r0, #-1 +; VFP-NEXT: pop {r7, pc} +entry: + %conv = fptoui double %x to i64 + %spec.store.select = call i64 @llvm.umin.i64(i64 %conv, i64 4294967295) + %conv6 = trunc i64 %spec.store.select to i32 + ret i32 %conv6 +} + +define i32 @ustest_f64i32_mm(double %x) { +; SOFT-LABEL: ustest_f64i32_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_d2lz +; SOFT-NEXT: movs r2, #0 +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: mov r3, r0 +; SOFT-NEXT: bpl .LBB29_7 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: bne .LBB29_8 +; SOFT-NEXT: .LBB29_2: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: bmi .LBB29_4 +; SOFT-NEXT: .LBB29_3: @ %entry +; SOFT-NEXT: mov r1, r2 +; SOFT-NEXT: .LBB29_4: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: mov r3, r0 +; SOFT-NEXT: ble .LBB29_9 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: bne .LBB29_10 +; SOFT-NEXT: .LBB29_6: @ %entry +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .LBB29_7: @ %entry +; SOFT-NEXT: mvns r3, r2 +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: beq .LBB29_2 +; SOFT-NEXT: .LBB29_8: @ %entry +; SOFT-NEXT: mov r0, r3 +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: bpl .LBB29_3 +; SOFT-NEXT: b .LBB29_4 +; SOFT-NEXT: .LBB29_9: @ 
%entry +; SOFT-NEXT: mov r3, r2 +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: beq .LBB29_6 +; SOFT-NEXT: .LBB29_10: @ %entry +; SOFT-NEXT: mov r0, r3 +; SOFT-NEXT: pop {r7, pc} +; +; VFP2-LABEL: ustest_f64i32_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, r1, d0 +; VFP2-NEXT: bl __aeabi_d2lz +; VFP2-NEXT: mov r2, r0 +; VFP2-NEXT: cmp r1, #0 +; VFP2-NEXT: it pl +; VFP2-NEXT: movpl.w r2, #-1 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r2, r0 +; VFP2-NEXT: mov.w r0, #0 +; VFP2-NEXT: it pl +; VFP2-NEXT: movpl r1, r0 +; VFP2-NEXT: cmp r1, #0 +; VFP2-NEXT: it gt +; VFP2-NEXT: movgt r0, r2 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r2 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: ustest_f64i32_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: vmov r0, r1, d0 +; FULL-NEXT: bl __aeabi_d2lz +; FULL-NEXT: mov r2, r0 +; FULL-NEXT: cmp r1, #0 +; FULL-NEXT: it pl +; FULL-NEXT: movpl.w r2, #-1 +; FULL-NEXT: csel r0, r0, r2, eq +; FULL-NEXT: mov.w r2, #0 +; FULL-NEXT: csel r1, r1, r2, mi +; FULL-NEXT: cmp r1, #0 +; FULL-NEXT: csel r1, r0, r2, gt +; FULL-NEXT: csel r0, r0, r1, eq +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptosi double %x to i64 + %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 4294967295) + %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 0) + %conv6 = trunc i64 %spec.store.select7 to i32 + ret i32 %conv6 +} + +define i32 @stest_f32i32_mm(float %x) { +; SOFT-LABEL: stest_f32i32_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_f2lz +; SOFT-NEXT: mov r2, r0 +; SOFT-NEXT: ldr r3, .LCPI30_0 +; SOFT-NEXT: cmp r0, r3 +; SOFT-NEXT: bhs .LBB30_9 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: bpl .LBB30_10 +; SOFT-NEXT: .LBB30_2: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: bne .LBB30_11 +; SOFT-NEXT: .LBB30_3: @ %entry +; SOFT-NEXT: cmp r1, 
#0 +; SOFT-NEXT: bmi .LBB30_5 +; SOFT-NEXT: .LBB30_4: @ %entry +; SOFT-NEXT: movs r1, #0 +; SOFT-NEXT: .LBB30_5: @ %entry +; SOFT-NEXT: movs r2, #1 +; SOFT-NEXT: lsls r3, r2, #31 +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: mov r2, r0 +; SOFT-NEXT: blt .LBB30_12 +; SOFT-NEXT: @ %bb.6: @ %entry +; SOFT-NEXT: cmp r0, r3 +; SOFT-NEXT: bls .LBB30_13 +; SOFT-NEXT: .LBB30_7: @ %entry +; SOFT-NEXT: adds r1, r1, #1 +; SOFT-NEXT: bne .LBB30_14 +; SOFT-NEXT: .LBB30_8: @ %entry +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .LBB30_9: @ %entry +; SOFT-NEXT: mov r0, r3 +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: bmi .LBB30_2 +; SOFT-NEXT: .LBB30_10: @ %entry +; SOFT-NEXT: mov r2, r3 +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: beq .LBB30_3 +; SOFT-NEXT: .LBB30_11: @ %entry +; SOFT-NEXT: mov r0, r2 +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: bpl .LBB30_4 +; SOFT-NEXT: b .LBB30_5 +; SOFT-NEXT: .LBB30_12: @ %entry +; SOFT-NEXT: mov r2, r3 +; SOFT-NEXT: cmp r0, r3 +; SOFT-NEXT: bhi .LBB30_7 +; SOFT-NEXT: .LBB30_13: @ %entry +; SOFT-NEXT: mov r0, r3 +; SOFT-NEXT: adds r1, r1, #1 +; SOFT-NEXT: beq .LBB30_8 +; SOFT-NEXT: .LBB30_14: @ %entry +; SOFT-NEXT: mov r0, r2 +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.15: +; SOFT-NEXT: .LCPI30_0: +; SOFT-NEXT: .long 2147483647 @ 0x7fffffff +; +; VFP2-LABEL: stest_f32i32_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_f2lz +; VFP2-NEXT: mvn r2, #-2147483648 +; VFP2-NEXT: cmp r0, r2 +; VFP2-NEXT: mvn r3, #-2147483648 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r3, r0 +; VFP2-NEXT: cmp r1, #0 +; VFP2-NEXT: it mi +; VFP2-NEXT: movmi r2, r0 +; VFP2-NEXT: mov.w r0, #-2147483648 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r2, r3 +; VFP2-NEXT: it pl +; VFP2-NEXT: movpl r1, #0 +; VFP2-NEXT: cmp.w r1, #-1 +; VFP2-NEXT: mov.w r3, #-2147483648 +; VFP2-NEXT: it gt +; VFP2-NEXT: movgt r3, r2 +; VFP2-NEXT: cmp.w r2, #-2147483648 +; VFP2-NEXT: it ls +; VFP2-NEXT: movls r2, r0 +; 
VFP2-NEXT: adds r0, r1, #1 +; VFP2-NEXT: it ne +; VFP2-NEXT: movne r2, r3 +; VFP2-NEXT: mov r0, r2 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: stest_f32i32_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: bl __aeabi_f2lz +; FULL-NEXT: mvn r2, #-2147483648 +; FULL-NEXT: cmp r0, r2 +; FULL-NEXT: csel r3, r0, r2, lo +; FULL-NEXT: cmp r1, #0 +; FULL-NEXT: csel r0, r0, r2, mi +; FULL-NEXT: it pl +; FULL-NEXT: movpl r1, #0 +; FULL-NEXT: csel r0, r3, r0, eq +; FULL-NEXT: mov.w r2, #-2147483648 +; FULL-NEXT: cmp.w r1, #-1 +; FULL-NEXT: csel r3, r0, r2, gt +; FULL-NEXT: cmp.w r0, #-2147483648 +; FULL-NEXT: csel r0, r0, r2, hi +; FULL-NEXT: adds r1, #1 +; FULL-NEXT: csel r0, r0, r3, eq +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptosi float %x to i64 + %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 2147483647) + %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 -2147483648) + %conv6 = trunc i64 %spec.store.select7 to i32 + ret i32 %conv6 +} + +define i32 @utest_f32i32_mm(float %x) { +; SOFT-LABEL: utest_f32i32_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_f2ulz +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: beq .LBB31_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: movs r0, #0 +; SOFT-NEXT: mvns r0, r0 +; SOFT-NEXT: .LBB31_2: @ %entry +; SOFT-NEXT: pop {r7, pc} +; +; VFP-LABEL: utest_f32i32_mm: +; VFP: @ %bb.0: @ %entry +; VFP-NEXT: .save {r7, lr} +; VFP-NEXT: push {r7, lr} +; VFP-NEXT: vmov r0, s0 +; VFP-NEXT: bl __aeabi_f2ulz +; VFP-NEXT: cmp r1, #0 +; VFP-NEXT: it ne +; VFP-NEXT: movne.w r0, #-1 +; VFP-NEXT: pop {r7, pc} +entry: + %conv = fptoui float %x to i64 + %spec.store.select = call i64 @llvm.umin.i64(i64 %conv, i64 4294967295) + %conv6 = trunc i64 %spec.store.select to i32 + ret i32 %conv6 +} + +define i32 @ustest_f32i32_mm(float %x) { +; SOFT-LABEL: ustest_f32i32_mm: +; SOFT: @ %bb.0: @ 
%entry +; SOFT-NEXT: .save {r4, lr} +; SOFT-NEXT: push {r4, lr} +; SOFT-NEXT: bl __aeabi_f2lz +; SOFT-NEXT: movs r2, #0 +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: mov r3, r1 +; SOFT-NEXT: bmi .LBB32_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r3, r2 +; SOFT-NEXT: .LBB32_2: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: mov r4, r0 +; SOFT-NEXT: bmi .LBB32_4 +; SOFT-NEXT: @ %bb.3: @ %entry +; SOFT-NEXT: mvns r4, r2 +; SOFT-NEXT: .LBB32_4: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: beq .LBB32_6 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: .LBB32_6: @ %entry +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: mov r1, r0 +; SOFT-NEXT: ble .LBB32_9 +; SOFT-NEXT: @ %bb.7: @ %entry +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bne .LBB32_10 +; SOFT-NEXT: .LBB32_8: @ %entry +; SOFT-NEXT: pop {r4, pc} +; SOFT-NEXT: .LBB32_9: @ %entry +; SOFT-NEXT: mov r1, r2 +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: beq .LBB32_8 +; SOFT-NEXT: .LBB32_10: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: pop {r4, pc} +; +; VFP2-LABEL: ustest_f32i32_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_f2lz +; VFP2-NEXT: mov r2, r0 +; VFP2-NEXT: cmp r1, #0 +; VFP2-NEXT: it pl +; VFP2-NEXT: movpl.w r2, #-1 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r2, r0 +; VFP2-NEXT: mov.w r0, #0 +; VFP2-NEXT: it pl +; VFP2-NEXT: movpl r1, r0 +; VFP2-NEXT: cmp r1, #0 +; VFP2-NEXT: it gt +; VFP2-NEXT: movgt r0, r2 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r2 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: ustest_f32i32_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: bl __aeabi_f2lz +; FULL-NEXT: mov r2, r0 +; FULL-NEXT: cmp r1, #0 +; FULL-NEXT: it pl +; FULL-NEXT: movpl.w r2, #-1 +; FULL-NEXT: csel r0, r0, r2, eq +; FULL-NEXT: mov.w r2, #0 +; FULL-NEXT: csel r1, r1, r2, mi +; FULL-NEXT: cmp r1, #0 +; FULL-NEXT: csel r1, r0, r2, gt +; 
FULL-NEXT: csel r0, r0, r1, eq +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptosi float %x to i64 + %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 4294967295) + %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 0) + %conv6 = trunc i64 %spec.store.select7 to i32 + ret i32 %conv6 +} + +define i32 @stest_f16i32_mm(half %x) { +; SOFT-LABEL: stest_f16i32_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: uxth r0, r0 +; SOFT-NEXT: bl __aeabi_h2f +; SOFT-NEXT: bl __aeabi_f2lz +; SOFT-NEXT: mov r2, r0 +; SOFT-NEXT: ldr r3, .LCPI33_0 +; SOFT-NEXT: cmp r0, r3 +; SOFT-NEXT: bhs .LBB33_9 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: bpl .LBB33_10 +; SOFT-NEXT: .LBB33_2: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: bne .LBB33_11 +; SOFT-NEXT: .LBB33_3: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: bmi .LBB33_5 +; SOFT-NEXT: .LBB33_4: @ %entry +; SOFT-NEXT: movs r1, #0 +; SOFT-NEXT: .LBB33_5: @ %entry +; SOFT-NEXT: movs r2, #1 +; SOFT-NEXT: lsls r3, r2, #31 +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: mov r2, r0 +; SOFT-NEXT: blt .LBB33_12 +; SOFT-NEXT: @ %bb.6: @ %entry +; SOFT-NEXT: cmp r0, r3 +; SOFT-NEXT: bls .LBB33_13 +; SOFT-NEXT: .LBB33_7: @ %entry +; SOFT-NEXT: adds r1, r1, #1 +; SOFT-NEXT: bne .LBB33_14 +; SOFT-NEXT: .LBB33_8: @ %entry +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .LBB33_9: @ %entry +; SOFT-NEXT: mov r0, r3 +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: bmi .LBB33_2 +; SOFT-NEXT: .LBB33_10: @ %entry +; SOFT-NEXT: mov r2, r3 +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: beq .LBB33_3 +; SOFT-NEXT: .LBB33_11: @ %entry +; SOFT-NEXT: mov r0, r2 +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: bpl .LBB33_4 +; SOFT-NEXT: b .LBB33_5 +; SOFT-NEXT: .LBB33_12: @ %entry +; SOFT-NEXT: mov r2, r3 +; SOFT-NEXT: cmp r0, r3 +; SOFT-NEXT: bhi .LBB33_7 +; SOFT-NEXT: .LBB33_13: @ %entry +; SOFT-NEXT: mov r0, r3 +; SOFT-NEXT: adds r1, r1, #1 +; SOFT-NEXT: beq .LBB33_8 +; SOFT-NEXT: .LBB33_14: 
@ %entry +; SOFT-NEXT: mov r0, r2 +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.15: +; SOFT-NEXT: .LCPI33_0: +; SOFT-NEXT: .long 2147483647 @ 0x7fffffff +; +; VFP2-LABEL: stest_f16i32_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_h2f +; VFP2-NEXT: bl __aeabi_f2lz +; VFP2-NEXT: mvn r2, #-2147483648 +; VFP2-NEXT: cmp r0, r2 +; VFP2-NEXT: mvn r3, #-2147483648 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r3, r0 +; VFP2-NEXT: cmp r1, #0 +; VFP2-NEXT: it mi +; VFP2-NEXT: movmi r2, r0 +; VFP2-NEXT: mov.w r0, #-2147483648 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r2, r3 +; VFP2-NEXT: it pl +; VFP2-NEXT: movpl r1, #0 +; VFP2-NEXT: cmp.w r1, #-1 +; VFP2-NEXT: mov.w r3, #-2147483648 +; VFP2-NEXT: it gt +; VFP2-NEXT: movgt r3, r2 +; VFP2-NEXT: cmp.w r2, #-2147483648 +; VFP2-NEXT: it ls +; VFP2-NEXT: movls r2, r0 +; VFP2-NEXT: adds r0, r1, #1 +; VFP2-NEXT: it ne +; VFP2-NEXT: movne r2, r3 +; VFP2-NEXT: mov r0, r2 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: stest_f16i32_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: vmov.f16 r0, s0 +; FULL-NEXT: vmov s0, r0 +; FULL-NEXT: bl __fixhfdi +; FULL-NEXT: mvn r2, #-2147483648 +; FULL-NEXT: cmp r0, r2 +; FULL-NEXT: csel r3, r0, r2, lo +; FULL-NEXT: cmp r1, #0 +; FULL-NEXT: csel r0, r0, r2, mi +; FULL-NEXT: it pl +; FULL-NEXT: movpl r1, #0 +; FULL-NEXT: csel r0, r3, r0, eq +; FULL-NEXT: mov.w r2, #-2147483648 +; FULL-NEXT: cmp.w r1, #-1 +; FULL-NEXT: csel r3, r0, r2, gt +; FULL-NEXT: cmp.w r0, #-2147483648 +; FULL-NEXT: csel r0, r0, r2, hi +; FULL-NEXT: adds r1, #1 +; FULL-NEXT: csel r0, r0, r3, eq +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptosi half %x to i64 + %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 2147483647) + %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 -2147483648) + %conv6 = trunc i64 %spec.store.select7 to i32 + 
ret i32 %conv6 +} + +define i32 @utesth_f16i32_mm(half %x) { +; SOFT-LABEL: utesth_f16i32_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: uxth r0, r0 +; SOFT-NEXT: bl __aeabi_h2f +; SOFT-NEXT: bl __aeabi_f2ulz +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: beq .LBB34_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: movs r0, #0 +; SOFT-NEXT: mvns r0, r0 +; SOFT-NEXT: .LBB34_2: @ %entry +; SOFT-NEXT: pop {r7, pc} +; +; VFP2-LABEL: utesth_f16i32_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_h2f +; VFP2-NEXT: bl __aeabi_f2ulz +; VFP2-NEXT: cmp r1, #0 +; VFP2-NEXT: it ne +; VFP2-NEXT: movne.w r0, #-1 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: utesth_f16i32_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: vmov.f16 r0, s0 +; FULL-NEXT: vmov s0, r0 +; FULL-NEXT: bl __fixunshfdi +; FULL-NEXT: cmp r1, #0 +; FULL-NEXT: it ne +; FULL-NEXT: movne.w r0, #-1 +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptoui half %x to i64 + %spec.store.select = call i64 @llvm.umin.i64(i64 %conv, i64 4294967295) + %conv6 = trunc i64 %spec.store.select to i32 + ret i32 %conv6 +} + +define i32 @ustest_f16i32_mm(half %x) { +; SOFT-LABEL: ustest_f16i32_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, lr} +; SOFT-NEXT: push {r4, lr} +; SOFT-NEXT: uxth r0, r0 +; SOFT-NEXT: bl __aeabi_h2f +; SOFT-NEXT: bl __aeabi_f2lz +; SOFT-NEXT: movs r2, #0 +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: mov r3, r1 +; SOFT-NEXT: bmi .LBB35_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r3, r2 +; SOFT-NEXT: .LBB35_2: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: mov r4, r0 +; SOFT-NEXT: bmi .LBB35_4 +; SOFT-NEXT: @ %bb.3: @ %entry +; SOFT-NEXT: mvns r4, r2 +; SOFT-NEXT: .LBB35_4: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: beq .LBB35_6 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: .LBB35_6: @ 
%entry +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: mov r1, r0 +; SOFT-NEXT: ble .LBB35_9 +; SOFT-NEXT: @ %bb.7: @ %entry +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bne .LBB35_10 +; SOFT-NEXT: .LBB35_8: @ %entry +; SOFT-NEXT: pop {r4, pc} +; SOFT-NEXT: .LBB35_9: @ %entry +; SOFT-NEXT: mov r1, r2 +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: beq .LBB35_8 +; SOFT-NEXT: .LBB35_10: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: pop {r4, pc} +; +; VFP2-LABEL: ustest_f16i32_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_h2f +; VFP2-NEXT: bl __aeabi_f2lz +; VFP2-NEXT: mov r2, r0 +; VFP2-NEXT: cmp r1, #0 +; VFP2-NEXT: it pl +; VFP2-NEXT: movpl.w r2, #-1 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r2, r0 +; VFP2-NEXT: mov.w r0, #0 +; VFP2-NEXT: it pl +; VFP2-NEXT: movpl r1, r0 +; VFP2-NEXT: cmp r1, #0 +; VFP2-NEXT: it gt +; VFP2-NEXT: movgt r0, r2 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r2 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: ustest_f16i32_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: vmov.f16 r0, s0 +; FULL-NEXT: vmov s0, r0 +; FULL-NEXT: bl __fixhfdi +; FULL-NEXT: mov r2, r0 +; FULL-NEXT: cmp r1, #0 +; FULL-NEXT: it pl +; FULL-NEXT: movpl.w r2, #-1 +; FULL-NEXT: csel r0, r0, r2, eq +; FULL-NEXT: mov.w r2, #0 +; FULL-NEXT: csel r1, r1, r2, mi +; FULL-NEXT: cmp r1, #0 +; FULL-NEXT: csel r1, r0, r2, gt +; FULL-NEXT: csel r0, r0, r1, eq +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptosi half %x to i64 + %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 4294967295) + %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 0) + %conv6 = trunc i64 %spec.store.select7 to i32 + ret i32 %conv6 +} + +; i16 saturate + +define i16 @stest_f64i16_mm(double %x) { +; SOFT-LABEL: stest_f64i16_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_d2iz +; SOFT-NEXT: 
ldr r1, .LCPI36_0 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: blt .LBB36_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB36_2: @ %entry +; SOFT-NEXT: ldr r1, .LCPI36_1 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: bgt .LBB36_4 +; SOFT-NEXT: @ %bb.3: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB36_4: @ %entry +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.5: +; SOFT-NEXT: .LCPI36_0: +; SOFT-NEXT: .long 32767 @ 0x7fff +; SOFT-NEXT: .LCPI36_1: +; SOFT-NEXT: .long 4294934528 @ 0xffff8000 +; +; VFP2-LABEL: stest_f64i16_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, r1, d0 +; VFP2-NEXT: bl __aeabi_d2iz +; VFP2-NEXT: movw r1, #32767 +; VFP2-NEXT: cmp r0, r1 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r1, r0 +; VFP2-NEXT: movw r0, #32768 +; VFP2-NEXT: movt r0, #65535 +; VFP2-NEXT: cmn.w r1, #32768 +; VFP2-NEXT: it gt +; VFP2-NEXT: movgt r0, r1 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: stest_f64i16_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: vcvt.s32.f64 s0, d0 +; FULL-NEXT: movw r1, #32767 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: cmp r0, r1 +; FULL-NEXT: csel r0, r0, r1, lt +; FULL-NEXT: movw r1, #32768 +; FULL-NEXT: movt r1, #65535 +; FULL-NEXT: cmn.w r0, #32768 +; FULL-NEXT: csel r0, r0, r1, gt +; FULL-NEXT: bx lr +entry: + %conv = fptosi double %x to i32 + %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 32767) + %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 -32768) + %conv6 = trunc i32 %spec.store.select7 to i16 + ret i16 %conv6 +} + +define i16 @utest_f64i16_mm(double %x) { +; SOFT-LABEL: utest_f64i16_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_d2uiz +; SOFT-NEXT: ldr r1, .LCPI37_0 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: blo .LBB37_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB37_2: @ %entry +; SOFT-NEXT: pop {r7, pc} +; 
SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.3: +; SOFT-NEXT: .LCPI37_0: +; SOFT-NEXT: .long 65535 @ 0xffff +; +; VFP2-LABEL: utest_f64i16_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, r1, d0 +; VFP2-NEXT: bl __aeabi_d2uiz +; VFP2-NEXT: movw r1, #65535 +; VFP2-NEXT: cmp r0, r1 +; VFP2-NEXT: it hs +; VFP2-NEXT: movhs r0, r1 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: utest_f64i16_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: vcvt.u32.f64 s0, d0 +; FULL-NEXT: movw r1, #65535 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: cmp r0, r1 +; FULL-NEXT: csel r0, r0, r1, lo +; FULL-NEXT: bx lr +entry: + %conv = fptoui double %x to i32 + %spec.store.select = call i32 @llvm.umin.i32(i32 %conv, i32 65535) + %conv6 = trunc i32 %spec.store.select to i16 + ret i16 %conv6 +} + +define i16 @ustest_f64i16_mm(double %x) { +; SOFT-LABEL: ustest_f64i16_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_d2iz +; SOFT-NEXT: ldr r1, .LCPI38_0 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: blt .LBB38_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB38_2: @ %entry +; SOFT-NEXT: asrs r1, r0, #31 +; SOFT-NEXT: bics r0, r1 +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.3: +; SOFT-NEXT: .LCPI38_0: +; SOFT-NEXT: .long 65535 @ 0xffff +; +; VFP2-LABEL: ustest_f64i16_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, r1, d0 +; VFP2-NEXT: bl __aeabi_d2iz +; VFP2-NEXT: movw r1, #65535 +; VFP2-NEXT: cmp r0, r1 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r1, r0 +; VFP2-NEXT: bic.w r0, r1, r1, asr #31 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: ustest_f64i16_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: vcvt.s32.f64 s0, d0 +; FULL-NEXT: movw r1, #65535 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: cmp r0, r1 +; FULL-NEXT: csel r0, r0, r1, lt +; FULL-NEXT: bic.w r0, r0, r0, asr #31 +; FULL-NEXT: bx 
lr +entry: + %conv = fptosi double %x to i32 + %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 65535) + %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0) + %conv6 = trunc i32 %spec.store.select7 to i16 + ret i16 %conv6 +} + +define i16 @stest_f32i16_mm(float %x) { +; SOFT-LABEL: stest_f32i16_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_f2iz +; SOFT-NEXT: ldr r1, .LCPI39_0 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: blt .LBB39_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB39_2: @ %entry +; SOFT-NEXT: ldr r1, .LCPI39_1 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: bgt .LBB39_4 +; SOFT-NEXT: @ %bb.3: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB39_4: @ %entry +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.5: +; SOFT-NEXT: .LCPI39_0: +; SOFT-NEXT: .long 32767 @ 0x7fff +; SOFT-NEXT: .LCPI39_1: +; SOFT-NEXT: .long 4294934528 @ 0xffff8000 +; +; VFP2-LABEL: stest_f32i16_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: vcvt.s32.f32 s0, s0 +; VFP2-NEXT: movw r1, #32767 +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: cmp r0, r1 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r1, r0 +; VFP2-NEXT: movw r0, #32768 +; VFP2-NEXT: cmn.w r1, #32768 +; VFP2-NEXT: movt r0, #65535 +; VFP2-NEXT: it gt +; VFP2-NEXT: movgt r0, r1 +; VFP2-NEXT: bx lr +; +; FULL-LABEL: stest_f32i16_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: vcvt.s32.f32 s0, s0 +; FULL-NEXT: movw r1, #32767 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: cmp r0, r1 +; FULL-NEXT: csel r0, r0, r1, lt +; FULL-NEXT: movw r1, #32768 +; FULL-NEXT: movt r1, #65535 +; FULL-NEXT: cmn.w r0, #32768 +; FULL-NEXT: csel r0, r0, r1, gt +; FULL-NEXT: bx lr +entry: + %conv = fptosi float %x to i32 + %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 32767) + %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 -32768) + %conv6 = trunc i32 %spec.store.select7 to i16 + ret i16 %conv6 
+} + +define i16 @utest_f32i16_mm(float %x) { +; SOFT-LABEL: utest_f32i16_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_f2uiz +; SOFT-NEXT: ldr r1, .LCPI40_0 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: blo .LBB40_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB40_2: @ %entry +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.3: +; SOFT-NEXT: .LCPI40_0: +; SOFT-NEXT: .long 65535 @ 0xffff +; +; VFP2-LABEL: utest_f32i16_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: vcvt.u32.f32 s0, s0 +; VFP2-NEXT: movw r0, #65535 +; VFP2-NEXT: vmov r1, s0 +; VFP2-NEXT: cmp r1, r0 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r0, r1 +; VFP2-NEXT: bx lr +; +; FULL-LABEL: utest_f32i16_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: vcvt.u32.f32 s0, s0 +; FULL-NEXT: movw r1, #65535 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: cmp r0, r1 +; FULL-NEXT: csel r0, r0, r1, lo +; FULL-NEXT: bx lr +entry: + %conv = fptoui float %x to i32 + %spec.store.select = call i32 @llvm.umin.i32(i32 %conv, i32 65535) + %conv6 = trunc i32 %spec.store.select to i16 + ret i16 %conv6 +} + +define i16 @ustest_f32i16_mm(float %x) { +; SOFT-LABEL: ustest_f32i16_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: bl __aeabi_f2iz +; SOFT-NEXT: ldr r1, .LCPI41_0 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: blt .LBB41_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB41_2: @ %entry +; SOFT-NEXT: asrs r1, r0, #31 +; SOFT-NEXT: bics r0, r1 +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.3: +; SOFT-NEXT: .LCPI41_0: +; SOFT-NEXT: .long 65535 @ 0xffff +; +; VFP2-LABEL: ustest_f32i16_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: vcvt.s32.f32 s0, s0 +; VFP2-NEXT: movw r1, #65535 +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: cmp r0, r1 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r1, r0 +; VFP2-NEXT: bic.w r0, r1, r1, asr #31 +; VFP2-NEXT: bx lr +; 
+; FULL-LABEL: ustest_f32i16_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: vcvt.s32.f32 s0, s0 +; FULL-NEXT: movw r1, #65535 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: cmp r0, r1 +; FULL-NEXT: csel r0, r0, r1, lt +; FULL-NEXT: bic.w r0, r0, r0, asr #31 +; FULL-NEXT: bx lr +entry: + %conv = fptosi float %x to i32 + %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 65535) + %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0) + %conv6 = trunc i32 %spec.store.select7 to i16 + ret i16 %conv6 +} + +define i16 @stest_f16i16_mm(half %x) { +; SOFT-LABEL: stest_f16i16_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: uxth r0, r0 +; SOFT-NEXT: bl __aeabi_h2f +; SOFT-NEXT: bl __aeabi_f2iz +; SOFT-NEXT: ldr r1, .LCPI42_0 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: blt .LBB42_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB42_2: @ %entry +; SOFT-NEXT: ldr r1, .LCPI42_1 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: bgt .LBB42_4 +; SOFT-NEXT: @ %bb.3: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB42_4: @ %entry +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.5: +; SOFT-NEXT: .LCPI42_0: +; SOFT-NEXT: .long 32767 @ 0x7fff +; SOFT-NEXT: .LCPI42_1: +; SOFT-NEXT: .long 4294934528 @ 0xffff8000 +; +; VFP2-LABEL: stest_f16i16_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_h2f +; VFP2-NEXT: vmov s0, r0 +; VFP2-NEXT: movw r1, #32767 +; VFP2-NEXT: vcvt.s32.f32 s0, s0 +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: cmp r0, r1 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r1, r0 +; VFP2-NEXT: movw r0, #32768 +; VFP2-NEXT: cmn.w r1, #32768 +; VFP2-NEXT: movt r0, #65535 +; VFP2-NEXT: it gt +; VFP2-NEXT: movgt r0, r1 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: stest_f16i16_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: vcvt.s32.f16 s0, s0 +; FULL-NEXT: movw r1, #32767 +; FULL-NEXT: vmov r0, s0 
+; FULL-NEXT: cmp r0, r1 +; FULL-NEXT: csel r0, r0, r1, lt +; FULL-NEXT: movw r1, #32768 +; FULL-NEXT: movt r1, #65535 +; FULL-NEXT: cmn.w r0, #32768 +; FULL-NEXT: csel r0, r0, r1, gt +; FULL-NEXT: bx lr +entry: + %conv = fptosi half %x to i32 + %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 32767) + %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 -32768) + %conv6 = trunc i32 %spec.store.select7 to i16 + ret i16 %conv6 +} + +define i16 @utesth_f16i16_mm(half %x) { +; SOFT-LABEL: utesth_f16i16_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: uxth r0, r0 +; SOFT-NEXT: bl __aeabi_h2f +; SOFT-NEXT: bl __aeabi_f2uiz +; SOFT-NEXT: ldr r1, .LCPI43_0 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: blo .LBB43_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB43_2: @ %entry +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.3: +; SOFT-NEXT: .LCPI43_0: +; SOFT-NEXT: .long 65535 @ 0xffff +; +; VFP2-LABEL: utesth_f16i16_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_h2f +; VFP2-NEXT: vmov s0, r0 +; VFP2-NEXT: movw r0, #65535 +; VFP2-NEXT: vcvt.u32.f32 s0, s0 +; VFP2-NEXT: vmov r1, s0 +; VFP2-NEXT: cmp r1, r0 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r0, r1 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: utesth_f16i16_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: vcvt.u32.f16 s0, s0 +; FULL-NEXT: movw r1, #65535 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: cmp r0, r1 +; FULL-NEXT: csel r0, r0, r1, lo +; FULL-NEXT: bx lr +entry: + %conv = fptoui half %x to i32 + %spec.store.select = call i32 @llvm.umin.i32(i32 %conv, i32 65535) + %conv6 = trunc i32 %spec.store.select to i16 + ret i16 %conv6 +} + +define i16 @ustest_f16i16_mm(half %x) { +; SOFT-LABEL: ustest_f16i16_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r7, lr} +; SOFT-NEXT: push {r7, lr} +; SOFT-NEXT: uxth 
r0, r0 +; SOFT-NEXT: bl __aeabi_h2f +; SOFT-NEXT: bl __aeabi_f2iz +; SOFT-NEXT: ldr r1, .LCPI44_0 +; SOFT-NEXT: cmp r0, r1 +; SOFT-NEXT: blt .LBB44_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: .LBB44_2: @ %entry +; SOFT-NEXT: asrs r1, r0, #31 +; SOFT-NEXT: bics r0, r1 +; SOFT-NEXT: pop {r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.3: +; SOFT-NEXT: .LCPI44_0: +; SOFT-NEXT: .long 65535 @ 0xffff +; +; VFP2-LABEL: ustest_f16i16_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_h2f +; VFP2-NEXT: vmov s0, r0 +; VFP2-NEXT: movw r1, #65535 +; VFP2-NEXT: vcvt.s32.f32 s0, s0 +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: cmp r0, r1 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r1, r0 +; VFP2-NEXT: bic.w r0, r1, r1, asr #31 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: ustest_f16i16_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: vcvt.s32.f16 s0, s0 +; FULL-NEXT: movw r1, #65535 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: cmp r0, r1 +; FULL-NEXT: csel r0, r0, r1, lt +; FULL-NEXT: bic.w r0, r0, r0, asr #31 +; FULL-NEXT: bx lr +entry: + %conv = fptosi half %x to i32 + %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 65535) + %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0) + %conv6 = trunc i32 %spec.store.select7 to i16 + ret i16 %conv6 +} + +; i64 saturate + +define i64 @stest_f64i64_mm(double %x) { +; SOFT-LABEL: stest_f64i64_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, r5, r6, r7, lr} +; SOFT-NEXT: push {r4, r5, r6, r7, lr} +; SOFT-NEXT: .pad #12 +; SOFT-NEXT: sub sp, #12 +; SOFT-NEXT: bl __fixdfti +; SOFT-NEXT: mov r6, r0 +; SOFT-NEXT: mov r4, r1 +; SOFT-NEXT: ldr r5, .LCPI45_0 +; SOFT-NEXT: cmp r1, r5 +; SOFT-NEXT: blo .LBB45_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r1, r5 +; SOFT-NEXT: .LBB45_2: @ %entry +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: bmi .LBB45_4 +; SOFT-NEXT: @ %bb.3: @ 
%entry +; SOFT-NEXT: mov r0, r5 +; SOFT-NEXT: .LBB45_4: @ %entry +; SOFT-NEXT: str r2, [sp, #4] @ 4-byte Spill +; SOFT-NEXT: mov r7, r2 +; SOFT-NEXT: orrs r7, r3 +; SOFT-NEXT: beq .LBB45_6 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: mov r1, r0 +; SOFT-NEXT: .LBB45_6: @ %entry +; SOFT-NEXT: movs r0, #0 +; SOFT-NEXT: str r0, [sp, #8] @ 4-byte Spill +; SOFT-NEXT: mvns r2, r0 +; SOFT-NEXT: cmp r4, r5 +; SOFT-NEXT: mov r0, r6 +; SOFT-NEXT: blo .LBB45_8 +; SOFT-NEXT: @ %bb.7: @ %entry +; SOFT-NEXT: mov r0, r2 +; SOFT-NEXT: .LBB45_8: @ %entry +; SOFT-NEXT: cmp r4, r5 +; SOFT-NEXT: mov r4, r6 +; SOFT-NEXT: bne .LBB45_26 +; SOFT-NEXT: @ %bb.9: @ %entry +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bpl .LBB45_27 +; SOFT-NEXT: .LBB45_10: @ %entry +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: beq .LBB45_12 +; SOFT-NEXT: .LBB45_11: @ %entry +; SOFT-NEXT: mov r4, r6 +; SOFT-NEXT: .LBB45_12: @ %entry +; SOFT-NEXT: movs r0, #1 +; SOFT-NEXT: lsls r5, r0, #31 +; SOFT-NEXT: cmp r1, r5 +; SOFT-NEXT: mov r2, r4 +; SOFT-NEXT: ldr r6, [sp, #8] @ 4-byte Reload +; SOFT-NEXT: bhi .LBB45_14 +; SOFT-NEXT: @ %bb.13: @ %entry +; SOFT-NEXT: mov r2, r6 +; SOFT-NEXT: .LBB45_14: @ %entry +; SOFT-NEXT: cmp r1, r5 +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: beq .LBB45_16 +; SOFT-NEXT: @ %bb.15: @ %entry +; SOFT-NEXT: mov r0, r2 +; SOFT-NEXT: .LBB45_16: @ %entry +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: mov r7, r3 +; SOFT-NEXT: bpl .LBB45_28 +; SOFT-NEXT: @ %bb.17: @ %entry +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: blt .LBB45_29 +; SOFT-NEXT: .LBB45_18: @ %entry +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: beq .LBB45_20 +; SOFT-NEXT: .LBB45_19: +; SOFT-NEXT: asrs r3, r3, #31 +; SOFT-NEXT: ldr r2, [sp, #4] @ 4-byte Reload +; SOFT-NEXT: ands r3, r2 +; SOFT-NEXT: .LBB45_20: @ %entry +; SOFT-NEXT: ands r3, r7 +; SOFT-NEXT: adds r2, r3, #1 +; SOFT-NEXT: beq .LBB45_22 +; SOFT-NEXT: @ %bb.21: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: .LBB45_22: @ %entry +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: mov r3, r1 +; SOFT-NEXT: blt .LBB45_30 
+; SOFT-NEXT: @ %bb.23: @ %entry +; SOFT-NEXT: cmp r1, r5 +; SOFT-NEXT: bls .LBB45_31 +; SOFT-NEXT: .LBB45_24: @ %entry +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bne .LBB45_32 +; SOFT-NEXT: .LBB45_25: @ %entry +; SOFT-NEXT: add sp, #12 +; SOFT-NEXT: pop {r4, r5, r6, r7, pc} +; SOFT-NEXT: .LBB45_26: @ %entry +; SOFT-NEXT: mov r4, r0 +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bmi .LBB45_10 +; SOFT-NEXT: .LBB45_27: @ %entry +; SOFT-NEXT: mov r6, r2 +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bne .LBB45_11 +; SOFT-NEXT: b .LBB45_12 +; SOFT-NEXT: .LBB45_28: @ %entry +; SOFT-NEXT: mov r7, r6 +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bge .LBB45_18 +; SOFT-NEXT: .LBB45_29: @ %entry +; SOFT-NEXT: mov r4, r6 +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bne .LBB45_19 +; SOFT-NEXT: b .LBB45_20 +; SOFT-NEXT: .LBB45_30: @ %entry +; SOFT-NEXT: mov r3, r5 +; SOFT-NEXT: cmp r1, r5 +; SOFT-NEXT: bhi .LBB45_24 +; SOFT-NEXT: .LBB45_31: @ %entry +; SOFT-NEXT: mov r1, r5 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB45_25 +; SOFT-NEXT: .LBB45_32: @ %entry +; SOFT-NEXT: mov r1, r3 +; SOFT-NEXT: add sp, #12 +; SOFT-NEXT: pop {r4, r5, r6, r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.33: +; SOFT-NEXT: .LCPI45_0: +; SOFT-NEXT: .long 2147483647 @ 0x7fffffff +; +; VFP2-LABEL: stest_f64i64_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; VFP2-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; VFP2-NEXT: .pad #4 +; VFP2-NEXT: sub sp, #4 +; VFP2-NEXT: bl __fixdfti +; VFP2-NEXT: mvn r8, #-2147483648 +; VFP2-NEXT: mov r12, r1 +; VFP2-NEXT: cmp r1, r8 +; VFP2-NEXT: mvn r1, #-2147483648 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r1, r12 +; VFP2-NEXT: cmp r3, #0 +; VFP2-NEXT: mvn r4, #-2147483648 +; VFP2-NEXT: mov.w r5, #0 +; VFP2-NEXT: it mi +; VFP2-NEXT: movmi r4, r12 +; VFP2-NEXT: orrs.w r9, r2, r3 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r4, r1 +; VFP2-NEXT: cmp r3, #0 +; VFP2-NEXT: it mi +; VFP2-NEXT: movmi r5, r3 +; VFP2-NEXT: cmp.w r5, #-1 +; VFP2-NEXT: mov.w r7, #-2147483648 +; 
VFP2-NEXT: mov.w r1, #-2147483648 +; VFP2-NEXT: it gt +; VFP2-NEXT: movgt r7, r4 +; VFP2-NEXT: cmp.w r4, #-2147483648 +; VFP2-NEXT: mov r6, r3 +; VFP2-NEXT: it hi +; VFP2-NEXT: movhi r1, r4 +; VFP2-NEXT: cmp r3, #0 +; VFP2-NEXT: it ne +; VFP2-NEXT: andne.w r6, r2, r6, asr #31 +; VFP2-NEXT: and.w r2, r6, r5 +; VFP2-NEXT: mov.w lr, #0 +; VFP2-NEXT: adds r6, r2, #1 +; VFP2-NEXT: it ne +; VFP2-NEXT: movne r1, r7 +; VFP2-NEXT: mov.w r7, #-1 +; VFP2-NEXT: cmp r12, r8 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r7, r0 +; VFP2-NEXT: mov.w r2, #-1 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r7, r0 +; VFP2-NEXT: cmp r3, #0 +; VFP2-NEXT: it mi +; VFP2-NEXT: movmi r2, r0 +; VFP2-NEXT: cmp.w r9, #0 +; VFP2-NEXT: mov.w r0, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r2, r7 +; VFP2-NEXT: cmp.w r4, #-2147483648 +; VFP2-NEXT: it hi +; VFP2-NEXT: movhi r0, r2 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r2 +; VFP2-NEXT: cmp.w r5, #-1 +; VFP2-NEXT: it le +; VFP2-NEXT: movle r2, lr +; VFP2-NEXT: cmp r6, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r2, r0 +; VFP2-NEXT: mov r0, r2 +; VFP2-NEXT: add sp, #4 +; VFP2-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +; +; FULL-LABEL: stest_f64i64_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; FULL-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; FULL-NEXT: .pad #4 +; FULL-NEXT: sub sp, #4 +; FULL-NEXT: bl __fixdfti +; FULL-NEXT: mvn r12, #-2147483648 +; FULL-NEXT: cmp r1, r12 +; FULL-NEXT: csel lr, r1, r12, lo +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: csel r4, r1, r12, mi +; FULL-NEXT: orrs.w r8, r2, r3 +; FULL-NEXT: csel r4, lr, r4, eq +; FULL-NEXT: mov.w lr, #0 +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: mov.w r7, #-2147483648 +; FULL-NEXT: csel r6, r3, lr, mi +; FULL-NEXT: mov r5, r3 +; FULL-NEXT: cmp.w r6, #-1 +; FULL-NEXT: csel r9, r4, r7, gt +; FULL-NEXT: cmp.w r4, #-2147483648 +; FULL-NEXT: csel r7, r4, r7, hi +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: it ne +; FULL-NEXT: andne.w r5, r2, r5, asr #31 +; FULL-NEXT: and.w r2, r5, r6 +; 
FULL-NEXT: adds r5, r2, #1 +; FULL-NEXT: csel r2, r7, r9, eq +; FULL-NEXT: mov.w r7, #-1 +; FULL-NEXT: cmp r1, r12 +; FULL-NEXT: csel r1, r0, r7, lo +; FULL-NEXT: csel r1, r0, r1, eq +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: csel r0, r0, r7, mi +; FULL-NEXT: cmp.w r8, #0 +; FULL-NEXT: csel r0, r1, r0, eq +; FULL-NEXT: cmp.w r4, #-2147483648 +; FULL-NEXT: csel r1, r0, lr, hi +; FULL-NEXT: csel r1, r0, r1, eq +; FULL-NEXT: cmp.w r6, #-1 +; FULL-NEXT: csel r0, r0, lr, gt +; FULL-NEXT: cmp r5, #0 +; FULL-NEXT: csel r0, r1, r0, eq +; FULL-NEXT: mov r1, r2 +; FULL-NEXT: add sp, #4 +; FULL-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +entry: + %conv = fptosi double %x to i128 + %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 9223372036854775807) + %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 -9223372036854775808) + %conv6 = trunc i128 %spec.store.select7 to i64 + ret i64 %conv6 +} + +define i64 @utest_f64i64_mm(double %x) { +; SOFT-LABEL: utest_f64i64_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, r5, r6, lr} +; SOFT-NEXT: push {r4, r5, r6, lr} +; SOFT-NEXT: bl __fixunsdfti +; SOFT-NEXT: movs r5, #1 +; SOFT-NEXT: mov r4, r2 +; SOFT-NEXT: eors r4, r5 +; SOFT-NEXT: movs r6, #0 +; SOFT-NEXT: subs r2, r2, #1 +; SOFT-NEXT: mov r2, r3 +; SOFT-NEXT: sbcs r2, r6 +; SOFT-NEXT: blo .LBB46_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r5, r6 +; SOFT-NEXT: .LBB46_2: @ %entry +; SOFT-NEXT: orrs r4, r3 +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: beq .LBB46_7 +; SOFT-NEXT: @ %bb.3: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB46_8 +; SOFT-NEXT: .LBB46_4: @ %entry +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: beq .LBB46_9 +; SOFT-NEXT: .LBB46_5: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB46_10 +; SOFT-NEXT: .LBB46_6: @ %entry +; SOFT-NEXT: pop {r4, r5, r6, pc} +; SOFT-NEXT: .LBB46_7: @ %entry +; SOFT-NEXT: mov r0, r5 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB46_4 +; SOFT-NEXT: .LBB46_8: @ %entry +; SOFT-NEXT: mov 
r0, r4 +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: bne .LBB46_5 +; SOFT-NEXT: .LBB46_9: @ %entry +; SOFT-NEXT: mov r1, r5 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB46_6 +; SOFT-NEXT: .LBB46_10: @ %entry +; SOFT-NEXT: mov r1, r4 +; SOFT-NEXT: pop {r4, r5, r6, pc} +; +; VFP2-LABEL: utest_f64i64_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: bl __fixunsdfti +; VFP2-NEXT: eor r12, r2, #1 +; VFP2-NEXT: subs r2, #1 +; VFP2-NEXT: mov.w lr, #0 +; VFP2-NEXT: sbcs r2, r3, #0 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo.w lr, #1 +; VFP2-NEXT: cmp.w lr, #0 +; VFP2-NEXT: orr.w r12, r12, r3 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, lr +; VFP2-NEXT: cmp.w r12, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r12 +; VFP2-NEXT: cmp.w lr, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r1, lr +; VFP2-NEXT: cmp.w r12, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r1, r12 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: utest_f64i64_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: bl __fixunsdfti +; FULL-NEXT: eor r12, r2, #1 +; FULL-NEXT: subs r2, #1 +; FULL-NEXT: sbcs r2, r3, #0 +; FULL-NEXT: orr.w r12, r12, r3 +; FULL-NEXT: cset r2, lo +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r0, r0, r2, ne +; FULL-NEXT: cmp.w r12, #0 +; FULL-NEXT: csel r0, r0, r12, ne +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r1, r1, r2, ne +; FULL-NEXT: cmp.w r12, #0 +; FULL-NEXT: csel r1, r1, r12, ne +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptoui double %x to i128 + %spec.store.select = call i128 @llvm.umin.i128(i128 %conv, i128 18446744073709551616) + %conv6 = trunc i128 %spec.store.select to i64 + ret i64 %conv6 +} + +define i64 @ustest_f64i64_mm(double %x) { +; SOFT-LABEL: ustest_f64i64_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, r5, r6, r7, lr} +; SOFT-NEXT: push {r4, r5, r6, r7, lr} +; SOFT-NEXT: .pad #4 +; SOFT-NEXT: sub sp, #4 +; SOFT-NEXT: bl __fixdfti +; SOFT-NEXT: movs r5, #1 +; SOFT-NEXT: 
mov r4, r2 +; SOFT-NEXT: eors r4, r5 +; SOFT-NEXT: movs r6, #0 +; SOFT-NEXT: subs r7, r2, #1 +; SOFT-NEXT: mov r7, r3 +; SOFT-NEXT: sbcs r7, r6 +; SOFT-NEXT: mov r7, r5 +; SOFT-NEXT: blt .LBB47_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r7, r6 +; SOFT-NEXT: .LBB47_2: @ %entry +; SOFT-NEXT: orrs r4, r3 +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: beq .LBB47_25 +; SOFT-NEXT: @ %bb.3: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB47_26 +; SOFT-NEXT: .LBB47_4: @ %entry +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: beq .LBB47_27 +; SOFT-NEXT: .LBB47_5: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB47_7 +; SOFT-NEXT: .LBB47_6: @ %entry +; SOFT-NEXT: mov r1, r4 +; SOFT-NEXT: .LBB47_7: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: mov r7, r0 +; SOFT-NEXT: bne .LBB47_9 +; SOFT-NEXT: @ %bb.8: @ %entry +; SOFT-NEXT: mov r7, r1 +; SOFT-NEXT: .LBB47_9: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: mov r4, r0 +; SOFT-NEXT: beq .LBB47_11 +; SOFT-NEXT: @ %bb.10: @ %entry +; SOFT-NEXT: mov r4, r7 +; SOFT-NEXT: .LBB47_11: @ %entry +; SOFT-NEXT: cmp r2, #1 +; SOFT-NEXT: mov r7, r2 +; SOFT-NEXT: bhs .LBB47_28 +; SOFT-NEXT: @ %bb.12: @ %entry +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bpl .LBB47_29 +; SOFT-NEXT: .LBB47_13: @ %entry +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bne .LBB47_30 +; SOFT-NEXT: .LBB47_14: @ %entry +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bmi .LBB47_16 +; SOFT-NEXT: .LBB47_15: @ %entry +; SOFT-NEXT: mov r3, r6 +; SOFT-NEXT: .LBB47_16: @ %entry +; SOFT-NEXT: rsbs r2, r7, #0 +; SOFT-NEXT: mov r2, r6 +; SOFT-NEXT: sbcs r2, r3 +; SOFT-NEXT: bge .LBB47_31 +; SOFT-NEXT: @ %bb.17: @ %entry +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: beq .LBB47_32 +; SOFT-NEXT: .LBB47_18: @ %entry +; SOFT-NEXT: orrs r7, r3 +; SOFT-NEXT: beq .LBB47_20 +; SOFT-NEXT: .LBB47_19: @ %entry +; SOFT-NEXT: mov r4, r0 +; SOFT-NEXT: .LBB47_20: @ %entry +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: bne .LBB47_22 +; SOFT-NEXT: @ %bb.21: @ %entry +; SOFT-NEXT: mov r0, r5 +; 
SOFT-NEXT: .LBB47_22: @ %entry +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: beq .LBB47_24 +; SOFT-NEXT: @ %bb.23: @ %entry +; SOFT-NEXT: mov r1, r0 +; SOFT-NEXT: .LBB47_24: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: add sp, #4 +; SOFT-NEXT: pop {r4, r5, r6, r7, pc} +; SOFT-NEXT: .LBB47_25: @ %entry +; SOFT-NEXT: mov r0, r7 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB47_4 +; SOFT-NEXT: .LBB47_26: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bne .LBB47_5 +; SOFT-NEXT: .LBB47_27: @ %entry +; SOFT-NEXT: mov r1, r7 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB47_6 +; SOFT-NEXT: b .LBB47_7 +; SOFT-NEXT: .LBB47_28: @ %entry +; SOFT-NEXT: mov r7, r5 +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bmi .LBB47_13 +; SOFT-NEXT: .LBB47_29: @ %entry +; SOFT-NEXT: mov r2, r5 +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: beq .LBB47_14 +; SOFT-NEXT: .LBB47_30: @ %entry +; SOFT-NEXT: mov r7, r2 +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bpl .LBB47_15 +; SOFT-NEXT: b .LBB47_16 +; SOFT-NEXT: .LBB47_31: @ %entry +; SOFT-NEXT: mov r5, r6 +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: bne .LBB47_18 +; SOFT-NEXT: .LBB47_32: @ %entry +; SOFT-NEXT: mov r0, r5 +; SOFT-NEXT: orrs r7, r3 +; SOFT-NEXT: bne .LBB47_19 +; SOFT-NEXT: b .LBB47_20 +; +; VFP2-LABEL: ustest_f64i64_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r4, r5, r7, lr} +; VFP2-NEXT: push {r4, r5, r7, lr} +; VFP2-NEXT: bl __fixdfti +; VFP2-NEXT: subs r4, r2, #1 +; VFP2-NEXT: mov r12, r1 +; VFP2-NEXT: eor r1, r2, #1 +; VFP2-NEXT: sbcs r4, r3, #0 +; VFP2-NEXT: mov.w lr, #0 +; VFP2-NEXT: orr.w r5, r1, r3 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt.w lr, #1 +; VFP2-NEXT: cmp.w lr, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, lr +; VFP2-NEXT: cmp r5, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r5 +; VFP2-NEXT: cmp.w lr, #0 +; VFP2-NEXT: it ne +; VFP2-NEXT: movne lr, r12 +; VFP2-NEXT: cmp r5, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq lr, r5 +; VFP2-NEXT: cmp.w lr, #0 +; VFP2-NEXT: mov r12, lr +; VFP2-NEXT: mov.w r4, #1 +; 
VFP2-NEXT: ite ne +; VFP2-NEXT: movne r12, r0 +; VFP2-NEXT: moveq r12, r0 +; VFP2-NEXT: cmp r2, #1 +; VFP2-NEXT: mov.w r5, #1 +; VFP2-NEXT: mov.w r1, #0 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r5, r2 +; VFP2-NEXT: cmp r3, #0 +; VFP2-NEXT: it mi +; VFP2-NEXT: movmi r4, r2 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r4, r5 +; VFP2-NEXT: it pl +; VFP2-NEXT: movpl r3, r1 +; VFP2-NEXT: rsbs r2, r4, #0 +; VFP2-NEXT: sbcs.w r2, r1, r3 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r1, #1 +; VFP2-NEXT: cmp r1, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r1 +; VFP2-NEXT: orrs.w r2, r4, r3 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r12 +; VFP2-NEXT: cmp r1, #0 +; VFP2-NEXT: it ne +; VFP2-NEXT: movne r1, lr +; VFP2-NEXT: cmp r2, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r1, lr +; VFP2-NEXT: pop {r4, r5, r7, pc} +; +; FULL-LABEL: ustest_f64i64_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r4, lr} +; FULL-NEXT: push {r4, lr} +; FULL-NEXT: bl __fixdfti +; FULL-NEXT: subs.w lr, r2, #1 +; FULL-NEXT: eor r12, r2, #1 +; FULL-NEXT: sbcs lr, r3, #0 +; FULL-NEXT: orr.w r12, r12, r3 +; FULL-NEXT: cset lr, lt +; FULL-NEXT: cmp.w lr, #0 +; FULL-NEXT: csel r0, r0, lr, ne +; FULL-NEXT: cmp.w r12, #0 +; FULL-NEXT: csel r4, r0, r12, ne +; FULL-NEXT: cmp.w lr, #0 +; FULL-NEXT: csel r1, r1, lr, ne +; FULL-NEXT: cmp.w r12, #0 +; FULL-NEXT: csel r1, r1, r12, ne +; FULL-NEXT: mov.w lr, #1 +; FULL-NEXT: cmp r1, #0 +; FULL-NEXT: csel r0, r4, r1, ne +; FULL-NEXT: csel r12, r4, r0, eq +; FULL-NEXT: cmp r2, #1 +; FULL-NEXT: csel r0, r2, lr, lo +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: csel r2, r2, lr, mi +; FULL-NEXT: mov.w lr, #0 +; FULL-NEXT: csel r0, r0, r2, eq +; FULL-NEXT: csel r3, r3, lr, mi +; FULL-NEXT: rsbs r2, r0, #0 +; FULL-NEXT: sbcs.w r2, lr, r3 +; FULL-NEXT: cset r2, lt +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r4, r4, r2, ne +; FULL-NEXT: orrs r3, r0 +; FULL-NEXT: csel r0, r12, r4, eq +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r2, r1, r2, ne +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: csel 
r1, r1, r2, eq +; FULL-NEXT: pop {r4, pc} +entry: + %conv = fptosi double %x to i128 + %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 18446744073709551616) + %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 0) + %conv6 = trunc i128 %spec.store.select7 to i64 + ret i64 %conv6 +} + +define i64 @stest_f32i64_mm(float %x) { +; SOFT-LABEL: stest_f32i64_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, r5, r6, r7, lr} +; SOFT-NEXT: push {r4, r5, r6, r7, lr} +; SOFT-NEXT: .pad #20 +; SOFT-NEXT: sub sp, #20 +; SOFT-NEXT: bl __fixsfti +; SOFT-NEXT: str r0, [sp, #16] @ 4-byte Spill +; SOFT-NEXT: mov r4, r1 +; SOFT-NEXT: mov r7, r3 +; SOFT-NEXT: ldr r0, .LCPI48_0 +; SOFT-NEXT: cmp r1, r0 +; SOFT-NEXT: mov r5, r1 +; SOFT-NEXT: blo .LBB48_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: ldr r5, .LCPI48_0 +; SOFT-NEXT: .LBB48_2: @ %entry +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: mov r1, r4 +; SOFT-NEXT: bmi .LBB48_4 +; SOFT-NEXT: @ %bb.3: @ %entry +; SOFT-NEXT: ldr r1, .LCPI48_0 +; SOFT-NEXT: .LBB48_4: @ %entry +; SOFT-NEXT: str r2, [sp] @ 4-byte Spill +; SOFT-NEXT: mov r0, r2 +; SOFT-NEXT: orrs r0, r7 +; SOFT-NEXT: str r0, [sp, #4] @ 4-byte Spill +; SOFT-NEXT: beq .LBB48_6 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: mov r5, r1 +; SOFT-NEXT: .LBB48_6: @ %entry +; SOFT-NEXT: movs r1, #0 +; SOFT-NEXT: str r1, [sp, #12] @ 4-byte Spill +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: mov r2, r7 +; SOFT-NEXT: bmi .LBB48_8 +; SOFT-NEXT: @ %bb.7: @ %entry +; SOFT-NEXT: ldr r2, [sp, #12] @ 4-byte Reload +; SOFT-NEXT: .LBB48_8: @ %entry +; SOFT-NEXT: movs r1, #1 +; SOFT-NEXT: lsls r1, r1, #31 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: mov r6, r5 +; SOFT-NEXT: bge .LBB48_10 +; SOFT-NEXT: @ %bb.9: @ %entry +; SOFT-NEXT: mov r6, r1 +; SOFT-NEXT: .LBB48_10: @ %entry +; SOFT-NEXT: cmp r5, r1 +; SOFT-NEXT: mov r3, r5 +; SOFT-NEXT: bhi .LBB48_12 +; SOFT-NEXT: @ %bb.11: @ %entry +; SOFT-NEXT: mov r3, r1 +; SOFT-NEXT: .LBB48_12: @ %entry +; SOFT-NEXT: str r3, 
[sp, #8] @ 4-byte Spill +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bne .LBB48_14 +; SOFT-NEXT: @ %bb.13: @ %entry +; SOFT-NEXT: mov r3, r7 +; SOFT-NEXT: b .LBB48_15 +; SOFT-NEXT: .LBB48_14: +; SOFT-NEXT: asrs r3, r7, #31 +; SOFT-NEXT: ldr r0, [sp] @ 4-byte Reload +; SOFT-NEXT: ands r3, r0 +; SOFT-NEXT: .LBB48_15: @ %entry +; SOFT-NEXT: ands r3, r2 +; SOFT-NEXT: adds r0, r3, #1 +; SOFT-NEXT: str r0, [sp] @ 4-byte Spill +; SOFT-NEXT: beq .LBB48_17 +; SOFT-NEXT: @ %bb.16: @ %entry +; SOFT-NEXT: str r6, [sp, #8] @ 4-byte Spill +; SOFT-NEXT: .LBB48_17: @ %entry +; SOFT-NEXT: ldr r3, [sp, #12] @ 4-byte Reload +; SOFT-NEXT: mvns r6, r3 +; SOFT-NEXT: ldr r0, .LCPI48_0 +; SOFT-NEXT: cmp r4, r0 +; SOFT-NEXT: ldr r3, [sp, #16] @ 4-byte Reload +; SOFT-NEXT: blo .LBB48_19 +; SOFT-NEXT: @ %bb.18: @ %entry +; SOFT-NEXT: mov r3, r6 +; SOFT-NEXT: .LBB48_19: @ %entry +; SOFT-NEXT: ldr r0, .LCPI48_0 +; SOFT-NEXT: cmp r4, r0 +; SOFT-NEXT: ldr r4, [sp, #16] @ 4-byte Reload +; SOFT-NEXT: beq .LBB48_21 +; SOFT-NEXT: @ %bb.20: @ %entry +; SOFT-NEXT: mov r4, r3 +; SOFT-NEXT: .LBB48_21: @ %entry +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bmi .LBB48_23 +; SOFT-NEXT: @ %bb.22: @ %entry +; SOFT-NEXT: str r6, [sp, #16] @ 4-byte Spill +; SOFT-NEXT: .LBB48_23: @ %entry +; SOFT-NEXT: ldr r0, [sp, #4] @ 4-byte Reload +; SOFT-NEXT: cmp r0, #0 +; SOFT-NEXT: beq .LBB48_25 +; SOFT-NEXT: @ %bb.24: @ %entry +; SOFT-NEXT: ldr r4, [sp, #16] @ 4-byte Reload +; SOFT-NEXT: .LBB48_25: @ %entry +; SOFT-NEXT: cmp r5, r1 +; SOFT-NEXT: mov r3, r4 +; SOFT-NEXT: bhi .LBB48_27 +; SOFT-NEXT: @ %bb.26: @ %entry +; SOFT-NEXT: ldr r3, [sp, #12] @ 4-byte Reload +; SOFT-NEXT: .LBB48_27: @ %entry +; SOFT-NEXT: cmp r5, r1 +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: beq .LBB48_29 +; SOFT-NEXT: @ %bb.28: @ %entry +; SOFT-NEXT: mov r0, r3 +; SOFT-NEXT: .LBB48_29: @ %entry +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bge .LBB48_31 +; SOFT-NEXT: @ %bb.30: @ %entry +; SOFT-NEXT: ldr r4, [sp, #12] @ 4-byte Reload +; SOFT-NEXT: .LBB48_31: @ %entry +; 
SOFT-NEXT: ldr r1, [sp] @ 4-byte Reload +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: ldr r1, [sp, #8] @ 4-byte Reload +; SOFT-NEXT: beq .LBB48_33 +; SOFT-NEXT: @ %bb.32: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: .LBB48_33: @ %entry +; SOFT-NEXT: add sp, #20 +; SOFT-NEXT: pop {r4, r5, r6, r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.34: +; SOFT-NEXT: .LCPI48_0: +; SOFT-NEXT: .long 2147483647 @ 0x7fffffff +; +; VFP2-LABEL: stest_f32i64_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; VFP2-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; VFP2-NEXT: .pad #4 +; VFP2-NEXT: sub sp, #4 +; VFP2-NEXT: bl __fixsfti +; VFP2-NEXT: mvn r8, #-2147483648 +; VFP2-NEXT: mov r12, r1 +; VFP2-NEXT: cmp r1, r8 +; VFP2-NEXT: mvn r1, #-2147483648 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r1, r12 +; VFP2-NEXT: cmp r3, #0 +; VFP2-NEXT: mvn r4, #-2147483648 +; VFP2-NEXT: mov.w r5, #0 +; VFP2-NEXT: it mi +; VFP2-NEXT: movmi r4, r12 +; VFP2-NEXT: orrs.w r9, r2, r3 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r4, r1 +; VFP2-NEXT: cmp r3, #0 +; VFP2-NEXT: it mi +; VFP2-NEXT: movmi r5, r3 +; VFP2-NEXT: cmp.w r5, #-1 +; VFP2-NEXT: mov.w r7, #-2147483648 +; VFP2-NEXT: mov.w r1, #-2147483648 +; VFP2-NEXT: it gt +; VFP2-NEXT: movgt r7, r4 +; VFP2-NEXT: cmp.w r4, #-2147483648 +; VFP2-NEXT: mov r6, r3 +; VFP2-NEXT: it hi +; VFP2-NEXT: movhi r1, r4 +; VFP2-NEXT: cmp r3, #0 +; VFP2-NEXT: it ne +; VFP2-NEXT: andne.w r6, r2, r6, asr #31 +; VFP2-NEXT: and.w r2, r6, r5 +; VFP2-NEXT: mov.w lr, #0 +; VFP2-NEXT: adds r6, r2, #1 +; VFP2-NEXT: it ne +; VFP2-NEXT: movne r1, r7 +; VFP2-NEXT: mov.w r7, #-1 +; VFP2-NEXT: cmp r12, r8 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r7, r0 +; VFP2-NEXT: mov.w r2, #-1 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r7, r0 +; VFP2-NEXT: cmp r3, #0 +; VFP2-NEXT: it mi +; VFP2-NEXT: movmi r2, r0 +; VFP2-NEXT: cmp.w r9, #0 +; VFP2-NEXT: mov.w r0, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r2, r7 +; VFP2-NEXT: cmp.w r4, #-2147483648 +; VFP2-NEXT: it hi +; 
VFP2-NEXT: movhi r0, r2 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r2 +; VFP2-NEXT: cmp.w r5, #-1 +; VFP2-NEXT: it le +; VFP2-NEXT: movle r2, lr +; VFP2-NEXT: cmp r6, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r2, r0 +; VFP2-NEXT: mov r0, r2 +; VFP2-NEXT: add sp, #4 +; VFP2-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +; +; FULL-LABEL: stest_f32i64_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; FULL-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; FULL-NEXT: .pad #4 +; FULL-NEXT: sub sp, #4 +; FULL-NEXT: bl __fixsfti +; FULL-NEXT: mvn r12, #-2147483648 +; FULL-NEXT: cmp r1, r12 +; FULL-NEXT: csel lr, r1, r12, lo +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: csel r4, r1, r12, mi +; FULL-NEXT: orrs.w r8, r2, r3 +; FULL-NEXT: csel r4, lr, r4, eq +; FULL-NEXT: mov.w lr, #0 +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: mov.w r7, #-2147483648 +; FULL-NEXT: csel r6, r3, lr, mi +; FULL-NEXT: mov r5, r3 +; FULL-NEXT: cmp.w r6, #-1 +; FULL-NEXT: csel r9, r4, r7, gt +; FULL-NEXT: cmp.w r4, #-2147483648 +; FULL-NEXT: csel r7, r4, r7, hi +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: it ne +; FULL-NEXT: andne.w r5, r2, r5, asr #31 +; FULL-NEXT: and.w r2, r5, r6 +; FULL-NEXT: adds r5, r2, #1 +; FULL-NEXT: csel r2, r7, r9, eq +; FULL-NEXT: mov.w r7, #-1 +; FULL-NEXT: cmp r1, r12 +; FULL-NEXT: csel r1, r0, r7, lo +; FULL-NEXT: csel r1, r0, r1, eq +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: csel r0, r0, r7, mi +; FULL-NEXT: cmp.w r8, #0 +; FULL-NEXT: csel r0, r1, r0, eq +; FULL-NEXT: cmp.w r4, #-2147483648 +; FULL-NEXT: csel r1, r0, lr, hi +; FULL-NEXT: csel r1, r0, r1, eq +; FULL-NEXT: cmp.w r6, #-1 +; FULL-NEXT: csel r0, r0, lr, gt +; FULL-NEXT: cmp r5, #0 +; FULL-NEXT: csel r0, r1, r0, eq +; FULL-NEXT: mov r1, r2 +; FULL-NEXT: add sp, #4 +; FULL-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +entry: + %conv = fptosi float %x to i128 + %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 9223372036854775807) + %spec.store.select7 = call i128 @llvm.smax.i128(i128 
%spec.store.select, i128 -9223372036854775808) + %conv6 = trunc i128 %spec.store.select7 to i64 + ret i64 %conv6 +} + +define i64 @utest_f32i64_mm(float %x) { +; SOFT-LABEL: utest_f32i64_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, r5, r6, lr} +; SOFT-NEXT: push {r4, r5, r6, lr} +; SOFT-NEXT: bl __fixunssfti +; SOFT-NEXT: movs r5, #1 +; SOFT-NEXT: movs r6, #0 +; SOFT-NEXT: subs r4, r2, #1 +; SOFT-NEXT: mov r4, r3 +; SOFT-NEXT: sbcs r4, r6 +; SOFT-NEXT: mov r4, r5 +; SOFT-NEXT: blo .LBB49_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r4, r6 +; SOFT-NEXT: .LBB49_2: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB49_4 +; SOFT-NEXT: @ %bb.3: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: .LBB49_4: @ %entry +; SOFT-NEXT: eors r2, r5 +; SOFT-NEXT: orrs r2, r3 +; SOFT-NEXT: beq .LBB49_8 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB49_9 +; SOFT-NEXT: .LBB49_6: @ %entry +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB49_10 +; SOFT-NEXT: .LBB49_7: @ %entry +; SOFT-NEXT: pop {r4, r5, r6, pc} +; SOFT-NEXT: .LBB49_8: @ %entry +; SOFT-NEXT: mov r0, r2 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB49_6 +; SOFT-NEXT: .LBB49_9: @ %entry +; SOFT-NEXT: mov r1, r4 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bne .LBB49_7 +; SOFT-NEXT: .LBB49_10: @ %entry +; SOFT-NEXT: mov r1, r2 +; SOFT-NEXT: pop {r4, r5, r6, pc} +; +; VFP2-LABEL: utest_f32i64_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: bl __fixunssfti +; VFP2-NEXT: eor r12, r2, #1 +; VFP2-NEXT: subs r2, #1 +; VFP2-NEXT: mov.w lr, #0 +; VFP2-NEXT: sbcs r2, r3, #0 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo.w lr, #1 +; VFP2-NEXT: cmp.w lr, #0 +; VFP2-NEXT: orr.w r12, r12, r3 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, lr +; VFP2-NEXT: cmp.w r12, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r12 +; VFP2-NEXT: cmp.w lr, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r1, lr +; VFP2-NEXT: cmp.w r12, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: 
moveq r1, r12 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: utest_f32i64_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: bl __fixunssfti +; FULL-NEXT: eor r12, r2, #1 +; FULL-NEXT: subs r2, #1 +; FULL-NEXT: sbcs r2, r3, #0 +; FULL-NEXT: orr.w r12, r12, r3 +; FULL-NEXT: cset r2, lo +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r0, r0, r2, ne +; FULL-NEXT: cmp.w r12, #0 +; FULL-NEXT: csel r0, r0, r12, ne +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r1, r1, r2, ne +; FULL-NEXT: cmp.w r12, #0 +; FULL-NEXT: csel r1, r1, r12, ne +; FULL-NEXT: pop {r7, pc} +entry: + %conv = fptoui float %x to i128 + %spec.store.select = call i128 @llvm.umin.i128(i128 %conv, i128 18446744073709551616) + %conv6 = trunc i128 %spec.store.select to i64 + ret i64 %conv6 +} + +define i64 @ustest_f32i64_mm(float %x) { +; SOFT-LABEL: ustest_f32i64_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, r5, r6, r7, lr} +; SOFT-NEXT: push {r4, r5, r6, r7, lr} +; SOFT-NEXT: .pad #4 +; SOFT-NEXT: sub sp, #4 +; SOFT-NEXT: bl __fixsfti +; SOFT-NEXT: movs r5, #1 +; SOFT-NEXT: movs r6, #0 +; SOFT-NEXT: subs r4, r2, #1 +; SOFT-NEXT: mov r4, r3 +; SOFT-NEXT: sbcs r4, r6 +; SOFT-NEXT: mov r4, r5 +; SOFT-NEXT: blt .LBB50_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r4, r6 +; SOFT-NEXT: .LBB50_2: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB50_4 +; SOFT-NEXT: @ %bb.3: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: .LBB50_4: @ %entry +; SOFT-NEXT: mov r7, r2 +; SOFT-NEXT: eors r7, r5 +; SOFT-NEXT: orrs r7, r3 +; SOFT-NEXT: beq .LBB50_26 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB50_27 +; SOFT-NEXT: .LBB50_6: @ %entry +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bne .LBB50_8 +; SOFT-NEXT: .LBB50_7: @ %entry +; SOFT-NEXT: mov r1, r7 +; SOFT-NEXT: .LBB50_8: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: mov r7, r0 +; SOFT-NEXT: bne .LBB50_10 +; SOFT-NEXT: @ %bb.9: @ %entry +; SOFT-NEXT: mov r7, r1 +; SOFT-NEXT: 
.LBB50_10: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: mov r4, r0 +; SOFT-NEXT: beq .LBB50_12 +; SOFT-NEXT: @ %bb.11: @ %entry +; SOFT-NEXT: mov r4, r7 +; SOFT-NEXT: .LBB50_12: @ %entry +; SOFT-NEXT: cmp r2, #1 +; SOFT-NEXT: mov r7, r2 +; SOFT-NEXT: bhs .LBB50_28 +; SOFT-NEXT: @ %bb.13: @ %entry +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bpl .LBB50_29 +; SOFT-NEXT: .LBB50_14: @ %entry +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bne .LBB50_30 +; SOFT-NEXT: .LBB50_15: @ %entry +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bmi .LBB50_17 +; SOFT-NEXT: .LBB50_16: @ %entry +; SOFT-NEXT: mov r3, r6 +; SOFT-NEXT: .LBB50_17: @ %entry +; SOFT-NEXT: rsbs r2, r7, #0 +; SOFT-NEXT: mov r2, r6 +; SOFT-NEXT: sbcs r2, r3 +; SOFT-NEXT: bge .LBB50_31 +; SOFT-NEXT: @ %bb.18: @ %entry +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: beq .LBB50_32 +; SOFT-NEXT: .LBB50_19: @ %entry +; SOFT-NEXT: orrs r7, r3 +; SOFT-NEXT: beq .LBB50_21 +; SOFT-NEXT: .LBB50_20: @ %entry +; SOFT-NEXT: mov r4, r0 +; SOFT-NEXT: .LBB50_21: @ %entry +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: bne .LBB50_23 +; SOFT-NEXT: @ %bb.22: @ %entry +; SOFT-NEXT: mov r0, r5 +; SOFT-NEXT: .LBB50_23: @ %entry +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: beq .LBB50_25 +; SOFT-NEXT: @ %bb.24: @ %entry +; SOFT-NEXT: mov r1, r0 +; SOFT-NEXT: .LBB50_25: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: add sp, #4 +; SOFT-NEXT: pop {r4, r5, r6, r7, pc} +; SOFT-NEXT: .LBB50_26: @ %entry +; SOFT-NEXT: mov r0, r7 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB50_6 +; SOFT-NEXT: .LBB50_27: @ %entry +; SOFT-NEXT: mov r1, r4 +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: beq .LBB50_7 +; SOFT-NEXT: b .LBB50_8 +; SOFT-NEXT: .LBB50_28: @ %entry +; SOFT-NEXT: mov r7, r5 +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bmi .LBB50_14 +; SOFT-NEXT: .LBB50_29: @ %entry +; SOFT-NEXT: mov r2, r5 +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: beq .LBB50_15 +; SOFT-NEXT: .LBB50_30: @ %entry +; SOFT-NEXT: mov r7, r2 +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bpl .LBB50_16 +; SOFT-NEXT: b 
.LBB50_17 +; SOFT-NEXT: .LBB50_31: @ %entry +; SOFT-NEXT: mov r5, r6 +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: bne .LBB50_19 +; SOFT-NEXT: .LBB50_32: @ %entry +; SOFT-NEXT: mov r0, r5 +; SOFT-NEXT: orrs r7, r3 +; SOFT-NEXT: bne .LBB50_20 +; SOFT-NEXT: b .LBB50_21 +; +; VFP2-LABEL: ustest_f32i64_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r4, r5, r7, lr} +; VFP2-NEXT: push {r4, r5, r7, lr} +; VFP2-NEXT: bl __fixsfti +; VFP2-NEXT: subs r4, r2, #1 +; VFP2-NEXT: mov r12, r1 +; VFP2-NEXT: eor r1, r2, #1 +; VFP2-NEXT: sbcs r4, r3, #0 +; VFP2-NEXT: mov.w lr, #0 +; VFP2-NEXT: orr.w r5, r1, r3 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt.w lr, #1 +; VFP2-NEXT: cmp.w lr, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, lr +; VFP2-NEXT: cmp r5, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r5 +; VFP2-NEXT: cmp.w lr, #0 +; VFP2-NEXT: it ne +; VFP2-NEXT: movne lr, r12 +; VFP2-NEXT: cmp r5, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq lr, r5 +; VFP2-NEXT: cmp.w lr, #0 +; VFP2-NEXT: mov r12, lr +; VFP2-NEXT: mov.w r4, #1 +; VFP2-NEXT: ite ne +; VFP2-NEXT: movne r12, r0 +; VFP2-NEXT: moveq r12, r0 +; VFP2-NEXT: cmp r2, #1 +; VFP2-NEXT: mov.w r5, #1 +; VFP2-NEXT: mov.w r1, #0 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r5, r2 +; VFP2-NEXT: cmp r3, #0 +; VFP2-NEXT: it mi +; VFP2-NEXT: movmi r4, r2 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r4, r5 +; VFP2-NEXT: it pl +; VFP2-NEXT: movpl r3, r1 +; VFP2-NEXT: rsbs r2, r4, #0 +; VFP2-NEXT: sbcs.w r2, r1, r3 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r1, #1 +; VFP2-NEXT: cmp r1, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r1 +; VFP2-NEXT: orrs.w r2, r4, r3 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r12 +; VFP2-NEXT: cmp r1, #0 +; VFP2-NEXT: it ne +; VFP2-NEXT: movne r1, lr +; VFP2-NEXT: cmp r2, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r1, lr +; VFP2-NEXT: pop {r4, r5, r7, pc} +; +; FULL-LABEL: ustest_f32i64_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r4, lr} +; FULL-NEXT: push {r4, lr} +; FULL-NEXT: bl __fixsfti +; FULL-NEXT: subs.w lr, 
r2, #1 +; FULL-NEXT: eor r12, r2, #1 +; FULL-NEXT: sbcs lr, r3, #0 +; FULL-NEXT: orr.w r12, r12, r3 +; FULL-NEXT: cset lr, lt +; FULL-NEXT: cmp.w lr, #0 +; FULL-NEXT: csel r0, r0, lr, ne +; FULL-NEXT: cmp.w r12, #0 +; FULL-NEXT: csel r4, r0, r12, ne +; FULL-NEXT: cmp.w lr, #0 +; FULL-NEXT: csel r1, r1, lr, ne +; FULL-NEXT: cmp.w r12, #0 +; FULL-NEXT: csel r1, r1, r12, ne +; FULL-NEXT: mov.w lr, #1 +; FULL-NEXT: cmp r1, #0 +; FULL-NEXT: csel r0, r4, r1, ne +; FULL-NEXT: csel r12, r4, r0, eq +; FULL-NEXT: cmp r2, #1 +; FULL-NEXT: csel r0, r2, lr, lo +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: csel r2, r2, lr, mi +; FULL-NEXT: mov.w lr, #0 +; FULL-NEXT: csel r0, r0, r2, eq +; FULL-NEXT: csel r3, r3, lr, mi +; FULL-NEXT: rsbs r2, r0, #0 +; FULL-NEXT: sbcs.w r2, lr, r3 +; FULL-NEXT: cset r2, lt +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r4, r4, r2, ne +; FULL-NEXT: orrs r3, r0 +; FULL-NEXT: csel r0, r12, r4, eq +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r2, r1, r2, ne +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: csel r1, r1, r2, eq +; FULL-NEXT: pop {r4, pc} +entry: + %conv = fptosi float %x to i128 + %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 18446744073709551616) + %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 0) + %conv6 = trunc i128 %spec.store.select7 to i64 + ret i64 %conv6 +} + +define i64 @stest_f16i64_mm(half %x) { +; SOFT-LABEL: stest_f16i64_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, r5, r6, r7, lr} +; SOFT-NEXT: push {r4, r5, r6, r7, lr} +; SOFT-NEXT: .pad #20 +; SOFT-NEXT: sub sp, #20 +; SOFT-NEXT: uxth r0, r0 +; SOFT-NEXT: bl __aeabi_h2f +; SOFT-NEXT: bl __fixsfti +; SOFT-NEXT: str r0, [sp, #16] @ 4-byte Spill +; SOFT-NEXT: mov r4, r1 +; SOFT-NEXT: mov r7, r3 +; SOFT-NEXT: ldr r0, .LCPI51_0 +; SOFT-NEXT: cmp r1, r0 +; SOFT-NEXT: mov r5, r1 +; SOFT-NEXT: blo .LBB51_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: ldr r5, .LCPI51_0 +; SOFT-NEXT: .LBB51_2: @ %entry +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: 
mov r1, r4 +; SOFT-NEXT: bmi .LBB51_4 +; SOFT-NEXT: @ %bb.3: @ %entry +; SOFT-NEXT: ldr r1, .LCPI51_0 +; SOFT-NEXT: .LBB51_4: @ %entry +; SOFT-NEXT: str r2, [sp] @ 4-byte Spill +; SOFT-NEXT: mov r0, r2 +; SOFT-NEXT: orrs r0, r7 +; SOFT-NEXT: str r0, [sp, #4] @ 4-byte Spill +; SOFT-NEXT: beq .LBB51_6 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: mov r5, r1 +; SOFT-NEXT: .LBB51_6: @ %entry +; SOFT-NEXT: movs r1, #0 +; SOFT-NEXT: str r1, [sp, #12] @ 4-byte Spill +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: mov r2, r7 +; SOFT-NEXT: bmi .LBB51_8 +; SOFT-NEXT: @ %bb.7: @ %entry +; SOFT-NEXT: ldr r2, [sp, #12] @ 4-byte Reload +; SOFT-NEXT: .LBB51_8: @ %entry +; SOFT-NEXT: movs r1, #1 +; SOFT-NEXT: lsls r1, r1, #31 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: mov r6, r5 +; SOFT-NEXT: bge .LBB51_10 +; SOFT-NEXT: @ %bb.9: @ %entry +; SOFT-NEXT: mov r6, r1 +; SOFT-NEXT: .LBB51_10: @ %entry +; SOFT-NEXT: cmp r5, r1 +; SOFT-NEXT: mov r3, r5 +; SOFT-NEXT: bhi .LBB51_12 +; SOFT-NEXT: @ %bb.11: @ %entry +; SOFT-NEXT: mov r3, r1 +; SOFT-NEXT: .LBB51_12: @ %entry +; SOFT-NEXT: str r3, [sp, #8] @ 4-byte Spill +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bne .LBB51_14 +; SOFT-NEXT: @ %bb.13: @ %entry +; SOFT-NEXT: mov r3, r7 +; SOFT-NEXT: b .LBB51_15 +; SOFT-NEXT: .LBB51_14: +; SOFT-NEXT: asrs r3, r7, #31 +; SOFT-NEXT: ldr r0, [sp] @ 4-byte Reload +; SOFT-NEXT: ands r3, r0 +; SOFT-NEXT: .LBB51_15: @ %entry +; SOFT-NEXT: ands r3, r2 +; SOFT-NEXT: adds r0, r3, #1 +; SOFT-NEXT: str r0, [sp] @ 4-byte Spill +; SOFT-NEXT: beq .LBB51_17 +; SOFT-NEXT: @ %bb.16: @ %entry +; SOFT-NEXT: str r6, [sp, #8] @ 4-byte Spill +; SOFT-NEXT: .LBB51_17: @ %entry +; SOFT-NEXT: ldr r3, [sp, #12] @ 4-byte Reload +; SOFT-NEXT: mvns r6, r3 +; SOFT-NEXT: ldr r0, .LCPI51_0 +; SOFT-NEXT: cmp r4, r0 +; SOFT-NEXT: ldr r3, [sp, #16] @ 4-byte Reload +; SOFT-NEXT: blo .LBB51_19 +; SOFT-NEXT: @ %bb.18: @ %entry +; SOFT-NEXT: mov r3, r6 +; SOFT-NEXT: .LBB51_19: @ %entry +; SOFT-NEXT: ldr r0, .LCPI51_0 +; SOFT-NEXT: cmp r4, r0 +; 
SOFT-NEXT: ldr r4, [sp, #16] @ 4-byte Reload +; SOFT-NEXT: beq .LBB51_21 +; SOFT-NEXT: @ %bb.20: @ %entry +; SOFT-NEXT: mov r4, r3 +; SOFT-NEXT: .LBB51_21: @ %entry +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bmi .LBB51_23 +; SOFT-NEXT: @ %bb.22: @ %entry +; SOFT-NEXT: str r6, [sp, #16] @ 4-byte Spill +; SOFT-NEXT: .LBB51_23: @ %entry +; SOFT-NEXT: ldr r0, [sp, #4] @ 4-byte Reload +; SOFT-NEXT: cmp r0, #0 +; SOFT-NEXT: beq .LBB51_25 +; SOFT-NEXT: @ %bb.24: @ %entry +; SOFT-NEXT: ldr r4, [sp, #16] @ 4-byte Reload +; SOFT-NEXT: .LBB51_25: @ %entry +; SOFT-NEXT: cmp r5, r1 +; SOFT-NEXT: mov r3, r4 +; SOFT-NEXT: bhi .LBB51_27 +; SOFT-NEXT: @ %bb.26: @ %entry +; SOFT-NEXT: ldr r3, [sp, #12] @ 4-byte Reload +; SOFT-NEXT: .LBB51_27: @ %entry +; SOFT-NEXT: cmp r5, r1 +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: beq .LBB51_29 +; SOFT-NEXT: @ %bb.28: @ %entry +; SOFT-NEXT: mov r0, r3 +; SOFT-NEXT: .LBB51_29: @ %entry +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bge .LBB51_31 +; SOFT-NEXT: @ %bb.30: @ %entry +; SOFT-NEXT: ldr r4, [sp, #12] @ 4-byte Reload +; SOFT-NEXT: .LBB51_31: @ %entry +; SOFT-NEXT: ldr r1, [sp] @ 4-byte Reload +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: ldr r1, [sp, #8] @ 4-byte Reload +; SOFT-NEXT: beq .LBB51_33 +; SOFT-NEXT: @ %bb.32: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: .LBB51_33: @ %entry +; SOFT-NEXT: add sp, #20 +; SOFT-NEXT: pop {r4, r5, r6, r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.34: +; SOFT-NEXT: .LCPI51_0: +; SOFT-NEXT: .long 2147483647 @ 0x7fffffff +; +; VFP2-LABEL: stest_f16i64_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; VFP2-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; VFP2-NEXT: .pad #4 +; VFP2-NEXT: sub sp, #4 +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_h2f +; VFP2-NEXT: vmov s0, r0 +; VFP2-NEXT: bl __fixsfti +; VFP2-NEXT: mvn r8, #-2147483648 +; VFP2-NEXT: mov r12, r1 +; VFP2-NEXT: cmp r1, r8 +; VFP2-NEXT: mvn r1, #-2147483648 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r1, r12 +; VFP2-NEXT: cmp 
r3, #0 +; VFP2-NEXT: mvn r4, #-2147483648 +; VFP2-NEXT: mov.w r5, #0 +; VFP2-NEXT: it mi +; VFP2-NEXT: movmi r4, r12 +; VFP2-NEXT: orrs.w r9, r2, r3 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r4, r1 +; VFP2-NEXT: cmp r3, #0 +; VFP2-NEXT: it mi +; VFP2-NEXT: movmi r5, r3 +; VFP2-NEXT: cmp.w r5, #-1 +; VFP2-NEXT: mov.w r7, #-2147483648 +; VFP2-NEXT: mov.w r1, #-2147483648 +; VFP2-NEXT: it gt +; VFP2-NEXT: movgt r7, r4 +; VFP2-NEXT: cmp.w r4, #-2147483648 +; VFP2-NEXT: mov r6, r3 +; VFP2-NEXT: it hi +; VFP2-NEXT: movhi r1, r4 +; VFP2-NEXT: cmp r3, #0 +; VFP2-NEXT: it ne +; VFP2-NEXT: andne.w r6, r2, r6, asr #31 +; VFP2-NEXT: and.w r2, r6, r5 +; VFP2-NEXT: mov.w lr, #0 +; VFP2-NEXT: adds r6, r2, #1 +; VFP2-NEXT: it ne +; VFP2-NEXT: movne r1, r7 +; VFP2-NEXT: mov.w r7, #-1 +; VFP2-NEXT: cmp r12, r8 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r7, r0 +; VFP2-NEXT: mov.w r2, #-1 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r7, r0 +; VFP2-NEXT: cmp r3, #0 +; VFP2-NEXT: it mi +; VFP2-NEXT: movmi r2, r0 +; VFP2-NEXT: cmp.w r9, #0 +; VFP2-NEXT: mov.w r0, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r2, r7 +; VFP2-NEXT: cmp.w r4, #-2147483648 +; VFP2-NEXT: it hi +; VFP2-NEXT: movhi r0, r2 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r2 +; VFP2-NEXT: cmp.w r5, #-1 +; VFP2-NEXT: it le +; VFP2-NEXT: movle r2, lr +; VFP2-NEXT: cmp r6, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r2, r0 +; VFP2-NEXT: mov r0, r2 +; VFP2-NEXT: add sp, #4 +; VFP2-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +; +; FULL-LABEL: stest_f16i64_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; FULL-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; FULL-NEXT: .pad #4 +; FULL-NEXT: sub sp, #4 +; FULL-NEXT: vmov.f16 r0, s0 +; FULL-NEXT: vmov s0, r0 +; FULL-NEXT: bl __fixhfti +; FULL-NEXT: mvn r12, #-2147483648 +; FULL-NEXT: cmp r1, r12 +; FULL-NEXT: csel lr, r1, r12, lo +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: csel r4, r1, r12, mi +; FULL-NEXT: orrs.w r8, r2, r3 +; FULL-NEXT: csel r4, lr, r4, eq +; 
FULL-NEXT: mov.w lr, #0 +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: mov.w r7, #-2147483648 +; FULL-NEXT: csel r6, r3, lr, mi +; FULL-NEXT: mov r5, r3 +; FULL-NEXT: cmp.w r6, #-1 +; FULL-NEXT: csel r9, r4, r7, gt +; FULL-NEXT: cmp.w r4, #-2147483648 +; FULL-NEXT: csel r7, r4, r7, hi +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: it ne +; FULL-NEXT: andne.w r5, r2, r5, asr #31 +; FULL-NEXT: and.w r2, r5, r6 +; FULL-NEXT: adds r5, r2, #1 +; FULL-NEXT: csel r2, r7, r9, eq +; FULL-NEXT: mov.w r7, #-1 +; FULL-NEXT: cmp r1, r12 +; FULL-NEXT: csel r1, r0, r7, lo +; FULL-NEXT: csel r1, r0, r1, eq +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: csel r0, r0, r7, mi +; FULL-NEXT: cmp.w r8, #0 +; FULL-NEXT: csel r0, r1, r0, eq +; FULL-NEXT: cmp.w r4, #-2147483648 +; FULL-NEXT: csel r1, r0, lr, hi +; FULL-NEXT: csel r1, r0, r1, eq +; FULL-NEXT: cmp.w r6, #-1 +; FULL-NEXT: csel r0, r0, lr, gt +; FULL-NEXT: cmp r5, #0 +; FULL-NEXT: csel r0, r1, r0, eq +; FULL-NEXT: mov r1, r2 +; FULL-NEXT: add sp, #4 +; FULL-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +entry: + %conv = fptosi half %x to i128 + %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 9223372036854775807) + %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 -9223372036854775808) + %conv6 = trunc i128 %spec.store.select7 to i64 + ret i64 %conv6 +} + +define i64 @utesth_f16i64_mm(half %x) { +; SOFT-LABEL: utesth_f16i64_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, r5, r6, lr} +; SOFT-NEXT: push {r4, r5, r6, lr} +; SOFT-NEXT: uxth r0, r0 +; SOFT-NEXT: bl __aeabi_h2f +; SOFT-NEXT: bl __fixunssfti +; SOFT-NEXT: movs r5, #1 +; SOFT-NEXT: movs r6, #0 +; SOFT-NEXT: subs r4, r2, #1 +; SOFT-NEXT: mov r4, r3 +; SOFT-NEXT: sbcs r4, r6 +; SOFT-NEXT: mov r4, r5 +; SOFT-NEXT: blo .LBB52_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r4, r6 +; SOFT-NEXT: .LBB52_2: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB52_4 +; SOFT-NEXT: @ %bb.3: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: .LBB52_4: @ 
%entry +; SOFT-NEXT: eors r2, r5 +; SOFT-NEXT: orrs r2, r3 +; SOFT-NEXT: beq .LBB52_8 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB52_9 +; SOFT-NEXT: .LBB52_6: @ %entry +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB52_10 +; SOFT-NEXT: .LBB52_7: @ %entry +; SOFT-NEXT: pop {r4, r5, r6, pc} +; SOFT-NEXT: .LBB52_8: @ %entry +; SOFT-NEXT: mov r0, r2 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB52_6 +; SOFT-NEXT: .LBB52_9: @ %entry +; SOFT-NEXT: mov r1, r4 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bne .LBB52_7 +; SOFT-NEXT: .LBB52_10: @ %entry +; SOFT-NEXT: mov r1, r2 +; SOFT-NEXT: pop {r4, r5, r6, pc} +; +; VFP2-LABEL: utesth_f16i64_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r7, lr} +; VFP2-NEXT: push {r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_h2f +; VFP2-NEXT: vmov s0, r0 +; VFP2-NEXT: bl __fixunssfti +; VFP2-NEXT: eor r12, r2, #1 +; VFP2-NEXT: subs r2, #1 +; VFP2-NEXT: mov.w lr, #0 +; VFP2-NEXT: sbcs r2, r3, #0 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo.w lr, #1 +; VFP2-NEXT: cmp.w lr, #0 +; VFP2-NEXT: orr.w r12, r12, r3 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, lr +; VFP2-NEXT: cmp.w r12, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r12 +; VFP2-NEXT: cmp.w lr, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r1, lr +; VFP2-NEXT: cmp.w r12, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r1, r12 +; VFP2-NEXT: pop {r7, pc} +; +; FULL-LABEL: utesth_f16i64_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r7, lr} +; FULL-NEXT: push {r7, lr} +; FULL-NEXT: vmov.f16 r0, s0 +; FULL-NEXT: vmov s0, r0 +; FULL-NEXT: bl __fixunshfti +; FULL-NEXT: eor r12, r2, #1 +; FULL-NEXT: subs r2, #1 +; FULL-NEXT: sbcs r2, r3, #0 +; FULL-NEXT: orr.w r12, r12, r3 +; FULL-NEXT: cset r2, lo +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r0, r0, r2, ne +; FULL-NEXT: cmp.w r12, #0 +; FULL-NEXT: csel r0, r0, r12, ne +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r1, r1, r2, ne +; FULL-NEXT: cmp.w r12, #0 +; FULL-NEXT: csel r1, r1, r12, ne +; FULL-NEXT: 
pop {r7, pc} +entry: + %conv = fptoui half %x to i128 + %spec.store.select = call i128 @llvm.umin.i128(i128 %conv, i128 18446744073709551616) + %conv6 = trunc i128 %spec.store.select to i64 + ret i64 %conv6 +} + +define i64 @ustest_f16i64_mm(half %x) { +; SOFT-LABEL: ustest_f16i64_mm: +; SOFT: @ %bb.0: @ %entry +; SOFT-NEXT: .save {r4, r5, r6, r7, lr} +; SOFT-NEXT: push {r4, r5, r6, r7, lr} +; SOFT-NEXT: .pad #4 +; SOFT-NEXT: sub sp, #4 +; SOFT-NEXT: uxth r0, r0 +; SOFT-NEXT: bl __aeabi_h2f +; SOFT-NEXT: bl __fixsfti +; SOFT-NEXT: movs r5, #1 +; SOFT-NEXT: movs r6, #0 +; SOFT-NEXT: subs r4, r2, #1 +; SOFT-NEXT: mov r4, r3 +; SOFT-NEXT: sbcs r4, r6 +; SOFT-NEXT: mov r4, r5 +; SOFT-NEXT: blt .LBB53_2 +; SOFT-NEXT: @ %bb.1: @ %entry +; SOFT-NEXT: mov r4, r6 +; SOFT-NEXT: .LBB53_2: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB53_4 +; SOFT-NEXT: @ %bb.3: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: .LBB53_4: @ %entry +; SOFT-NEXT: mov r7, r2 +; SOFT-NEXT: eors r7, r5 +; SOFT-NEXT: orrs r7, r3 +; SOFT-NEXT: beq .LBB53_26 +; SOFT-NEXT: @ %bb.5: @ %entry +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: beq .LBB53_27 +; SOFT-NEXT: .LBB53_6: @ %entry +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: bne .LBB53_8 +; SOFT-NEXT: .LBB53_7: @ %entry +; SOFT-NEXT: mov r1, r7 +; SOFT-NEXT: .LBB53_8: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: mov r7, r0 +; SOFT-NEXT: bne .LBB53_10 +; SOFT-NEXT: @ %bb.9: @ %entry +; SOFT-NEXT: mov r7, r1 +; SOFT-NEXT: .LBB53_10: @ %entry +; SOFT-NEXT: cmp r1, #0 +; SOFT-NEXT: mov r4, r0 +; SOFT-NEXT: beq .LBB53_12 +; SOFT-NEXT: @ %bb.11: @ %entry +; SOFT-NEXT: mov r4, r7 +; SOFT-NEXT: .LBB53_12: @ %entry +; SOFT-NEXT: cmp r2, #1 +; SOFT-NEXT: mov r7, r2 +; SOFT-NEXT: bhs .LBB53_28 +; SOFT-NEXT: @ %bb.13: @ %entry +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bpl .LBB53_29 +; SOFT-NEXT: .LBB53_14: @ %entry +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bne .LBB53_30 +; SOFT-NEXT: .LBB53_15: @ %entry +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bmi .LBB53_17 +; SOFT-NEXT: 
.LBB53_16: @ %entry +; SOFT-NEXT: mov r3, r6 +; SOFT-NEXT: .LBB53_17: @ %entry +; SOFT-NEXT: rsbs r2, r7, #0 +; SOFT-NEXT: mov r2, r6 +; SOFT-NEXT: sbcs r2, r3 +; SOFT-NEXT: bge .LBB53_31 +; SOFT-NEXT: @ %bb.18: @ %entry +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: beq .LBB53_32 +; SOFT-NEXT: .LBB53_19: @ %entry +; SOFT-NEXT: orrs r7, r3 +; SOFT-NEXT: beq .LBB53_21 +; SOFT-NEXT: .LBB53_20: @ %entry +; SOFT-NEXT: mov r4, r0 +; SOFT-NEXT: .LBB53_21: @ %entry +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: mov r0, r1 +; SOFT-NEXT: bne .LBB53_23 +; SOFT-NEXT: @ %bb.22: @ %entry +; SOFT-NEXT: mov r0, r5 +; SOFT-NEXT: .LBB53_23: @ %entry +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: beq .LBB53_25 +; SOFT-NEXT: @ %bb.24: @ %entry +; SOFT-NEXT: mov r1, r0 +; SOFT-NEXT: .LBB53_25: @ %entry +; SOFT-NEXT: mov r0, r4 +; SOFT-NEXT: add sp, #4 +; SOFT-NEXT: pop {r4, r5, r6, r7, pc} +; SOFT-NEXT: .LBB53_26: @ %entry +; SOFT-NEXT: mov r0, r7 +; SOFT-NEXT: cmp r4, #0 +; SOFT-NEXT: bne .LBB53_6 +; SOFT-NEXT: .LBB53_27: @ %entry +; SOFT-NEXT: mov r1, r4 +; SOFT-NEXT: cmp r7, #0 +; SOFT-NEXT: beq .LBB53_7 +; SOFT-NEXT: b .LBB53_8 +; SOFT-NEXT: .LBB53_28: @ %entry +; SOFT-NEXT: mov r7, r5 +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bmi .LBB53_14 +; SOFT-NEXT: .LBB53_29: @ %entry +; SOFT-NEXT: mov r2, r5 +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: beq .LBB53_15 +; SOFT-NEXT: .LBB53_30: @ %entry +; SOFT-NEXT: mov r7, r2 +; SOFT-NEXT: cmp r3, #0 +; SOFT-NEXT: bpl .LBB53_16 +; SOFT-NEXT: b .LBB53_17 +; SOFT-NEXT: .LBB53_31: @ %entry +; SOFT-NEXT: mov r5, r6 +; SOFT-NEXT: cmp r5, #0 +; SOFT-NEXT: bne .LBB53_19 +; SOFT-NEXT: .LBB53_32: @ %entry +; SOFT-NEXT: mov r0, r5 +; SOFT-NEXT: orrs r7, r3 +; SOFT-NEXT: bne .LBB53_20 +; SOFT-NEXT: b .LBB53_21 +; +; VFP2-LABEL: ustest_f16i64_mm: +; VFP2: @ %bb.0: @ %entry +; VFP2-NEXT: .save {r4, r5, r7, lr} +; VFP2-NEXT: push {r4, r5, r7, lr} +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_h2f +; VFP2-NEXT: vmov s0, r0 +; VFP2-NEXT: bl __fixsfti +; VFP2-NEXT: subs r4, r2, #1 +; 
VFP2-NEXT: mov r12, r1 +; VFP2-NEXT: eor r1, r2, #1 +; VFP2-NEXT: sbcs r4, r3, #0 +; VFP2-NEXT: mov.w lr, #0 +; VFP2-NEXT: orr.w r5, r1, r3 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt.w lr, #1 +; VFP2-NEXT: cmp.w lr, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, lr +; VFP2-NEXT: cmp r5, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r5 +; VFP2-NEXT: cmp.w lr, #0 +; VFP2-NEXT: it ne +; VFP2-NEXT: movne lr, r12 +; VFP2-NEXT: cmp r5, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq lr, r5 +; VFP2-NEXT: cmp.w lr, #0 +; VFP2-NEXT: mov r12, lr +; VFP2-NEXT: mov.w r4, #1 +; VFP2-NEXT: ite ne +; VFP2-NEXT: movne r12, r0 +; VFP2-NEXT: moveq r12, r0 +; VFP2-NEXT: cmp r2, #1 +; VFP2-NEXT: mov.w r5, #1 +; VFP2-NEXT: mov.w r1, #0 +; VFP2-NEXT: it lo +; VFP2-NEXT: movlo r5, r2 +; VFP2-NEXT: cmp r3, #0 +; VFP2-NEXT: it mi +; VFP2-NEXT: movmi r4, r2 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r4, r5 +; VFP2-NEXT: it pl +; VFP2-NEXT: movpl r3, r1 +; VFP2-NEXT: rsbs r2, r4, #0 +; VFP2-NEXT: sbcs.w r2, r1, r3 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r1, #1 +; VFP2-NEXT: cmp r1, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r1 +; VFP2-NEXT: orrs.w r2, r4, r3 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r0, r12 +; VFP2-NEXT: cmp r1, #0 +; VFP2-NEXT: it ne +; VFP2-NEXT: movne r1, lr +; VFP2-NEXT: cmp r2, #0 +; VFP2-NEXT: it eq +; VFP2-NEXT: moveq r1, lr +; VFP2-NEXT: pop {r4, r5, r7, pc} +; +; FULL-LABEL: ustest_f16i64_mm: +; FULL: @ %bb.0: @ %entry +; FULL-NEXT: .save {r4, lr} +; FULL-NEXT: push {r4, lr} +; FULL-NEXT: vmov.f16 r0, s0 +; FULL-NEXT: vmov s0, r0 +; FULL-NEXT: bl __fixhfti +; FULL-NEXT: subs.w lr, r2, #1 +; FULL-NEXT: eor r12, r2, #1 +; FULL-NEXT: sbcs lr, r3, #0 +; FULL-NEXT: orr.w r12, r12, r3 +; FULL-NEXT: cset lr, lt +; FULL-NEXT: cmp.w lr, #0 +; FULL-NEXT: csel r0, r0, lr, ne +; FULL-NEXT: cmp.w r12, #0 +; FULL-NEXT: csel r4, r0, r12, ne +; FULL-NEXT: cmp.w lr, #0 +; FULL-NEXT: csel r1, r1, lr, ne +; FULL-NEXT: cmp.w r12, #0 +; FULL-NEXT: csel r1, r1, r12, ne +; FULL-NEXT: mov.w lr, #1 
+; FULL-NEXT: cmp r1, #0 +; FULL-NEXT: csel r0, r4, r1, ne +; FULL-NEXT: csel r12, r4, r0, eq +; FULL-NEXT: cmp r2, #1 +; FULL-NEXT: csel r0, r2, lr, lo +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: csel r2, r2, lr, mi +; FULL-NEXT: mov.w lr, #0 +; FULL-NEXT: csel r0, r0, r2, eq +; FULL-NEXT: csel r3, r3, lr, mi +; FULL-NEXT: rsbs r2, r0, #0 +; FULL-NEXT: sbcs.w r2, lr, r3 +; FULL-NEXT: cset r2, lt +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r4, r4, r2, ne +; FULL-NEXT: orrs r3, r0 +; FULL-NEXT: csel r0, r12, r4, eq +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r2, r1, r2, ne +; FULL-NEXT: cmp r3, #0 +; FULL-NEXT: csel r1, r1, r2, eq +; FULL-NEXT: pop {r4, pc} +entry: + %conv = fptosi half %x to i128 + %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 18446744073709551616) + %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 0) + %conv6 = trunc i128 %spec.store.select7 to i64 + ret i64 %conv6 +} + + +define void @unroll_maxmin(i32* nocapture %0, float* nocapture readonly %1, i32 %2) { +; SOFT-LABEL: unroll_maxmin: +; SOFT: @ %bb.0: +; SOFT-NEXT: .save {r4, r5, r6, r7, lr} +; SOFT-NEXT: push {r4, r5, r6, r7, lr} +; SOFT-NEXT: .pad #20 +; SOFT-NEXT: sub sp, #20 +; SOFT-NEXT: mov r4, r1 +; SOFT-NEXT: mov r5, r0 +; SOFT-NEXT: movs r0, #0 +; SOFT-NEXT: str r0, [sp, #16] @ 4-byte Spill +; SOFT-NEXT: mvns r0, r0 +; SOFT-NEXT: str r0, [sp, #8] @ 4-byte Spill +; SOFT-NEXT: movs r0, #1 +; SOFT-NEXT: lsls r1, r0, #31 +; SOFT-NEXT: str r1, [sp, #12] @ 4-byte Spill +; SOFT-NEXT: str r0, [sp, #4] @ 4-byte Spill +; SOFT-NEXT: lsls r7, r0, #10 +; SOFT-NEXT: b .LBB54_2 +; SOFT-NEXT: .LBB54_1: @ in Loop: Header=BB54_2 Depth=1 +; SOFT-NEXT: str r0, [r5, #4] +; SOFT-NEXT: adds r4, #8 +; SOFT-NEXT: adds r5, #8 +; SOFT-NEXT: subs r7, r7, #2 +; SOFT-NEXT: beq .LBB54_18 +; SOFT-NEXT: .LBB54_2: @ =>This Inner Loop Header: Depth=1 +; SOFT-NEXT: ldr r0, [r4] +; SOFT-NEXT: movs r1, #79 +; SOFT-NEXT: lsls r6, r1, #24 +; SOFT-NEXT: mov r1, r6 +; SOFT-NEXT: bl 
__aeabi_fmul +; SOFT-NEXT: bl __aeabi_f2lz +; SOFT-NEXT: ldr r2, .LCPI54_0 +; SOFT-NEXT: subs r2, r0, r2 +; SOFT-NEXT: mov r2, r1 +; SOFT-NEXT: ldr r3, [sp, #16] @ 4-byte Reload +; SOFT-NEXT: sbcs r2, r3 +; SOFT-NEXT: ldr r2, [sp, #4] @ 4-byte Reload +; SOFT-NEXT: bge .LBB54_14 +; SOFT-NEXT: @ %bb.3: @ in Loop: Header=BB54_2 Depth=1 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB54_15 +; SOFT-NEXT: .LBB54_4: @ in Loop: Header=BB54_2 Depth=1 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bne .LBB54_6 +; SOFT-NEXT: .LBB54_5: @ in Loop: Header=BB54_2 Depth=1 +; SOFT-NEXT: ldr r0, .LCPI54_0 +; SOFT-NEXT: .LBB54_6: @ in Loop: Header=BB54_2 Depth=1 +; SOFT-NEXT: ldr r2, [sp, #12] @ 4-byte Reload +; SOFT-NEXT: subs r2, r2, r0 +; SOFT-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; SOFT-NEXT: sbcs r2, r1 +; SOFT-NEXT: blt .LBB54_8 +; SOFT-NEXT: @ %bb.7: @ in Loop: Header=BB54_2 Depth=1 +; SOFT-NEXT: ldr r0, [sp, #12] @ 4-byte Reload +; SOFT-NEXT: .LBB54_8: @ in Loop: Header=BB54_2 Depth=1 +; SOFT-NEXT: str r0, [r5] +; SOFT-NEXT: ldr r0, [r4, #4] +; SOFT-NEXT: mov r1, r6 +; SOFT-NEXT: bl __aeabi_fmul +; SOFT-NEXT: bl __aeabi_f2lz +; SOFT-NEXT: ldr r2, .LCPI54_0 +; SOFT-NEXT: subs r2, r0, r2 +; SOFT-NEXT: mov r2, r1 +; SOFT-NEXT: ldr r3, [sp, #16] @ 4-byte Reload +; SOFT-NEXT: sbcs r2, r3 +; SOFT-NEXT: ldr r2, [sp, #4] @ 4-byte Reload +; SOFT-NEXT: bge .LBB54_16 +; SOFT-NEXT: @ %bb.9: @ in Loop: Header=BB54_2 Depth=1 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB54_17 +; SOFT-NEXT: .LBB54_10: @ in Loop: Header=BB54_2 Depth=1 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bne .LBB54_12 +; SOFT-NEXT: .LBB54_11: @ in Loop: Header=BB54_2 Depth=1 +; SOFT-NEXT: ldr r0, .LCPI54_0 +; SOFT-NEXT: .LBB54_12: @ in Loop: Header=BB54_2 Depth=1 +; SOFT-NEXT: ldr r2, [sp, #12] @ 4-byte Reload +; SOFT-NEXT: subs r2, r2, r0 +; SOFT-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; SOFT-NEXT: sbcs r2, r1 +; SOFT-NEXT: blt .LBB54_1 +; SOFT-NEXT: @ %bb.13: @ in Loop: Header=BB54_2 Depth=1 +; SOFT-NEXT: ldr r0, [sp, #12] @ 
4-byte Reload +; SOFT-NEXT: b .LBB54_1 +; SOFT-NEXT: .LBB54_14: @ in Loop: Header=BB54_2 Depth=1 +; SOFT-NEXT: ldr r2, [sp, #16] @ 4-byte Reload +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bne .LBB54_4 +; SOFT-NEXT: .LBB54_15: @ in Loop: Header=BB54_2 Depth=1 +; SOFT-NEXT: mov r1, r2 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB54_5 +; SOFT-NEXT: b .LBB54_6 +; SOFT-NEXT: .LBB54_16: @ in Loop: Header=BB54_2 Depth=1 +; SOFT-NEXT: ldr r2, [sp, #16] @ 4-byte Reload +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bne .LBB54_10 +; SOFT-NEXT: .LBB54_17: @ in Loop: Header=BB54_2 Depth=1 +; SOFT-NEXT: mov r1, r2 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB54_11 +; SOFT-NEXT: b .LBB54_12 +; SOFT-NEXT: .LBB54_18: +; SOFT-NEXT: add sp, #20 +; SOFT-NEXT: pop {r4, r5, r6, r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.19: +; SOFT-NEXT: .LCPI54_0: +; SOFT-NEXT: .long 2147483647 @ 0x7fffffff +; +; VFP2-LABEL: unroll_maxmin: +; VFP2: @ %bb.0: +; VFP2-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; VFP2-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; VFP2-NEXT: .pad #4 +; VFP2-NEXT: sub sp, #4 +; VFP2-NEXT: .vsave {d8} +; VFP2-NEXT: vpush {d8} +; VFP2-NEXT: sub.w r4, r1, #8 +; VFP2-NEXT: sub.w r5, r0, #8 +; VFP2-NEXT: vldr s16, .LCPI54_0 +; VFP2-NEXT: mov.w r8, #-1 +; VFP2-NEXT: mov.w r9, #-2147483648 +; VFP2-NEXT: mov.w r6, #1024 +; VFP2-NEXT: mvn r7, #-2147483648 +; VFP2-NEXT: .LBB54_1: @ =>This Inner Loop Header: Depth=1 +; VFP2-NEXT: vldr s0, [r4, #8] +; VFP2-NEXT: vmul.f32 s0, s0, s16 +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_f2lz +; VFP2-NEXT: subs r2, r0, r7 +; VFP2-NEXT: sbcs r2, r1, #0 +; VFP2-NEXT: mov.w r2, #0 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r2, #1 +; VFP2-NEXT: cmp r2, #0 +; VFP2-NEXT: ite ne +; VFP2-NEXT: movne r2, r1 +; VFP2-NEXT: moveq r0, r7 +; VFP2-NEXT: subs.w r1, r9, r0 +; VFP2-NEXT: sbcs.w r1, r8, r2 +; VFP2-NEXT: it ge +; VFP2-NEXT: movge r0, r9 +; VFP2-NEXT: str r0, [r5, #8]! 
+; VFP2-NEXT: vldr s0, [r4, #12] +; VFP2-NEXT: vmul.f32 s0, s0, s16 +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_f2lz +; VFP2-NEXT: subs r2, r0, r7 +; VFP2-NEXT: add.w r4, r4, #8 +; VFP2-NEXT: sbcs r2, r1, #0 +; VFP2-NEXT: mov.w r2, #0 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r2, #1 +; VFP2-NEXT: cmp r2, #0 +; VFP2-NEXT: ite ne +; VFP2-NEXT: movne r2, r1 +; VFP2-NEXT: moveq r0, r7 +; VFP2-NEXT: subs.w r1, r9, r0 +; VFP2-NEXT: sbcs.w r1, r8, r2 +; VFP2-NEXT: it ge +; VFP2-NEXT: movge r0, r9 +; VFP2-NEXT: subs r6, #2 +; VFP2-NEXT: str r0, [r5, #4] +; VFP2-NEXT: bne .LBB54_1 +; VFP2-NEXT: @ %bb.2: +; VFP2-NEXT: vpop {d8} +; VFP2-NEXT: add sp, #4 +; VFP2-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +; VFP2-NEXT: .p2align 2 +; VFP2-NEXT: @ %bb.3: +; VFP2-NEXT: .LCPI54_0: +; VFP2-NEXT: .long 0x4f000000 @ float 2.14748365E+9 +; +; FULL-LABEL: unroll_maxmin: +; FULL: @ %bb.0: +; FULL-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; FULL-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; FULL-NEXT: .pad #4 +; FULL-NEXT: sub sp, #4 +; FULL-NEXT: .vsave {d8} +; FULL-NEXT: vpush {d8} +; FULL-NEXT: mov.w r2, #512 +; FULL-NEXT: sub.w r5, r1, #8 +; FULL-NEXT: sub.w r6, r0, #8 +; FULL-NEXT: vldr s16, .LCPI54_0 +; FULL-NEXT: mov r4, r2 +; FULL-NEXT: mov.w r8, #-1 +; FULL-NEXT: mov.w r9, #-2147483648 +; FULL-NEXT: mvn r7, #-2147483648 +; FULL-NEXT: .LBB54_1: @ =>This Inner Loop Header: Depth=1 +; FULL-NEXT: vldr s0, [r5, #8] +; FULL-NEXT: vmul.f32 s0, s0, s16 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: bl __aeabi_f2lz +; FULL-NEXT: subs r2, r0, r7 +; FULL-NEXT: sbcs r2, r1, #0 +; FULL-NEXT: cset r2, lt +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r0, r0, r7, ne +; FULL-NEXT: csel r1, r1, r2, ne +; FULL-NEXT: subs.w r2, r9, r0 +; FULL-NEXT: sbcs.w r1, r8, r1 +; FULL-NEXT: csel r0, r0, r9, lt +; FULL-NEXT: str r0, [r6, #8]! 
+; FULL-NEXT: vldr s0, [r5, #12] +; FULL-NEXT: vmul.f32 s0, s0, s16 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: bl __aeabi_f2lz +; FULL-NEXT: subs r2, r0, r7 +; FULL-NEXT: add.w r5, r5, #8 +; FULL-NEXT: sbcs r2, r1, #0 +; FULL-NEXT: sub.w r4, r4, #1 +; FULL-NEXT: cset r2, lt +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r0, r0, r7, ne +; FULL-NEXT: csel r1, r1, r2, ne +; FULL-NEXT: subs.w r2, r9, r0 +; FULL-NEXT: sbcs.w r1, r8, r1 +; FULL-NEXT: csel r0, r0, r9, lt +; FULL-NEXT: str r0, [r6, #4] +; FULL-NEXT: cbz r4, .LBB54_2 +; FULL-NEXT: le .LBB54_1 +; FULL-NEXT: .LBB54_2: +; FULL-NEXT: vpop {d8} +; FULL-NEXT: add sp, #4 +; FULL-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +; FULL-NEXT: .p2align 2 +; FULL-NEXT: @ %bb.3: +; FULL-NEXT: .LCPI54_0: +; FULL-NEXT: .long 0x4f000000 @ float 2.14748365E+9 + br label %5 + +4: ; preds = %5 + ret void + +5: ; preds = %5, %3 + %6 = phi i32 [ 0, %3 ], [ %28, %5 ] + %7 = getelementptr inbounds float, float* %1, i32 %6 + %8 = load float, float* %7, align 4 + %9 = fmul float %8, 0x41E0000000000000 + %10 = fptosi float %9 to i64 + %11 = icmp slt i64 %10, 2147483647 + %12 = select i1 %11, i64 %10, i64 2147483647 + %13 = icmp sgt i64 %12, -2147483648 + %14 = select i1 %13, i64 %12, i64 -2147483648 + %15 = trunc i64 %14 to i32 + %16 = getelementptr inbounds i32, i32* %0, i32 %6 + store i32 %15, i32* %16, align 4 + %17 = or i32 %6, 1 + %18 = getelementptr inbounds float, float* %1, i32 %17 + %19 = load float, float* %18, align 4 + %20 = fmul float %19, 0x41E0000000000000 + %21 = fptosi float %20 to i64 + %22 = icmp slt i64 %21, 2147483647 + %23 = select i1 %22, i64 %21, i64 2147483647 + %24 = icmp sgt i64 %23, -2147483648 + %25 = select i1 %24, i64 %23, i64 -2147483648 + %26 = trunc i64 %25 to i32 + %27 = getelementptr inbounds i32, i32* %0, i32 %17 + store i32 %26, i32* %27, align 4 + %28 = add nuw nsw i32 %6, 2 + %29 = icmp eq i32 %28, 1024 + br i1 %29, label %4, label %5 +} + +define void @unroll_minmax(i32* nocapture %0, float* nocapture 
readonly %1, i32 %2) { +; SOFT-LABEL: unroll_minmax: +; SOFT: @ %bb.0: +; SOFT-NEXT: .save {r4, r5, r6, r7, lr} +; SOFT-NEXT: push {r4, r5, r6, r7, lr} +; SOFT-NEXT: .pad #20 +; SOFT-NEXT: sub sp, #20 +; SOFT-NEXT: mov r4, r1 +; SOFT-NEXT: mov r5, r0 +; SOFT-NEXT: movs r0, #0 +; SOFT-NEXT: str r0, [sp, #16] @ 4-byte Spill +; SOFT-NEXT: mvns r0, r0 +; SOFT-NEXT: str r0, [sp, #12] @ 4-byte Spill +; SOFT-NEXT: movs r0, #1 +; SOFT-NEXT: lsls r1, r0, #31 +; SOFT-NEXT: str r1, [sp, #8] @ 4-byte Spill +; SOFT-NEXT: str r0, [sp, #4] @ 4-byte Spill +; SOFT-NEXT: lsls r7, r0, #10 +; SOFT-NEXT: b .LBB55_2 +; SOFT-NEXT: .LBB55_1: @ in Loop: Header=BB55_2 Depth=1 +; SOFT-NEXT: str r0, [r5, #4] +; SOFT-NEXT: adds r4, #8 +; SOFT-NEXT: adds r5, #8 +; SOFT-NEXT: subs r7, r7, #2 +; SOFT-NEXT: beq .LBB55_18 +; SOFT-NEXT: .LBB55_2: @ =>This Inner Loop Header: Depth=1 +; SOFT-NEXT: ldr r0, [r4] +; SOFT-NEXT: movs r1, #79 +; SOFT-NEXT: lsls r6, r1, #24 +; SOFT-NEXT: mov r1, r6 +; SOFT-NEXT: bl __aeabi_fmul +; SOFT-NEXT: bl __aeabi_f2lz +; SOFT-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; SOFT-NEXT: subs r2, r2, r0 +; SOFT-NEXT: ldr r2, [sp, #12] @ 4-byte Reload +; SOFT-NEXT: sbcs r2, r1 +; SOFT-NEXT: ldr r2, [sp, #4] @ 4-byte Reload +; SOFT-NEXT: bge .LBB55_14 +; SOFT-NEXT: @ %bb.3: @ in Loop: Header=BB55_2 Depth=1 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB55_15 +; SOFT-NEXT: .LBB55_4: @ in Loop: Header=BB55_2 Depth=1 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bne .LBB55_6 +; SOFT-NEXT: .LBB55_5: @ in Loop: Header=BB55_2 Depth=1 +; SOFT-NEXT: ldr r0, [sp, #8] @ 4-byte Reload +; SOFT-NEXT: .LBB55_6: @ in Loop: Header=BB55_2 Depth=1 +; SOFT-NEXT: ldr r2, .LCPI55_0 +; SOFT-NEXT: subs r2, r0, r2 +; SOFT-NEXT: ldr r2, [sp, #16] @ 4-byte Reload +; SOFT-NEXT: sbcs r1, r2 +; SOFT-NEXT: blt .LBB55_8 +; SOFT-NEXT: @ %bb.7: @ in Loop: Header=BB55_2 Depth=1 +; SOFT-NEXT: ldr r0, .LCPI55_0 +; SOFT-NEXT: .LBB55_8: @ in Loop: Header=BB55_2 Depth=1 +; SOFT-NEXT: str r0, [r5] +; SOFT-NEXT: ldr r0, [r4, 
#4] +; SOFT-NEXT: mov r1, r6 +; SOFT-NEXT: bl __aeabi_fmul +; SOFT-NEXT: bl __aeabi_f2lz +; SOFT-NEXT: ldr r2, [sp, #8] @ 4-byte Reload +; SOFT-NEXT: subs r2, r2, r0 +; SOFT-NEXT: ldr r2, [sp, #12] @ 4-byte Reload +; SOFT-NEXT: sbcs r2, r1 +; SOFT-NEXT: ldr r2, [sp, #4] @ 4-byte Reload +; SOFT-NEXT: bge .LBB55_16 +; SOFT-NEXT: @ %bb.9: @ in Loop: Header=BB55_2 Depth=1 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB55_17 +; SOFT-NEXT: .LBB55_10: @ in Loop: Header=BB55_2 Depth=1 +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bne .LBB55_12 +; SOFT-NEXT: .LBB55_11: @ in Loop: Header=BB55_2 Depth=1 +; SOFT-NEXT: ldr r0, [sp, #8] @ 4-byte Reload +; SOFT-NEXT: .LBB55_12: @ in Loop: Header=BB55_2 Depth=1 +; SOFT-NEXT: ldr r2, .LCPI55_0 +; SOFT-NEXT: subs r2, r0, r2 +; SOFT-NEXT: ldr r2, [sp, #16] @ 4-byte Reload +; SOFT-NEXT: sbcs r1, r2 +; SOFT-NEXT: blt .LBB55_1 +; SOFT-NEXT: @ %bb.13: @ in Loop: Header=BB55_2 Depth=1 +; SOFT-NEXT: ldr r0, .LCPI55_0 +; SOFT-NEXT: b .LBB55_1 +; SOFT-NEXT: .LBB55_14: @ in Loop: Header=BB55_2 Depth=1 +; SOFT-NEXT: ldr r2, [sp, #16] @ 4-byte Reload +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bne .LBB55_4 +; SOFT-NEXT: .LBB55_15: @ in Loop: Header=BB55_2 Depth=1 +; SOFT-NEXT: ldr r1, [sp, #12] @ 4-byte Reload +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB55_5 +; SOFT-NEXT: b .LBB55_6 +; SOFT-NEXT: .LBB55_16: @ in Loop: Header=BB55_2 Depth=1 +; SOFT-NEXT: ldr r2, [sp, #16] @ 4-byte Reload +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: bne .LBB55_10 +; SOFT-NEXT: .LBB55_17: @ in Loop: Header=BB55_2 Depth=1 +; SOFT-NEXT: ldr r1, [sp, #12] @ 4-byte Reload +; SOFT-NEXT: cmp r2, #0 +; SOFT-NEXT: beq .LBB55_11 +; SOFT-NEXT: b .LBB55_12 +; SOFT-NEXT: .LBB55_18: +; SOFT-NEXT: add sp, #20 +; SOFT-NEXT: pop {r4, r5, r6, r7, pc} +; SOFT-NEXT: .p2align 2 +; SOFT-NEXT: @ %bb.19: +; SOFT-NEXT: .LCPI55_0: +; SOFT-NEXT: .long 2147483647 @ 0x7fffffff +; +; VFP2-LABEL: unroll_minmax: +; VFP2: @ %bb.0: +; VFP2-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; VFP2-NEXT: push.w {r4, r5, 
r6, r7, r8, r9, lr} +; VFP2-NEXT: .pad #4 +; VFP2-NEXT: sub sp, #4 +; VFP2-NEXT: .vsave {d8} +; VFP2-NEXT: vpush {d8} +; VFP2-NEXT: sub.w r4, r1, #8 +; VFP2-NEXT: sub.w r5, r0, #8 +; VFP2-NEXT: vldr s16, .LCPI55_0 +; VFP2-NEXT: mov.w r8, #-1 +; VFP2-NEXT: mov.w r9, #-2147483648 +; VFP2-NEXT: mov.w r6, #1024 +; VFP2-NEXT: mvn r7, #-2147483648 +; VFP2-NEXT: .LBB55_1: @ =>This Inner Loop Header: Depth=1 +; VFP2-NEXT: vldr s0, [r4, #8] +; VFP2-NEXT: vmul.f32 s0, s0, s16 +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_f2lz +; VFP2-NEXT: subs.w r2, r9, r0 +; VFP2-NEXT: sbcs.w r2, r8, r1 +; VFP2-NEXT: mov.w r2, #0 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r2, #1 +; VFP2-NEXT: cmp r2, #0 +; VFP2-NEXT: itt eq +; VFP2-NEXT: moveq r1, r8 +; VFP2-NEXT: moveq r0, r9 +; VFP2-NEXT: subs r2, r0, r7 +; VFP2-NEXT: sbcs r1, r1, #0 +; VFP2-NEXT: it ge +; VFP2-NEXT: movge r0, r7 +; VFP2-NEXT: str r0, [r5, #8]! +; VFP2-NEXT: vldr s0, [r4, #12] +; VFP2-NEXT: vmul.f32 s0, s0, s16 +; VFP2-NEXT: vmov r0, s0 +; VFP2-NEXT: bl __aeabi_f2lz +; VFP2-NEXT: subs.w r2, r9, r0 +; VFP2-NEXT: add.w r4, r4, #8 +; VFP2-NEXT: sbcs.w r2, r8, r1 +; VFP2-NEXT: mov.w r2, #0 +; VFP2-NEXT: it lt +; VFP2-NEXT: movlt r2, #1 +; VFP2-NEXT: cmp r2, #0 +; VFP2-NEXT: itt eq +; VFP2-NEXT: moveq r1, r8 +; VFP2-NEXT: moveq r0, r9 +; VFP2-NEXT: subs r2, r0, r7 +; VFP2-NEXT: sbcs r1, r1, #0 +; VFP2-NEXT: it ge +; VFP2-NEXT: movge r0, r7 +; VFP2-NEXT: subs r6, #2 +; VFP2-NEXT: str r0, [r5, #4] +; VFP2-NEXT: bne .LBB55_1 +; VFP2-NEXT: @ %bb.2: +; VFP2-NEXT: vpop {d8} +; VFP2-NEXT: add sp, #4 +; VFP2-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +; VFP2-NEXT: .p2align 2 +; VFP2-NEXT: @ %bb.3: +; VFP2-NEXT: .LCPI55_0: +; VFP2-NEXT: .long 0x4f000000 @ float 2.14748365E+9 +; +; FULL-LABEL: unroll_minmax: +; FULL: @ %bb.0: +; FULL-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; FULL-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} +; FULL-NEXT: .pad #4 +; FULL-NEXT: sub sp, #4 +; FULL-NEXT: .vsave {d8} +; FULL-NEXT: vpush {d8} +; FULL-NEXT: 
mov.w r2, #512 +; FULL-NEXT: sub.w r5, r1, #8 +; FULL-NEXT: sub.w r6, r0, #8 +; FULL-NEXT: vldr s16, .LCPI55_0 +; FULL-NEXT: mov r4, r2 +; FULL-NEXT: mov.w r8, #-1 +; FULL-NEXT: mov.w r9, #-2147483648 +; FULL-NEXT: mvn r7, #-2147483648 +; FULL-NEXT: .LBB55_1: @ =>This Inner Loop Header: Depth=1 +; FULL-NEXT: vldr s0, [r5, #8] +; FULL-NEXT: vmul.f32 s0, s0, s16 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: bl __aeabi_f2lz +; FULL-NEXT: subs.w r2, r9, r0 +; FULL-NEXT: sbcs.w r2, r8, r1 +; FULL-NEXT: cset r2, lt +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r0, r0, r9, ne +; FULL-NEXT: csel r1, r1, r8, ne +; FULL-NEXT: subs r2, r0, r7 +; FULL-NEXT: sbcs r1, r1, #0 +; FULL-NEXT: csel r0, r0, r7, lt +; FULL-NEXT: str r0, [r6, #8]! +; FULL-NEXT: vldr s0, [r5, #12] +; FULL-NEXT: vmul.f32 s0, s0, s16 +; FULL-NEXT: vmov r0, s0 +; FULL-NEXT: bl __aeabi_f2lz +; FULL-NEXT: subs.w r2, r9, r0 +; FULL-NEXT: add.w r5, r5, #8 +; FULL-NEXT: sbcs.w r2, r8, r1 +; FULL-NEXT: sub.w r4, r4, #1 +; FULL-NEXT: cset r2, lt +; FULL-NEXT: cmp r2, #0 +; FULL-NEXT: csel r0, r0, r9, ne +; FULL-NEXT: csel r1, r1, r8, ne +; FULL-NEXT: subs r2, r0, r7 +; FULL-NEXT: sbcs r1, r1, #0 +; FULL-NEXT: csel r0, r0, r7, lt +; FULL-NEXT: str r0, [r6, #4] +; FULL-NEXT: cbz r4, .LBB55_2 +; FULL-NEXT: le .LBB55_1 +; FULL-NEXT: .LBB55_2: +; FULL-NEXT: vpop {d8} +; FULL-NEXT: add sp, #4 +; FULL-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +; FULL-NEXT: .p2align 2 +; FULL-NEXT: @ %bb.3: +; FULL-NEXT: .LCPI55_0: +; FULL-NEXT: .long 0x4f000000 @ float 2.14748365E+9 + br label %5 + +4: ; preds = %5 + ret void + +5: ; preds = %5, %3 + %6 = phi i32 [ 0, %3 ], [ %28, %5 ] + %7 = getelementptr inbounds float, float* %1, i32 %6 + %8 = load float, float* %7, align 4 + %9 = fmul float %8, 0x41E0000000000000 + %10 = fptosi float %9 to i64 + %11 = icmp sgt i64 %10, -2147483648 + %12 = select i1 %11, i64 %10, i64 -2147483648 + %13 = icmp slt i64 %12, 2147483647 + %14 = select i1 %13, i64 %12, i64 2147483647 + %15 = trunc i64 %14 to i32 + 
%16 = getelementptr inbounds i32, i32* %0, i32 %6 + store i32 %15, i32* %16, align 4 + %17 = or i32 %6, 1 + %18 = getelementptr inbounds float, float* %1, i32 %17 + %19 = load float, float* %18, align 4 + %20 = fmul float %19, 0x41E0000000000000 + %21 = fptosi float %20 to i64 + %22 = icmp sgt i64 %21, -2147483648 + %23 = select i1 %22, i64 %21, i64 -2147483648 + %24 = icmp slt i64 %23, 2147483647 + %25 = select i1 %24, i64 %23, i64 2147483647 + %26 = trunc i64 %25 to i32 + %27 = getelementptr inbounds i32, i32* %0, i32 %17 + store i32 %26, i32* %27, align 4 + %28 = add nuw nsw i32 %6, 2 + %29 = icmp eq i32 %28, 1024 + br i1 %29, label %4, label %5 +} + +declare i32 @llvm.smin.i32(i32, i32) +declare i32 @llvm.smax.i32(i32, i32) +declare i32 @llvm.umin.i32(i32, i32) +declare i64 @llvm.smin.i64(i64, i64) +declare i64 @llvm.smax.i64(i64, i64) +declare i64 @llvm.umin.i64(i64, i64) +declare i128 @llvm.smin.i128(i128, i128) +declare i128 @llvm.smax.i128(i128, i128) +declare i128 @llvm.umin.i128(i128, i128) diff --git a/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll b/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll --- a/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll +++ b/llvm/test/CodeGen/PowerPC/more-dq-form-prepare.ll @@ -10,8 +10,8 @@ define void @foo(i32* %.m, i32* %.n, [0 x %_elem_type_of_a]* %.a, [0 x %_elem_type_of_x]* %.x, i32* %.l, <2 x double>* %.vy01, <2 x double>* %.vy02, <2 x double>* %.vy03, <2 x double>* %.vy04, <2 x double>* %.vy05, <2 x double>* %.vy06, <2 x double>* %.vy07, <2 x double>* %.vy08, <2 x double>* %.vy09, <2 x double>* %.vy0a, <2 x double>* %.vy0b, <2 x double>* %.vy0c, <2 x double>* %.vy21, <2 x double>* %.vy22, <2 x double>* %.vy23, <2 x double>* %.vy24, <2 x double>* %.vy25, <2 x double>* %.vy26, <2 x double>* %.vy27, <2 x double>* %.vy28, <2 x double>* %.vy29, <2 x double>* %.vy2a, <2 x double>* %.vy2b, <2 x double>* %.vy2c) { ; CHECK-LABEL: foo: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: stdu 1, -608(1) -; CHECK-NEXT: 
.cfi_def_cfa_offset 608 +; CHECK-NEXT: stdu 1, -592(1) +; CHECK-NEXT: .cfi_def_cfa_offset 592 ; CHECK-NEXT: .cfi_offset r14, -192 ; CHECK-NEXT: .cfi_offset r15, -184 ; CHECK-NEXT: .cfi_offset r16, -176 @@ -48,322 +48,313 @@ ; CHECK-NEXT: .cfi_offset v29, -240 ; CHECK-NEXT: .cfi_offset v30, -224 ; CHECK-NEXT: .cfi_offset v31, -208 -; CHECK-NEXT: lwz 0, 0(4) -; CHECK-NEXT: std 14, 416(1) # 8-byte Folded Spill -; CHECK-NEXT: std 15, 424(1) # 8-byte Folded Spill -; CHECK-NEXT: cmpwi 0, 1 -; CHECK-NEXT: std 16, 432(1) # 8-byte Folded Spill -; CHECK-NEXT: std 17, 440(1) # 8-byte Folded Spill -; CHECK-NEXT: std 18, 448(1) # 8-byte Folded Spill -; CHECK-NEXT: std 19, 456(1) # 8-byte Folded Spill -; CHECK-NEXT: std 20, 464(1) # 8-byte Folded Spill -; CHECK-NEXT: std 21, 472(1) # 8-byte Folded Spill -; CHECK-NEXT: std 22, 480(1) # 8-byte Folded Spill -; CHECK-NEXT: std 23, 488(1) # 8-byte Folded Spill -; CHECK-NEXT: std 24, 496(1) # 8-byte Folded Spill -; CHECK-NEXT: std 25, 504(1) # 8-byte Folded Spill -; CHECK-NEXT: std 26, 512(1) # 8-byte Folded Spill -; CHECK-NEXT: std 27, 520(1) # 8-byte Folded Spill -; CHECK-NEXT: std 28, 528(1) # 8-byte Folded Spill -; CHECK-NEXT: std 29, 536(1) # 8-byte Folded Spill -; CHECK-NEXT: std 30, 544(1) # 8-byte Folded Spill -; CHECK-NEXT: std 31, 552(1) # 8-byte Folded Spill -; CHECK-NEXT: stfd 26, 560(1) # 8-byte Folded Spill -; CHECK-NEXT: stfd 27, 568(1) # 8-byte Folded Spill -; CHECK-NEXT: stfd 28, 576(1) # 8-byte Folded Spill -; CHECK-NEXT: stfd 29, 584(1) # 8-byte Folded Spill -; CHECK-NEXT: stfd 30, 592(1) # 8-byte Folded Spill -; CHECK-NEXT: stfd 31, 600(1) # 8-byte Folded Spill -; CHECK-NEXT: stxv 52, 224(1) # 16-byte Folded Spill -; CHECK-NEXT: stxv 53, 240(1) # 16-byte Folded Spill -; CHECK-NEXT: stxv 54, 256(1) # 16-byte Folded Spill -; CHECK-NEXT: stxv 55, 272(1) # 16-byte Folded Spill -; CHECK-NEXT: stxv 56, 288(1) # 16-byte Folded Spill -; CHECK-NEXT: stxv 57, 304(1) # 16-byte Folded Spill -; CHECK-NEXT: stxv 58, 320(1) # 
16-byte Folded Spill -; CHECK-NEXT: stxv 59, 336(1) # 16-byte Folded Spill -; CHECK-NEXT: stxv 60, 352(1) # 16-byte Folded Spill -; CHECK-NEXT: stxv 61, 368(1) # 16-byte Folded Spill -; CHECK-NEXT: stxv 62, 384(1) # 16-byte Folded Spill -; CHECK-NEXT: stxv 63, 400(1) # 16-byte Folded Spill +; CHECK-NEXT: lwz 4, 0(4) +; CHECK-NEXT: std 14, 400(1) # 8-byte Folded Spill +; CHECK-NEXT: std 15, 408(1) # 8-byte Folded Spill +; CHECK-NEXT: cmpwi 4, 1 +; CHECK-NEXT: std 16, 416(1) # 8-byte Folded Spill +; CHECK-NEXT: std 17, 424(1) # 8-byte Folded Spill +; CHECK-NEXT: std 18, 432(1) # 8-byte Folded Spill +; CHECK-NEXT: std 19, 440(1) # 8-byte Folded Spill +; CHECK-NEXT: std 20, 448(1) # 8-byte Folded Spill +; CHECK-NEXT: std 21, 456(1) # 8-byte Folded Spill +; CHECK-NEXT: std 22, 464(1) # 8-byte Folded Spill +; CHECK-NEXT: std 23, 472(1) # 8-byte Folded Spill +; CHECK-NEXT: std 24, 480(1) # 8-byte Folded Spill +; CHECK-NEXT: std 25, 488(1) # 8-byte Folded Spill +; CHECK-NEXT: std 26, 496(1) # 8-byte Folded Spill +; CHECK-NEXT: std 27, 504(1) # 8-byte Folded Spill +; CHECK-NEXT: std 28, 512(1) # 8-byte Folded Spill +; CHECK-NEXT: std 29, 520(1) # 8-byte Folded Spill +; CHECK-NEXT: std 30, 528(1) # 8-byte Folded Spill +; CHECK-NEXT: std 31, 536(1) # 8-byte Folded Spill +; CHECK-NEXT: stfd 26, 544(1) # 8-byte Folded Spill +; CHECK-NEXT: stfd 27, 552(1) # 8-byte Folded Spill +; CHECK-NEXT: stfd 28, 560(1) # 8-byte Folded Spill +; CHECK-NEXT: stfd 29, 568(1) # 8-byte Folded Spill +; CHECK-NEXT: stfd 30, 576(1) # 8-byte Folded Spill +; CHECK-NEXT: stfd 31, 584(1) # 8-byte Folded Spill +; CHECK-NEXT: stxv 52, 208(1) # 16-byte Folded Spill +; CHECK-NEXT: stxv 53, 224(1) # 16-byte Folded Spill +; CHECK-NEXT: stxv 54, 240(1) # 16-byte Folded Spill +; CHECK-NEXT: stxv 55, 256(1) # 16-byte Folded Spill +; CHECK-NEXT: stxv 56, 272(1) # 16-byte Folded Spill +; CHECK-NEXT: stxv 57, 288(1) # 16-byte Folded Spill +; CHECK-NEXT: stxv 58, 304(1) # 16-byte Folded Spill +; CHECK-NEXT: stxv 59, 
320(1) # 16-byte Folded Spill +; CHECK-NEXT: stxv 60, 336(1) # 16-byte Folded Spill +; CHECK-NEXT: stxv 61, 352(1) # 16-byte Folded Spill +; CHECK-NEXT: stxv 62, 368(1) # 16-byte Folded Spill +; CHECK-NEXT: stxv 63, 384(1) # 16-byte Folded Spill ; CHECK-NEXT: blt 0, .LBB0_7 ; CHECK-NEXT: # %bb.1: # %_loop_1_do_.lr.ph ; CHECK-NEXT: lwz 3, 0(3) ; CHECK-NEXT: cmpwi 3, 1 ; CHECK-NEXT: blt 0, .LBB0_7 ; CHECK-NEXT: # %bb.2: # %_loop_1_do_.preheader -; CHECK-NEXT: mr 23, 5 -; CHECK-NEXT: ld 5, 704(1) +; CHECK-NEXT: mr 21, 5 +; CHECK-NEXT: ld 5, 848(1) +; CHECK-NEXT: lwa 0, 0(7) ; CHECK-NEXT: addi 3, 3, 1 -; CHECK-NEXT: ld 4, 728(1) +; CHECK-NEXT: mr 14, 7 +; CHECK-NEXT: mr 22, 6 ; CHECK-NEXT: mr 11, 10 -; CHECK-NEXT: mr 10, 6 -; CHECK-NEXT: std 8, 40(1) # 8-byte Folded Spill -; CHECK-NEXT: std 9, 48(1) # 8-byte Folded Spill -; CHECK-NEXT: lwa 7, 0(7) -; CHECK-NEXT: ld 29, 840(1) +; CHECK-NEXT: ld 18, 736(1) +; CHECK-NEXT: ld 17, 728(1) +; CHECK-NEXT: std 8, 32(1) # 8-byte Folded Spill +; CHECK-NEXT: std 9, 40(1) # 8-byte Folded Spill ; CHECK-NEXT: cmpldi 3, 9 -; CHECK-NEXT: ld 27, 832(1) -; CHECK-NEXT: ld 28, 856(1) -; CHECK-NEXT: std 5, 112(1) # 8-byte Folded Spill -; CHECK-NEXT: std 4, 120(1) # 8-byte Folded Spill -; CHECK-NEXT: lxv 1, 0(5) +; CHECK-NEXT: ld 19, 744(1) +; CHECK-NEXT: ld 20, 752(1) +; CHECK-NEXT: std 5, 200(1) # 8-byte Folded Spill +; CHECK-NEXT: ld 5, 840(1) +; CHECK-NEXT: std 17, 80(1) # 8-byte Folded Spill +; CHECK-NEXT: std 18, 88(1) # 8-byte Folded Spill +; CHECK-NEXT: lxv 36, 0(18) +; CHECK-NEXT: std 19, 96(1) # 8-byte Folded Spill +; CHECK-NEXT: std 20, 104(1) # 8-byte Folded Spill +; CHECK-NEXT: lxv 13, 0(19) +; CHECK-NEXT: lxv 12, 0(20) +; CHECK-NEXT: ld 30, 832(1) +; CHECK-NEXT: ld 2, 824(1) +; CHECK-NEXT: ld 12, 816(1) +; CHECK-NEXT: ld 29, 808(1) +; CHECK-NEXT: std 2, 176(1) # 8-byte Folded Spill +; CHECK-NEXT: std 30, 184(1) # 8-byte Folded Spill +; CHECK-NEXT: std 29, 160(1) # 8-byte Folded Spill +; CHECK-NEXT: std 12, 168(1) # 8-byte 
Folded Spill +; CHECK-NEXT: std 5, 192(1) # 8-byte Folded Spill ; CHECK-NEXT: li 5, 9 -; CHECK-NEXT: ld 30, 848(1) -; CHECK-NEXT: lxv 0, 0(4) -; CHECK-NEXT: sldi 4, 7, 3 -; CHECK-NEXT: add 4, 4, 23 -; CHECK-NEXT: sldi 16, 7, 2 -; CHECK-NEXT: sldi 15, 7, 1 -; CHECK-NEXT: ld 17, 760(1) -; CHECK-NEXT: std 27, 192(1) # 8-byte Folded Spill -; CHECK-NEXT: std 29, 200(1) # 8-byte Folded Spill -; CHECK-NEXT: lxv 6, 0(29) -; CHECK-NEXT: ld 26, 824(1) -; CHECK-NEXT: ld 25, 816(1) -; CHECK-NEXT: ld 24, 808(1) -; CHECK-NEXT: std 30, 208(1) # 8-byte Folded Spill -; CHECK-NEXT: std 28, 216(1) # 8-byte Folded Spill -; CHECK-NEXT: std 25, 176(1) # 8-byte Folded Spill -; CHECK-NEXT: std 26, 184(1) # 8-byte Folded Spill -; CHECK-NEXT: std 24, 168(1) # 8-byte Folded Spill -; CHECK-NEXT: iselgt 3, 3, 5 -; CHECK-NEXT: ld 5, 752(1) -; CHECK-NEXT: addi 14, 4, 32 -; CHECK-NEXT: sldi 4, 7, 4 -; CHECK-NEXT: add 29, 7, 15 -; CHECK-NEXT: ld 22, 800(1) -; CHECK-NEXT: ld 21, 792(1) -; CHECK-NEXT: ld 20, 784(1) -; CHECK-NEXT: std 22, 160(1) # 8-byte Folded Spill -; CHECK-NEXT: std 20, 144(1) # 8-byte Folded Spill -; CHECK-NEXT: std 21, 152(1) # 8-byte Folded Spill -; CHECK-NEXT: addi 6, 3, -2 -; CHECK-NEXT: add 3, 7, 16 -; CHECK-NEXT: add 4, 4, 23 -; CHECK-NEXT: ld 19, 776(1) -; CHECK-NEXT: ld 18, 768(1) -; CHECK-NEXT: lxv 4, 0(8) -; CHECK-NEXT: lxv 2, 0(11) -; CHECK-NEXT: std 18, 128(1) # 8-byte Folded Spill -; CHECK-NEXT: std 19, 136(1) # 8-byte Folded Spill -; CHECK-NEXT: addi 12, 4, 32 -; CHECK-NEXT: rldicl 2, 6, 61, 3 -; CHECK-NEXT: sldi 6, 3, 3 -; CHECK-NEXT: ld 4, 736(1) -; CHECK-NEXT: ld 31, 720(1) -; CHECK-NEXT: std 11, 56(1) # 8-byte Folded Spill -; CHECK-NEXT: std 31, 64(1) # 8-byte Folded Spill -; CHECK-NEXT: add 11, 23, 6 -; CHECK-NEXT: ld 6, 744(1) -; CHECK-NEXT: ld 8, 712(1) -; CHECK-NEXT: std 5, 96(1) # 8-byte Folded Spill -; CHECK-NEXT: std 17, 104(1) # 8-byte Folded Spill -; CHECK-NEXT: lxv 39, 0(5) -; CHECK-NEXT: sldi 5, 7, 5 -; CHECK-NEXT: lxv 5, 0(30) -; CHECK-NEXT: lxv 7, 
0(28) -; CHECK-NEXT: lxv 3, 0(9) -; CHECK-NEXT: addi 2, 2, 1 -; CHECK-NEXT: add 30, 23, 5 -; CHECK-NEXT: sldi 5, 29, 3 -; CHECK-NEXT: add 28, 23, 5 -; CHECK-NEXT: ld 5, 864(1) +; CHECK-NEXT: ld 28, 800(1) +; CHECK-NEXT: ld 27, 792(1) +; CHECK-NEXT: ld 26, 784(1) +; CHECK-NEXT: ld 25, 776(1) +; CHECK-NEXT: ld 24, 768(1) +; CHECK-NEXT: ld 23, 760(1) +; CHECK-NEXT: ld 16, 720(1) +; CHECK-NEXT: ld 15, 712(1) +; CHECK-NEXT: ld 6, 704(1) +; CHECK-NEXT: ld 7, 696(1) +; CHECK-NEXT: ld 10, 688(1) ; CHECK-NEXT: lxv 43, 0(8) -; CHECK-NEXT: lxv 42, 0(31) -; CHECK-NEXT: lxv 38, 0(17) -; CHECK-NEXT: std 4, 72(1) # 8-byte Folded Spill -; CHECK-NEXT: std 6, 80(1) # 8-byte Folded Spill -; CHECK-NEXT: lxv 41, 0(4) -; CHECK-NEXT: lxv 40, 0(6) -; CHECK-NEXT: lxv 33, 0(18) -; CHECK-NEXT: lxv 32, 0(19) -; CHECK-NEXT: std 5, 88(1) # 8-byte Folded Spill -; CHECK-NEXT: lxv 37, 0(20) -; CHECK-NEXT: lxv 36, 0(21) -; CHECK-NEXT: lxv 13, 0(22) -; CHECK-NEXT: lxv 12, 0(24) -; CHECK-NEXT: lxv 11, 0(25) -; CHECK-NEXT: lxv 9, 0(26) -; CHECK-NEXT: lxv 8, 0(27) -; CHECK-NEXT: lxv 10, 0(5) -; CHECK-NEXT: mulli 27, 7, 48 -; CHECK-NEXT: mulli 26, 7, 6 -; CHECK-NEXT: li 25, 1 -; CHECK-NEXT: li 24, 0 -; CHECK-NEXT: mr 5, 23 +; CHECK-NEXT: std 11, 48(1) # 8-byte Folded Spill +; CHECK-NEXT: std 6, 56(1) # 8-byte Folded Spill +; CHECK-NEXT: std 27, 144(1) # 8-byte Folded Spill +; CHECK-NEXT: std 28, 152(1) # 8-byte Folded Spill +; CHECK-NEXT: mr 8, 7 +; CHECK-NEXT: std 25, 128(1) # 8-byte Folded Spill +; CHECK-NEXT: std 26, 136(1) # 8-byte Folded Spill +; CHECK-NEXT: std 15, 64(1) # 8-byte Folded Spill +; CHECK-NEXT: std 16, 72(1) # 8-byte Folded Spill +; CHECK-NEXT: std 23, 112(1) # 8-byte Folded Spill +; CHECK-NEXT: std 24, 120(1) # 8-byte Folded Spill +; CHECK-NEXT: iselgt 3, 3, 5 +; CHECK-NEXT: sldi 5, 0, 3 +; CHECK-NEXT: add 5, 5, 21 +; CHECK-NEXT: lxv 42, 0(9) +; CHECK-NEXT: lxv 41, 0(11) +; CHECK-NEXT: lxv 40, 0(10) +; CHECK-NEXT: lxv 39, 0(7) +; CHECK-NEXT: mulli 11, 0, 48 +; CHECK-NEXT: addi 14, 5, 
32 +; CHECK-NEXT: sldi 5, 0, 4 +; CHECK-NEXT: addi 3, 3, -2 +; CHECK-NEXT: lxv 38, 0(6) +; CHECK-NEXT: lxv 33, 0(15) +; CHECK-NEXT: lxv 32, 0(16) +; CHECK-NEXT: lxv 37, 0(17) +; CHECK-NEXT: add 5, 5, 21 +; CHECK-NEXT: lxv 11, 0(23) +; CHECK-NEXT: lxv 10, 0(24) +; CHECK-NEXT: lxv 8, 0(25) +; CHECK-NEXT: lxv 6, 0(26) +; CHECK-NEXT: rldicl 3, 3, 61, 3 +; CHECK-NEXT: li 26, 0 +; CHECK-NEXT: mr 25, 21 +; CHECK-NEXT: addi 31, 5, 32 +; CHECK-NEXT: mulli 5, 0, 40 +; CHECK-NEXT: lxv 5, 0(27) +; CHECK-NEXT: lxv 3, 0(28) +; CHECK-NEXT: lxv 1, 0(29) +; CHECK-NEXT: lxv 0, 0(12) +; CHECK-NEXT: mulli 28, 0, 6 +; CHECK-NEXT: addi 3, 3, 1 +; CHECK-NEXT: li 27, 1 +; CHECK-NEXT: add 18, 21, 5 +; CHECK-NEXT: sldi 5, 0, 5 +; CHECK-NEXT: lxv 2, 0(2) +; CHECK-NEXT: lxv 4, 0(30) +; CHECK-NEXT: sldi 2, 0, 1 +; CHECK-NEXT: add 19, 21, 5 +; CHECK-NEXT: mulli 5, 0, 24 +; CHECK-NEXT: add 20, 21, 5 +; CHECK-NEXT: ld 5, 192(1) # 8-byte Folded Reload +; CHECK-NEXT: lxv 9, 0(5) +; CHECK-NEXT: ld 5, 200(1) # 8-byte Folded Reload +; CHECK-NEXT: lxv 7, 0(5) ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: .LBB0_3: # %_loop_2_do_.lr.ph ; CHECK-NEXT: # =>This Loop Header: Depth=1 ; CHECK-NEXT: # Child Loop BB0_4 Depth 2 -; CHECK-NEXT: maddld 6, 26, 24, 3 -; CHECK-NEXT: mtctr 2 -; CHECK-NEXT: sldi 6, 6, 3 -; CHECK-NEXT: add 22, 23, 6 -; CHECK-NEXT: maddld 6, 26, 24, 16 -; CHECK-NEXT: sldi 6, 6, 3 -; CHECK-NEXT: add 21, 23, 6 -; CHECK-NEXT: maddld 6, 26, 24, 29 -; CHECK-NEXT: sldi 6, 6, 3 -; CHECK-NEXT: add 20, 23, 6 -; CHECK-NEXT: maddld 6, 26, 24, 15 -; CHECK-NEXT: sldi 6, 6, 3 -; CHECK-NEXT: add 19, 23, 6 -; CHECK-NEXT: maddld 6, 26, 24, 7 -; CHECK-NEXT: sldi 6, 6, 3 -; CHECK-NEXT: add 18, 23, 6 -; CHECK-NEXT: mulld 6, 26, 24 -; CHECK-NEXT: sldi 6, 6, 3 -; CHECK-NEXT: add 17, 23, 6 -; CHECK-NEXT: mr 6, 10 +; CHECK-NEXT: maddld 5, 28, 26, 2 +; CHECK-NEXT: mr 6, 22 +; CHECK-NEXT: mr 29, 20 +; CHECK-NEXT: mr 30, 19 +; CHECK-NEXT: mr 12, 18 +; CHECK-NEXT: mtctr 3 +; CHECK-NEXT: sldi 5, 5, 3 +; CHECK-NEXT: add 24, 
21, 5 +; CHECK-NEXT: maddld 5, 28, 26, 0 +; CHECK-NEXT: sldi 5, 5, 3 +; CHECK-NEXT: add 23, 21, 5 +; CHECK-NEXT: mr 5, 25 ; CHECK-NEXT: .p2align 5 ; CHECK-NEXT: .LBB0_4: # %_loop_2_do_ ; CHECK-NEXT: # Parent Loop BB0_3 Depth=1 ; CHECK-NEXT: # => This Inner Loop Header: Depth=2 ; CHECK-NEXT: lxvp 34, 0(6) -; CHECK-NEXT: lxvp 44, 0(17) -; CHECK-NEXT: xvmaddadp 4, 45, 35 -; CHECK-NEXT: lxvp 46, 0(18) -; CHECK-NEXT: xvmaddadp 3, 47, 35 -; CHECK-NEXT: lxvp 48, 0(19) -; CHECK-NEXT: lxvp 50, 0(20) -; CHECK-NEXT: lxvp 62, 0(21) -; CHECK-NEXT: lxvp 60, 0(22) +; CHECK-NEXT: lxvp 44, 0(5) +; CHECK-NEXT: xvmaddadp 43, 45, 35 +; CHECK-NEXT: lxvp 46, 0(23) +; CHECK-NEXT: xvmaddadp 42, 47, 35 +; CHECK-NEXT: lxvp 48, 0(24) +; CHECK-NEXT: lxvp 50, 0(29) +; CHECK-NEXT: lxvp 62, 0(30) +; CHECK-NEXT: lxvp 60, 0(12) ; CHECK-NEXT: lxvp 58, 32(6) -; CHECK-NEXT: lxvp 56, 32(17) -; CHECK-NEXT: lxvp 54, 32(18) -; CHECK-NEXT: lxvp 52, 32(19) -; CHECK-NEXT: lxvp 30, 32(20) -; CHECK-NEXT: lxvp 28, 32(21) -; CHECK-NEXT: lxvp 26, 32(22) -; CHECK-NEXT: xvmaddadp 2, 49, 35 -; CHECK-NEXT: xvmaddadp 1, 51, 35 -; CHECK-NEXT: xvmaddadp 43, 63, 35 -; CHECK-NEXT: xvmaddadp 42, 61, 35 -; CHECK-NEXT: xvmaddadp 0, 44, 34 -; CHECK-NEXT: xvmaddadp 41, 46, 34 -; CHECK-NEXT: xvmaddadp 40, 48, 34 -; CHECK-NEXT: xvmaddadp 39, 50, 34 -; CHECK-NEXT: xvmaddadp 38, 62, 34 -; CHECK-NEXT: xvmaddadp 33, 60, 34 -; CHECK-NEXT: xvmaddadp 32, 57, 59 -; CHECK-NEXT: xvmaddadp 37, 55, 59 -; CHECK-NEXT: xvmaddadp 36, 53, 59 -; CHECK-NEXT: xvmaddadp 13, 31, 59 -; CHECK-NEXT: xvmaddadp 12, 29, 59 -; CHECK-NEXT: xvmaddadp 11, 27, 59 -; CHECK-NEXT: xvmaddadp 9, 56, 58 -; CHECK-NEXT: xvmaddadp 8, 54, 58 -; CHECK-NEXT: xvmaddadp 6, 52, 58 -; CHECK-NEXT: xvmaddadp 5, 30, 58 -; CHECK-NEXT: xvmaddadp 7, 28, 58 -; CHECK-NEXT: xvmaddadp 10, 26, 58 +; CHECK-NEXT: lxvp 56, 32(5) +; CHECK-NEXT: lxvp 54, 32(23) +; CHECK-NEXT: lxvp 52, 32(24) +; CHECK-NEXT: lxvp 30, 32(29) +; CHECK-NEXT: lxvp 28, 32(30) +; CHECK-NEXT: lxvp 26, 32(12) +; 
CHECK-NEXT: xvmaddadp 41, 49, 35 +; CHECK-NEXT: xvmaddadp 40, 51, 35 +; CHECK-NEXT: xvmaddadp 39, 63, 35 +; CHECK-NEXT: xvmaddadp 38, 61, 35 +; CHECK-NEXT: xvmaddadp 33, 44, 34 +; CHECK-NEXT: xvmaddadp 32, 46, 34 +; CHECK-NEXT: xvmaddadp 37, 48, 34 +; CHECK-NEXT: xvmaddadp 36, 50, 34 +; CHECK-NEXT: xvmaddadp 13, 62, 34 +; CHECK-NEXT: xvmaddadp 12, 60, 34 +; CHECK-NEXT: xvmaddadp 11, 57, 59 +; CHECK-NEXT: xvmaddadp 10, 55, 59 +; CHECK-NEXT: xvmaddadp 8, 53, 59 +; CHECK-NEXT: xvmaddadp 6, 31, 59 +; CHECK-NEXT: xvmaddadp 5, 29, 59 +; CHECK-NEXT: xvmaddadp 3, 27, 59 +; CHECK-NEXT: xvmaddadp 1, 56, 58 +; CHECK-NEXT: xvmaddadp 0, 54, 58 +; CHECK-NEXT: xvmaddadp 2, 52, 58 +; CHECK-NEXT: xvmaddadp 4, 30, 58 +; CHECK-NEXT: xvmaddadp 9, 28, 58 +; CHECK-NEXT: xvmaddadp 7, 26, 58 ; CHECK-NEXT: addi 6, 6, 64 -; CHECK-NEXT: addi 17, 17, 64 -; CHECK-NEXT: addi 18, 18, 64 -; CHECK-NEXT: addi 19, 19, 64 -; CHECK-NEXT: addi 20, 20, 64 -; CHECK-NEXT: addi 21, 21, 64 -; CHECK-NEXT: addi 22, 22, 64 +; CHECK-NEXT: addi 5, 5, 64 +; CHECK-NEXT: addi 23, 23, 64 +; CHECK-NEXT: addi 24, 24, 64 +; CHECK-NEXT: addi 29, 29, 64 +; CHECK-NEXT: addi 30, 30, 64 +; CHECK-NEXT: addi 12, 12, 64 ; CHECK-NEXT: bdnz .LBB0_4 ; CHECK-NEXT: # %bb.5: # %_loop_2_endl_ ; CHECK-NEXT: # -; CHECK-NEXT: addi 25, 25, 6 -; CHECK-NEXT: add 5, 5, 27 -; CHECK-NEXT: add 14, 14, 27 -; CHECK-NEXT: add 11, 11, 27 -; CHECK-NEXT: add 12, 12, 27 -; CHECK-NEXT: add 30, 30, 27 -; CHECK-NEXT: add 28, 28, 27 -; CHECK-NEXT: addi 24, 24, 1 -; CHECK-NEXT: cmpld 25, 0 +; CHECK-NEXT: addi 27, 27, 6 +; CHECK-NEXT: add 25, 25, 11 +; CHECK-NEXT: add 14, 14, 11 +; CHECK-NEXT: add 18, 18, 11 +; CHECK-NEXT: add 31, 31, 11 +; CHECK-NEXT: add 19, 19, 11 +; CHECK-NEXT: add 20, 20, 11 +; CHECK-NEXT: addi 26, 26, 1 +; CHECK-NEXT: cmpld 27, 4 ; CHECK-NEXT: ble 0, .LBB0_3 ; CHECK-NEXT: # %bb.6: # %_loop_1_loopHeader_._return_bb_crit_edge.loopexit +; CHECK-NEXT: ld 3, 32(1) # 8-byte Folded Reload +; CHECK-NEXT: stxv 43, 0(3) ; CHECK-NEXT: ld 3, 
40(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 4, 0(3) +; CHECK-NEXT: stxv 42, 0(3) ; CHECK-NEXT: ld 3, 48(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 3, 0(3) +; CHECK-NEXT: stxv 41, 0(3) ; CHECK-NEXT: ld 3, 56(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 2, 0(3) -; CHECK-NEXT: ld 3, 112(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 1, 0(3) +; CHECK-NEXT: stxv 40, 0(10) +; CHECK-NEXT: stxv 39, 0(8) +; CHECK-NEXT: stxv 38, 0(3) ; CHECK-NEXT: ld 3, 64(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 43, 0(8) -; CHECK-NEXT: stxv 42, 0(3) -; CHECK-NEXT: ld 3, 120(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 0, 0(3) +; CHECK-NEXT: stxv 33, 0(3) ; CHECK-NEXT: ld 3, 72(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 41, 0(3) +; CHECK-NEXT: stxv 32, 0(3) ; CHECK-NEXT: ld 3, 80(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 40, 0(3) +; CHECK-NEXT: stxv 37, 0(3) +; CHECK-NEXT: ld 3, 88(1) # 8-byte Folded Reload +; CHECK-NEXT: stxv 36, 0(3) ; CHECK-NEXT: ld 3, 96(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 39, 0(3) +; CHECK-NEXT: stxv 13, 0(3) ; CHECK-NEXT: ld 3, 104(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 38, 0(3) +; CHECK-NEXT: stxv 12, 0(3) +; CHECK-NEXT: ld 3, 112(1) # 8-byte Folded Reload +; CHECK-NEXT: stxv 11, 0(3) +; CHECK-NEXT: ld 3, 120(1) # 8-byte Folded Reload +; CHECK-NEXT: stxv 10, 0(3) ; CHECK-NEXT: ld 3, 128(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 33, 0(3) +; CHECK-NEXT: stxv 8, 0(3) ; CHECK-NEXT: ld 3, 136(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 32, 0(3) +; CHECK-NEXT: stxv 6, 0(3) ; CHECK-NEXT: ld 3, 144(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 37, 0(3) +; CHECK-NEXT: stxv 5, 0(3) ; CHECK-NEXT: ld 3, 152(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 36, 0(3) +; CHECK-NEXT: stxv 3, 0(3) ; CHECK-NEXT: ld 3, 160(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 13, 0(3) +; CHECK-NEXT: stxv 1, 0(3) ; CHECK-NEXT: ld 3, 168(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 12, 0(3) +; CHECK-NEXT: stxv 0, 0(3) ; CHECK-NEXT: ld 3, 176(1) # 8-byte 
Folded Reload -; CHECK-NEXT: stxv 11, 0(3) +; CHECK-NEXT: stxv 2, 0(3) ; CHECK-NEXT: ld 3, 184(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 9, 0(3) +; CHECK-NEXT: stxv 4, 0(3) ; CHECK-NEXT: ld 3, 192(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 8, 0(3) +; CHECK-NEXT: stxv 9, 0(3) ; CHECK-NEXT: ld 3, 200(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 6, 0(3) -; CHECK-NEXT: ld 3, 208(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 5, 0(3) -; CHECK-NEXT: ld 3, 216(1) # 8-byte Folded Reload ; CHECK-NEXT: stxv 7, 0(3) -; CHECK-NEXT: ld 3, 88(1) # 8-byte Folded Reload -; CHECK-NEXT: stxv 10, 0(3) ; CHECK-NEXT: .LBB0_7: # %_return_bb -; CHECK-NEXT: lxv 63, 400(1) # 16-byte Folded Reload -; CHECK-NEXT: lxv 62, 384(1) # 16-byte Folded Reload -; CHECK-NEXT: lxv 61, 368(1) # 16-byte Folded Reload -; CHECK-NEXT: lxv 60, 352(1) # 16-byte Folded Reload -; CHECK-NEXT: lxv 59, 336(1) # 16-byte Folded Reload -; CHECK-NEXT: lxv 58, 320(1) # 16-byte Folded Reload -; CHECK-NEXT: lxv 57, 304(1) # 16-byte Folded Reload -; CHECK-NEXT: lxv 56, 288(1) # 16-byte Folded Reload -; CHECK-NEXT: lxv 55, 272(1) # 16-byte Folded Reload -; CHECK-NEXT: lxv 54, 256(1) # 16-byte Folded Reload -; CHECK-NEXT: lxv 53, 240(1) # 16-byte Folded Reload -; CHECK-NEXT: lxv 52, 224(1) # 16-byte Folded Reload -; CHECK-NEXT: lfd 31, 600(1) # 8-byte Folded Reload -; CHECK-NEXT: lfd 30, 592(1) # 8-byte Folded Reload -; CHECK-NEXT: lfd 29, 584(1) # 8-byte Folded Reload -; CHECK-NEXT: lfd 28, 576(1) # 8-byte Folded Reload -; CHECK-NEXT: lfd 27, 568(1) # 8-byte Folded Reload -; CHECK-NEXT: lfd 26, 560(1) # 8-byte Folded Reload -; CHECK-NEXT: ld 31, 552(1) # 8-byte Folded Reload -; CHECK-NEXT: ld 30, 544(1) # 8-byte Folded Reload -; CHECK-NEXT: ld 29, 536(1) # 8-byte Folded Reload -; CHECK-NEXT: ld 28, 528(1) # 8-byte Folded Reload -; CHECK-NEXT: ld 27, 520(1) # 8-byte Folded Reload -; CHECK-NEXT: ld 26, 512(1) # 8-byte Folded Reload -; CHECK-NEXT: ld 25, 504(1) # 8-byte Folded Reload -; CHECK-NEXT: ld 24, 496(1) # 
8-byte Folded Reload -; CHECK-NEXT: ld 23, 488(1) # 8-byte Folded Reload -; CHECK-NEXT: ld 22, 480(1) # 8-byte Folded Reload -; CHECK-NEXT: ld 21, 472(1) # 8-byte Folded Reload -; CHECK-NEXT: ld 20, 464(1) # 8-byte Folded Reload -; CHECK-NEXT: ld 19, 456(1) # 8-byte Folded Reload -; CHECK-NEXT: ld 18, 448(1) # 8-byte Folded Reload -; CHECK-NEXT: ld 17, 440(1) # 8-byte Folded Reload -; CHECK-NEXT: ld 16, 432(1) # 8-byte Folded Reload -; CHECK-NEXT: ld 15, 424(1) # 8-byte Folded Reload -; CHECK-NEXT: ld 14, 416(1) # 8-byte Folded Reload -; CHECK-NEXT: addi 1, 1, 608 +; CHECK-NEXT: lxv 63, 384(1) # 16-byte Folded Reload +; CHECK-NEXT: lxv 62, 368(1) # 16-byte Folded Reload +; CHECK-NEXT: lxv 61, 352(1) # 16-byte Folded Reload +; CHECK-NEXT: lxv 60, 336(1) # 16-byte Folded Reload +; CHECK-NEXT: lxv 59, 320(1) # 16-byte Folded Reload +; CHECK-NEXT: lxv 58, 304(1) # 16-byte Folded Reload +; CHECK-NEXT: lxv 57, 288(1) # 16-byte Folded Reload +; CHECK-NEXT: lxv 56, 272(1) # 16-byte Folded Reload +; CHECK-NEXT: lxv 55, 256(1) # 16-byte Folded Reload +; CHECK-NEXT: lxv 54, 240(1) # 16-byte Folded Reload +; CHECK-NEXT: lxv 53, 224(1) # 16-byte Folded Reload +; CHECK-NEXT: lxv 52, 208(1) # 16-byte Folded Reload +; CHECK-NEXT: lfd 31, 584(1) # 8-byte Folded Reload +; CHECK-NEXT: lfd 30, 576(1) # 8-byte Folded Reload +; CHECK-NEXT: lfd 29, 568(1) # 8-byte Folded Reload +; CHECK-NEXT: lfd 28, 560(1) # 8-byte Folded Reload +; CHECK-NEXT: lfd 27, 552(1) # 8-byte Folded Reload +; CHECK-NEXT: lfd 26, 544(1) # 8-byte Folded Reload +; CHECK-NEXT: ld 31, 536(1) # 8-byte Folded Reload +; CHECK-NEXT: ld 30, 528(1) # 8-byte Folded Reload +; CHECK-NEXT: ld 29, 520(1) # 8-byte Folded Reload +; CHECK-NEXT: ld 28, 512(1) # 8-byte Folded Reload +; CHECK-NEXT: ld 27, 504(1) # 8-byte Folded Reload +; CHECK-NEXT: ld 26, 496(1) # 8-byte Folded Reload +; CHECK-NEXT: ld 25, 488(1) # 8-byte Folded Reload +; CHECK-NEXT: ld 24, 480(1) # 8-byte Folded Reload +; CHECK-NEXT: ld 23, 472(1) # 8-byte Folded 
Reload +; CHECK-NEXT: ld 22, 464(1) # 8-byte Folded Reload +; CHECK-NEXT: ld 21, 456(1) # 8-byte Folded Reload +; CHECK-NEXT: ld 20, 448(1) # 8-byte Folded Reload +; CHECK-NEXT: ld 19, 440(1) # 8-byte Folded Reload +; CHECK-NEXT: ld 18, 432(1) # 8-byte Folded Reload +; CHECK-NEXT: ld 17, 424(1) # 8-byte Folded Reload +; CHECK-NEXT: ld 16, 416(1) # 8-byte Folded Reload +; CHECK-NEXT: ld 15, 408(1) # 8-byte Folded Reload +; CHECK-NEXT: ld 14, 400(1) # 8-byte Folded Reload +; CHECK-NEXT: addi 1, 1, 592 ; CHECK-NEXT: blr entry: %_val_l_ = load i32, i32* %.l, align 4 diff --git a/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll b/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll --- a/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll +++ b/llvm/test/CodeGen/PowerPC/ppc64-P9-vabsd.ll @@ -1,200 +1,191 @@ -; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names -verify-machineinstrs | FileCheck %s -; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr9 -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names -verify-machineinstrs | FileCheck %s -; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names -verify-machineinstrs | FileCheck %s -check-prefix=CHECK-PWR8 -implicit-check-not vabsdu -; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names -verify-machineinstrs | FileCheck %s -check-prefix=CHECK-PWR7 -implicit-check-not vmaxsd +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr9 -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,CHECK-PWR9,CHECK-PWR9-LE +; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr9 -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,CHECK-PWR9,CHECK-PWR9-BE +; RUN: llc < %s 
-mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,CHECK-PWR78,CHECK-PWR8 -implicit-check-not vabsdu +; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr7 -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,CHECK-PWR78,CHECK-PWR7 -implicit-check-not vmaxsd define <4 x i32> @simple_absv_32(<4 x i32> %a) local_unnamed_addr { +; CHECK-PWR9-LABEL: simple_absv_32: +; CHECK-PWR9: # %bb.0: # %entry +; CHECK-PWR9-NEXT: vnegw v3, v2 +; CHECK-PWR9-NEXT: vmaxsw v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: simple_absv_32: +; CHECK-PWR78: # %bb.0: # %entry +; CHECK-PWR78-NEXT: xxlxor v3, v3, v3 +; CHECK-PWR78-NEXT: vsubuwm v3, v3, v2 +; CHECK-PWR78-NEXT: vmaxsw v2, v2, v3 +; CHECK-PWR78-NEXT: blr entry: %sub.i = sub <4 x i32> zeroinitializer, %a %0 = tail call <4 x i32> @llvm.ppc.altivec.vmaxsw(<4 x i32> %a, <4 x i32> %sub.i) ret <4 x i32> %0 -; CHECK-LABEL: simple_absv_32 -; CHECK-NOT: vxor -; CHECK-NOT: vabsduw -; CHECK: vnegw v[[REG:[0-9]+]], v2 -; CHECK-NEXT: vmaxsw v2, v2, v[[REG]] -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: simple_absv_32 -; CHECK-PWR8: xxlxor -; CHECK-PWR8: vsubuwm -; CHECK-PWR8: vmaxsw -; CHECK-PWR8: blr -; CHECK-PWR7-LABEL: simple_absv_32 -; CHECK-PWR7: xxlxor -; CHECK-PWR7: vsubuwm -; CHECK-PWR7: vmaxsw -; CHECK-PWR7: blr } define <4 x i32> @simple_absv_32_swap(<4 x i32> %a) local_unnamed_addr { +; CHECK-PWR9-LABEL: simple_absv_32_swap: +; CHECK-PWR9: # %bb.0: # %entry +; CHECK-PWR9-NEXT: vnegw v3, v2 +; CHECK-PWR9-NEXT: vmaxsw v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: simple_absv_32_swap: +; CHECK-PWR78: # %bb.0: # %entry +; CHECK-PWR78-NEXT: xxlxor v3, v3, v3 +; CHECK-PWR78-NEXT: vsubuwm v3, v3, v2 +; CHECK-PWR78-NEXT: vmaxsw v2, v3, v2 +; CHECK-PWR78-NEXT: blr entry: %sub.i = sub <4 x i32> zeroinitializer, %a %0 = tail call <4 x i32> 
@llvm.ppc.altivec.vmaxsw(<4 x i32> %sub.i, <4 x i32> %a) ret <4 x i32> %0 -; CHECK-LABEL: simple_absv_32_swap -; CHECK-NOT: vxor -; CHECK-NOT: vabsduw -; CHECK: vnegw v[[REG:[0-9]+]], v2 -; CHECK-NEXT: vmaxsw v2, v2, v[[REG]] -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: simple_absv_32_swap -; CHECK-PWR8: xxlxor -; CHECK-PWR8: vsubuwm -; CHECK-PWR8: vmaxsw -; CHECK-PWR8: blr } define <8 x i16> @simple_absv_16(<8 x i16> %a) local_unnamed_addr { +; CHECK-LABEL: simple_absv_16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xxlxor v3, v3, v3 +; CHECK-NEXT: vsubuhm v3, v3, v2 +; CHECK-NEXT: vmaxsh v2, v2, v3 +; CHECK-NEXT: blr entry: %sub.i = sub <8 x i16> zeroinitializer, %a %0 = tail call <8 x i16> @llvm.ppc.altivec.vmaxsh(<8 x i16> %a, <8 x i16> %sub.i) ret <8 x i16> %0 -; CHECK-LABEL: simple_absv_16 -; CHECK-NOT: mtvsrws -; CHECK-NOT: vabsduh -; CHECK: xxlxor v[[ZERO:[0-9]+]], v[[ZERO]], v[[ZERO]] -; CHECK-NEXT: vsubuhm v[[REG:[0-9]+]], v[[ZERO]], v2 -; CHECK-NEXT: vmaxsh v2, v2, v[[REG]] -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: simple_absv_16 -; CHECK-PWR8: xxlxor -; CHECK-PWR8: vsubuhm -; CHECK-PWR8: vmaxsh -; CHECK-PWR8: blr -; CHECK-PWR7-LABEL: simple_absv_16 -; CHECK-PWR7: xxlxor -; CHECK-PWR7: vsubuhm -; CHECK-PWR7: vmaxsh -; CHECK-PWR7: blr } define <16 x i8> @simple_absv_8(<16 x i8> %a) local_unnamed_addr { +; CHECK-LABEL: simple_absv_8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: xxlxor v3, v3, v3 +; CHECK-NEXT: vsububm v3, v3, v2 +; CHECK-NEXT: vmaxsb v2, v2, v3 +; CHECK-NEXT: blr entry: %sub.i = sub <16 x i8> zeroinitializer, %a %0 = tail call <16 x i8> @llvm.ppc.altivec.vmaxsb(<16 x i8> %a, <16 x i8> %sub.i) ret <16 x i8> %0 -; CHECK-LABEL: simple_absv_8 -; CHECK-NOT: xxspltib -; CHECK-NOT: vabsdub -; CHECK: xxlxor v[[ZERO:[0-9]+]], v[[ZERO]], v[[ZERO]] -; CHECK-NEXT: vsububm v[[REG:[0-9]+]], v[[ZERO]], v2 -; CHECK-NEXT: vmaxsb v2, v2, v[[REG]] -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: simple_absv_8 -; CHECK-PWR8: xxlxor -; CHECK-PWR8: vsububm -; CHECK-PWR8: vmaxsb 
-; CHECK-PWR8: blr -; CHECK-PWR7-LABEL: simple_absv_8 -; CHECK-PWR7: xxlxor -; CHECK-PWR7: vsububm -; CHECK-PWR7: vmaxsb -; CHECK-PWR7: blr } -; v2i64 vmax isn't avaiable on pwr7 +; v2i64 vmax isn't avaiable on pwr7 define <2 x i64> @sub_absv_64(<2 x i64> %a, <2 x i64> %b) local_unnamed_addr { +; CHECK-PWR9-LABEL: sub_absv_64: +; CHECK-PWR9: # %bb.0: # %entry +; CHECK-PWR9-NEXT: vsubudm v2, v2, v3 +; CHECK-PWR9-NEXT: vnegd v3, v2 +; CHECK-PWR9-NEXT: vmaxsd v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR8-LABEL: sub_absv_64: +; CHECK-PWR8: # %bb.0: # %entry +; CHECK-PWR8-NEXT: xxlxor v4, v4, v4 +; CHECK-PWR8-NEXT: vsubudm v2, v2, v3 +; CHECK-PWR8-NEXT: vsubudm v3, v4, v2 +; CHECK-PWR8-NEXT: vmaxsd v2, v2, v3 +; CHECK-PWR8-NEXT: blr +; +; CHECK-PWR7-LABEL: sub_absv_64: +; CHECK-PWR7: # %bb.0: # %entry +; CHECK-PWR7-NEXT: addi r3, r1, -48 +; CHECK-PWR7-NEXT: addi r4, r1, -32 +; CHECK-PWR7-NEXT: stxvd2x v2, 0, r3 +; CHECK-PWR7-NEXT: stxvd2x v3, 0, r4 +; CHECK-PWR7-NEXT: ld r3, -40(r1) +; CHECK-PWR7-NEXT: ld r4, -24(r1) +; CHECK-PWR7-NEXT: ld r5, -48(r1) +; CHECK-PWR7-NEXT: ld r6, -32(r1) +; CHECK-PWR7-NEXT: sub r3, r3, r4 +; CHECK-PWR7-NEXT: sub r4, r5, r6 +; CHECK-PWR7-NEXT: sradi r5, r3, 63 +; CHECK-PWR7-NEXT: sradi r6, r4, 63 +; CHECK-PWR7-NEXT: add r3, r3, r5 +; CHECK-PWR7-NEXT: add r4, r4, r6 +; CHECK-PWR7-NEXT: xor r3, r3, r5 +; CHECK-PWR7-NEXT: xor r4, r4, r6 +; CHECK-PWR7-NEXT: std r3, -8(r1) +; CHECK-PWR7-NEXT: addi r3, r1, -16 +; CHECK-PWR7-NEXT: std r4, -16(r1) +; CHECK-PWR7-NEXT: lxvd2x v2, 0, r3 +; CHECK-PWR7-NEXT: blr entry: %0 = sub nsw <2 x i64> %a, %b %1 = icmp sgt <2 x i64> %0, %2 = sub <2 x i64> zeroinitializer, %0 %3 = select <2 x i1> %1, <2 x i64> %0, <2 x i64> %2 ret <2 x i64> %3 -; CHECK-LABEL: sub_absv_64 -; CHECK: vsubudm -; CHECK: vnegd -; CHECK: vmaxsd -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: sub_absv_64 -; CHECK-PWR8-DAG: vsubudm -; CHECK-PWR8-DAG: xxlxor -; CHECK-PWR8: vmaxsd -; CHECK-PWR8: blr -; CHECK-PWR7-LABEL: sub_absv_64 -; 
CHECK-PWR7-NOT: vmaxsd -; CHECK-PWR7: blr } ; The select pattern can only be detected for v4i32. define <4 x i32> @sub_absv_32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr { +; CHECK-PWR9-LABEL: sub_absv_32: +; CHECK-PWR9: # %bb.0: # %entry +; CHECK-PWR9-NEXT: xvnegsp v3, v3 +; CHECK-PWR9-NEXT: xvnegsp v2, v2 +; CHECK-PWR9-NEXT: vabsduw v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: sub_absv_32: +; CHECK-PWR78: # %bb.0: # %entry +; CHECK-PWR78-NEXT: xxlxor v4, v4, v4 +; CHECK-PWR78-NEXT: vsubuwm v2, v2, v3 +; CHECK-PWR78-NEXT: vsubuwm v3, v4, v2 +; CHECK-PWR78-NEXT: vmaxsw v2, v2, v3 +; CHECK-PWR78-NEXT: blr entry: %0 = sub nsw <4 x i32> %a, %b %1 = icmp sgt <4 x i32> %0, %2 = sub <4 x i32> zeroinitializer, %0 %3 = select <4 x i1> %1, <4 x i32> %0, <4 x i32> %2 ret <4 x i32> %3 -; CHECK-LABEL: sub_absv_32 -; CHECK-NOT: vsubuwm -; CHECK-NOT: vnegw -; CHECK-NOT: vmaxsw -; CHECK-DAG: xvnegsp v2, v2 -; CHECK-DAG: xvnegsp v3, v3 -; CHECK-NEXT: vabsduw v2, v{{[23]}}, v{{[23]}} -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: sub_absv_32 -; CHECK-PWR8-DAG: vsubuwm -; CHECK-PWR8-DAG: xxlxor -; CHECK-PWR8: vmaxsw -; CHECK-PWR8: blr -; CHECK-PWR7-LABEL: sub_absv_32 -; CHECK-PWR7-DAG: vsubuwm -; CHECK-PWR7-DAG: xxlxor -; CHECK-PWR7: vmaxsw -; CHECK-PWR7: blr } define <8 x i16> @sub_absv_16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr { +; CHECK-PWR9-LABEL: sub_absv_16: +; CHECK-PWR9: # %bb.0: # %entry +; CHECK-PWR9-NEXT: vsubuhm v2, v2, v3 +; CHECK-PWR9-NEXT: xxlxor v3, v3, v3 +; CHECK-PWR9-NEXT: vsubuhm v3, v3, v2 +; CHECK-PWR9-NEXT: vmaxsh v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: sub_absv_16: +; CHECK-PWR78: # %bb.0: # %entry +; CHECK-PWR78-NEXT: xxlxor v4, v4, v4 +; CHECK-PWR78-NEXT: vsubuhm v2, v2, v3 +; CHECK-PWR78-NEXT: vsubuhm v3, v4, v2 +; CHECK-PWR78-NEXT: vmaxsh v2, v2, v3 +; CHECK-PWR78-NEXT: blr entry: %0 = sub nsw <8 x i16> %a, %b %1 = icmp sgt <8 x i16> %0, %2 = sub <8 x i16> zeroinitializer, %0 %3 = select <8 x i1> %1, <8 x i16> %0, 
<8 x i16> %2 ret <8 x i16> %3 -; CHECK-LABEL: sub_absv_16 -; CHECK-NOT: vabsduh -; CHECK-DAG: xxlxor v[[ZERO:[0-9]+]], v[[ZERO]], v[[ZERO]] -; CHECK-DAG: vsubuhm v[[SUB:[0-9]+]], v2, v3 -; CHECK: vsubuhm v[[SUB1:[0-9]+]], v[[ZERO]], v[[SUB]] -; CHECK-NEXT: vmaxsh v2, v[[SUB]], v[[SUB1]] -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: sub_absv_16 -; CHECK-PWR8-DAG: xxlxor v[[ZERO:[0-9]+]], v[[ZERO]], v[[ZERO]] -; CHECK-PWR8-DAG: vsubuhm v[[SUB:[0-9]+]], v2, v3 -; CHECK-PWR8: vsubuhm v[[SUB1:[0-9]+]], v[[ZERO]], v[[SUB]] -; CHECK-PWR8-NEXT: vmaxsh v2, v[[SUB]], v[[SUB1]] -; CHECK-PWR8-NEXT: blr -; CHECK-PWR7-LABEL: sub_absv_16 -; CHECK-PWR7-DAG: vsubuhm -; CHECK-PWR7-DAG: xxlxor -; CHECK-PWR7: vmaxsh -; CHECK-PWR7-NEXT: blr } define <16 x i8> @sub_absv_8(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr { +; CHECK-PWR9-LABEL: sub_absv_8: +; CHECK-PWR9: # %bb.0: # %entry +; CHECK-PWR9-NEXT: vsububm v2, v2, v3 +; CHECK-PWR9-NEXT: xxlxor v3, v3, v3 +; CHECK-PWR9-NEXT: vsububm v3, v3, v2 +; CHECK-PWR9-NEXT: vmaxsb v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: sub_absv_8: +; CHECK-PWR78: # %bb.0: # %entry +; CHECK-PWR78-NEXT: xxlxor v4, v4, v4 +; CHECK-PWR78-NEXT: vsububm v2, v2, v3 +; CHECK-PWR78-NEXT: vsububm v3, v4, v2 +; CHECK-PWR78-NEXT: vmaxsb v2, v2, v3 +; CHECK-PWR78-NEXT: blr entry: %0 = sub nsw <16 x i8> %a, %b %1 = icmp sgt <16 x i8> %0, %2 = sub <16 x i8> zeroinitializer, %0 %3 = select <16 x i1> %1, <16 x i8> %0, <16 x i8> %2 ret <16 x i8> %3 -; CHECK-LABEL: sub_absv_8 -; CHECK-NOT: vabsdub -; CHECK-DAG: xxlxor v[[ZERO:[0-9]+]], v[[ZERO]], v[[ZERO]] -; CHECK-DAG: vsububm v[[SUB:[0-9]+]], v2, v3 -; CHECK: vsububm v[[SUB1:[0-9]+]], v[[ZERO]], v[[SUB]] -; CHECK-NEXT: vmaxsb v2, v[[SUB]], v[[SUB1]] -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: sub_absv_8 -; CHECK-PWR8-DAG: xxlxor v[[ZERO:[0-9]+]], v[[ZERO]], v[[ZERO]] -; CHECK-PWR8-DAG: vsububm v[[SUB:[0-9]+]], v2, v3 -; CHECK-PWR8: vsububm v[[SUB1:[0-9]+]], v[[ZERO]], v[[SUB]] -; CHECK-PWR8-NEXT: vmaxsb v2, 
v[[SUB]], v[[SUB1]] -; CHECK-PWR8-NEXT: blr -; CHECK-PWR7-LABEL: sub_absv_8 -; CHECK-PWR7-DAG: xxlxor -; CHECK-PWR7-DAG: vsububm -; CHECK-PWR7: vmaxsb -; CHECK-PWR7-NEXT: blr } ; FIXME: This does not produce the ISD::ABS that we are looking for. @@ -203,6 +194,76 @@ ; Threfore, we end up doing more work than is required with a pair of abs for word ; instead of just one for the halfword. define <8 x i16> @sub_absv_16_ext(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr { +; CHECK-PWR9-LABEL: sub_absv_16_ext: +; CHECK-PWR9: # %bb.0: # %entry +; CHECK-PWR9-NEXT: vmrghh v4, v2, v2 +; CHECK-PWR9-NEXT: vmrglh v2, v2, v2 +; CHECK-PWR9-NEXT: vmrghh v5, v3, v3 +; CHECK-PWR9-NEXT: vmrglh v3, v3, v3 +; CHECK-PWR9-NEXT: vextsh2w v2, v2 +; CHECK-PWR9-NEXT: vextsh2w v3, v3 +; CHECK-PWR9-NEXT: vextsh2w v4, v4 +; CHECK-PWR9-NEXT: vextsh2w v5, v5 +; CHECK-PWR9-NEXT: xvnegsp v3, v3 +; CHECK-PWR9-NEXT: xvnegsp v2, v2 +; CHECK-PWR9-NEXT: xvnegsp v4, v4 +; CHECK-PWR9-NEXT: vabsduw v2, v2, v3 +; CHECK-PWR9-NEXT: xvnegsp v3, v5 +; CHECK-PWR9-NEXT: vabsduw v3, v4, v3 +; CHECK-PWR9-NEXT: vpkuwum v2, v3, v2 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR8-LABEL: sub_absv_16_ext: +; CHECK-PWR8: # %bb.0: # %entry +; CHECK-PWR8-NEXT: vmrglh v5, v2, v2 +; CHECK-PWR8-NEXT: vspltisw v4, 8 +; CHECK-PWR8-NEXT: vmrghh v2, v2, v2 +; CHECK-PWR8-NEXT: vmrglh v0, v3, v3 +; CHECK-PWR8-NEXT: vmrghh v3, v3, v3 +; CHECK-PWR8-NEXT: vadduwm v4, v4, v4 +; CHECK-PWR8-NEXT: vslw v5, v5, v4 +; CHECK-PWR8-NEXT: vslw v2, v2, v4 +; CHECK-PWR8-NEXT: vslw v0, v0, v4 +; CHECK-PWR8-NEXT: vslw v3, v3, v4 +; CHECK-PWR8-NEXT: vsraw v5, v5, v4 +; CHECK-PWR8-NEXT: vsraw v2, v2, v4 +; CHECK-PWR8-NEXT: vsraw v0, v0, v4 +; CHECK-PWR8-NEXT: vsraw v3, v3, v4 +; CHECK-PWR8-NEXT: xxlxor v4, v4, v4 +; CHECK-PWR8-NEXT: vsubuwm v5, v5, v0 +; CHECK-PWR8-NEXT: vsubuwm v2, v2, v3 +; CHECK-PWR8-NEXT: vsubuwm v3, v4, v5 +; CHECK-PWR8-NEXT: vsubuwm v4, v4, v2 +; CHECK-PWR8-NEXT: vmaxsw v3, v5, v3 +; CHECK-PWR8-NEXT: vmaxsw v2, v2, v4 +; 
CHECK-PWR8-NEXT: vpkuwum v2, v2, v3 +; CHECK-PWR8-NEXT: blr +; +; CHECK-PWR7-LABEL: sub_absv_16_ext: +; CHECK-PWR7: # %bb.0: # %entry +; CHECK-PWR7-NEXT: vmrglh v5, v2, v2 +; CHECK-PWR7-NEXT: vmrghh v2, v2, v2 +; CHECK-PWR7-NEXT: vmrglh v0, v3, v3 +; CHECK-PWR7-NEXT: vmrghh v3, v3, v3 +; CHECK-PWR7-NEXT: vspltisw v4, 8 +; CHECK-PWR7-NEXT: vadduwm v4, v4, v4 +; CHECK-PWR7-NEXT: vslw v5, v5, v4 +; CHECK-PWR7-NEXT: vslw v2, v2, v4 +; CHECK-PWR7-NEXT: vslw v0, v0, v4 +; CHECK-PWR7-NEXT: vslw v3, v3, v4 +; CHECK-PWR7-NEXT: vsraw v5, v5, v4 +; CHECK-PWR7-NEXT: vsraw v2, v2, v4 +; CHECK-PWR7-NEXT: vsraw v0, v0, v4 +; CHECK-PWR7-NEXT: vsraw v3, v3, v4 +; CHECK-PWR7-NEXT: xxlxor v4, v4, v4 +; CHECK-PWR7-NEXT: vsubuwm v5, v5, v0 +; CHECK-PWR7-NEXT: vsubuwm v2, v2, v3 +; CHECK-PWR7-NEXT: vsubuwm v3, v4, v5 +; CHECK-PWR7-NEXT: vsubuwm v4, v4, v2 +; CHECK-PWR7-NEXT: vmaxsw v3, v5, v3 +; CHECK-PWR7-NEXT: vmaxsw v2, v2, v4 +; CHECK-PWR7-NEXT: vpkuwum v2, v2, v3 +; CHECK-PWR7-NEXT: blr entry: %0 = sext <8 x i16> %a to <8 x i32> %1 = sext <8 x i16> %b to <8 x i32> @@ -212,25 +273,820 @@ %5 = select <8 x i1> %3, <8 x i32> %2, <8 x i32> %4 %6 = trunc <8 x i32> %5 to <8 x i16> ret <8 x i16> %6 -; CHECK-LABEL: sub_absv_16_ext -; CHECK-NOT: vabsduh -; CHECK: vabsduw -; CHECK-NOT: vnegw -; CHECK-NOT: vabsduh -; CHECK: vabsduw -; CHECK-NOT: vnegw -; CHECK-NOT: vabsduh -; CHECK: blr -; CHECK-PWR8-LABEL: sub_absv_16 -; CHECK-PWR8-DAG: vsubuwm -; CHECK-PWR8-DAG: xxlxor -; CHECK-PWR8: blr } ; FIXME: This does not produce ISD::ABS. This does not even vectorize correctly! ; This function should look like sub_absv_32 and sub_absv_16 except that the type is v16i8. 
; Function Attrs: norecurse nounwind readnone define <16 x i8> @sub_absv_8_ext(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr { +; CHECK-PWR9-LE-LABEL: sub_absv_8_ext: +; CHECK-PWR9-LE: # %bb.0: # %entry +; CHECK-PWR9-LE-NEXT: li r3, 0 +; CHECK-PWR9-LE-NEXT: li r5, 2 +; CHECK-PWR9-LE-NEXT: li r4, 1 +; CHECK-PWR9-LE-NEXT: std r30, -16(r1) # 8-byte Folded Spill +; CHECK-PWR9-LE-NEXT: vextubrx r6, r3, v2 +; CHECK-PWR9-LE-NEXT: vextubrx r3, r3, v3 +; CHECK-PWR9-LE-NEXT: vextubrx r8, r5, v2 +; CHECK-PWR9-LE-NEXT: vextubrx r5, r5, v3 +; CHECK-PWR9-LE-NEXT: std r29, -24(r1) # 8-byte Folded Spill +; CHECK-PWR9-LE-NEXT: std r28, -32(r1) # 8-byte Folded Spill +; CHECK-PWR9-LE-NEXT: std r27, -40(r1) # 8-byte Folded Spill +; CHECK-PWR9-LE-NEXT: std r26, -48(r1) # 8-byte Folded Spill +; CHECK-PWR9-LE-NEXT: std r25, -56(r1) # 8-byte Folded Spill +; CHECK-PWR9-LE-NEXT: clrlwi r6, r6, 24 +; CHECK-PWR9-LE-NEXT: clrlwi r3, r3, 24 +; CHECK-PWR9-LE-NEXT: vextubrx r7, r4, v2 +; CHECK-PWR9-LE-NEXT: vextubrx r4, r4, v3 +; CHECK-PWR9-LE-NEXT: clrlwi r8, r8, 24 +; CHECK-PWR9-LE-NEXT: sub r3, r6, r3 +; CHECK-PWR9-LE-NEXT: clrlwi r5, r5, 24 +; CHECK-PWR9-LE-NEXT: clrlwi r7, r7, 24 +; CHECK-PWR9-LE-NEXT: clrlwi r4, r4, 24 +; CHECK-PWR9-LE-NEXT: sub r5, r8, r5 +; CHECK-PWR9-LE-NEXT: sub r4, r7, r4 +; CHECK-PWR9-LE-NEXT: srawi r6, r3, 31 +; CHECK-PWR9-LE-NEXT: srawi r7, r4, 31 +; CHECK-PWR9-LE-NEXT: add r3, r3, r6 +; CHECK-PWR9-LE-NEXT: add r4, r4, r7 +; CHECK-PWR9-LE-NEXT: xor r6, r3, r6 +; CHECK-PWR9-LE-NEXT: srawi r3, r5, 31 +; CHECK-PWR9-LE-NEXT: xor r4, r4, r7 +; CHECK-PWR9-LE-NEXT: add r5, r5, r3 +; CHECK-PWR9-LE-NEXT: xor r3, r5, r3 +; CHECK-PWR9-LE-NEXT: li r5, 3 +; CHECK-PWR9-LE-NEXT: vextubrx r7, r5, v2 +; CHECK-PWR9-LE-NEXT: vextubrx r5, r5, v3 +; CHECK-PWR9-LE-NEXT: clrlwi r7, r7, 24 +; CHECK-PWR9-LE-NEXT: clrlwi r5, r5, 24 +; CHECK-PWR9-LE-NEXT: sub r5, r7, r5 +; CHECK-PWR9-LE-NEXT: srawi r7, r5, 31 +; CHECK-PWR9-LE-NEXT: add r5, r5, r7 +; CHECK-PWR9-LE-NEXT: xor r5, r5, r7 +; 
CHECK-PWR9-LE-NEXT: li r7, 4 +; CHECK-PWR9-LE-NEXT: vextubrx r8, r7, v2 +; CHECK-PWR9-LE-NEXT: vextubrx r7, r7, v3 +; CHECK-PWR9-LE-NEXT: mtvsrd v4, r5 +; CHECK-PWR9-LE-NEXT: clrlwi r8, r8, 24 +; CHECK-PWR9-LE-NEXT: clrlwi r7, r7, 24 +; CHECK-PWR9-LE-NEXT: sub r7, r8, r7 +; CHECK-PWR9-LE-NEXT: srawi r8, r7, 31 +; CHECK-PWR9-LE-NEXT: add r7, r7, r8 +; CHECK-PWR9-LE-NEXT: xor r7, r7, r8 +; CHECK-PWR9-LE-NEXT: li r8, 5 +; CHECK-PWR9-LE-NEXT: vextubrx r9, r8, v2 +; CHECK-PWR9-LE-NEXT: vextubrx r8, r8, v3 +; CHECK-PWR9-LE-NEXT: clrlwi r9, r9, 24 +; CHECK-PWR9-LE-NEXT: clrlwi r8, r8, 24 +; CHECK-PWR9-LE-NEXT: sub r8, r9, r8 +; CHECK-PWR9-LE-NEXT: srawi r9, r8, 31 +; CHECK-PWR9-LE-NEXT: add r8, r8, r9 +; CHECK-PWR9-LE-NEXT: xor r8, r8, r9 +; CHECK-PWR9-LE-NEXT: li r9, 6 +; CHECK-PWR9-LE-NEXT: vextubrx r10, r9, v2 +; CHECK-PWR9-LE-NEXT: vextubrx r9, r9, v3 +; CHECK-PWR9-LE-NEXT: clrlwi r10, r10, 24 +; CHECK-PWR9-LE-NEXT: clrlwi r9, r9, 24 +; CHECK-PWR9-LE-NEXT: sub r9, r10, r9 +; CHECK-PWR9-LE-NEXT: srawi r10, r9, 31 +; CHECK-PWR9-LE-NEXT: add r9, r9, r10 +; CHECK-PWR9-LE-NEXT: xor r9, r9, r10 +; CHECK-PWR9-LE-NEXT: li r10, 7 +; CHECK-PWR9-LE-NEXT: vextubrx r11, r10, v2 +; CHECK-PWR9-LE-NEXT: vextubrx r10, r10, v3 +; CHECK-PWR9-LE-NEXT: clrlwi r11, r11, 24 +; CHECK-PWR9-LE-NEXT: clrlwi r10, r10, 24 +; CHECK-PWR9-LE-NEXT: sub r10, r11, r10 +; CHECK-PWR9-LE-NEXT: srawi r11, r10, 31 +; CHECK-PWR9-LE-NEXT: add r10, r10, r11 +; CHECK-PWR9-LE-NEXT: xor r10, r10, r11 +; CHECK-PWR9-LE-NEXT: li r11, 8 +; CHECK-PWR9-LE-NEXT: vextubrx r12, r11, v2 +; CHECK-PWR9-LE-NEXT: vextubrx r11, r11, v3 +; CHECK-PWR9-LE-NEXT: mtvsrd v5, r10 +; CHECK-PWR9-LE-NEXT: clrlwi r12, r12, 24 +; CHECK-PWR9-LE-NEXT: clrlwi r11, r11, 24 +; CHECK-PWR9-LE-NEXT: sub r11, r12, r11 +; CHECK-PWR9-LE-NEXT: srawi r12, r11, 31 +; CHECK-PWR9-LE-NEXT: add r11, r11, r12 +; CHECK-PWR9-LE-NEXT: xor r11, r11, r12 +; CHECK-PWR9-LE-NEXT: li r12, 9 +; CHECK-PWR9-LE-NEXT: vextubrx r0, r12, v2 +; CHECK-PWR9-LE-NEXT: vextubrx 
r12, r12, v3 +; CHECK-PWR9-LE-NEXT: clrlwi r0, r0, 24 +; CHECK-PWR9-LE-NEXT: clrlwi r12, r12, 24 +; CHECK-PWR9-LE-NEXT: sub r12, r0, r12 +; CHECK-PWR9-LE-NEXT: srawi r0, r12, 31 +; CHECK-PWR9-LE-NEXT: add r12, r12, r0 +; CHECK-PWR9-LE-NEXT: xor r12, r12, r0 +; CHECK-PWR9-LE-NEXT: li r0, 10 +; CHECK-PWR9-LE-NEXT: vextubrx r30, r0, v2 +; CHECK-PWR9-LE-NEXT: vextubrx r0, r0, v3 +; CHECK-PWR9-LE-NEXT: clrlwi r30, r30, 24 +; CHECK-PWR9-LE-NEXT: clrlwi r0, r0, 24 +; CHECK-PWR9-LE-NEXT: sub r0, r30, r0 +; CHECK-PWR9-LE-NEXT: srawi r30, r0, 31 +; CHECK-PWR9-LE-NEXT: add r0, r0, r30 +; CHECK-PWR9-LE-NEXT: xor r0, r0, r30 +; CHECK-PWR9-LE-NEXT: li r30, 11 +; CHECK-PWR9-LE-NEXT: vextubrx r29, r30, v2 +; CHECK-PWR9-LE-NEXT: vextubrx r30, r30, v3 +; CHECK-PWR9-LE-NEXT: clrlwi r29, r29, 24 +; CHECK-PWR9-LE-NEXT: clrlwi r30, r30, 24 +; CHECK-PWR9-LE-NEXT: sub r30, r29, r30 +; CHECK-PWR9-LE-NEXT: srawi r29, r30, 31 +; CHECK-PWR9-LE-NEXT: add r30, r30, r29 +; CHECK-PWR9-LE-NEXT: xor r30, r30, r29 +; CHECK-PWR9-LE-NEXT: li r29, 12 +; CHECK-PWR9-LE-NEXT: vextubrx r28, r29, v2 +; CHECK-PWR9-LE-NEXT: vextubrx r29, r29, v3 +; CHECK-PWR9-LE-NEXT: clrlwi r28, r28, 24 +; CHECK-PWR9-LE-NEXT: clrlwi r29, r29, 24 +; CHECK-PWR9-LE-NEXT: sub r29, r28, r29 +; CHECK-PWR9-LE-NEXT: srawi r28, r29, 31 +; CHECK-PWR9-LE-NEXT: add r29, r29, r28 +; CHECK-PWR9-LE-NEXT: xor r29, r29, r28 +; CHECK-PWR9-LE-NEXT: li r28, 13 +; CHECK-PWR9-LE-NEXT: vextubrx r27, r28, v2 +; CHECK-PWR9-LE-NEXT: vextubrx r28, r28, v3 +; CHECK-PWR9-LE-NEXT: clrlwi r27, r27, 24 +; CHECK-PWR9-LE-NEXT: clrlwi r28, r28, 24 +; CHECK-PWR9-LE-NEXT: sub r28, r27, r28 +; CHECK-PWR9-LE-NEXT: srawi r27, r28, 31 +; CHECK-PWR9-LE-NEXT: add r28, r28, r27 +; CHECK-PWR9-LE-NEXT: xor r28, r28, r27 +; CHECK-PWR9-LE-NEXT: li r27, 14 +; CHECK-PWR9-LE-NEXT: vextubrx r26, r27, v2 +; CHECK-PWR9-LE-NEXT: vextubrx r27, r27, v3 +; CHECK-PWR9-LE-NEXT: clrlwi r26, r26, 24 +; CHECK-PWR9-LE-NEXT: clrlwi r27, r27, 24 +; CHECK-PWR9-LE-NEXT: sub r27, r26, r27 +; 
CHECK-PWR9-LE-NEXT: srawi r26, r27, 31 +; CHECK-PWR9-LE-NEXT: add r27, r27, r26 +; CHECK-PWR9-LE-NEXT: xor r27, r27, r26 +; CHECK-PWR9-LE-NEXT: li r26, 15 +; CHECK-PWR9-LE-NEXT: vextubrx r25, r26, v2 +; CHECK-PWR9-LE-NEXT: vextubrx r26, r26, v3 +; CHECK-PWR9-LE-NEXT: mtvsrd v2, r6 +; CHECK-PWR9-LE-NEXT: mtvsrd v3, r4 +; CHECK-PWR9-LE-NEXT: vmrghb v2, v3, v2 +; CHECK-PWR9-LE-NEXT: mtvsrd v3, r3 +; CHECK-PWR9-LE-NEXT: clrlwi r25, r25, 24 +; CHECK-PWR9-LE-NEXT: clrlwi r26, r26, 24 +; CHECK-PWR9-LE-NEXT: vmrghb v3, v4, v3 +; CHECK-PWR9-LE-NEXT: mtvsrd v4, r8 +; CHECK-PWR9-LE-NEXT: sub r26, r25, r26 +; CHECK-PWR9-LE-NEXT: vmrglh v2, v3, v2 +; CHECK-PWR9-LE-NEXT: mtvsrd v3, r7 +; CHECK-PWR9-LE-NEXT: srawi r25, r26, 31 +; CHECK-PWR9-LE-NEXT: vmrghb v3, v4, v3 +; CHECK-PWR9-LE-NEXT: mtvsrd v4, r9 +; CHECK-PWR9-LE-NEXT: add r26, r26, r25 +; CHECK-PWR9-LE-NEXT: vmrghb v4, v5, v4 +; CHECK-PWR9-LE-NEXT: mtvsrd v5, r30 +; CHECK-PWR9-LE-NEXT: xor r26, r26, r25 +; CHECK-PWR9-LE-NEXT: ld r30, -16(r1) # 8-byte Folded Reload +; CHECK-PWR9-LE-NEXT: ld r25, -56(r1) # 8-byte Folded Reload +; CHECK-PWR9-LE-NEXT: mtvsrd v0, r26 +; CHECK-PWR9-LE-NEXT: ld r26, -48(r1) # 8-byte Folded Reload +; CHECK-PWR9-LE-NEXT: vmrglh v3, v4, v3 +; CHECK-PWR9-LE-NEXT: mtvsrd v4, r12 +; CHECK-PWR9-LE-NEXT: vmrglw v2, v3, v2 +; CHECK-PWR9-LE-NEXT: mtvsrd v3, r11 +; CHECK-PWR9-LE-NEXT: vmrghb v3, v4, v3 +; CHECK-PWR9-LE-NEXT: mtvsrd v4, r0 +; CHECK-PWR9-LE-NEXT: vmrghb v4, v5, v4 +; CHECK-PWR9-LE-NEXT: mtvsrd v5, r28 +; CHECK-PWR9-LE-NEXT: ld r28, -32(r1) # 8-byte Folded Reload +; CHECK-PWR9-LE-NEXT: vmrglh v3, v4, v3 +; CHECK-PWR9-LE-NEXT: mtvsrd v4, r29 +; CHECK-PWR9-LE-NEXT: ld r29, -24(r1) # 8-byte Folded Reload +; CHECK-PWR9-LE-NEXT: vmrghb v4, v5, v4 +; CHECK-PWR9-LE-NEXT: mtvsrd v5, r27 +; CHECK-PWR9-LE-NEXT: ld r27, -40(r1) # 8-byte Folded Reload +; CHECK-PWR9-LE-NEXT: vmrghb v5, v0, v5 +; CHECK-PWR9-LE-NEXT: vmrglh v4, v5, v4 +; CHECK-PWR9-LE-NEXT: vmrglw v3, v4, v3 +; CHECK-PWR9-LE-NEXT: xxmrgld 
v2, v3, v2 +; CHECK-PWR9-LE-NEXT: blr +; +; CHECK-PWR9-BE-LABEL: sub_absv_8_ext: +; CHECK-PWR9-BE: # %bb.0: # %entry +; CHECK-PWR9-BE-NEXT: li r3, 0 +; CHECK-PWR9-BE-NEXT: li r4, 1 +; CHECK-PWR9-BE-NEXT: li r5, 2 +; CHECK-PWR9-BE-NEXT: std r30, -16(r1) # 8-byte Folded Spill +; CHECK-PWR9-BE-NEXT: vextublx r6, r3, v2 +; CHECK-PWR9-BE-NEXT: vextublx r3, r3, v3 +; CHECK-PWR9-BE-NEXT: vextublx r7, r4, v2 +; CHECK-PWR9-BE-NEXT: vextublx r4, r4, v3 +; CHECK-PWR9-BE-NEXT: std r29, -24(r1) # 8-byte Folded Spill +; CHECK-PWR9-BE-NEXT: std r28, -32(r1) # 8-byte Folded Spill +; CHECK-PWR9-BE-NEXT: std r27, -40(r1) # 8-byte Folded Spill +; CHECK-PWR9-BE-NEXT: std r26, -48(r1) # 8-byte Folded Spill +; CHECK-PWR9-BE-NEXT: std r25, -56(r1) # 8-byte Folded Spill +; CHECK-PWR9-BE-NEXT: clrlwi r6, r6, 24 +; CHECK-PWR9-BE-NEXT: clrlwi r3, r3, 24 +; CHECK-PWR9-BE-NEXT: clrlwi r7, r7, 24 +; CHECK-PWR9-BE-NEXT: clrlwi r4, r4, 24 +; CHECK-PWR9-BE-NEXT: vextublx r8, r5, v2 +; CHECK-PWR9-BE-NEXT: vextublx r5, r5, v3 +; CHECK-PWR9-BE-NEXT: sub r3, r6, r3 +; CHECK-PWR9-BE-NEXT: sub r4, r7, r4 +; CHECK-PWR9-BE-NEXT: clrlwi r8, r8, 24 +; CHECK-PWR9-BE-NEXT: clrlwi r5, r5, 24 +; CHECK-PWR9-BE-NEXT: sub r5, r8, r5 +; CHECK-PWR9-BE-NEXT: srawi r6, r3, 31 +; CHECK-PWR9-BE-NEXT: srawi r7, r4, 31 +; CHECK-PWR9-BE-NEXT: srawi r8, r5, 31 +; CHECK-PWR9-BE-NEXT: add r3, r3, r6 +; CHECK-PWR9-BE-NEXT: add r4, r4, r7 +; CHECK-PWR9-BE-NEXT: add r5, r5, r8 +; CHECK-PWR9-BE-NEXT: xor r3, r3, r6 +; CHECK-PWR9-BE-NEXT: li r6, 3 +; CHECK-PWR9-BE-NEXT: xor r4, r4, r7 +; CHECK-PWR9-BE-NEXT: xor r5, r5, r8 +; CHECK-PWR9-BE-NEXT: vextublx r7, r6, v2 +; CHECK-PWR9-BE-NEXT: vextublx r6, r6, v3 +; CHECK-PWR9-BE-NEXT: mtvsrwz v1, r3 +; CHECK-PWR9-BE-NEXT: clrlwi r7, r7, 24 +; CHECK-PWR9-BE-NEXT: clrlwi r6, r6, 24 +; CHECK-PWR9-BE-NEXT: sub r6, r7, r6 +; CHECK-PWR9-BE-NEXT: srawi r7, r6, 31 +; CHECK-PWR9-BE-NEXT: add r6, r6, r7 +; CHECK-PWR9-BE-NEXT: xor r6, r6, r7 +; CHECK-PWR9-BE-NEXT: li r7, 4 +; CHECK-PWR9-BE-NEXT: 
vextublx r8, r7, v2 +; CHECK-PWR9-BE-NEXT: vextublx r7, r7, v3 +; CHECK-PWR9-BE-NEXT: clrlwi r8, r8, 24 +; CHECK-PWR9-BE-NEXT: clrlwi r7, r7, 24 +; CHECK-PWR9-BE-NEXT: sub r7, r8, r7 +; CHECK-PWR9-BE-NEXT: srawi r8, r7, 31 +; CHECK-PWR9-BE-NEXT: add r7, r7, r8 +; CHECK-PWR9-BE-NEXT: xor r7, r7, r8 +; CHECK-PWR9-BE-NEXT: li r8, 5 +; CHECK-PWR9-BE-NEXT: vextublx r9, r8, v2 +; CHECK-PWR9-BE-NEXT: vextublx r8, r8, v3 +; CHECK-PWR9-BE-NEXT: clrlwi r9, r9, 24 +; CHECK-PWR9-BE-NEXT: clrlwi r8, r8, 24 +; CHECK-PWR9-BE-NEXT: sub r8, r9, r8 +; CHECK-PWR9-BE-NEXT: srawi r9, r8, 31 +; CHECK-PWR9-BE-NEXT: add r8, r8, r9 +; CHECK-PWR9-BE-NEXT: xor r8, r8, r9 +; CHECK-PWR9-BE-NEXT: li r9, 6 +; CHECK-PWR9-BE-NEXT: vextublx r10, r9, v2 +; CHECK-PWR9-BE-NEXT: vextublx r9, r9, v3 +; CHECK-PWR9-BE-NEXT: clrlwi r10, r10, 24 +; CHECK-PWR9-BE-NEXT: clrlwi r9, r9, 24 +; CHECK-PWR9-BE-NEXT: sub r9, r10, r9 +; CHECK-PWR9-BE-NEXT: srawi r10, r9, 31 +; CHECK-PWR9-BE-NEXT: add r9, r9, r10 +; CHECK-PWR9-BE-NEXT: xor r9, r9, r10 +; CHECK-PWR9-BE-NEXT: li r10, 7 +; CHECK-PWR9-BE-NEXT: vextublx r11, r10, v2 +; CHECK-PWR9-BE-NEXT: vextublx r10, r10, v3 +; CHECK-PWR9-BE-NEXT: clrlwi r11, r11, 24 +; CHECK-PWR9-BE-NEXT: clrlwi r10, r10, 24 +; CHECK-PWR9-BE-NEXT: sub r10, r11, r10 +; CHECK-PWR9-BE-NEXT: srawi r11, r10, 31 +; CHECK-PWR9-BE-NEXT: add r10, r10, r11 +; CHECK-PWR9-BE-NEXT: xor r10, r10, r11 +; CHECK-PWR9-BE-NEXT: li r11, 8 +; CHECK-PWR9-BE-NEXT: vextublx r12, r11, v2 +; CHECK-PWR9-BE-NEXT: vextublx r11, r11, v3 +; CHECK-PWR9-BE-NEXT: clrlwi r12, r12, 24 +; CHECK-PWR9-BE-NEXT: clrlwi r11, r11, 24 +; CHECK-PWR9-BE-NEXT: sub r11, r12, r11 +; CHECK-PWR9-BE-NEXT: srawi r12, r11, 31 +; CHECK-PWR9-BE-NEXT: add r11, r11, r12 +; CHECK-PWR9-BE-NEXT: xor r11, r11, r12 +; CHECK-PWR9-BE-NEXT: li r12, 9 +; CHECK-PWR9-BE-NEXT: vextublx r0, r12, v2 +; CHECK-PWR9-BE-NEXT: vextublx r12, r12, v3 +; CHECK-PWR9-BE-NEXT: mtvsrwz v0, r11 +; CHECK-PWR9-BE-NEXT: clrlwi r0, r0, 24 +; CHECK-PWR9-BE-NEXT: clrlwi r12, 
r12, 24 +; CHECK-PWR9-BE-NEXT: sub r12, r0, r12 +; CHECK-PWR9-BE-NEXT: srawi r0, r12, 31 +; CHECK-PWR9-BE-NEXT: add r12, r12, r0 +; CHECK-PWR9-BE-NEXT: xor r12, r12, r0 +; CHECK-PWR9-BE-NEXT: li r0, 10 +; CHECK-PWR9-BE-NEXT: vextublx r30, r0, v2 +; CHECK-PWR9-BE-NEXT: vextublx r0, r0, v3 +; CHECK-PWR9-BE-NEXT: clrlwi r30, r30, 24 +; CHECK-PWR9-BE-NEXT: clrlwi r0, r0, 24 +; CHECK-PWR9-BE-NEXT: sub r0, r30, r0 +; CHECK-PWR9-BE-NEXT: srawi r30, r0, 31 +; CHECK-PWR9-BE-NEXT: add r0, r0, r30 +; CHECK-PWR9-BE-NEXT: xor r0, r0, r30 +; CHECK-PWR9-BE-NEXT: li r30, 11 +; CHECK-PWR9-BE-NEXT: vextublx r29, r30, v2 +; CHECK-PWR9-BE-NEXT: vextublx r30, r30, v3 +; CHECK-PWR9-BE-NEXT: clrlwi r29, r29, 24 +; CHECK-PWR9-BE-NEXT: clrlwi r30, r30, 24 +; CHECK-PWR9-BE-NEXT: sub r30, r29, r30 +; CHECK-PWR9-BE-NEXT: srawi r29, r30, 31 +; CHECK-PWR9-BE-NEXT: add r30, r30, r29 +; CHECK-PWR9-BE-NEXT: xor r30, r30, r29 +; CHECK-PWR9-BE-NEXT: li r29, 12 +; CHECK-PWR9-BE-NEXT: vextublx r28, r29, v2 +; CHECK-PWR9-BE-NEXT: vextublx r29, r29, v3 +; CHECK-PWR9-BE-NEXT: clrlwi r28, r28, 24 +; CHECK-PWR9-BE-NEXT: clrlwi r29, r29, 24 +; CHECK-PWR9-BE-NEXT: sub r29, r28, r29 +; CHECK-PWR9-BE-NEXT: srawi r28, r29, 31 +; CHECK-PWR9-BE-NEXT: add r29, r29, r28 +; CHECK-PWR9-BE-NEXT: xor r29, r29, r28 +; CHECK-PWR9-BE-NEXT: li r28, 13 +; CHECK-PWR9-BE-NEXT: vextublx r27, r28, v2 +; CHECK-PWR9-BE-NEXT: vextublx r28, r28, v3 +; CHECK-PWR9-BE-NEXT: mtvsrwz v5, r29 +; CHECK-PWR9-BE-NEXT: ld r29, -24(r1) # 8-byte Folded Reload +; CHECK-PWR9-BE-NEXT: clrlwi r27, r27, 24 +; CHECK-PWR9-BE-NEXT: clrlwi r28, r28, 24 +; CHECK-PWR9-BE-NEXT: sub r28, r27, r28 +; CHECK-PWR9-BE-NEXT: srawi r27, r28, 31 +; CHECK-PWR9-BE-NEXT: add r28, r28, r27 +; CHECK-PWR9-BE-NEXT: xor r28, r28, r27 +; CHECK-PWR9-BE-NEXT: li r27, 14 +; CHECK-PWR9-BE-NEXT: vextublx r26, r27, v2 +; CHECK-PWR9-BE-NEXT: vextublx r27, r27, v3 +; CHECK-PWR9-BE-NEXT: clrlwi r26, r26, 24 +; CHECK-PWR9-BE-NEXT: clrlwi r27, r27, 24 +; CHECK-PWR9-BE-NEXT: sub r27, 
r26, r27 +; CHECK-PWR9-BE-NEXT: srawi r26, r27, 31 +; CHECK-PWR9-BE-NEXT: add r27, r27, r26 +; CHECK-PWR9-BE-NEXT: xor r27, r27, r26 +; CHECK-PWR9-BE-NEXT: li r26, 15 +; CHECK-PWR9-BE-NEXT: vextublx r25, r26, v2 +; CHECK-PWR9-BE-NEXT: vextublx r26, r26, v3 +; CHECK-PWR9-BE-NEXT: mtvsrwz v3, r27 +; CHECK-PWR9-BE-NEXT: addis r27, r2, .LCPI9_0@toc@ha +; CHECK-PWR9-BE-NEXT: addi r27, r27, .LCPI9_0@toc@l +; CHECK-PWR9-BE-NEXT: clrlwi r25, r25, 24 +; CHECK-PWR9-BE-NEXT: clrlwi r26, r26, 24 +; CHECK-PWR9-BE-NEXT: lxv v4, 0(r27) +; CHECK-PWR9-BE-NEXT: ld r27, -40(r1) # 8-byte Folded Reload +; CHECK-PWR9-BE-NEXT: sub r26, r25, r26 +; CHECK-PWR9-BE-NEXT: srawi r25, r26, 31 +; CHECK-PWR9-BE-NEXT: add r26, r26, r25 +; CHECK-PWR9-BE-NEXT: xor r26, r26, r25 +; CHECK-PWR9-BE-NEXT: ld r25, -56(r1) # 8-byte Folded Reload +; CHECK-PWR9-BE-NEXT: mtvsrwz v2, r26 +; CHECK-PWR9-BE-NEXT: ld r26, -48(r1) # 8-byte Folded Reload +; CHECK-PWR9-BE-NEXT: vperm v2, v3, v2, v4 +; CHECK-PWR9-BE-NEXT: mtvsrwz v3, r28 +; CHECK-PWR9-BE-NEXT: ld r28, -32(r1) # 8-byte Folded Reload +; CHECK-PWR9-BE-NEXT: vperm v3, v5, v3, v4 +; CHECK-PWR9-BE-NEXT: mtvsrwz v5, r0 +; CHECK-PWR9-BE-NEXT: vmrghh v2, v3, v2 +; CHECK-PWR9-BE-NEXT: mtvsrwz v3, r30 +; CHECK-PWR9-BE-NEXT: ld r30, -16(r1) # 8-byte Folded Reload +; CHECK-PWR9-BE-NEXT: vperm v3, v5, v3, v4 +; CHECK-PWR9-BE-NEXT: mtvsrwz v5, r12 +; CHECK-PWR9-BE-NEXT: vperm v5, v0, v5, v4 +; CHECK-PWR9-BE-NEXT: mtvsrwz v0, r7 +; CHECK-PWR9-BE-NEXT: vmrghh v3, v5, v3 +; CHECK-PWR9-BE-NEXT: mtvsrwz v5, r9 +; CHECK-PWR9-BE-NEXT: vmrghw v2, v3, v2 +; CHECK-PWR9-BE-NEXT: mtvsrwz v3, r10 +; CHECK-PWR9-BE-NEXT: vperm v3, v5, v3, v4 +; CHECK-PWR9-BE-NEXT: mtvsrwz v5, r8 +; CHECK-PWR9-BE-NEXT: vperm v5, v0, v5, v4 +; CHECK-PWR9-BE-NEXT: mtvsrwz v0, r5 +; CHECK-PWR9-BE-NEXT: vmrghh v3, v5, v3 +; CHECK-PWR9-BE-NEXT: mtvsrwz v5, r6 +; CHECK-PWR9-BE-NEXT: vperm v5, v0, v5, v4 +; CHECK-PWR9-BE-NEXT: mtvsrwz v0, r4 +; CHECK-PWR9-BE-NEXT: vperm v4, v1, v0, v4 +; 
CHECK-PWR9-BE-NEXT: vmrghh v4, v4, v5 +; CHECK-PWR9-BE-NEXT: vmrghw v3, v4, v3 +; CHECK-PWR9-BE-NEXT: xxmrghd v2, v3, v2 +; CHECK-PWR9-BE-NEXT: blr +; +; CHECK-PWR8-LABEL: sub_absv_8_ext: +; CHECK-PWR8: # %bb.0: # %entry +; CHECK-PWR8-NEXT: xxswapd vs0, v2 +; CHECK-PWR8-NEXT: mfvsrd r5, v2 +; CHECK-PWR8-NEXT: std r26, -48(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: std r30, -16(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: std r25, -56(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: std r27, -40(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: mfvsrd r6, v3 +; CHECK-PWR8-NEXT: xxswapd vs1, v3 +; CHECK-PWR8-NEXT: clrldi r3, r5, 56 +; CHECK-PWR8-NEXT: rldicl r7, r5, 56, 56 +; CHECK-PWR8-NEXT: clrldi r4, r6, 56 +; CHECK-PWR8-NEXT: rldicl r8, r6, 56, 56 +; CHECK-PWR8-NEXT: mffprd r26, f0 +; CHECK-PWR8-NEXT: clrlwi r3, r3, 24 +; CHECK-PWR8-NEXT: clrlwi r7, r7, 24 +; CHECK-PWR8-NEXT: std r28, -32(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: std r29, -24(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: rldicl r11, r5, 40, 56 +; CHECK-PWR8-NEXT: rldicl r12, r6, 40, 56 +; CHECK-PWR8-NEXT: clrlwi r4, r4, 24 +; CHECK-PWR8-NEXT: clrlwi r8, r8, 24 +; CHECK-PWR8-NEXT: rldicl r9, r5, 48, 56 +; CHECK-PWR8-NEXT: rldicl r10, r6, 48, 56 +; CHECK-PWR8-NEXT: sub r4, r3, r4 +; CHECK-PWR8-NEXT: clrlwi r11, r11, 24 +; CHECK-PWR8-NEXT: rldicl r3, r26, 16, 56 +; CHECK-PWR8-NEXT: clrlwi r12, r12, 24 +; CHECK-PWR8-NEXT: sub r7, r7, r8 +; CHECK-PWR8-NEXT: clrlwi r9, r9, 24 +; CHECK-PWR8-NEXT: clrlwi r10, r10, 24 +; CHECK-PWR8-NEXT: std r24, -64(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: mffprd r24, f1 +; CHECK-PWR8-NEXT: rldicl r0, r5, 32, 56 +; CHECK-PWR8-NEXT: rldicl r30, r6, 32, 56 +; CHECK-PWR8-NEXT: std r3, -160(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: sub r11, r11, r12 +; CHECK-PWR8-NEXT: sub r9, r9, r10 +; CHECK-PWR8-NEXT: srawi r3, r4, 31 +; CHECK-PWR8-NEXT: srawi r12, r7, 31 +; CHECK-PWR8-NEXT: clrlwi r10, r0, 24 +; CHECK-PWR8-NEXT: clrlwi r0, r30, 24 +; CHECK-PWR8-NEXT: add r4, 
r4, r3 +; CHECK-PWR8-NEXT: add r7, r7, r12 +; CHECK-PWR8-NEXT: sub r10, r10, r0 +; CHECK-PWR8-NEXT: std r20, -96(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: std r21, -88(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: xor r3, r4, r3 +; CHECK-PWR8-NEXT: srawi r4, r9, 31 +; CHECK-PWR8-NEXT: xor r7, r7, r12 +; CHECK-PWR8-NEXT: std r22, -80(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: rldicl r29, r5, 24, 56 +; CHECK-PWR8-NEXT: rldicl r28, r6, 24, 56 +; CHECK-PWR8-NEXT: add r9, r9, r4 +; CHECK-PWR8-NEXT: mtvsrd v3, r7 +; CHECK-PWR8-NEXT: rldicl r27, r5, 16, 56 +; CHECK-PWR8-NEXT: rldicl r25, r6, 16, 56 +; CHECK-PWR8-NEXT: clrlwi r30, r29, 24 +; CHECK-PWR8-NEXT: clrlwi r29, r28, 24 +; CHECK-PWR8-NEXT: mtvsrd v2, r3 +; CHECK-PWR8-NEXT: xor r4, r9, r4 +; CHECK-PWR8-NEXT: srawi r7, r10, 31 +; CHECK-PWR8-NEXT: srawi r3, r11, 31 +; CHECK-PWR8-NEXT: clrlwi r9, r27, 24 +; CHECK-PWR8-NEXT: clrlwi r12, r25, 24 +; CHECK-PWR8-NEXT: sub r0, r30, r29 +; CHECK-PWR8-NEXT: mtvsrd v4, r4 +; CHECK-PWR8-NEXT: std r23, -72(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: add r10, r10, r7 +; CHECK-PWR8-NEXT: add r11, r11, r3 +; CHECK-PWR8-NEXT: sub r9, r9, r12 +; CHECK-PWR8-NEXT: std r18, -112(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: std r19, -104(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: vmrghb v2, v3, v2 +; CHECK-PWR8-NEXT: xor r7, r10, r7 +; CHECK-PWR8-NEXT: rldicl r5, r5, 8, 56 +; CHECK-PWR8-NEXT: xor r3, r11, r3 +; CHECK-PWR8-NEXT: rldicl r6, r6, 8, 56 +; CHECK-PWR8-NEXT: srawi r4, r0, 31 +; CHECK-PWR8-NEXT: mtvsrd v0, r7 +; CHECK-PWR8-NEXT: std r16, -128(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: std r17, -120(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: srawi r7, r9, 31 +; CHECK-PWR8-NEXT: clrldi r23, r26, 56 +; CHECK-PWR8-NEXT: mtvsrd v5, r3 +; CHECK-PWR8-NEXT: clrlwi r3, r5, 24 +; CHECK-PWR8-NEXT: clrlwi r5, r6, 24 +; CHECK-PWR8-NEXT: clrldi r22, r24, 56 +; CHECK-PWR8-NEXT: rldicl r21, r26, 56, 56 +; CHECK-PWR8-NEXT: add r10, r0, r4 +; CHECK-PWR8-NEXT: add r9, r9, r7 +; 
CHECK-PWR8-NEXT: rldicl r20, r24, 56, 56 +; CHECK-PWR8-NEXT: rldicl r19, r26, 48, 56 +; CHECK-PWR8-NEXT: sub r3, r3, r5 +; CHECK-PWR8-NEXT: xor r4, r10, r4 +; CHECK-PWR8-NEXT: xor r7, r9, r7 +; CHECK-PWR8-NEXT: clrlwi r9, r23, 24 +; CHECK-PWR8-NEXT: rldicl r18, r24, 48, 56 +; CHECK-PWR8-NEXT: clrlwi r10, r22, 24 +; CHECK-PWR8-NEXT: clrlwi r11, r21, 24 +; CHECK-PWR8-NEXT: clrlwi r12, r20, 24 +; CHECK-PWR8-NEXT: mtvsrd v1, r4 +; CHECK-PWR8-NEXT: std r14, -144(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: std r15, -136(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: rldicl r17, r26, 40, 56 +; CHECK-PWR8-NEXT: rldicl r16, r24, 40, 56 +; CHECK-PWR8-NEXT: sub r9, r9, r10 +; CHECK-PWR8-NEXT: sub r10, r11, r12 +; CHECK-PWR8-NEXT: mtvsrd v3, r7 +; CHECK-PWR8-NEXT: srawi r4, r3, 31 +; CHECK-PWR8-NEXT: clrlwi r11, r19, 24 +; CHECK-PWR8-NEXT: clrlwi r12, r18, 24 +; CHECK-PWR8-NEXT: vmrghb v4, v5, v4 +; CHECK-PWR8-NEXT: std r31, -8(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: add r3, r3, r4 +; CHECK-PWR8-NEXT: sub r7, r11, r12 +; CHECK-PWR8-NEXT: clrlwi r11, r17, 24 +; CHECK-PWR8-NEXT: clrlwi r12, r16, 24 +; CHECK-PWR8-NEXT: vmrghb v0, v1, v0 +; CHECK-PWR8-NEXT: std r2, -152(r1) # 8-byte Folded Spill +; CHECK-PWR8-NEXT: rldicl r15, r26, 32, 56 +; CHECK-PWR8-NEXT: rldicl r14, r24, 32, 56 +; CHECK-PWR8-NEXT: xor r3, r3, r4 +; CHECK-PWR8-NEXT: sub r11, r11, r12 +; CHECK-PWR8-NEXT: srawi r4, r9, 31 +; CHECK-PWR8-NEXT: srawi r12, r10, 31 +; CHECK-PWR8-NEXT: clrlwi r0, r15, 24 +; CHECK-PWR8-NEXT: clrlwi r30, r14, 24 +; CHECK-PWR8-NEXT: mtvsrd v5, r3 +; CHECK-PWR8-NEXT: ld r27, -40(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: add r9, r9, r4 +; CHECK-PWR8-NEXT: add r10, r10, r12 +; CHECK-PWR8-NEXT: sub r3, r0, r30 +; CHECK-PWR8-NEXT: ld r25, -56(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: ld r23, -72(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: ld r22, -80(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: srawi r28, r11, 31 +; CHECK-PWR8-NEXT: xor r4, r9, r4 +; CHECK-PWR8-NEXT: xor 
r10, r10, r12 +; CHECK-PWR8-NEXT: vmrghb v3, v5, v3 +; CHECK-PWR8-NEXT: ld r21, -88(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: ld r20, -96(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: srawi r29, r7, 31 +; CHECK-PWR8-NEXT: srawi r9, r3, 31 +; CHECK-PWR8-NEXT: mtvsrd v5, r4 +; CHECK-PWR8-NEXT: add r4, r11, r28 +; CHECK-PWR8-NEXT: ld r19, -104(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: ld r18, -112(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: mtvsrd v1, r10 +; CHECK-PWR8-NEXT: ld r10, -160(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: rldicl r31, r26, 24, 56 +; CHECK-PWR8-NEXT: rldicl r2, r24, 24, 56 +; CHECK-PWR8-NEXT: add r7, r7, r29 +; CHECK-PWR8-NEXT: add r3, r3, r9 +; CHECK-PWR8-NEXT: rldicl r8, r24, 16, 56 +; CHECK-PWR8-NEXT: rldicl r6, r26, 8, 56 +; CHECK-PWR8-NEXT: xor r4, r4, r28 +; CHECK-PWR8-NEXT: clrlwi r0, r31, 24 +; CHECK-PWR8-NEXT: clrlwi r30, r2, 24 +; CHECK-PWR8-NEXT: xor r7, r7, r29 +; CHECK-PWR8-NEXT: rldicl r5, r24, 8, 56 +; CHECK-PWR8-NEXT: clrlwi r10, r10, 24 +; CHECK-PWR8-NEXT: clrlwi r8, r8, 24 +; CHECK-PWR8-NEXT: xor r3, r3, r9 +; CHECK-PWR8-NEXT: mtvsrd v7, r4 +; CHECK-PWR8-NEXT: clrlwi r4, r6, 24 +; CHECK-PWR8-NEXT: clrlwi r5, r5, 24 +; CHECK-PWR8-NEXT: sub r0, r0, r30 +; CHECK-PWR8-NEXT: mtvsrd v6, r7 +; CHECK-PWR8-NEXT: sub r7, r10, r8 +; CHECK-PWR8-NEXT: ld r2, -152(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: ld r31, -8(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: mtvsrd v8, r3 +; CHECK-PWR8-NEXT: sub r3, r4, r5 +; CHECK-PWR8-NEXT: srawi r12, r0, 31 +; CHECK-PWR8-NEXT: ld r30, -16(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: ld r29, -24(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: ld r28, -32(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: srawi r6, r7, 31 +; CHECK-PWR8-NEXT: srawi r5, r3, 31 +; CHECK-PWR8-NEXT: add r8, r0, r12 +; CHECK-PWR8-NEXT: vmrghb v5, v1, v5 +; CHECK-PWR8-NEXT: ld r26, -48(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: ld r24, -64(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: add r4, r7, r6 +; 
CHECK-PWR8-NEXT: add r3, r3, r5 +; CHECK-PWR8-NEXT: xor r8, r8, r12 +; CHECK-PWR8-NEXT: vmrghb v6, v7, v6 +; CHECK-PWR8-NEXT: ld r17, -120(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: ld r16, -128(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: xor r4, r4, r6 +; CHECK-PWR8-NEXT: xor r3, r3, r5 +; CHECK-PWR8-NEXT: mtvsrd v9, r8 +; CHECK-PWR8-NEXT: ld r15, -136(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: ld r14, -144(r1) # 8-byte Folded Reload +; CHECK-PWR8-NEXT: mtvsrd v1, r4 +; CHECK-PWR8-NEXT: mtvsrd v7, r3 +; CHECK-PWR8-NEXT: vmrghb v8, v9, v8 +; CHECK-PWR8-NEXT: vmrghb v1, v7, v1 +; CHECK-PWR8-NEXT: vmrglh v2, v4, v2 +; CHECK-PWR8-NEXT: vmrglh v3, v3, v0 +; CHECK-PWR8-NEXT: vmrglh v4, v6, v5 +; CHECK-PWR8-NEXT: vmrglh v5, v1, v8 +; CHECK-PWR8-NEXT: vmrglw v2, v3, v2 +; CHECK-PWR8-NEXT: vmrglw v3, v5, v4 +; CHECK-PWR8-NEXT: xxmrgld v2, v2, v3 +; CHECK-PWR8-NEXT: blr +; +; CHECK-PWR7-LABEL: sub_absv_8_ext: +; CHECK-PWR7: # %bb.0: # %entry +; CHECK-PWR7-NEXT: stdu r1, -464(r1) +; CHECK-PWR7-NEXT: .cfi_def_cfa_offset 464 +; CHECK-PWR7-NEXT: .cfi_offset r16, -128 +; CHECK-PWR7-NEXT: .cfi_offset r17, -120 +; CHECK-PWR7-NEXT: .cfi_offset r18, -112 +; CHECK-PWR7-NEXT: .cfi_offset r19, -104 +; CHECK-PWR7-NEXT: .cfi_offset r20, -96 +; CHECK-PWR7-NEXT: .cfi_offset r21, -88 +; CHECK-PWR7-NEXT: .cfi_offset r22, -80 +; CHECK-PWR7-NEXT: .cfi_offset r23, -72 +; CHECK-PWR7-NEXT: .cfi_offset r24, -64 +; CHECK-PWR7-NEXT: .cfi_offset r25, -56 +; CHECK-PWR7-NEXT: .cfi_offset r26, -48 +; CHECK-PWR7-NEXT: .cfi_offset r27, -40 +; CHECK-PWR7-NEXT: .cfi_offset r28, -32 +; CHECK-PWR7-NEXT: .cfi_offset r29, -24 +; CHECK-PWR7-NEXT: .cfi_offset r30, -16 +; CHECK-PWR7-NEXT: addi r3, r1, 304 +; CHECK-PWR7-NEXT: std r16, 336(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: addi r4, r1, 320 +; CHECK-PWR7-NEXT: std r17, 344(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r18, 352(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r19, 360(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r20, 
368(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r21, 376(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r22, 384(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r23, 392(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r24, 400(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r25, 408(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r26, 416(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r27, 424(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r28, 432(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r29, 440(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: std r30, 448(r1) # 8-byte Folded Spill +; CHECK-PWR7-NEXT: stxvw4x v2, 0, r3 +; CHECK-PWR7-NEXT: lbz r3, 304(r1) +; CHECK-PWR7-NEXT: stxvw4x v3, 0, r4 +; CHECK-PWR7-NEXT: lbz r4, 320(r1) +; CHECK-PWR7-NEXT: lbz r5, 305(r1) +; CHECK-PWR7-NEXT: lbz r6, 321(r1) +; CHECK-PWR7-NEXT: lbz r7, 306(r1) +; CHECK-PWR7-NEXT: lbz r8, 322(r1) +; CHECK-PWR7-NEXT: lbz r9, 307(r1) +; CHECK-PWR7-NEXT: sub r3, r3, r4 +; CHECK-PWR7-NEXT: lbz r10, 323(r1) +; CHECK-PWR7-NEXT: lbz r11, 308(r1) +; CHECK-PWR7-NEXT: sub r5, r5, r6 +; CHECK-PWR7-NEXT: lbz r12, 324(r1) +; CHECK-PWR7-NEXT: lbz r0, 309(r1) +; CHECK-PWR7-NEXT: sub r6, r7, r8 +; CHECK-PWR7-NEXT: lbz r30, 325(r1) +; CHECK-PWR7-NEXT: lbz r29, 310(r1) +; CHECK-PWR7-NEXT: sub r9, r9, r10 +; CHECK-PWR7-NEXT: lbz r28, 326(r1) +; CHECK-PWR7-NEXT: lbz r23, 313(r1) +; CHECK-PWR7-NEXT: sub r10, r11, r12 +; CHECK-PWR7-NEXT: lbz r22, 329(r1) +; CHECK-PWR7-NEXT: lbz r4, 314(r1) +; CHECK-PWR7-NEXT: sub r0, r0, r30 +; CHECK-PWR7-NEXT: lbz r21, 330(r1) +; CHECK-PWR7-NEXT: lbz r7, 315(r1) +; CHECK-PWR7-NEXT: sub r30, r29, r28 +; CHECK-PWR7-NEXT: srawi r20, r0, 31 +; CHECK-PWR7-NEXT: lbz r8, 331(r1) +; CHECK-PWR7-NEXT: lbz r11, 316(r1) +; CHECK-PWR7-NEXT: sub r23, r23, r22 +; CHECK-PWR7-NEXT: srawi r19, r30, 31 +; CHECK-PWR7-NEXT: lbz r12, 332(r1) +; CHECK-PWR7-NEXT: lbz r29, 317(r1) +; CHECK-PWR7-NEXT: sub r4, r4, r21 +; CHECK-PWR7-NEXT: add r0, r0, r20 +; CHECK-PWR7-NEXT: lbz 
r28, 333(r1) +; CHECK-PWR7-NEXT: lbz r22, 319(r1) +; CHECK-PWR7-NEXT: sub r7, r7, r8 +; CHECK-PWR7-NEXT: add r30, r30, r19 +; CHECK-PWR7-NEXT: lbz r21, 335(r1) +; CHECK-PWR7-NEXT: lbz r27, 311(r1) +; CHECK-PWR7-NEXT: sub r8, r11, r12 +; CHECK-PWR7-NEXT: xor r0, r0, r20 +; CHECK-PWR7-NEXT: lbz r26, 327(r1) +; CHECK-PWR7-NEXT: lbz r25, 312(r1) +; CHECK-PWR7-NEXT: sub r11, r29, r28 +; CHECK-PWR7-NEXT: srawi r28, r3, 31 +; CHECK-PWR7-NEXT: lbz r24, 328(r1) +; CHECK-PWR7-NEXT: sub r29, r22, r21 +; CHECK-PWR7-NEXT: add r3, r3, r28 +; CHECK-PWR7-NEXT: xor r30, r30, r19 +; CHECK-PWR7-NEXT: sub r27, r27, r26 +; CHECK-PWR7-NEXT: srawi r17, r29, 31 +; CHECK-PWR7-NEXT: xor r3, r3, r28 +; CHECK-PWR7-NEXT: ld r20, 368(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: sub r26, r25, r24 +; CHECK-PWR7-NEXT: lbz r25, 318(r1) +; CHECK-PWR7-NEXT: lbz r24, 334(r1) +; CHECK-PWR7-NEXT: add r29, r29, r17 +; CHECK-PWR7-NEXT: xor r29, r29, r17 +; CHECK-PWR7-NEXT: srawi r18, r27, 31 +; CHECK-PWR7-NEXT: ld r19, 360(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: sub r12, r25, r24 +; CHECK-PWR7-NEXT: stb r29, 288(r1) +; CHECK-PWR7-NEXT: add r28, r27, r18 +; CHECK-PWR7-NEXT: srawi r29, r12, 31 +; CHECK-PWR7-NEXT: srawi r16, r26, 31 +; CHECK-PWR7-NEXT: xor r28, r28, r18 +; CHECK-PWR7-NEXT: ld r18, 352(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: add r12, r12, r29 +; CHECK-PWR7-NEXT: add r27, r26, r16 +; CHECK-PWR7-NEXT: xor r12, r12, r29 +; CHECK-PWR7-NEXT: srawi r29, r7, 31 +; CHECK-PWR7-NEXT: xor r27, r27, r16 +; CHECK-PWR7-NEXT: ld r16, 336(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: srawi r26, r8, 31 +; CHECK-PWR7-NEXT: srawi r25, r5, 31 +; CHECK-PWR7-NEXT: add r7, r7, r29 +; CHECK-PWR7-NEXT: add r8, r8, r26 +; CHECK-PWR7-NEXT: srawi r24, r6, 31 +; CHECK-PWR7-NEXT: add r5, r5, r25 +; CHECK-PWR7-NEXT: xor r7, r7, r29 +; CHECK-PWR7-NEXT: srawi r22, r9, 31 +; CHECK-PWR7-NEXT: srawi r21, r10, 31 +; CHECK-PWR7-NEXT: xor r8, r8, r26 +; CHECK-PWR7-NEXT: xor r5, r5, r25 +; CHECK-PWR7-NEXT: srawi 
r17, r11, 31 +; CHECK-PWR7-NEXT: srawi r26, r23, 31 +; CHECK-PWR7-NEXT: add r6, r6, r24 +; CHECK-PWR7-NEXT: add r9, r9, r22 +; CHECK-PWR7-NEXT: srawi r29, r4, 31 +; CHECK-PWR7-NEXT: add r10, r10, r21 +; CHECK-PWR7-NEXT: add r11, r11, r17 +; CHECK-PWR7-NEXT: add r25, r23, r26 +; CHECK-PWR7-NEXT: add r4, r4, r29 +; CHECK-PWR7-NEXT: xor r6, r6, r24 +; CHECK-PWR7-NEXT: xor r9, r9, r22 +; CHECK-PWR7-NEXT: xor r10, r10, r21 +; CHECK-PWR7-NEXT: xor r11, r11, r17 +; CHECK-PWR7-NEXT: xor r4, r4, r29 +; CHECK-PWR7-NEXT: xor r26, r25, r26 +; CHECK-PWR7-NEXT: addi r29, r1, 224 +; CHECK-PWR7-NEXT: stb r12, 272(r1) +; CHECK-PWR7-NEXT: addi r12, r1, 288 +; CHECK-PWR7-NEXT: addi r25, r1, 208 +; CHECK-PWR7-NEXT: stb r11, 256(r1) +; CHECK-PWR7-NEXT: addi r11, r1, 272 +; CHECK-PWR7-NEXT: ld r24, 400(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: stb r8, 240(r1) +; CHECK-PWR7-NEXT: stb r7, 224(r1) +; CHECK-PWR7-NEXT: stb r4, 208(r1) +; CHECK-PWR7-NEXT: stb r26, 192(r1) +; CHECK-PWR7-NEXT: stb r27, 176(r1) +; CHECK-PWR7-NEXT: stb r28, 160(r1) +; CHECK-PWR7-NEXT: stb r30, 144(r1) +; CHECK-PWR7-NEXT: stb r0, 128(r1) +; CHECK-PWR7-NEXT: stb r10, 112(r1) +; CHECK-PWR7-NEXT: stb r9, 96(r1) +; CHECK-PWR7-NEXT: stb r6, 80(r1) +; CHECK-PWR7-NEXT: stb r5, 64(r1) +; CHECK-PWR7-NEXT: stb r3, 48(r1) +; CHECK-PWR7-NEXT: addi r8, r1, 256 +; CHECK-PWR7-NEXT: addi r7, r1, 240 +; CHECK-PWR7-NEXT: lxvw4x v2, 0, r12 +; CHECK-PWR7-NEXT: lxvw4x v3, 0, r11 +; CHECK-PWR7-NEXT: addi r3, r1, 192 +; CHECK-PWR7-NEXT: addi r4, r1, 176 +; CHECK-PWR7-NEXT: addi r5, r1, 160 +; CHECK-PWR7-NEXT: addi r6, r1, 144 +; CHECK-PWR7-NEXT: lxvw4x v4, 0, r8 +; CHECK-PWR7-NEXT: lxvw4x v5, 0, r7 +; CHECK-PWR7-NEXT: lxvw4x v0, 0, r29 +; CHECK-PWR7-NEXT: lxvw4x v1, 0, r25 +; CHECK-PWR7-NEXT: addi r7, r1, 128 +; CHECK-PWR7-NEXT: addi r8, r1, 112 +; CHECK-PWR7-NEXT: lxvw4x v6, 0, r3 +; CHECK-PWR7-NEXT: lxvw4x v7, 0, r4 +; CHECK-PWR7-NEXT: vmrghb v2, v3, v2 +; CHECK-PWR7-NEXT: addi r9, r1, 96 +; CHECK-PWR7-NEXT: lxvw4x v3, 0, r5 +; 
CHECK-PWR7-NEXT: lxvw4x v8, 0, r6 +; CHECK-PWR7-NEXT: addi r3, r1, 80 +; CHECK-PWR7-NEXT: addi r4, r1, 64 +; CHECK-PWR7-NEXT: addi r5, r1, 48 +; CHECK-PWR7-NEXT: vmrghb v4, v5, v4 +; CHECK-PWR7-NEXT: lxvw4x v5, 0, r7 +; CHECK-PWR7-NEXT: lxvw4x v9, 0, r8 +; CHECK-PWR7-NEXT: vmrghb v0, v1, v0 +; CHECK-PWR7-NEXT: lxvw4x v1, 0, r9 +; CHECK-PWR7-NEXT: lxvw4x v10, 0, r3 +; CHECK-PWR7-NEXT: vmrghb v6, v7, v6 +; CHECK-PWR7-NEXT: lxvw4x v7, 0, r4 +; CHECK-PWR7-NEXT: ld r30, 448(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: vmrghb v3, v8, v3 +; CHECK-PWR7-NEXT: lxvw4x v8, 0, r5 +; CHECK-PWR7-NEXT: ld r29, 440(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: vmrghb v5, v9, v5 +; CHECK-PWR7-NEXT: ld r28, 432(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: ld r27, 424(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: vmrghb v1, v10, v1 +; CHECK-PWR7-NEXT: ld r26, 416(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: ld r25, 408(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: vmrghb v7, v8, v7 +; CHECK-PWR7-NEXT: ld r23, 392(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: ld r22, 384(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: vmrghh v2, v4, v2 +; CHECK-PWR7-NEXT: ld r21, 376(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: ld r17, 344(r1) # 8-byte Folded Reload +; CHECK-PWR7-NEXT: vmrghh v4, v6, v0 +; CHECK-PWR7-NEXT: vmrghh v3, v5, v3 +; CHECK-PWR7-NEXT: vmrghh v5, v7, v1 +; CHECK-PWR7-NEXT: vmrghw v2, v4, v2 +; CHECK-PWR7-NEXT: vmrghw v3, v5, v3 +; CHECK-PWR7-NEXT: xxmrghd v2, v3, v2 +; CHECK-PWR7-NEXT: addi r1, r1, 464 +; CHECK-PWR7-NEXT: blr entry: %vecext = extractelement <16 x i8> %a, i32 0 %conv = zext i8 %vecext to i32 @@ -393,370 +1249,518 @@ %conv122 = trunc i32 %15 to i8 %vecins123 = insertelement <16 x i8> %vecins115, i8 %conv122, i32 15 ret <16 x i8> %vecins123 -; CHECK-LABEL: sub_absv_8_ext -; CHECK-NOT: vabsdub -; CHECK: sub -; CHECK-NOT: vabsdub -; CHECK: xor -; CHECK-NOT: vabsdub -; CHECK: blr -; CHECK-PWR8-LABEL: sub_absv_8_ext -; CHECK-PWR8: sub -; CHECK-PWR8: xor -; 
CHECK-PWR8: blr } define <4 x i32> @sub_absv_vec_32(<4 x i32> %a, <4 x i32> %b) local_unnamed_addr { +; CHECK-PWR9-LABEL: sub_absv_vec_32: +; CHECK-PWR9: # %bb.0: # %entry +; CHECK-PWR9-NEXT: xvnegsp v3, v3 +; CHECK-PWR9-NEXT: xvnegsp v2, v2 +; CHECK-PWR9-NEXT: vabsduw v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: sub_absv_vec_32: +; CHECK-PWR78: # %bb.0: # %entry +; CHECK-PWR78-NEXT: xxlxor v4, v4, v4 +; CHECK-PWR78-NEXT: vsubuwm v2, v2, v3 +; CHECK-PWR78-NEXT: vsubuwm v3, v4, v2 +; CHECK-PWR78-NEXT: vmaxsw v2, v2, v3 +; CHECK-PWR78-NEXT: blr entry: %sub = sub <4 x i32> %a, %b %sub.i = sub <4 x i32> zeroinitializer, %sub %0 = tail call <4 x i32> @llvm.ppc.altivec.vmaxsw(<4 x i32> %sub, <4 x i32> %sub.i) ret <4 x i32> %0 -; CHECK-LABEL: sub_absv_vec_32 -; CHECK-NOT: vsubuwm -; CHECK-NOT: vnegw -; CHECK-NOT: vmaxsw -; CHECK-DAG: xvnegsp v2, v2 -; CHECK-DAG: xvnegsp v3, v3 -; CHECK-NEXT: vabsduw v2, v{{[23]}}, v{{[23]}} -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: sub_absv_vec_32 -; CHECK-PWR8-DAG: xxlxor -; CHECK-PWR8-DAG: vsubuwm -; CHECK-PWR8: vmaxsw -; CHECK-PWR8: blr } define <8 x i16> @sub_absv_vec_16(<8 x i16> %a, <8 x i16> %b) local_unnamed_addr { +; CHECK-PWR9-LABEL: sub_absv_vec_16: +; CHECK-PWR9: # %bb.0: # %entry +; CHECK-PWR9-NEXT: vsubuhm v2, v2, v3 +; CHECK-PWR9-NEXT: xxlxor v3, v3, v3 +; CHECK-PWR9-NEXT: vsubuhm v3, v3, v2 +; CHECK-PWR9-NEXT: vmaxsh v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: sub_absv_vec_16: +; CHECK-PWR78: # %bb.0: # %entry +; CHECK-PWR78-NEXT: xxlxor v4, v4, v4 +; CHECK-PWR78-NEXT: vsubuhm v2, v2, v3 +; CHECK-PWR78-NEXT: vsubuhm v3, v4, v2 +; CHECK-PWR78-NEXT: vmaxsh v2, v2, v3 +; CHECK-PWR78-NEXT: blr entry: %sub = sub <8 x i16> %a, %b %sub.i = sub <8 x i16> zeroinitializer, %sub %0 = tail call <8 x i16> @llvm.ppc.altivec.vmaxsh(<8 x i16> %sub, <8 x i16> %sub.i) ret <8 x i16> %0 -; CHECK-LABEL: sub_absv_vec_16 -; CHECK-NOT: mtvsrws -; CHECK-NOT: vabsduh -; CHECK-DAG: xxlxor v[[ZERO:[0-9]+]], v[[ZERO]], 
v[[ZERO]] -; CHECK-DAG: vsubuhm v[[SUB:[0-9]+]], v2, v3 -; CHECK: vsubuhm v[[SUB1:[0-9]+]], v[[ZERO]], v[[SUB]] -; CHECK-NEXT: vmaxsh v2, v[[SUB]], v[[SUB1]] -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: sub_absv_vec_16 -; CHECK-PWR8-DAG: xxlxor -; CHECK-PWR8-DAG: vsubuhm -; CHECK-PWR8: vmaxsh -; CHECK-PWR8: blr } define <16 x i8> @sub_absv_vec_8(<16 x i8> %a, <16 x i8> %b) local_unnamed_addr { +; CHECK-PWR9-LABEL: sub_absv_vec_8: +; CHECK-PWR9: # %bb.0: # %entry +; CHECK-PWR9-NEXT: vsububm v2, v2, v3 +; CHECK-PWR9-NEXT: xxlxor v3, v3, v3 +; CHECK-PWR9-NEXT: vsububm v3, v3, v2 +; CHECK-PWR9-NEXT: vmaxsb v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: sub_absv_vec_8: +; CHECK-PWR78: # %bb.0: # %entry +; CHECK-PWR78-NEXT: xxlxor v4, v4, v4 +; CHECK-PWR78-NEXT: vsububm v2, v2, v3 +; CHECK-PWR78-NEXT: vsububm v3, v4, v2 +; CHECK-PWR78-NEXT: vmaxsb v2, v2, v3 +; CHECK-PWR78-NEXT: blr entry: %sub = sub <16 x i8> %a, %b %sub.i = sub <16 x i8> zeroinitializer, %sub %0 = tail call <16 x i8> @llvm.ppc.altivec.vmaxsb(<16 x i8> %sub, <16 x i8> %sub.i) ret <16 x i8> %0 -; CHECK-LABEL: sub_absv_vec_8 -; CHECK-NOT: xxspltib -; CHECK-NOT: vabsdub -; CHECK-DAG: xxlxor v[[ZERO:[0-9]+]], v[[ZERO]], v[[ZERO]] -; CHECK-DAG: vsububm v[[SUB:[0-9]+]], v2, v3 -; CHECK: vsububm v[[SUB1:[0-9]+]], v[[ZERO]], v[[SUB]] -; CHECK-NEXT: vmaxsb v2, v[[SUB]], v[[SUB1]] -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: sub_absv_vec_8 -; CHECK-PWR8-DAG: xxlxor -; CHECK-PWR8-DAG: vsububm -; CHECK-PWR8: vmaxsb -; CHECK-PWR8: blr } define <4 x i32> @zext_sub_absd32(<4 x i16>, <4 x i16>) local_unnamed_addr { +; CHECK-PWR9-LE-LABEL: zext_sub_absd32: +; CHECK-PWR9-LE: # %bb.0: +; CHECK-PWR9-LE-NEXT: xxlxor v4, v4, v4 +; CHECK-PWR9-LE-NEXT: vmrglh v2, v4, v2 +; CHECK-PWR9-LE-NEXT: vmrglh v3, v4, v3 +; CHECK-PWR9-LE-NEXT: vabsduw v2, v2, v3 +; CHECK-PWR9-LE-NEXT: blr +; +; CHECK-PWR9-BE-LABEL: zext_sub_absd32: +; CHECK-PWR9-BE: # %bb.0: +; CHECK-PWR9-BE-NEXT: xxlxor v4, v4, v4 +; CHECK-PWR9-BE-NEXT: vmrghh v2, 
v4, v2 +; CHECK-PWR9-BE-NEXT: vmrghh v3, v4, v3 +; CHECK-PWR9-BE-NEXT: vabsduw v2, v2, v3 +; CHECK-PWR9-BE-NEXT: blr +; +; CHECK-PWR8-LABEL: zext_sub_absd32: +; CHECK-PWR8: # %bb.0: +; CHECK-PWR8-NEXT: xxlxor v4, v4, v4 +; CHECK-PWR8-NEXT: vmrglh v2, v4, v2 +; CHECK-PWR8-NEXT: vmrglh v3, v4, v3 +; CHECK-PWR8-NEXT: vsubuwm v2, v2, v3 +; CHECK-PWR8-NEXT: vsubuwm v3, v4, v2 +; CHECK-PWR8-NEXT: vmaxsw v2, v2, v3 +; CHECK-PWR8-NEXT: blr +; +; CHECK-PWR7-LABEL: zext_sub_absd32: +; CHECK-PWR7: # %bb.0: +; CHECK-PWR7-NEXT: addis r3, r2, .LCPI13_0@toc@ha +; CHECK-PWR7-NEXT: xxlxor v5, v5, v5 +; CHECK-PWR7-NEXT: addi r3, r3, .LCPI13_0@toc@l +; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3 +; CHECK-PWR7-NEXT: vperm v2, v5, v2, v4 +; CHECK-PWR7-NEXT: vperm v3, v5, v3, v4 +; CHECK-PWR7-NEXT: vsubuwm v2, v2, v3 +; CHECK-PWR7-NEXT: vsubuwm v3, v5, v2 +; CHECK-PWR7-NEXT: vmaxsw v2, v2, v3 +; CHECK-PWR7-NEXT: blr %3 = zext <4 x i16> %0 to <4 x i32> %4 = zext <4 x i16> %1 to <4 x i32> %5 = sub <4 x i32> %3, %4 %6 = sub <4 x i32> zeroinitializer, %5 %7 = tail call <4 x i32> @llvm.ppc.altivec.vmaxsw(<4 x i32> %5, <4 x i32> %6) ret <4 x i32> %7 -; CHECK-LABEL: zext_sub_absd32 -; CHECK-NOT: xvnegsp -; CHECK: vabsduw -; CHECK: blr -; CHECK-PWR8-LABEL: zext_sub_absd32 -; CHECK-PWR8: vmaxsw -; CHECK-PWR8: blr } define <8 x i16> @zext_sub_absd16(<8 x i8>, <8 x i8>) local_unnamed_addr { +; CHECK-PWR9-LE-LABEL: zext_sub_absd16: +; CHECK-PWR9-LE: # %bb.0: +; CHECK-PWR9-LE-NEXT: xxlxor v4, v4, v4 +; CHECK-PWR9-LE-NEXT: vmrglb v2, v4, v2 +; CHECK-PWR9-LE-NEXT: vmrglb v3, v4, v3 +; CHECK-PWR9-LE-NEXT: vabsduh v2, v2, v3 +; CHECK-PWR9-LE-NEXT: blr +; +; CHECK-PWR9-BE-LABEL: zext_sub_absd16: +; CHECK-PWR9-BE: # %bb.0: +; CHECK-PWR9-BE-NEXT: xxlxor v4, v4, v4 +; CHECK-PWR9-BE-NEXT: vmrghb v2, v4, v2 +; CHECK-PWR9-BE-NEXT: vmrghb v3, v4, v3 +; CHECK-PWR9-BE-NEXT: vabsduh v2, v2, v3 +; CHECK-PWR9-BE-NEXT: blr +; +; CHECK-PWR8-LABEL: zext_sub_absd16: +; CHECK-PWR8: # %bb.0: +; CHECK-PWR8-NEXT: xxlxor v4, v4, v4 +; 
CHECK-PWR8-NEXT: vmrglb v2, v4, v2 +; CHECK-PWR8-NEXT: vmrglb v3, v4, v3 +; CHECK-PWR8-NEXT: vsubuhm v2, v2, v3 +; CHECK-PWR8-NEXT: vsubuhm v3, v4, v2 +; CHECK-PWR8-NEXT: vmaxsh v2, v2, v3 +; CHECK-PWR8-NEXT: blr +; +; CHECK-PWR7-LABEL: zext_sub_absd16: +; CHECK-PWR7: # %bb.0: +; CHECK-PWR7-NEXT: addis r3, r2, .LCPI14_0@toc@ha +; CHECK-PWR7-NEXT: xxlxor v5, v5, v5 +; CHECK-PWR7-NEXT: addi r3, r3, .LCPI14_0@toc@l +; CHECK-PWR7-NEXT: lxvw4x v4, 0, r3 +; CHECK-PWR7-NEXT: vperm v2, v5, v2, v4 +; CHECK-PWR7-NEXT: vperm v3, v5, v3, v4 +; CHECK-PWR7-NEXT: vsubuhm v2, v2, v3 +; CHECK-PWR7-NEXT: vsubuhm v3, v5, v2 +; CHECK-PWR7-NEXT: vmaxsh v2, v2, v3 +; CHECK-PWR7-NEXT: blr %3 = zext <8 x i8> %0 to <8 x i16> %4 = zext <8 x i8> %1 to <8 x i16> %5 = sub <8 x i16> %3, %4 %6 = sub <8 x i16> zeroinitializer, %5 %7 = tail call <8 x i16> @llvm.ppc.altivec.vmaxsh(<8 x i16> %5, <8 x i16> %6) ret <8 x i16> %7 -; CHECK-LABEL: zext_sub_absd16 -; CHECK-NOT: vadduhm -; CHECK: vabsduh -; CHECK: blr -; CHECK-PWR8-LABEL: zext_sub_absd16 -; CHECK-PWR8: vmaxsh -; CHECK-PWR8: blr } define <16 x i8> @zext_sub_absd8(<16 x i4>, <16 x i4>) local_unnamed_addr { +; CHECK-PWR9-LABEL: zext_sub_absd8: +; CHECK-PWR9: # %bb.0: +; CHECK-PWR9-NEXT: xxspltib vs0, 15 +; CHECK-PWR9-NEXT: xxland v2, v2, vs0 +; CHECK-PWR9-NEXT: xxland v3, v3, vs0 +; CHECK-PWR9-NEXT: vabsdub v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: zext_sub_absd8: +; CHECK-PWR78: # %bb.0: +; CHECK-PWR78-NEXT: vspltisb v4, 15 +; CHECK-PWR78-NEXT: xxland v2, v2, v4 +; CHECK-PWR78-NEXT: xxland v3, v3, v4 +; CHECK-PWR78-NEXT: vsububm v2, v2, v3 +; CHECK-PWR78-NEXT: xxlxor v3, v3, v3 +; CHECK-PWR78-NEXT: vsububm v3, v3, v2 +; CHECK-PWR78-NEXT: vmaxsb v2, v2, v3 +; CHECK-PWR78-NEXT: blr %3 = zext <16 x i4> %0 to <16 x i8> %4 = zext <16 x i4> %1 to <16 x i8> %5 = sub <16 x i8> %3, %4 %6 = sub <16 x i8> zeroinitializer, %5 %7 = tail call <16 x i8> @llvm.ppc.altivec.vmaxsb(<16 x i8> %5, <16 x i8> %6) ret <16 x i8> %7 -; CHECK-LABEL: 
zext_sub_absd8 -; CHECK-NOT: vaddubm -; CHECK: vabsdub -; CHECK: blr -; CHECK-PWR8-LABEL: zext_sub_absd8 -; CHECK-PWR8: vmaxsb -; CHECK-PWR8: blr } ; To verify vabsdu* exploitation for ucmp + sub + select sequence define <4 x i32> @absd_int32_ugt(<4 x i32>, <4 x i32>) { +; CHECK-PWR9-LABEL: absd_int32_ugt: +; CHECK-PWR9: # %bb.0: +; CHECK-PWR9-NEXT: vabsduw v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: absd_int32_ugt: +; CHECK-PWR78: # %bb.0: +; CHECK-PWR78-NEXT: vcmpgtuw v4, v2, v3 +; CHECK-PWR78-NEXT: vsubuwm v5, v2, v3 +; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2 +; CHECK-PWR78-NEXT: xxsel v2, v2, v5, v4 +; CHECK-PWR78-NEXT: blr %3 = icmp ugt <4 x i32> %0, %1 %4 = sub <4 x i32> %0, %1 %5 = sub <4 x i32> %1, %0 %6 = select <4 x i1> %3, <4 x i32> %4, <4 x i32> %5 ret <4 x i32> %6 -; CHECK-LABEL: absd_int32_ugt -; CHECK-NOT: vcmpgtuw -; CHECK-NOT: xxsel -; CHECK: vabsduw v2, v2, v3 -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: absd_int32_ugt -; CHECK-PWR8: vcmpgtuw -; CHECK-PWR8: xxsel -; CHECK-PWR8: blr } define <4 x i32> @absd_int32_uge(<4 x i32>, <4 x i32>) { +; CHECK-PWR9-LABEL: absd_int32_uge: +; CHECK-PWR9: # %bb.0: +; CHECK-PWR9-NEXT: vabsduw v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: absd_int32_uge: +; CHECK-PWR78: # %bb.0: +; CHECK-PWR78-NEXT: vcmpgtuw v4, v3, v2 +; CHECK-PWR78-NEXT: xxlnor vs0, v4, v4 +; CHECK-PWR78-NEXT: vsubuwm v4, v2, v3 +; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2 +; CHECK-PWR78-NEXT: xxsel v2, v2, v4, vs0 +; CHECK-PWR78-NEXT: blr %3 = icmp uge <4 x i32> %0, %1 %4 = sub <4 x i32> %0, %1 %5 = sub <4 x i32> %1, %0 %6 = select <4 x i1> %3, <4 x i32> %4, <4 x i32> %5 ret <4 x i32> %6 -; CHECK-LABEL: absd_int32_uge -; CHECK-NOT: vcmpgtuw -; CHECK-NOT: xxsel -; CHECK: vabsduw v2, v2, v3 -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: absd_int32_uge -; CHECK-PWR8: vcmpgtuw -; CHECK-PWR8: xxsel -; CHECK-PWR8: blr } define <4 x i32> @absd_int32_ult(<4 x i32>, <4 x i32>) { +; CHECK-PWR9-LABEL: absd_int32_ult: +; CHECK-PWR9: # %bb.0: +; 
CHECK-PWR9-NEXT: vabsduw v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: absd_int32_ult: +; CHECK-PWR78: # %bb.0: +; CHECK-PWR78-NEXT: vcmpgtuw v4, v3, v2 +; CHECK-PWR78-NEXT: vsubuwm v5, v2, v3 +; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2 +; CHECK-PWR78-NEXT: xxsel v2, v5, v2, v4 +; CHECK-PWR78-NEXT: blr %3 = icmp ult <4 x i32> %0, %1 %4 = sub <4 x i32> %0, %1 %5 = sub <4 x i32> %1, %0 %6 = select <4 x i1> %3, <4 x i32> %5, <4 x i32> %4 ret <4 x i32> %6 -; CHECK-LABEL: absd_int32_ult -; CHECK-NOT: vcmpgtuw -; CHECK-NOT: xxsel -; CHECK: vabsduw v2, v2, v3 -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: absd_int32_ult -; CHECK-PWR8: vcmpgtuw -; CHECK-PWR8: xxsel -; CHECK-PWR8: blr } define <4 x i32> @absd_int32_ule(<4 x i32>, <4 x i32>) { +; CHECK-PWR9-LABEL: absd_int32_ule: +; CHECK-PWR9: # %bb.0: +; CHECK-PWR9-NEXT: vabsduw v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: absd_int32_ule: +; CHECK-PWR78: # %bb.0: +; CHECK-PWR78-NEXT: vcmpgtuw v4, v2, v3 +; CHECK-PWR78-NEXT: xxlnor vs0, v4, v4 +; CHECK-PWR78-NEXT: vsubuwm v4, v2, v3 +; CHECK-PWR78-NEXT: vsubuwm v2, v3, v2 +; CHECK-PWR78-NEXT: xxsel v2, v4, v2, vs0 +; CHECK-PWR78-NEXT: blr %3 = icmp ule <4 x i32> %0, %1 %4 = sub <4 x i32> %0, %1 %5 = sub <4 x i32> %1, %0 %6 = select <4 x i1> %3, <4 x i32> %5, <4 x i32> %4 ret <4 x i32> %6 -; CHECK-LABEL: absd_int32_ule -; CHECK-NOT: vcmpgtuw -; CHECK-NOT: xxsel -; CHECK: vabsduw v2, v2, v3 -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: absd_int32_ule -; CHECK-PWR8: vcmpgtuw -; CHECK-PWR8: xxsel -; CHECK-PWR8: blr } define <8 x i16> @absd_int16_ugt(<8 x i16>, <8 x i16>) { +; CHECK-PWR9-LABEL: absd_int16_ugt: +; CHECK-PWR9: # %bb.0: +; CHECK-PWR9-NEXT: vabsduh v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: absd_int16_ugt: +; CHECK-PWR78: # %bb.0: +; CHECK-PWR78-NEXT: vcmpgtuh v4, v2, v3 +; CHECK-PWR78-NEXT: vsubuhm v5, v2, v3 +; CHECK-PWR78-NEXT: vsubuhm v2, v3, v2 +; CHECK-PWR78-NEXT: xxsel v2, v2, v5, v4 +; CHECK-PWR78-NEXT: blr %3 = icmp ugt <8 x 
i16> %0, %1 %4 = sub <8 x i16> %0, %1 %5 = sub <8 x i16> %1, %0 %6 = select <8 x i1> %3, <8 x i16> %4, <8 x i16> %5 ret <8 x i16> %6 -; CHECK-LABEL: absd_int16_ugt -; CHECK-NOT: vcmpgtuh -; CHECK-NOT: xxsel -; CHECK: vabsduh v2, v2, v3 -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: absd_int16_ugt -; CHECK-PWR8: vcmpgtuh -; CHECK-PWR8: xxsel -; CHECK-PWR8: blr } define <8 x i16> @absd_int16_uge(<8 x i16>, <8 x i16>) { +; CHECK-PWR9-LABEL: absd_int16_uge: +; CHECK-PWR9: # %bb.0: +; CHECK-PWR9-NEXT: vabsduh v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: absd_int16_uge: +; CHECK-PWR78: # %bb.0: +; CHECK-PWR78-NEXT: vcmpgtuh v4, v3, v2 +; CHECK-PWR78-NEXT: vsubuhm v5, v2, v3 +; CHECK-PWR78-NEXT: vsubuhm v2, v3, v2 +; CHECK-PWR78-NEXT: xxlnor v4, v4, v4 +; CHECK-PWR78-NEXT: xxsel v2, v2, v5, v4 +; CHECK-PWR78-NEXT: blr %3 = icmp uge <8 x i16> %0, %1 %4 = sub <8 x i16> %0, %1 %5 = sub <8 x i16> %1, %0 %6 = select <8 x i1> %3, <8 x i16> %4, <8 x i16> %5 ret <8 x i16> %6 -; CHECK-LABEL: absd_int16_uge -; CHECK-NOT: vcmpgtuh -; CHECK-NOT: xxsel -; CHECK: vabsduh v2, v2, v3 -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: absd_int16_uge -; CHECK-PWR8: vcmpgtuh -; CHECK-PWR8: xxsel -; CHECK-PWR8: blr } define <8 x i16> @absd_int16_ult(<8 x i16>, <8 x i16>) { +; CHECK-PWR9-LABEL: absd_int16_ult: +; CHECK-PWR9: # %bb.0: +; CHECK-PWR9-NEXT: vabsduh v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: absd_int16_ult: +; CHECK-PWR78: # %bb.0: +; CHECK-PWR78-NEXT: vcmpgtuh v4, v3, v2 +; CHECK-PWR78-NEXT: vsubuhm v5, v2, v3 +; CHECK-PWR78-NEXT: vsubuhm v2, v3, v2 +; CHECK-PWR78-NEXT: xxsel v2, v5, v2, v4 +; CHECK-PWR78-NEXT: blr %3 = icmp ult <8 x i16> %0, %1 %4 = sub <8 x i16> %0, %1 %5 = sub <8 x i16> %1, %0 %6 = select <8 x i1> %3, <8 x i16> %5, <8 x i16> %4 ret <8 x i16> %6 -; CHECK-LABEL: absd_int16_ult -; CHECK-NOT: vcmpgtuh -; CHECK-NOT: xxsel -; CHECK: vabsduh v2, v2, v3 -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: absd_int16_ult -; CHECK-PWR8: vcmpgtuh -; CHECK-PWR8: xxsel -; 
CHECK-PWR8: blr } define <8 x i16> @absd_int16_ule(<8 x i16>, <8 x i16>) { +; CHECK-PWR9-LABEL: absd_int16_ule: +; CHECK-PWR9: # %bb.0: +; CHECK-PWR9-NEXT: vabsduh v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: absd_int16_ule: +; CHECK-PWR78: # %bb.0: +; CHECK-PWR78-NEXT: vcmpgtuh v4, v2, v3 +; CHECK-PWR78-NEXT: vsubuhm v5, v2, v3 +; CHECK-PWR78-NEXT: vsubuhm v2, v3, v2 +; CHECK-PWR78-NEXT: xxlnor v4, v4, v4 +; CHECK-PWR78-NEXT: xxsel v2, v5, v2, v4 +; CHECK-PWR78-NEXT: blr %3 = icmp ule <8 x i16> %0, %1 %4 = sub <8 x i16> %0, %1 %5 = sub <8 x i16> %1, %0 %6 = select <8 x i1> %3, <8 x i16> %5, <8 x i16> %4 ret <8 x i16> %6 -; CHECK-LABEL: absd_int16_ule -; CHECK-NOT: vcmpgtuh -; CHECK-NOT: xxsel -; CHECK: vabsduh v2, v2, v3 -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: absd_int16_ule -; CHECK-PWR8: vcmpgtuh -; CHECK-PWR8: xxsel -; CHECK-PWR8: blr } define <16 x i8> @absd_int8_ugt(<16 x i8>, <16 x i8>) { +; CHECK-PWR9-LABEL: absd_int8_ugt: +; CHECK-PWR9: # %bb.0: +; CHECK-PWR9-NEXT: vabsdub v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: absd_int8_ugt: +; CHECK-PWR78: # %bb.0: +; CHECK-PWR78-NEXT: vcmpgtub v4, v2, v3 +; CHECK-PWR78-NEXT: vsububm v5, v2, v3 +; CHECK-PWR78-NEXT: vsububm v2, v3, v2 +; CHECK-PWR78-NEXT: xxsel v2, v2, v5, v4 +; CHECK-PWR78-NEXT: blr %3 = icmp ugt <16 x i8> %0, %1 %4 = sub <16 x i8> %0, %1 %5 = sub <16 x i8> %1, %0 %6 = select <16 x i1> %3, <16 x i8> %4, <16 x i8> %5 ret <16 x i8> %6 -; CHECK-LABEL: absd_int8_ugt -; CHECK-NOT: vcmpgtub -; CHECK-NOT: xxsel -; CHECK: vabsdub v2, v2, v3 -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: absd_int8_ugt -; CHECK-PWR8: vcmpgtub -; CHECK-PWR8: xxsel -; CHECK-PWR8: blr } define <16 x i8> @absd_int8_uge(<16 x i8>, <16 x i8>) { +; CHECK-PWR9-LABEL: absd_int8_uge: +; CHECK-PWR9: # %bb.0: +; CHECK-PWR9-NEXT: vabsdub v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: absd_int8_uge: +; CHECK-PWR78: # %bb.0: +; CHECK-PWR78-NEXT: vcmpgtub v4, v3, v2 +; CHECK-PWR78-NEXT: vsububm v5, v2, v3 
+; CHECK-PWR78-NEXT: vsububm v2, v3, v2 +; CHECK-PWR78-NEXT: xxlnor v4, v4, v4 +; CHECK-PWR78-NEXT: xxsel v2, v2, v5, v4 +; CHECK-PWR78-NEXT: blr %3 = icmp uge <16 x i8> %0, %1 %4 = sub <16 x i8> %0, %1 %5 = sub <16 x i8> %1, %0 %6 = select <16 x i1> %3, <16 x i8> %4, <16 x i8> %5 ret <16 x i8> %6 -; CHECK-LABEL: absd_int8_uge -; CHECK-NOT: vcmpgtub -; CHECK-NOT: xxsel -; CHECK: vabsdub v2, v2, v3 -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: absd_int8_uge -; CHECK-PWR8: vcmpgtub -; CHECK-PWR8: xxsel -; CHECK-PWR8: blr } define <16 x i8> @absd_int8_ult(<16 x i8>, <16 x i8>) { +; CHECK-PWR9-LABEL: absd_int8_ult: +; CHECK-PWR9: # %bb.0: +; CHECK-PWR9-NEXT: vabsdub v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: absd_int8_ult: +; CHECK-PWR78: # %bb.0: +; CHECK-PWR78-NEXT: vcmpgtub v4, v3, v2 +; CHECK-PWR78-NEXT: vsububm v5, v2, v3 +; CHECK-PWR78-NEXT: vsububm v2, v3, v2 +; CHECK-PWR78-NEXT: xxsel v2, v5, v2, v4 +; CHECK-PWR78-NEXT: blr %3 = icmp ult <16 x i8> %0, %1 %4 = sub <16 x i8> %0, %1 %5 = sub <16 x i8> %1, %0 %6 = select <16 x i1> %3, <16 x i8> %5, <16 x i8> %4 ret <16 x i8> %6 -; CHECK-LABEL: absd_int8_ult -; CHECK-NOT: vcmpgtub -; CHECK-NOT: xxsel -; CHECK: vabsdub v2, v2, v3 -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: absd_int8_ult -; CHECK-PWR8: vcmpgtub -; CHECK-PWR8: xxsel -; CHECK-PWR8: blr } define <16 x i8> @absd_int8_ule(<16 x i8>, <16 x i8>) { +; CHECK-PWR9-LABEL: absd_int8_ule: +; CHECK-PWR9: # %bb.0: +; CHECK-PWR9-NEXT: vabsdub v2, v2, v3 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR78-LABEL: absd_int8_ule: +; CHECK-PWR78: # %bb.0: +; CHECK-PWR78-NEXT: vcmpgtub v4, v2, v3 +; CHECK-PWR78-NEXT: vsububm v5, v2, v3 +; CHECK-PWR78-NEXT: vsububm v2, v3, v2 +; CHECK-PWR78-NEXT: xxlnor v4, v4, v4 +; CHECK-PWR78-NEXT: xxsel v2, v5, v2, v4 +; CHECK-PWR78-NEXT: blr %3 = icmp ule <16 x i8> %0, %1 %4 = sub <16 x i8> %0, %1 %5 = sub <16 x i8> %1, %0 %6 = select <16 x i1> %3, <16 x i8> %5, <16 x i8> %4 ret <16 x i8> %6 -; CHECK-LABEL: absd_int8_ule -; CHECK-NOT: 
vcmpgtub -; CHECK-NOT: xxsel -; CHECK: vabsdub v2, v2, v3 -; CHECK-NEXT: blr -; CHECK-PWR8-LABEL: absd_int8_ule -; CHECK-PWR8: vcmpgtub -; CHECK-PWR8: xxsel -; CHECK-PWR8: blr } ; some cases we are unable to optimize ; check whether goes beyond the scope define <4 x i32> @absd_int32_ugt_opp(<4 x i32>, <4 x i32>) { +; CHECK-LABEL: absd_int32_ugt_opp: +; CHECK: # %bb.0: +; CHECK-NEXT: vcmpgtuw v4, v2, v3 +; CHECK-NEXT: vsubuwm v5, v2, v3 +; CHECK-NEXT: vsubuwm v2, v3, v2 +; CHECK-NEXT: xxsel v2, v5, v2, v4 +; CHECK-NEXT: blr %3 = icmp ugt <4 x i32> %0, %1 %4 = sub <4 x i32> %0, %1 %5 = sub <4 x i32> %1, %0 %6 = select <4 x i1> %3, <4 x i32> %5, <4 x i32> %4 ret <4 x i32> %6 -; CHECK-LABEL: absd_int32_ugt_opp -; CHECK-NOT: vabsduw -; CHECK: vcmpgtuw -; CHECK: xxsel -; CHECK: blr -; CHECK-PWR8-LABEL: absd_int32_ugt_opp -; CHECK-PWR8: vcmpgtuw -; CHECK-PWR8: xxsel -; CHECK-PWR8: blr } define <2 x i64> @absd_int64_ugt(<2 x i64>, <2 x i64>) { +; CHECK-PWR9-LABEL: absd_int64_ugt: +; CHECK-PWR9: # %bb.0: +; CHECK-PWR9-NEXT: vcmpgtud v4, v2, v3 +; CHECK-PWR9-NEXT: vsubudm v5, v2, v3 +; CHECK-PWR9-NEXT: vsubudm v2, v3, v2 +; CHECK-PWR9-NEXT: xxsel v2, v2, v5, v4 +; CHECK-PWR9-NEXT: blr +; +; CHECK-PWR8-LABEL: absd_int64_ugt: +; CHECK-PWR8: # %bb.0: +; CHECK-PWR8-NEXT: vcmpgtud v4, v2, v3 +; CHECK-PWR8-NEXT: vsubudm v5, v2, v3 +; CHECK-PWR8-NEXT: vsubudm v2, v3, v2 +; CHECK-PWR8-NEXT: xxsel v2, v2, v5, v4 +; CHECK-PWR8-NEXT: blr +; +; CHECK-PWR7-LABEL: absd_int64_ugt: +; CHECK-PWR7: # %bb.0: +; CHECK-PWR7-NEXT: addi r3, r1, -64 +; CHECK-PWR7-NEXT: addi r4, r1, -80 +; CHECK-PWR7-NEXT: li r5, 0 +; CHECK-PWR7-NEXT: li r6, -1 +; CHECK-PWR7-NEXT: stxvd2x v3, 0, r3 +; CHECK-PWR7-NEXT: stxvd2x v2, 0, r4 +; CHECK-PWR7-NEXT: ld r3, -56(r1) +; CHECK-PWR7-NEXT: ld r4, -72(r1) +; CHECK-PWR7-NEXT: ld r8, -80(r1) +; CHECK-PWR7-NEXT: cmpld r4, r3 +; CHECK-PWR7-NEXT: sub r9, r4, r3 +; CHECK-PWR7-NEXT: iselgt r7, r6, r5 +; CHECK-PWR7-NEXT: sub r3, r3, r4 +; CHECK-PWR7-NEXT: std r7, -8(r1) +; 
CHECK-PWR7-NEXT: ld r7, -64(r1) +; CHECK-PWR7-NEXT: cmpld r8, r7 +; CHECK-PWR7-NEXT: iselgt r4, r6, r5 +; CHECK-PWR7-NEXT: addi r5, r1, -16 +; CHECK-PWR7-NEXT: std r4, -16(r1) +; CHECK-PWR7-NEXT: sub r4, r8, r7 +; CHECK-PWR7-NEXT: lxvd2x v2, 0, r5 +; CHECK-PWR7-NEXT: std r9, -40(r1) +; CHECK-PWR7-NEXT: addi r5, r1, -48 +; CHECK-PWR7-NEXT: std r4, -48(r1) +; CHECK-PWR7-NEXT: sub r4, r7, r8 +; CHECK-PWR7-NEXT: lxvd2x v3, 0, r5 +; CHECK-PWR7-NEXT: std r3, -24(r1) +; CHECK-PWR7-NEXT: addi r3, r1, -32 +; CHECK-PWR7-NEXT: std r4, -32(r1) +; CHECK-PWR7-NEXT: lxvd2x v4, 0, r3 +; CHECK-PWR7-NEXT: xxsel v2, v4, v3, v2 +; CHECK-PWR7-NEXT: blr %3 = icmp ugt <2 x i64> %0, %1 %4 = sub <2 x i64> %0, %1 %5 = sub <2 x i64> %1, %0 %6 = select <2 x i1> %3, <2 x i64> %4, <2 x i64> %5 ret <2 x i64> %6 -; CHECK-LABEL: absd_int64_ugt -; CHECK-NOT: vabsduw -; CHECK: vcmpgtud -; CHECK: xxsel -; CHECK: blr -; CHECK-PWR8-LABEL: absd_int64_ugt -; CHECK-PWR8: vcmpgtud -; CHECK-PWR8: xxsel -; CHECK-PWR8: blr } declare <4 x i32> @llvm.ppc.altivec.vmaxsw(<4 x i32>, <4 x i32>) diff --git a/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir b/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/debug-info-rvv-dbg-value.mir @@ -0,0 +1,143 @@ +# RUN: llc -march=riscv64 -mattr=+experimental-v -o %t0 -filetype=obj \ +# RUN: -start-before=prologepilog %s +# RUN: llc -march=riscv64 -mattr=+experimental-v -o %t1 -filetype=obj \ +# RUN: -frame-pointer=all -start-before=prologepilog %s +# RUN: llvm-dwarfdump --name="value0" %t0 | FileCheck %s --check-prefix=CHECK0-PLUS +# RUN: llvm-dwarfdump --name="value1" %t0 | FileCheck %s --check-prefix=CHECK1-PLUS +# RUN: llvm-dwarfdump --name="value2" %t0 | FileCheck %s --check-prefix=CHECK2-PLUS +# RUN: llvm-dwarfdump --name="value3" %t0 | FileCheck %s --check-prefix=CHECK3-PLUS +# RUN: llvm-dwarfdump --name="value4" %t0 | FileCheck %s --check-prefix=CHECK4-PLUS +# RUN: llvm-dwarfdump 
--name="value5" %t0 | FileCheck %s --check-prefix=CHECK5-PLUS +# RUN: llvm-dwarfdump --name="value0" %t1 | FileCheck %s --check-prefix=CHECK0-MINUS +# RUN: llvm-dwarfdump --name="value1" %t1 | FileCheck %s --check-prefix=CHECK1-MINUS +# RUN: llvm-dwarfdump --name="value2" %t1 | FileCheck %s --check-prefix=CHECK2-MINUS +# RUN: llvm-dwarfdump --name="value3" %t1 | FileCheck %s --check-prefix=CHECK3-MINUS +# RUN: llvm-dwarfdump --name="value4" %t1 | FileCheck %s --check-prefix=CHECK4-MINUS +# RUN: llvm-dwarfdump --name="value5" %t1 | FileCheck %s --check-prefix=CHECK5-MINUS + +# CHECK0-PLUS: : DW_OP_breg2 X2+24) +# CHECK0-PLUS: DW_AT_type {{.*}}int32_t +# +# CHECK1-PLUS: : DW_OP_breg2 X2+16) +# CHECK1-PLUS: DW_AT_type {{.*}}int32_t +# +# CHECK2-PLUS: : DW_OP_breg2 X2+32, DW_OP_lit3, DW_OP_bregx VLENB+0, DW_OP_mul, DW_OP_plus) +# CHECK2-PLUS: DW_AT_type {{.*}}vint32m1_t +# +# CHECK3-PLUS: : DW_OP_breg2 X2+32, DW_OP_lit2, DW_OP_bregx VLENB+0, DW_OP_mul, DW_OP_plus) +# CHECK3-PLUS: DW_AT_type {{.*}}vint32m1_t +# +# CHECK4-PLUS: : DW_OP_breg2 X2+32, DW_OP_lit1, DW_OP_bregx VLENB+0, DW_OP_mul, DW_OP_plus) +# CHECK4-PLUS: DW_AT_type {{.*}}vbool1_t +# +# CHECK5-PLUS: : DW_OP_breg2 X2+32) +# CHECK5-PLUS: DW_AT_type {{.*}}vbool1_t + +# CHECK0-MINUS: : DW_OP_breg8 X8-40) +# CHECK0-MINUS: DW_AT_type {{.*}}int32_t +# +# CHECK1-MINUS: : DW_OP_breg8 X8-48) +# CHECK1-MINUS: DW_AT_type {{.*}}int32_t +# +# CHECK2-MINUS: : DW_OP_breg8 X8-48, DW_OP_lit1, DW_OP_bregx VLENB+0, DW_OP_mul, DW_OP_minus) +# CHECK2-MINUS: DW_AT_type {{.*}}vint32m1_t +# +# CHECK3-MINUS: : DW_OP_breg8 X8-48, DW_OP_lit2, DW_OP_bregx VLENB+0, DW_OP_mul, DW_OP_minus) +# CHECK3-MINUS: DW_AT_type {{.*}}vint32m1_t +# +# CHECK4-MINUS: : DW_OP_breg8 X8-48, DW_OP_lit3, DW_OP_bregx VLENB+0, DW_OP_mul, DW_OP_minus) +# CHECK4-MINUS: DW_AT_type {{.*}}vbool1_t +# +# CHECK5-MINUS: : DW_OP_breg8 X8-48, DW_OP_lit4, DW_OP_bregx VLENB+0, DW_OP_mul, DW_OP_minus) +# CHECK5-MINUS: DW_AT_type {{.*}}vbool1_t + +--- | + define void 
@foo() !dbg !5 { + entry: + unreachable, !dbg !8 + } + + ; Function Attrs: nounwind readnone speculatable willreturn + declare void @llvm.dbg.value(metadata, metadata, metadata) + + !llvm.dbg.cu = !{!0} + !llvm.debugify = !{!3, !3} + !llvm.module.flags = !{!4} + + !0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2) + !1 = !DIFile(filename: "debug-info-rvv-dbg-value.mir", directory: "/") + !2 = !{} + !3 = !{i32 1} + !4 = !{i32 2, !"Debug Info Version", i32 3} + !5 = distinct !DISubprogram(name: "foo", linkageName: "foo", scope: null, file: !1, line: 1, type: !6, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0, retainedNodes: !9) + !6 = !DISubroutineType(types: !2) + !7 = !DIBasicType(name: "int32_t", size: 32, encoding: DW_ATE_signed) + !8 = !DILocation(line: 1, column: 1, scope: !5) + !9 = !{!10, !11, !12, !13, !14, !15} + !10 = !DILocalVariable(name: "value0", scope: !5, file: !1, line: 1, type: !7) + !11 = !DILocalVariable(name: "value1", scope: !5, file: !1, line: 1, type: !7) + !12 = !DILocalVariable(name: "value2", scope: !5, file: !1, line: 1, type: !16) + !13 = !DILocalVariable(name: "value3", scope: !5, file: !1, line: 1, type: !16) + !14 = !DILocalVariable(name: "value4", scope: !5, file: !1, line: 1, type: !21) + !15 = !DILocalVariable(name: "value5", scope: !5, file: !1, line: 1, type: !21) + !16 = !DIDerivedType(tag: DW_TAG_typedef, name: "vint32m1_t", file: !1, line: 1, baseType: !17) + !17 = !DIDerivedType(tag: DW_TAG_typedef, name: "__rvv_int32m1_t", file: !1, baseType: !18) + !18 = !DICompositeType(tag: DW_TAG_array_type, baseType: !7, flags: DIFlagVector, elements: !19) + !19 = !{!20} + !20 = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_bregx, 7202, 0, DW_OP_constu, 4, DW_OP_div, DW_OP_constu, 1, DW_OP_mul)) + !21 = !DIDerivedType(tag: DW_TAG_typedef, name: "vbool1_t", file: !1, line: 90, baseType: !22) + !22 
= !DIDerivedType(tag: DW_TAG_typedef, name: "__rvv_bool1_t", file: !1, baseType: !23) + !23 = !DICompositeType(tag: DW_TAG_array_type, baseType: !24, flags: DIFlagVector, elements: !25) + !24 = !DIBasicType(name: "_Bool", size: 8, encoding: DW_ATE_boolean) + !25 = !{!26} + !26 = !DISubrange(lowerBound: 0, upperBound: !DIExpression(DW_OP_bregx, 7202, 0, DW_OP_constu, 1, DW_OP_div, DW_OP_constu, 1, DW_OP_mul)) + + +... +--- +name: foo +alignment: 4 +tracksRegLiveness: true +liveins: + - { reg: '$x12' } + - { reg: '$x13' } + - { reg: '$v8' } + - { reg: '$v9' } + - { reg: '$v0' } + - { reg: '$v1' } +frameInfo: + maxAlignment: 16 + adjustsStack: true + hasCalls: true + maxCallFrameSize: 0 + localFrameSize: 4 +stack: + - { id: 0, size: 8, alignment: 8 } + - { id: 1, size: 8, alignment: 8 } + - { id: 2, size: 8, alignment: 4, stack-id: scalable-vector } + - { id: 3, size: 8, alignment: 4, stack-id: scalable-vector } + - { id: 4, size: 8, alignment: 1, stack-id: scalable-vector } + - { id: 5, size: 8, alignment: 1, stack-id: scalable-vector } +machineFunctionInfo: {} +body: | + bb.0.entry: + liveins: $x12, $x13, $v8, $v9, $v0, $v1 + + SD killed renamable $x12, %stack.0, 0, debug-location !8 + DBG_VALUE %stack.0, $noreg, !10, !DIExpression(DW_OP_deref), debug-location !8 + SD killed renamable $x13, %stack.1, 0, debug-location !8 + DBG_VALUE %stack.1, $noreg, !11, !DIExpression(DW_OP_deref), debug-location !8 + + PseudoVSE32_V_M1 killed renamable $v8, %stack.2, 8, 5, debug-location !DILocation(line: 5, column: 1, scope: !5) + DBG_VALUE %stack.2, $noreg, !12, !DIExpression(DW_OP_deref), debug-location !DILocation(line: 5, column: 1, scope: !5) + PseudoVSE32_V_M1 killed renamable $v9, %stack.3, 8, 5, debug-location !DILocation(line: 6, column: 1, scope: !5) + DBG_VALUE %stack.3, $noreg, !13, !DIExpression(DW_OP_deref), debug-location !DILocation(line: 6, column: 1, scope: !5) + + PseudoVSM_V_B64 killed renamable $v0, %stack.4, 8, 0, debug-location !DILocation(line: 2, column: 
1, scope: !5) + DBG_VALUE %stack.4, $noreg, !14, !DIExpression(DW_OP_deref), debug-location !DILocation(line: 2, column: 1, scope: !5) + PseudoVSM_V_B64 killed renamable $v1, %stack.5, 8, 0, debug-location !DILocation(line: 3, column: 1, scope: !5) + DBG_VALUE %stack.5, $noreg, !15, !DIExpression(DW_OP_deref), debug-location !DILocation(line: 3, column: 1, scope: !5) + + PseudoRET + +... diff --git a/llvm/test/CodeGen/Thumb2/active_lane_mask.ll b/llvm/test/CodeGen/Thumb2/active_lane_mask.ll --- a/llvm/test/CodeGen/Thumb2/active_lane_mask.ll +++ b/llvm/test/CodeGen/Thumb2/active_lane_mask.ll @@ -1,10 +1,72 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve %s -o - | FileCheck %s +define <2 x i64> @v2i64(i32 %index, i32 %TC, <2 x i64> %V1, <2 x i64> %V2) { +; CHECK-LABEL: v2i64: +; CHECK: @ %bb.0: +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: vmov q0[2], q0[0], r0, r0 +; CHECK-NEXT: vmov.i64 q1, #0xffffffff +; CHECK-NEXT: vand q0, q0, q1 +; CHECK-NEXT: vmov q2[2], q2[0], r1, r1 +; CHECK-NEXT: vmov r0, r12, d1 +; CHECK-NEXT: vmov lr, s0 +; CHECK-NEXT: adds r0, #1 +; CHECK-NEXT: vmov q0[2], q0[0], lr, r0 +; CHECK-NEXT: adc r12, r12, #0 +; CHECK-NEXT: vand q0, q0, q1 +; CHECK-NEXT: vand q1, q2, q1 +; CHECK-NEXT: vmov r4, r5, d1 +; CHECK-NEXT: vmov.i32 q2, #0x1 +; CHECK-NEXT: vmov r1, r6, d3 +; CHECK-NEXT: eors r0, r4 +; CHECK-NEXT: subs r1, r4, r1 +; CHECK-NEXT: sbcs.w r1, r5, r6 +; CHECK-NEXT: vmov r5, r6, d0 +; CHECK-NEXT: cset r1, lo +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: vmov r7, r1, d2 +; CHECK-NEXT: csetm r8, ne +; CHECK-NEXT: subs r7, r5, r7 +; CHECK-NEXT: sbcs.w r1, r6, r1 +; CHECK-NEXT: cset r1, lo +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: orrs.w r0, r0, r12 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: teq.w r5, lr +; CHECK-NEXT: vmov q0[2], q0[0], r1, r8 +; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 +; CHECK-NEXT: veor 
q1, q1, q2 +; CHECK-NEXT: vldr d5, [sp, #24] +; CHECK-NEXT: vand q0, q1, q0 +; CHECK-NEXT: vmov d4, r2, r3 +; CHECK-NEXT: vmov r0, s2 +; CHECK-NEXT: vmov r1, s0 +; CHECK-NEXT: and r0, r0, #1 +; CHECK-NEXT: and r1, r1, #1 +; CHECK-NEXT: rsbs r0, r0, #0 +; CHECK-NEXT: rsbs r1, r1, #0 +; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 +; CHECK-NEXT: add r0, sp, #32 +; CHECK-NEXT: vldrw.u32 q1, [r0] +; CHECK-NEXT: vbic q1, q1, q0 +; CHECK-NEXT: vand q0, q2, q0 +; CHECK-NEXT: vorr q0, q0, q1 +; CHECK-NEXT: vmov r0, r1, d0 +; CHECK-NEXT: vmov r2, r3, d1 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc} + %active.lane.mask = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32 %index, i32 %TC) + %select = select <2 x i1> %active.lane.mask, <2 x i64> %V1, <2 x i64> %V2 + ret <2 x i64> %select +} + define <4 x i32> @v4i32(i32 %index, i32 %TC, <4 x i32> %V1, <4 x i32> %V2) { ; CHECK-LABEL: v4i32: ; CHECK: @ %bb.0: -; CHECK-NEXT: adr.w r12, .LCPI0_0 +; CHECK-NEXT: adr.w r12, .LCPI1_0 ; CHECK-NEXT: vdup.32 q1, r0 ; CHECK-NEXT: vldrw.u32 q0, [r12] ; CHECK-NEXT: vadd.i32 q0, q0, r0 @@ -23,7 +85,7 @@ ; CHECK-NEXT: bx lr ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.1: -; CHECK-NEXT: .LCPI0_0: +; CHECK-NEXT: .LCPI1_0: ; CHECK-NEXT: .long 0 @ 0x0 ; CHECK-NEXT: .long 1 @ 0x1 ; CHECK-NEXT: .long 2 @ 0x2 @@ -36,7 +98,7 @@ define <7 x i32> @v7i32(i32 %index, i32 %TC, <7 x i32> %V1, <7 x i32> %V2) { ; CHECK-LABEL: v7i32: ; CHECK: @ %bb.0: -; CHECK-NEXT: adr r3, .LCPI1_0 +; CHECK-NEXT: adr r3, .LCPI2_0 ; CHECK-NEXT: vdup.32 q1, r1 ; CHECK-NEXT: vldrw.u32 q0, [r3] ; CHECK-NEXT: ldr r3, [sp, #32] @@ -57,7 +119,7 @@ ; CHECK-NEXT: ldr r2, [sp, #12] ; CHECK-NEXT: ldr r3, [sp, #4] ; CHECK-NEXT: vmov q3[3], q3[1], r3, r2 -; CHECK-NEXT: adr r2, .LCPI1_1 +; CHECK-NEXT: adr r2, .LCPI2_1 ; CHECK-NEXT: vpsel q2, q3, q2 ; CHECK-NEXT: vstrw.32 q2, [r0] ; CHECK-NEXT: vldrw.u32 q2, [r2] @@ -89,12 +151,12 @@ ; CHECK-NEXT: bx lr ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.1: -; 
CHECK-NEXT: .LCPI1_0: +; CHECK-NEXT: .LCPI2_0: ; CHECK-NEXT: .long 0 @ 0x0 ; CHECK-NEXT: .long 1 @ 0x1 ; CHECK-NEXT: .long 2 @ 0x2 ; CHECK-NEXT: .long 3 @ 0x3 -; CHECK-NEXT: .LCPI1_1: +; CHECK-NEXT: .LCPI2_1: ; CHECK-NEXT: .long 4 @ 0x4 ; CHECK-NEXT: .long 5 @ 0x5 ; CHECK-NEXT: .long 6 @ 0x6 @@ -108,7 +170,7 @@ ; CHECK-LABEL: v8i16: ; CHECK: @ %bb.0: ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13} -; CHECK-NEXT: adr.w r12, .LCPI2_0 +; CHECK-NEXT: adr.w r12, .LCPI3_0 ; CHECK-NEXT: vdup.32 q5, r1 ; CHECK-NEXT: vldrw.u32 q0, [r12] ; CHECK-NEXT: vmov.i8 q1, #0x0 @@ -121,7 +183,7 @@ ; CHECK-NEXT: vmov.16 q0[1], r12 ; CHECK-NEXT: vmov r1, r12, d9 ; CHECK-NEXT: vmov.16 q0[2], r1 -; CHECK-NEXT: adr r1, .LCPI2_1 +; CHECK-NEXT: adr r1, .LCPI3_1 ; CHECK-NEXT: vldrw.u32 q4, [r1] ; CHECK-NEXT: vmov.16 q0[3], r12 ; CHECK-NEXT: vadd.i32 q4, q4, r0 @@ -165,12 +227,12 @@ ; CHECK-NEXT: bx lr ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.1: -; CHECK-NEXT: .LCPI2_0: +; CHECK-NEXT: .LCPI3_0: ; CHECK-NEXT: .long 0 @ 0x0 ; CHECK-NEXT: .long 1 @ 0x1 ; CHECK-NEXT: .long 2 @ 0x2 ; CHECK-NEXT: .long 3 @ 0x3 -; CHECK-NEXT: .LCPI2_1: +; CHECK-NEXT: .LCPI3_1: ; CHECK-NEXT: .long 4 @ 0x4 ; CHECK-NEXT: .long 5 @ 0x5 ; CHECK-NEXT: .long 6 @ 0x6 @@ -185,7 +247,7 @@ ; CHECK: @ %bb.0: ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: sub sp, #16 -; CHECK-NEXT: adr.w r12, .LCPI3_0 +; CHECK-NEXT: adr.w r12, .LCPI4_0 ; CHECK-NEXT: vdup.32 q7, r1 ; CHECK-NEXT: vldrw.u32 q0, [r12] ; CHECK-NEXT: vmov.i8 q5, #0x0 @@ -198,7 +260,7 @@ ; CHECK-NEXT: vmov.16 q2[1], r12 ; CHECK-NEXT: vmov r1, r12, d1 ; CHECK-NEXT: vmov.16 q2[2], r1 -; CHECK-NEXT: adr r1, .LCPI3_1 +; CHECK-NEXT: adr r1, .LCPI4_1 ; CHECK-NEXT: vldrw.u32 q0, [r1] ; CHECK-NEXT: vmov.16 q2[3], r12 ; CHECK-NEXT: vadd.i32 q3, q0, r0 @@ -228,7 +290,7 @@ ; CHECK-NEXT: vmov.8 q2[6], r1 ; CHECK-NEXT: vmov.u16 r1, q0[7] ; CHECK-NEXT: vmov.8 q2[7], r1 -; CHECK-NEXT: adr r1, .LCPI3_2 +; CHECK-NEXT: adr r1, .LCPI4_2 ; CHECK-NEXT: 
vldrw.u32 q0, [r1] ; CHECK-NEXT: vadd.i32 q0, q0, r0 ; CHECK-NEXT: vcmp.u32 hi, q7, q0 @@ -239,7 +301,7 @@ ; CHECK-NEXT: vmov.16 q0[1], r12 ; CHECK-NEXT: vmov r1, r12, d13 ; CHECK-NEXT: vmov.16 q0[2], r1 -; CHECK-NEXT: adr r1, .LCPI3_3 +; CHECK-NEXT: adr r1, .LCPI4_3 ; CHECK-NEXT: vldrw.u32 q6, [r1] ; CHECK-NEXT: vmov.16 q0[3], r12 ; CHECK-NEXT: vadd.i32 q6, q6, r0 @@ -355,22 +417,22 @@ ; CHECK-NEXT: bx lr ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.1: -; CHECK-NEXT: .LCPI3_0: +; CHECK-NEXT: .LCPI4_0: ; CHECK-NEXT: .long 0 @ 0x0 ; CHECK-NEXT: .long 1 @ 0x1 ; CHECK-NEXT: .long 2 @ 0x2 ; CHECK-NEXT: .long 3 @ 0x3 -; CHECK-NEXT: .LCPI3_1: +; CHECK-NEXT: .LCPI4_1: ; CHECK-NEXT: .long 4 @ 0x4 ; CHECK-NEXT: .long 5 @ 0x5 ; CHECK-NEXT: .long 6 @ 0x6 ; CHECK-NEXT: .long 7 @ 0x7 -; CHECK-NEXT: .LCPI3_2: +; CHECK-NEXT: .LCPI4_2: ; CHECK-NEXT: .long 8 @ 0x8 ; CHECK-NEXT: .long 9 @ 0x9 ; CHECK-NEXT: .long 10 @ 0xa ; CHECK-NEXT: .long 11 @ 0xb -; CHECK-NEXT: .LCPI3_3: +; CHECK-NEXT: .LCPI4_3: ; CHECK-NEXT: .long 12 @ 0xc ; CHECK-NEXT: .long 13 @ 0xd ; CHECK-NEXT: .long 14 @ 0xe @@ -388,58 +450,50 @@ ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: sub sp, #8 ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: beq.w .LBB4_3 +; CHECK-NEXT: beq.w .LBB5_3 ; CHECK-NEXT: @ %bb.1: @ %for.body.preheader ; CHECK-NEXT: adds r0, r2, #1 ; CHECK-NEXT: vmov q1[2], q1[0], r2, r2 ; CHECK-NEXT: bic r0, r0, #1 -; CHECK-NEXT: adr r2, .LCPI4_0 +; CHECK-NEXT: adr r2, .LCPI5_0 ; CHECK-NEXT: subs r0, #2 ; CHECK-NEXT: movs r3, #1 ; CHECK-NEXT: vmov.i64 q0, #0xffffffff ; CHECK-NEXT: vldrw.u32 q2, [r2] ; CHECK-NEXT: add.w lr, r3, r0, lsr #1 -; CHECK-NEXT: mov.w r12, #0 +; CHECK-NEXT: mov.w r8, #0 ; CHECK-NEXT: vand q1, q1, q0 -; CHECK-NEXT: .LBB4_2: @ %vector.body +; CHECK-NEXT: .LBB5_2: @ %vector.body ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmov q3[2], q3[0], r12, r12 +; CHECK-NEXT: vmov q3[2], q3[0], r8, r8 ; CHECK-NEXT: vmov r6, r7, d3 ; CHECK-NEXT: vand q3, q3, q0 -; CHECK-NEXT: add.w r12, 
r12, #2 +; CHECK-NEXT: add.w r8, r8, #2 ; CHECK-NEXT: vmov r2, r3, d7 ; CHECK-NEXT: vmov r9, s12 -; CHECK-NEXT: adds r0, r2, #1 -; CHECK-NEXT: vmov q3[2], q3[0], r9, r0 -; CHECK-NEXT: adc r8, r3, #0 +; CHECK-NEXT: adds r2, #1 +; CHECK-NEXT: vmov q3[2], q3[0], r9, r2 +; CHECK-NEXT: adc r12, r3, #0 ; CHECK-NEXT: vand q3, q3, q0 -; CHECK-NEXT: vmov r3, r2, d2 +; CHECK-NEXT: vmov r0, r3, d2 ; CHECK-NEXT: vmov r4, r5, d7 ; CHECK-NEXT: subs r6, r4, r6 -; CHECK-NEXT: eor.w r0, r0, r4 ; CHECK-NEXT: sbcs r5, r7 ; CHECK-NEXT: vmov r6, r7, d6 -; CHECK-NEXT: mov.w r5, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r5, #1 +; CHECK-NEXT: cset r5, lo ; CHECK-NEXT: cmp r5, #0 ; CHECK-NEXT: csetm r5, ne -; CHECK-NEXT: subs r3, r6, r3 -; CHECK-NEXT: sbcs.w r2, r7, r2 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r2, #1 -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne -; CHECK-NEXT: orrs.w r0, r0, r8 -; CHECK-NEXT: cset r0, ne -; CHECK-NEXT: vmov q3[2], q3[0], r2, r5 +; CHECK-NEXT: subs r0, r6, r0 +; CHECK-NEXT: sbcs.w r0, r7, r3 +; CHECK-NEXT: cset r0, lo ; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: vmov q3[3], q3[1], r2, r5 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: vmov q3[2], q3[0], r0, r5 +; CHECK-NEXT: vmov q3[3], q3[1], r0, r5 +; CHECK-NEXT: eor.w r0, r4, r2 +; CHECK-NEXT: orrs.w r0, r0, r12 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: teq.w r6, r9 -; CHECK-NEXT: cset r2, ne -; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: csetm r2, ne ; CHECK-NEXT: vmov q4[2], q4[0], r2, r0 ; CHECK-NEXT: vmov q4[3], q4[1], r2, r0 @@ -474,15 +528,15 @@ ; CHECK-NEXT: vmovmi r0, s14 ; CHECK-NEXT: strmi r0, [r1, #4] ; CHECK-NEXT: adds r1, #8 -; CHECK-NEXT: le lr, .LBB4_2 -; CHECK-NEXT: .LBB4_3: @ %for.cond.cleanup +; CHECK-NEXT: le lr, .LBB5_2 +; CHECK-NEXT: .LBB5_3: @ %for.cond.cleanup ; CHECK-NEXT: add sp, #8 ; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.4: -; CHECK-NEXT: .LCPI4_0: +; 
CHECK-NEXT: .LCPI5_0: ; CHECK-NEXT: .long 1 @ 0x1 ; CHECK-NEXT: .long 0 @ 0x0 ; CHECK-NEXT: .long 1 @ 0x1 diff --git a/llvm/test/CodeGen/Thumb2/mve-ctlz.ll b/llvm/test/CodeGen/Thumb2/mve-ctlz.ll --- a/llvm/test/CodeGen/Thumb2/mve-ctlz.ll +++ b/llvm/test/CodeGen/Thumb2/mve-ctlz.ll @@ -8,9 +8,7 @@ ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: clz r0, r0 ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r2, ne -; CHECK-NEXT: adds r0, #32 -; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: add.w r0, r0, #32 ; CHECK-NEXT: it ne ; CHECK-NEXT: clzne r0, r1 ; CHECK-NEXT: vmov s2, r0 @@ -19,9 +17,7 @@ ; CHECK-NEXT: vmov.f32 s3, s1 ; CHECK-NEXT: clz r0, r0 ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r2, ne -; CHECK-NEXT: adds r0, #32 -; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: add.w r0, r0, #32 ; CHECK-NEXT: it ne ; CHECK-NEXT: clzne r0, r1 ; CHECK-NEXT: vmov s0, r0 @@ -71,9 +67,7 @@ ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: clz r0, r0 ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r2, ne -; CHECK-NEXT: adds r0, #32 -; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: add.w r0, r0, #32 ; CHECK-NEXT: it ne ; CHECK-NEXT: clzne r0, r1 ; CHECK-NEXT: vmov s2, r0 @@ -82,9 +76,7 @@ ; CHECK-NEXT: vmov.f32 s3, s1 ; CHECK-NEXT: clz r0, r0 ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r2, ne -; CHECK-NEXT: adds r0, #32 -; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: add.w r0, r0, #32 ; CHECK-NEXT: it ne ; CHECK-NEXT: clzne r0, r1 ; CHECK-NEXT: vmov s0, r0 diff --git a/llvm/test/CodeGen/Thumb2/mve-cttz.ll b/llvm/test/CodeGen/Thumb2/mve-cttz.ll --- a/llvm/test/CodeGen/Thumb2/mve-cttz.ll +++ b/llvm/test/CodeGen/Thumb2/mve-cttz.ll @@ -6,27 +6,23 @@ ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: rbit r1, r1 -; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: rbit r2, r0 ; CHECK-NEXT: clz r1, r1 -; CHECK-NEXT: cset r2, ne -; CHECK-NEXT: adds r1, #32 -; CHECK-NEXT: rbit r0, r0 -; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: add.w r1, r1, #32 ; CHECK-NEXT: it ne -; CHECK-NEXT: clzne r1, r0 +; 
CHECK-NEXT: clzne r1, r2 ; CHECK-NEXT: vmov s2, r1 ; CHECK-NEXT: vmov r0, r1, d0 ; CHECK-NEXT: vldr s1, .LCPI0_0 ; CHECK-NEXT: vmov.f32 s3, s1 ; CHECK-NEXT: rbit r1, r1 -; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: rbit r2, r0 ; CHECK-NEXT: clz r1, r1 -; CHECK-NEXT: cset r2, ne -; CHECK-NEXT: adds r1, #32 -; CHECK-NEXT: rbit r0, r0 -; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: add.w r1, r1, #32 ; CHECK-NEXT: it ne -; CHECK-NEXT: clzne r1, r0 +; CHECK-NEXT: clzne r1, r2 ; CHECK-NEXT: vmov s0, r1 ; CHECK-NEXT: bx lr ; CHECK-NEXT: .p2align 2 @@ -79,27 +75,23 @@ ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: rbit r1, r1 -; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: rbit r2, r0 ; CHECK-NEXT: clz r1, r1 -; CHECK-NEXT: cset r2, ne -; CHECK-NEXT: adds r1, #32 -; CHECK-NEXT: rbit r0, r0 -; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: add.w r1, r1, #32 ; CHECK-NEXT: it ne -; CHECK-NEXT: clzne r1, r0 +; CHECK-NEXT: clzne r1, r2 ; CHECK-NEXT: vmov s2, r1 ; CHECK-NEXT: vmov r0, r1, d0 ; CHECK-NEXT: vldr s1, .LCPI4_0 ; CHECK-NEXT: vmov.f32 s3, s1 ; CHECK-NEXT: rbit r1, r1 -; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: rbit r2, r0 ; CHECK-NEXT: clz r1, r1 -; CHECK-NEXT: cset r2, ne -; CHECK-NEXT: adds r1, #32 -; CHECK-NEXT: rbit r0, r0 -; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: add.w r1, r1, #32 ; CHECK-NEXT: it ne -; CHECK-NEXT: clzne r1, r0 +; CHECK-NEXT: clzne r1, r2 ; CHECK-NEXT: vmov s0, r1 ; CHECK-NEXT: bx lr ; CHECK-NEXT: .p2align 2 diff --git a/llvm/test/CodeGen/Thumb2/mve-fmas.ll b/llvm/test/CodeGen/Thumb2/mve-fmas.ll --- a/llvm/test/CodeGen/Thumb2/mve-fmas.ll +++ b/llvm/test/CodeGen/Thumb2/mve-fmas.ll @@ -401,106 +401,74 @@ ; CHECK-MVE-LABEL: vfma16_v1_pred: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s14, s4 -; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vcmp.f16 s14, #0 ; CHECK-MVE-NEXT: vmovx.f16 s13, s0 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; 
CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: vcmp.f16 s14, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s8 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.f32 s15, s13 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmla.f16 s15, s14, s12 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 -; CHECK-MVE-NEXT: vseleq.f16 s12, s13, s15 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: vmov.f32 s14, s0 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmla.f16 s14, s4, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s5 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s4, s9 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: vseleq.f16 s12, s13, s15 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f16 s8, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s0, s14 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: vins.f16 s0, s12 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s1 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s4, s9 ; CHECK-MVE-NEXT: vmov.f32 s14, s12 -; CHECK-MVE-NEXT: vmla.f16 s14, s8, s4 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s5, #0 -; CHECK-MVE-NEXT: vseleq.f16 s4, s12, s14 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: vmla.f16 s14, s8, s4 ; CHECK-MVE-NEXT: vmov.f32 s8, s1 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmla.f16 s8, s5, s9 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: vseleq.f16 s4, s12, s14 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr +; 
CHECK-MVE-NEXT: vmovx.f16 s12, s2 +; CHECK-MVE-NEXT: vmov.f32 s14, s12 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s1, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s6 ; CHECK-MVE-NEXT: vcmp.f16 s8, #0 -; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s12, s2 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vins.f16 s1, s4 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 -; CHECK-MVE-NEXT: vmov.f32 s14, s12 ; CHECK-MVE-NEXT: vmla.f16 s14, s8, s4 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s6, #0 -; CHECK-MVE-NEXT: vseleq.f16 s4, s12, s14 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: vmov.f32 s8, s2 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmla.f16 s8, s6, s10 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s6, s7 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: vseleq.f16 s4, s12, s14 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f16 s6, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s2, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s3 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 ; CHECK-MVE-NEXT: vmov.f32 s10, s8 ; CHECK-MVE-NEXT: vmla.f16 s10, s6, s4 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s7, #0 -; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s10 -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp 
r0, #0 ; CHECK-MVE-NEXT: vmov.f32 s6, s3 -; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmla.f16 s6, s7, s11 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s10 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s3, s6 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -530,106 +498,74 @@ ; CHECK-MVE-LABEL: vfma16_v2_pred: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s14, s4 -; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vcmp.f16 s14, #0 ; CHECK-MVE-NEXT: vmovx.f16 s13, s0 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: vcmp.f16 s14, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s8 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.f32 s15, s13 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmla.f16 s15, s14, s12 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 -; CHECK-MVE-NEXT: vseleq.f16 s12, s13, s15 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: vmov.f32 s14, s0 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmla.f16 s14, s4, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s5 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s4, s9 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: vseleq.f16 s12, s13, s15 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f16 s8, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s0, s14 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: vins.f16 s0, s12 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s1 -; CHECK-MVE-NEXT: cset r1, ne -; 
CHECK-MVE-NEXT: vmovx.f16 s4, s9 ; CHECK-MVE-NEXT: vmov.f32 s14, s12 -; CHECK-MVE-NEXT: vmla.f16 s14, s8, s4 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s5, #0 -; CHECK-MVE-NEXT: vseleq.f16 s4, s12, s14 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: vmla.f16 s14, s8, s4 ; CHECK-MVE-NEXT: vmov.f32 s8, s1 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmla.f16 s8, s5, s9 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: vseleq.f16 s4, s12, s14 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-MVE-NEXT: vmovx.f16 s12, s2 +; CHECK-MVE-NEXT: vmov.f32 s14, s12 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s1, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s6 ; CHECK-MVE-NEXT: vcmp.f16 s8, #0 -; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s12, s2 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vins.f16 s1, s4 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 -; CHECK-MVE-NEXT: vmov.f32 s14, s12 ; CHECK-MVE-NEXT: vmla.f16 s14, s8, s4 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s6, #0 -; CHECK-MVE-NEXT: vseleq.f16 s4, s12, s14 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: vmov.f32 s8, s2 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmla.f16 s8, s6, s10 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s6, s7 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: vseleq.f16 s4, s12, s14 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f16 s6, #0 +; CHECK-MVE-NEXT: cset r0, mi 
+; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s2, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s3 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 ; CHECK-MVE-NEXT: vmov.f32 s10, s8 ; CHECK-MVE-NEXT: vmla.f16 s10, s6, s4 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s7, #0 -; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s10 -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vmov.f32 s6, s3 -; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmla.f16 s6, s7, s11 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s10 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s3, s6 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -659,106 +595,74 @@ ; CHECK-MVE-LABEL: vfms16_pred: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s14, s4 -; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vcmp.f16 s14, #0 ; CHECK-MVE-NEXT: vmovx.f16 s13, s0 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: vcmp.f16 s14, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s8 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.f32 s15, s13 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmls.f16 s15, s14, s12 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 -; CHECK-MVE-NEXT: vseleq.f16 s12, s13, s15 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: vmov.f32 s14, s0 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmls.f16 s14, s4, 
s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s5 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s4, s9 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: vseleq.f16 s12, s13, s15 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f16 s8, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s0, s14 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: vins.f16 s0, s12 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s1 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s4, s9 ; CHECK-MVE-NEXT: vmov.f32 s14, s12 -; CHECK-MVE-NEXT: vmls.f16 s14, s8, s4 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s5, #0 -; CHECK-MVE-NEXT: vseleq.f16 s4, s12, s14 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: vmls.f16 s14, s8, s4 ; CHECK-MVE-NEXT: vmov.f32 s8, s1 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmls.f16 s8, s5, s9 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: vseleq.f16 s4, s12, s14 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-MVE-NEXT: vmovx.f16 s12, s2 +; CHECK-MVE-NEXT: vmov.f32 s14, s12 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s1, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s6 ; CHECK-MVE-NEXT: vcmp.f16 s8, #0 -; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s12, s2 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vins.f16 s1, s4 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 -; CHECK-MVE-NEXT: vmov.f32 s14, s12 ; 
CHECK-MVE-NEXT: vmls.f16 s14, s8, s4 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s6, #0 -; CHECK-MVE-NEXT: vseleq.f16 s4, s12, s14 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: vmov.f32 s8, s2 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmls.f16 s8, s6, s10 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s6, s7 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: vseleq.f16 s4, s12, s14 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f16 s6, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s2, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s3 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 ; CHECK-MVE-NEXT: vmov.f32 s10, s8 ; CHECK-MVE-NEXT: vmls.f16 s10, s6, s4 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s7, #0 -; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s10 -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vmov.f32 s6, s3 -; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmls.f16 s6, s7, s11 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s10 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s3, s6 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -793,55 +697,38 @@ ; CHECK-MVE-LABEL: vfmar16_pred: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s10, s4 -; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vcmp.f16 s10, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; 
CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: vcmp.f16 s10, #0 ; CHECK-MVE-NEXT: vcvtb.f16.f32 s8, s8 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.f32 s14, s12 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmla.f16 s14, s10, s8 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s10, s12, s14 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: vmov.f32 s12, s0 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmla.f16 s12, s4, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s0, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: vins.f16 s0, s10 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s10, s1 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmov.f32 s12, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmla.f16 s12, s4, s8 ; CHECK-MVE-NEXT: vcmp.f16 s5, #0 +; CHECK-MVE-NEXT: vmla.f16 s12, s4, s8 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s10, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: vmov.f32 s10, s1 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmla.f16 s10, s5, s8 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s1, s10 ; CHECK-MVE-NEXT: vmovx.f16 s10, s2 ; 
CHECK-MVE-NEXT: vins.f16 s1, s4 @@ -849,47 +736,32 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmov.f32 s12, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmla.f16 s12, s4, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s6, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s10, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmov.f32 s10, s2 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmla.f16 s10, s6, s8 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s6, s3 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s2, s10 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmov.f32 s10, s6 ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 -; CHECK-MVE-NEXT: vmovx.f16 s6, s3 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmov.f32 s10, s6 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmla.f16 s10, s4, s8 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f16 s7, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s10 -; CHECK-MVE-NEXT: movs r0, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vmov.f32 s6, s3 -; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmla.f16 s6, s7, s8 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s3, s6 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -926,102 +798,70 @@ ; 
CHECK-MVE-LABEL: vfma16_pred: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s10, s4 -; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vcmp.f16 s10, #0 ; CHECK-MVE-NEXT: vcvtb.f16.f32 s8, s8 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: vcmp.f16 s10, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.f32 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmla.f16 s14, s12, s10 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s10, s12, s14 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: vmov.f32 s12, s8 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmla.f16 s12, s0, s4 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s0, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: vins.f16 s0, s10 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s10, s1 ; CHECK-MVE-NEXT: vmov.f32 s12, s8 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmla.f16 s12, s10, s4 ; CHECK-MVE-NEXT: vcmp.f16 s5, #0 +; CHECK-MVE-NEXT: vmla.f16 s12, s10, s4 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s10, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: vmov.f32 s10, s8 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmla.f16 s10, s1, s5 -; 
CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vmov.f32 s12, s8 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s1, s10 ; CHECK-MVE-NEXT: vmovx.f16 s10, s2 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 -; CHECK-MVE-NEXT: vmov.f32 s12, s8 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmla.f16 s12, s10, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f16 s6, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s10, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmov.f32 s10, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmla.f16 s10, s2, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s6, s3 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s2, s10 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmov.f32 s10, s8 ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 -; CHECK-MVE-NEXT: vmov.f32 s10, s8 -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s6, s3 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmla.f16 s10, s6, s4 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f16 s7, #0 +; CHECK-MVE-NEXT: vmla.f16 s8, s3, s7 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s10 -; CHECK-MVE-NEXT: movs r0, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; 
CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: vmla.f16 s8, s3, s7 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s3, s8 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -1053,49 +893,33 @@ ; ; CHECK-MVE-LABEL: vfma32_v1_pred: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vmov.f32 s14, s0 -; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vmov.f32 s12, s1 -; CHECK-MVE-NEXT: movs r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s5, #0 -; CHECK-MVE-NEXT: movs r3, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-MVE-NEXT: vmov.f32 s14, s0 +; CHECK-MVE-NEXT: vmov.f32 s12, s1 ; CHECK-MVE-NEXT: vmla.f32 s14, s4, s8 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r2, #1 -; CHECK-MVE-NEXT: vmov.f32 s8, s3 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: vmov.f32 s4, s3 +; CHECK-MVE-NEXT: vmov.f32 s8, s2 ; CHECK-MVE-NEXT: vmla.f32 s12, s5, s9 -; CHECK-MVE-NEXT: cset r2, ne -; CHECK-MVE-NEXT: vmov.f32 s5, s2 -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: vmla.f32 s4, s7, s11 +; CHECK-MVE-NEXT: vmla.f32 s8, s6, s10 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f32 s7, #0 +; CHECK-MVE-NEXT: cset r1, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s6, #0 -; CHECK-MVE-NEXT: cset r3, ne +; CHECK-MVE-NEXT: cset r2, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: vmla.f32 s8, s7, s11 +; CHECK-MVE-NEXT: cset r3, mi +; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: vseleq.f32 s3, s3, s4 ; CHECK-MVE-NEXT: cmp r3, #0 -; 
CHECK-MVE-NEXT: vmla.f32 s5, s6, s10 -; CHECK-MVE-NEXT: vseleq.f32 s3, s3, s8 +; CHECK-MVE-NEXT: vseleq.f32 s2, s2, s8 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: vseleq.f32 s2, s2, s5 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s1, s12 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s0, s14 ; CHECK-MVE-NEXT: bx lr entry: @@ -1122,49 +946,33 @@ ; ; CHECK-MVE-LABEL: vfma32_v2_pred: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vmov.f32 s14, s0 -; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vmov.f32 s12, s1 -; CHECK-MVE-NEXT: movs r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s5, #0 -; CHECK-MVE-NEXT: movs r3, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-MVE-NEXT: vmov.f32 s14, s0 +; CHECK-MVE-NEXT: vmov.f32 s12, s1 ; CHECK-MVE-NEXT: vmla.f32 s14, s4, s8 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r2, #1 -; CHECK-MVE-NEXT: vmov.f32 s8, s3 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: vmov.f32 s4, s3 +; CHECK-MVE-NEXT: vmov.f32 s8, s2 ; CHECK-MVE-NEXT: vmla.f32 s12, s5, s9 -; CHECK-MVE-NEXT: cset r2, ne -; CHECK-MVE-NEXT: vmov.f32 s5, s2 -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: vmla.f32 s4, s7, s11 +; CHECK-MVE-NEXT: vmla.f32 s8, s6, s10 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f32 s7, #0 +; CHECK-MVE-NEXT: cset r1, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s6, #0 -; CHECK-MVE-NEXT: cset r3, ne +; CHECK-MVE-NEXT: cset r2, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: vmla.f32 s8, s7, s11 +; 
CHECK-MVE-NEXT: cset r3, mi +; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: vseleq.f32 s3, s3, s4 ; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: vmla.f32 s5, s6, s10 -; CHECK-MVE-NEXT: vseleq.f32 s3, s3, s8 +; CHECK-MVE-NEXT: vseleq.f32 s2, s2, s8 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: vseleq.f32 s2, s2, s5 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s1, s12 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s0, s14 ; CHECK-MVE-NEXT: bx lr entry: @@ -1191,49 +999,33 @@ ; ; CHECK-MVE-LABEL: vfms32_pred: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vmov.f32 s14, s0 -; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vmov.f32 s12, s1 -; CHECK-MVE-NEXT: movs r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s5, #0 -; CHECK-MVE-NEXT: movs r3, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr +; CHECK-MVE-NEXT: vmov.f32 s14, s0 +; CHECK-MVE-NEXT: vmov.f32 s12, s1 ; CHECK-MVE-NEXT: vmls.f32 s14, s4, s8 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r2, #1 -; CHECK-MVE-NEXT: vmov.f32 s8, s3 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: vmov.f32 s4, s3 +; CHECK-MVE-NEXT: vmov.f32 s8, s2 ; CHECK-MVE-NEXT: vmls.f32 s12, s5, s9 -; CHECK-MVE-NEXT: cset r2, ne -; CHECK-MVE-NEXT: vmov.f32 s5, s2 -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: vmls.f32 s4, s7, s11 +; CHECK-MVE-NEXT: vmls.f32 s8, s6, s10 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f32 s7, #0 +; CHECK-MVE-NEXT: cset r1, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s6, #0 -; CHECK-MVE-NEXT: cset r3, ne +; CHECK-MVE-NEXT: cset r2, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; 
CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: vmls.f32 s8, s7, s11 +; CHECK-MVE-NEXT: cset r3, mi +; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: vseleq.f32 s3, s3, s4 ; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: vmls.f32 s5, s6, s10 -; CHECK-MVE-NEXT: vseleq.f32 s3, s3, s8 +; CHECK-MVE-NEXT: vseleq.f32 s2, s2, s8 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: vseleq.f32 s2, s2, s5 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s1, s12 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s0, s14 ; CHECK-MVE-NEXT: bx lr entry: @@ -1263,49 +1055,33 @@ ; ; CHECK-MVE-LABEL: vfmar32_pred: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vmov.f32 s10, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s5, #0 -; CHECK-MVE-NEXT: movs r2, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, #0 -; CHECK-MVE-NEXT: vmov.f32 s14, s3 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: vmov.f32 s12, s0 +; CHECK-MVE-NEXT: vmov.f32 s14, s2 +; CHECK-MVE-NEXT: vmov.f32 s10, s1 +; CHECK-MVE-NEXT: vmla.f32 s12, s4, s8 +; CHECK-MVE-NEXT: vmov.f32 s4, s3 +; CHECK-MVE-NEXT: vmla.f32 s14, s6, s8 ; CHECK-MVE-NEXT: vmla.f32 s10, s5, s8 -; CHECK-MVE-NEXT: vmov.f32 s5, s2 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: vmla.f32 s4, s7, s8 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f32 s7, #0 -; CHECK-MVE-NEXT: movs r3, #0 +; CHECK-MVE-NEXT: cset r1, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: vmov.f32 s12, s0 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s6, #0 -; CHECK-MVE-NEXT: cset r3, ne -; 
CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: vmla.f32 s14, s7, s8 +; CHECK-MVE-NEXT: cset r3, mi +; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: vseleq.f32 s3, s3, s4 ; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: vmla.f32 s5, s6, s8 -; CHECK-MVE-NEXT: vseleq.f32 s3, s3, s14 +; CHECK-MVE-NEXT: vseleq.f32 s2, s2, s14 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: vseleq.f32 s2, s2, s5 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s1, s10 -; CHECK-MVE-NEXT: vmla.f32 s12, s4, s8 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s0, s12 ; CHECK-MVE-NEXT: bx lr entry: @@ -1337,47 +1113,31 @@ ; CHECK-MVE-LABEL: vfmas32_pred: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s5, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, #0 -; CHECK-MVE-NEXT: vmov.f32 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: vmov.f32 s12, s8 +; CHECK-MVE-NEXT: vmov.f32 s10, s8 +; CHECK-MVE-NEXT: vmla.f32 s12, s0, s4 +; CHECK-MVE-NEXT: vmov.f32 s4, s8 +; CHECK-MVE-NEXT: vmla.f32 s8, s2, s6 +; CHECK-MVE-NEXT: vmla.f32 s10, s1, s5 +; CHECK-MVE-NEXT: vmla.f32 s4, s3, s7 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s7, #0 -; CHECK-MVE-NEXT: cset r2, ne -; CHECK-MVE-NEXT: vmov.f32 s10, s8 +; CHECK-MVE-NEXT: cset r1, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vmov.f32 s12, s8 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 ; 
CHECK-MVE-NEXT: vcmp.f32 s6, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: vmla.f32 s14, s3, s7 +; CHECK-MVE-NEXT: cset r3, mi +; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: vseleq.f32 s3, s3, s4 ; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: vmla.f32 s8, s2, s6 -; CHECK-MVE-NEXT: vseleq.f32 s3, s3, s14 -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s2, s8 -; CHECK-MVE-NEXT: vmla.f32 s10, s1, s5 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmla.f32 s12, s0, s4 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s1, s10 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s0, s12 ; CHECK-MVE-NEXT: bx lr entry: diff --git a/llvm/test/CodeGen/Thumb2/mve-fmath.ll b/llvm/test/CodeGen/Thumb2/mve-fmath.ll --- a/llvm/test/CodeGen/Thumb2/mve-fmath.ll +++ b/llvm/test/CodeGen/Thumb2/mve-fmath.ll @@ -1024,67 +1024,59 @@ ; CHECK-NEXT: ldrb.w r0, [sp, #25] ; CHECK-NEXT: vmovx.f16 s4, s0 ; CHECK-NEXT: vabs.f16 s4, s4 -; CHECK-NEXT: vabs.f16 s0, s0 -; CHECK-NEXT: tst.w r0, #128 ; CHECK-NEXT: vneg.f16 s6, s4 -; CHECK-NEXT: cset r0, ne -; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: lsls r0, r0, #24 +; CHECK-NEXT: it pl +; CHECK-NEXT: vmovpl.f32 s6, s4 ; CHECK-NEXT: ldrb.w r0, [sp, #29] -; CHECK-NEXT: vseleq.f16 s4, s4, s6 -; CHECK-NEXT: vneg.f16 s6, s0 -; CHECK-NEXT: tst.w r0, #128 -; CHECK-NEXT: cset r0, ne -; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: vabs.f16 s4, s0 +; CHECK-NEXT: vneg.f16 s0, s4 +; CHECK-NEXT: lsls r0, r0, #24 +; CHECK-NEXT: it pl +; CHECK-NEXT: vmovpl.f32 s0, s4 ; CHECK-NEXT: ldrb.w r0, [sp, #17] -; CHECK-NEXT: vseleq.f16 s0, s0, s6 -; CHECK-NEXT: tst.w r0, #128 -; CHECK-NEXT: vins.f16 s0, s4 ; CHECK-NEXT: vmovx.f16 s4, s1 -; CHECK-NEXT: cset r0, ne ; CHECK-NEXT: vabs.f16 s4, 
s4 -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: ldrb.w r0, [sp, #21] +; CHECK-NEXT: vins.f16 s0, s6 ; CHECK-NEXT: vneg.f16 s6, s4 -; CHECK-NEXT: vseleq.f16 s4, s4, s6 -; CHECK-NEXT: vabs.f16 s6, s1 -; CHECK-NEXT: tst.w r0, #128 -; CHECK-NEXT: vneg.f16 s8, s6 -; CHECK-NEXT: cset r0, ne -; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: lsls r0, r0, #24 +; CHECK-NEXT: it pl +; CHECK-NEXT: vmovpl.f32 s6, s4 +; CHECK-NEXT: ldrb.w r0, [sp, #21] +; CHECK-NEXT: vabs.f16 s4, s1 +; CHECK-NEXT: vneg.f16 s1, s4 +; CHECK-NEXT: lsls r0, r0, #24 +; CHECK-NEXT: it pl +; CHECK-NEXT: vmovpl.f32 s1, s4 ; CHECK-NEXT: ldrb.w r0, [sp, #9] -; CHECK-NEXT: vseleq.f16 s1, s6, s8 -; CHECK-NEXT: tst.w r0, #128 -; CHECK-NEXT: vins.f16 s1, s4 ; CHECK-NEXT: vmovx.f16 s4, s2 -; CHECK-NEXT: cset r0, ne ; CHECK-NEXT: vabs.f16 s4, s4 -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: ldrb.w r0, [sp, #13] +; CHECK-NEXT: vins.f16 s1, s6 ; CHECK-NEXT: vneg.f16 s6, s4 -; CHECK-NEXT: vseleq.f16 s4, s4, s6 -; CHECK-NEXT: vabs.f16 s2, s2 -; CHECK-NEXT: tst.w r0, #128 -; CHECK-NEXT: vneg.f16 s6, s2 -; CHECK-NEXT: cset r0, ne -; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: lsls r0, r0, #24 +; CHECK-NEXT: it pl +; CHECK-NEXT: vmovpl.f32 s6, s4 +; CHECK-NEXT: ldrb.w r0, [sp, #13] +; CHECK-NEXT: vabs.f16 s4, s2 +; CHECK-NEXT: vneg.f16 s2, s4 +; CHECK-NEXT: lsls r0, r0, #24 +; CHECK-NEXT: it pl +; CHECK-NEXT: vmovpl.f32 s2, s4 ; CHECK-NEXT: ldrb.w r0, [sp, #1] -; CHECK-NEXT: vseleq.f16 s2, s2, s6 -; CHECK-NEXT: tst.w r0, #128 -; CHECK-NEXT: vins.f16 s2, s4 ; CHECK-NEXT: vmovx.f16 s4, s3 -; CHECK-NEXT: cset r0, ne ; CHECK-NEXT: vabs.f16 s4, s4 -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: ldrb.w r0, [sp, #5] +; CHECK-NEXT: vins.f16 s2, s6 ; CHECK-NEXT: vneg.f16 s6, s4 -; CHECK-NEXT: vseleq.f16 s4, s4, s6 -; CHECK-NEXT: vabs.f16 s6, s3 -; CHECK-NEXT: tst.w r0, #128 -; CHECK-NEXT: vneg.f16 s8, s6 -; CHECK-NEXT: cset r0, ne -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: vseleq.f16 s3, s6, s8 -; CHECK-NEXT: vins.f16 s3, s4 +; CHECK-NEXT: lsls r0, r0, #24 +; 
CHECK-NEXT: it pl +; CHECK-NEXT: vmovpl.f32 s6, s4 +; CHECK-NEXT: ldrb.w r0, [sp, #5] +; CHECK-NEXT: vabs.f16 s4, s3 +; CHECK-NEXT: vneg.f16 s3, s4 +; CHECK-NEXT: lsls r0, r0, #24 +; CHECK-NEXT: it pl +; CHECK-NEXT: vmovpl.f32 s3, s4 +; CHECK-NEXT: vins.f16 s3, s6 ; CHECK-NEXT: add sp, #32 ; CHECK-NEXT: bx lr entry: diff --git a/llvm/test/CodeGen/Thumb2/mve-fpclamptosat_vec.ll b/llvm/test/CodeGen/Thumb2/mve-fpclamptosat_vec.ll --- a/llvm/test/CodeGen/Thumb2/mve-fpclamptosat_vec.ll +++ b/llvm/test/CodeGen/Thumb2/mve-fpclamptosat_vec.ll @@ -18,27 +18,22 @@ ; CHECK-NEXT: vmov r0, r1, d8 ; CHECK-NEXT: bl __aeabi_d2lz ; CHECK-NEXT: adr r3, .LCPI0_0 -; CHECK-NEXT: mvn r12, #-2147483648 +; CHECK-NEXT: mvn r2, #-2147483648 ; CHECK-NEXT: vldrw.u32 q0, [r3] -; CHECK-NEXT: subs.w r3, r4, r12 +; CHECK-NEXT: subs r3, r4, r2 ; CHECK-NEXT: sbcs r3, r5, #0 ; CHECK-NEXT: vmov q1[2], q1[0], r0, r4 -; CHECK-NEXT: mov.w r3, #0 +; CHECK-NEXT: cset r3, lt ; CHECK-NEXT: vmov q1[3], q1[1], r1, r5 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 ; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: mov.w r5, #-1 ; CHECK-NEXT: csetm r3, ne -; CHECK-NEXT: subs.w r0, r0, r12 +; CHECK-NEXT: subs r0, r0, r2 ; CHECK-NEXT: sbcs r0, r1, #0 -; CHECK-NEXT: mov.w r12, #-1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: adr r4, .LCPI0_1 +; CHECK-NEXT: cset r0, lt ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: adr r4, .LCPI0_1 ; CHECK-NEXT: vmov q2[2], q2[0], r0, r3 ; CHECK-NEXT: vmov q2[3], q2[1], r0, r3 ; CHECK-NEXT: vand q1, q1, q2 @@ -46,19 +41,16 @@ ; CHECK-NEXT: vorr q0, q1, q0 ; CHECK-NEXT: vldrw.u32 q1, [r4] ; CHECK-NEXT: vmov r0, r1, d1 -; CHECK-NEXT: vmov r3, r5, d0 +; CHECK-NEXT: vmov r2, r3, d0 ; CHECK-NEXT: rsbs.w r0, r0, #-2147483648 -; CHECK-NEXT: sbcs.w r0, r12, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: sbcs.w r0, r5, r1 +; CHECK-NEXT: cset r0, lt ; 
CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: rsbs.w r1, r3, #-2147483648 -; CHECK-NEXT: sbcs.w r1, r12, r5 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r2, #1 -; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: rsbs.w r1, r2, #-2147483648 +; CHECK-NEXT: sbcs.w r1, r5, r3 +; CHECK-NEXT: cset r1, lt +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 ; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 @@ -103,25 +95,21 @@ ; CHECK-NEXT: mov r5, r1 ; CHECK-NEXT: vmov r0, r1, d8 ; CHECK-NEXT: bl __aeabi_d2ulz -; CHECK-NEXT: subs.w r3, r4, #-1 +; CHECK-NEXT: subs.w r2, r4, #-1 ; CHECK-NEXT: vmov q1[2], q1[0], r0, r4 -; CHECK-NEXT: sbcs r3, r5, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: mov.w r3, #0 +; CHECK-NEXT: sbcs r2, r5, #0 ; CHECK-NEXT: vmov.i64 q0, #0xffffffff -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r3, #1 -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: cset r2, lo +; CHECK-NEXT: vmov q1[3], q1[1], r1, r5 +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csetm r2, ne ; CHECK-NEXT: subs.w r0, r0, #-1 ; CHECK-NEXT: sbcs r0, r1, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r2, #1 -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: vmov q1[3], q1[1], r1, r5 +; CHECK-NEXT: cset r0, lo +; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: vmov q2[2], q2[0], r0, r3 -; CHECK-NEXT: vmov q2[3], q2[1], r0, r3 +; CHECK-NEXT: vmov q2[2], q2[0], r0, r2 +; CHECK-NEXT: vmov q2[3], q2[1], r0, r2 ; CHECK-NEXT: vand q1, q1, q2 ; CHECK-NEXT: vbic q0, q0, q2 ; CHECK-NEXT: vorr q0, q1, q0 @@ -149,43 +137,36 @@ ; CHECK-NEXT: mov r5, r1 ; CHECK-NEXT: vmov r0, r1, d8 ; CHECK-NEXT: bl __aeabi_d2lz -; CHECK-NEXT: subs.w r3, r4, #-1 +; CHECK-NEXT: subs.w r2, r4, #-1 ; CHECK-NEXT: vmov q1[2], q1[0], r0, r4 -; CHECK-NEXT: sbcs r3, r5, #0 -; CHECK-NEXT: vmov.i64 q0, #0xffffffff -; CHECK-NEXT: mov.w r3, #0 +; CHECK-NEXT: sbcs r2, r5, #0 ; CHECK-NEXT: vmov q1[3], q1[1], r1, r5 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 -; CHECK-NEXT: 
cmp r3, #0 -; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: cset r2, lt +; CHECK-NEXT: vmov.i64 q0, #0xffffffff +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: mov.w r5, #0 +; CHECK-NEXT: csetm r2, ne ; CHECK-NEXT: subs.w r0, r0, #-1 ; CHECK-NEXT: sbcs r0, r1, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cset r0, lt ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: vmov q2[2], q2[0], r0, r3 -; CHECK-NEXT: vmov q2[3], q2[1], r0, r3 +; CHECK-NEXT: vmov q2[2], q2[0], r0, r2 +; CHECK-NEXT: vmov q2[3], q2[1], r0, r2 ; CHECK-NEXT: vand q1, q1, q2 ; CHECK-NEXT: vbic q0, q0, q2 ; CHECK-NEXT: vorr q0, q1, q0 ; CHECK-NEXT: vmov r0, r1, d1 -; CHECK-NEXT: vmov r3, r5, d0 +; CHECK-NEXT: vmov r2, r3, d0 ; CHECK-NEXT: rsbs r0, r0, #0 -; CHECK-NEXT: sbcs.w r0, r2, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: sbcs.w r0, r5, r1 +; CHECK-NEXT: cset r0, lt ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: rsbs r1, r3, #0 -; CHECK-NEXT: sbcs.w r1, r2, r5 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r2, #1 -; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: rsbs r1, r2, #0 +; CHECK-NEXT: sbcs.w r1, r5, r3 +; CHECK-NEXT: cset r1, lt +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 @@ -205,141 +186,118 @@ define arm_aapcs_vfpcc <4 x i32> @stest_f32i32(<4 x float> %x) { ; CHECK-LABEL: stest_f32i32: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} -; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 -; CHECK-NEXT: .vsave {d8, d9} -; CHECK-NEXT: vpush {d8, d9} -; CHECK-NEXT: .pad #8 -; CHECK-NEXT: sub sp, #8 +; CHECK-NEXT: .vsave {d8, d9, d10, d11} +; CHECK-NEXT: vpush {d8, d9, d10, 
d11} ; CHECK-NEXT: vmov q4, q0 -; CHECK-NEXT: vmov r0, r6, d8 +; CHECK-NEXT: vmov r4, r0, d8 ; CHECK-NEXT: bl __aeabi_f2lz -; CHECK-NEXT: mov r9, r0 -; CHECK-NEXT: mvn r0, #-2147483648 -; CHECK-NEXT: subs.w r0, r9, r0 -; CHECK-NEXT: str r1, [sp, #4] @ 4-byte Spill -; CHECK-NEXT: sbcs r0, r1, #0 -; CHECK-NEXT: mvn r4, #-2147483648 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: mov r0, r6 -; CHECK-NEXT: csetm r11, ne +; CHECK-NEXT: mov r5, r0 +; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: mov r6, r1 ; CHECK-NEXT: bl __aeabi_f2lz -; CHECK-NEXT: mov r6, r0 -; CHECK-NEXT: subs r0, r0, r4 +; CHECK-NEXT: vmov r4, r2, d9 +; CHECK-NEXT: adr r3, .LCPI3_0 +; CHECK-NEXT: mvn r7, #-2147483648 +; CHECK-NEXT: vldrw.u32 q5, [r3] +; CHECK-NEXT: subs r3, r5, r7 +; CHECK-NEXT: vmov q0[2], q0[0], r0, r5 +; CHECK-NEXT: sbcs r3, r6, #0 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r6 +; CHECK-NEXT: cset r3, lt +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: subs r0, r0, r7 ; CHECK-NEXT: sbcs r0, r1, #0 -; CHECK-NEXT: mov r10, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cset r0, lt ; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: vmov r4, r0, d9 -; CHECK-NEXT: csetm r8, ne +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: vmov q1[2], q1[0], r0, r3 +; CHECK-NEXT: vmov q1[3], q1[1], r0, r3 +; CHECK-NEXT: vand q0, q0, q1 +; CHECK-NEXT: vbic q1, q5, q1 +; CHECK-NEXT: vorr q4, q0, q1 +; CHECK-NEXT: vmov r9, r8, d8 +; CHECK-NEXT: mov r0, r2 ; CHECK-NEXT: bl __aeabi_f2lz ; CHECK-NEXT: mov r5, r0 ; CHECK-NEXT: mov r0, r4 -; CHECK-NEXT: mov r7, r1 +; CHECK-NEXT: mov r6, r1 ; CHECK-NEXT: bl __aeabi_f2lz -; CHECK-NEXT: adr r2, .LCPI3_0 -; CHECK-NEXT: mvn r4, #-2147483648 -; CHECK-NEXT: vldrw.u32 q0, [r2] -; CHECK-NEXT: adr r2, .LCPI3_1 -; CHECK-NEXT: vldrw.u32 q2, [r2] -; CHECK-NEXT: subs r2, r5, r4 -; CHECK-NEXT: ldr r2, [sp, #4] @ 4-byte Reload -; CHECK-NEXT: vmov q1[2], q1[0], r9, r6 
-; CHECK-NEXT: vmov q4[2], q4[0], r11, r8 -; CHECK-NEXT: vmov q3[2], q3[0], r0, r5 -; CHECK-NEXT: vmov q1[3], q1[1], r2, r10 -; CHECK-NEXT: vmov q4[3], q4[1], r11, r8 -; CHECK-NEXT: vand q1, q1, q4 -; CHECK-NEXT: vbic q4, q2, q4 -; CHECK-NEXT: vorr q1, q1, q4 -; CHECK-NEXT: vmov q3[3], q3[1], r1, r7 -; CHECK-NEXT: vmov r2, r3, d2 -; CHECK-NEXT: sbcs r7, r7, #0 -; CHECK-NEXT: vmov r6, r5, d3 -; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csetm r7, ne -; CHECK-NEXT: subs r0, r0, r4 -; CHECK-NEXT: sbcs r0, r1, #0 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r1, ne -; CHECK-NEXT: mov.w r0, #-1 -; CHECK-NEXT: vmov q4[2], q4[0], r1, r7 -; CHECK-NEXT: vmov q4[3], q4[1], r1, r7 -; CHECK-NEXT: vand q3, q3, q4 -; CHECK-NEXT: vbic q2, q2, q4 -; CHECK-NEXT: vorr q2, q3, q2 -; CHECK-NEXT: vmov r1, r7, d5 -; CHECK-NEXT: rsbs.w r2, r2, #-2147483648 -; CHECK-NEXT: sbcs.w r2, r0, r3 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r2, #1 -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne -; CHECK-NEXT: rsbs.w r3, r6, #-2147483648 -; CHECK-NEXT: sbcs.w r3, r0, r5 -; CHECK-NEXT: vmov r6, r5, d4 -; CHECK-NEXT: mov.w r3, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 +; CHECK-NEXT: adr r3, .LCPI3_1 +; CHECK-NEXT: vmov q1[2], q1[0], r0, r5 +; CHECK-NEXT: vldrw.u32 q0, [r3] +; CHECK-NEXT: subs r3, r5, r7 +; CHECK-NEXT: sbcs r3, r6, #0 +; CHECK-NEXT: vmov q1[3], q1[1], r1, r6 +; CHECK-NEXT: cset r3, lt +; CHECK-NEXT: mov.w r2, #-1 ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: subs r0, r0, r7 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: vmov r1, r7, d9 +; CHECK-NEXT: cset r0, lt +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: rsbs.w r6, r9, #-2147483648 +; CHECK-NEXT: vmov q2[2], q2[0], r0, r3 +; CHECK-NEXT: sbcs.w r6, r2, r8 +; CHECK-NEXT: vmov q2[3], q2[1], r0, r3 +; 
CHECK-NEXT: cset r6, lt +; CHECK-NEXT: vand q1, q1, q2 +; CHECK-NEXT: vbic q2, q5, q2 +; CHECK-NEXT: vorr q1, q1, q2 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: vmov r0, r3, d3 +; CHECK-NEXT: csetm r6, ne +; CHECK-NEXT: vmov r5, r4, d2 ; CHECK-NEXT: rsbs.w r1, r1, #-2147483648 -; CHECK-NEXT: sbcs.w r1, r0, r7 -; CHECK-NEXT: mov.w r1, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r1, #1 +; CHECK-NEXT: sbcs.w r1, r2, r7 +; CHECK-NEXT: cset r1, lt ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne -; CHECK-NEXT: rsbs.w r7, r6, #-2147483648 -; CHECK-NEXT: sbcs r0, r5 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: rsbs.w r0, r0, #-2147483648 +; CHECK-NEXT: sbcs.w r0, r2, r3 +; CHECK-NEXT: cset r0, lt ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: vmov.32 q3[1], r0 -; CHECK-NEXT: vmov q3[2], q3[0], r0, r1 -; CHECK-NEXT: vbic q4, q0, q3 -; CHECK-NEXT: vand q2, q2, q3 -; CHECK-NEXT: vmov.32 q3[1], r2 -; CHECK-NEXT: vorr q2, q2, q4 -; CHECK-NEXT: vmov q3[2], q3[0], r2, r3 -; CHECK-NEXT: vbic q0, q0, q3 -; CHECK-NEXT: vand q1, q1, q3 -; CHECK-NEXT: vorr q0, q1, q0 +; CHECK-NEXT: rsbs.w r3, r5, #-2147483648 +; CHECK-NEXT: sbcs r2, r4 +; CHECK-NEXT: cset r2, lt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: vmov.32 q2[1], r2 +; CHECK-NEXT: vmov q2[2], q2[0], r2, r0 +; CHECK-NEXT: vbic q3, q0, q2 +; CHECK-NEXT: vand q1, q1, q2 +; CHECK-NEXT: vmov.32 q2[1], r6 +; CHECK-NEXT: vorr q1, q1, q3 +; CHECK-NEXT: vmov q2[2], q2[0], r6, r1 +; CHECK-NEXT: vbic q0, q0, q2 +; CHECK-NEXT: vand q2, q4, q2 +; CHECK-NEXT: vorr q0, q2, q0 ; CHECK-NEXT: vmov.f32 s1, s2 -; CHECK-NEXT: vmov.f32 s2, s8 -; CHECK-NEXT: vmov.f32 s3, s10 -; CHECK-NEXT: add sp, #8 -; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: vmov.f32 s2, s4 +; CHECK-NEXT: vmov.f32 s3, s6 +; CHECK-NEXT: vpop {d8, d9, d10, d11} ; CHECK-NEXT: add sp, #4 -; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, 
pc} ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI3_0: -; CHECK-NEXT: .long 2147483648 @ 0x80000000 -; CHECK-NEXT: .long 4294967295 @ 0xffffffff -; CHECK-NEXT: .long 2147483648 @ 0x80000000 -; CHECK-NEXT: .long 4294967295 @ 0xffffffff -; CHECK-NEXT: .LCPI3_1: ; CHECK-NEXT: .long 2147483647 @ 0x7fffffff ; CHECK-NEXT: .long 0 @ 0x0 ; CHECK-NEXT: .long 2147483647 @ 0x7fffffff ; CHECK-NEXT: .long 0 @ 0x0 +; CHECK-NEXT: .LCPI3_1: +; CHECK-NEXT: .long 2147483648 @ 0x80000000 +; CHECK-NEXT: .long 4294967295 @ 0xffffffff +; CHECK-NEXT: .long 2147483648 @ 0x80000000 +; CHECK-NEXT: .long 4294967295 @ 0xffffffff entry: %conv = fptosi <4 x float> %x to <4 x i64> %0 = icmp slt <4 x i64> %conv, @@ -372,42 +330,34 @@ ; CHECK-NEXT: mov r0, r5 ; CHECK-NEXT: mov r4, r1 ; CHECK-NEXT: bl __aeabi_f2ulz -; CHECK-NEXT: subs.w r3, r7, #-1 +; CHECK-NEXT: subs.w r2, r7, #-1 ; CHECK-NEXT: vmov q0[2], q0[0], r0, r7 -; CHECK-NEXT: sbcs r3, r4, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: mov.w r3, #0 +; CHECK-NEXT: sbcs r2, r4, #0 ; CHECK-NEXT: vmov q1[2], q1[0], r10, r6 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r3, #1 -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: cset r2, lo +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csetm r2, ne ; CHECK-NEXT: subs.w r0, r0, #-1 ; CHECK-NEXT: sbcs r0, r1, #0 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r0, #1 +; CHECK-NEXT: cset r0, lo ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: subs.w r1, r6, #-1 ; CHECK-NEXT: sbcs r1, r9, #0 -; CHECK-NEXT: mov.w r1, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r1, #1 +; CHECK-NEXT: cset r1, lo ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne -; CHECK-NEXT: subs.w r7, r10, #-1 -; CHECK-NEXT: sbcs r7, r8, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r2, #1 -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne -; CHECK-NEXT: vmov.32 q2[1], r2 -; CHECK-NEXT: vmov q2[2], q2[0], r2, r1 +; CHECK-NEXT: subs.w r3, r10, #-1 +; CHECK-NEXT: 
sbcs r3, r8, #0 +; CHECK-NEXT: cset r3, lo +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: vmov.32 q2[1], r3 +; CHECK-NEXT: vmov q2[2], q2[0], r3, r1 ; CHECK-NEXT: vand q1, q1, q2 ; CHECK-NEXT: vorn q1, q1, q2 ; CHECK-NEXT: vmov.32 q2[1], r0 -; CHECK-NEXT: vmov q2[2], q2[0], r0, r3 +; CHECK-NEXT: vmov q2[2], q2[0], r0, r2 ; CHECK-NEXT: vand q0, q0, q2 ; CHECK-NEXT: vorn q0, q0, q2 ; CHECK-NEXT: vmov.f32 s1, s2 @@ -443,19 +393,14 @@ ; CHECK-NEXT: subs.w r3, r5, #-1 ; CHECK-NEXT: sbcs r3, r6, #0 ; CHECK-NEXT: vmov q0[2], q0[0], r0, r5 -; CHECK-NEXT: mov.w r3, #0 +; CHECK-NEXT: cset r3, lt ; CHECK-NEXT: vmov.i64 q5, #0xffffffff -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 ; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r6 ; CHECK-NEXT: csetm r3, ne ; CHECK-NEXT: subs.w r0, r0, #-1 ; CHECK-NEXT: sbcs r0, r1, #0 -; CHECK-NEXT: vmov q0[3], q0[1], r1, r6 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cset r0, lt ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: vmov q1[2], q1[0], r0, r3 @@ -467,61 +412,51 @@ ; CHECK-NEXT: mov r0, r2 ; CHECK-NEXT: bl __aeabi_f2lz ; CHECK-NEXT: mov r5, r0 -; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: subs.w r0, r0, #-1 +; CHECK-NEXT: sbcs r0, r1, #0 ; CHECK-NEXT: mov r6, r1 +; CHECK-NEXT: cset r0, lt +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: csetm r7, ne ; CHECK-NEXT: bl __aeabi_f2lz -; CHECK-NEXT: subs.w r2, r5, #-1 +; CHECK-NEXT: subs.w r3, r0, #-1 ; CHECK-NEXT: vmov q0[2], q0[0], r0, r5 -; CHECK-NEXT: sbcs r2, r6, #0 -; CHECK-NEXT: vmov q0[3], q0[1], r1, r6 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r2, #1 -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne -; CHECK-NEXT: subs.w r0, r0, #-1 ; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r6 +; CHECK-NEXT: cset r0, lt ; CHECK-NEXT: vmov r1, r3, d8 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it 
lt -; CHECK-NEXT: movlt r0, #1 ; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: mov.w r2, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: rsbs.w r6, r9, #0 -; CHECK-NEXT: vmov q1[2], q1[0], r0, r2 -; CHECK-NEXT: sbcs.w r6, r7, r8 -; CHECK-NEXT: vmov q1[3], q1[1], r0, r2 -; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: vmov q1[2], q1[0], r0, r7 +; CHECK-NEXT: sbcs.w r6, r2, r8 +; CHECK-NEXT: vmov q1[3], q1[1], r0, r7 +; CHECK-NEXT: cset r6, lt ; CHECK-NEXT: vand q0, q0, q1 ; CHECK-NEXT: vbic q1, q5, q1 ; CHECK-NEXT: vorr q0, q0, q1 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r6, #1 -; CHECK-NEXT: vmov r0, r2, d1 ; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: vmov r5, r4, d0 +; CHECK-NEXT: vmov r0, r7, d1 ; CHECK-NEXT: csetm r6, ne +; CHECK-NEXT: vmov r5, r4, d0 ; CHECK-NEXT: rsbs r1, r1, #0 -; CHECK-NEXT: sbcs.w r1, r7, r3 -; CHECK-NEXT: mov.w r1, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r1, #1 +; CHECK-NEXT: sbcs.w r1, r2, r3 +; CHECK-NEXT: cset r1, lt ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q2[2], q2[0], r1, r6 ; CHECK-NEXT: vand q2, q4, q2 ; CHECK-NEXT: rsbs r0, r0, #0 -; CHECK-NEXT: sbcs.w r0, r7, r2 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: sbcs.w r0, r2, r7 +; CHECK-NEXT: cset r0, lt ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: rsbs r2, r5, #0 -; CHECK-NEXT: sbcs.w r2, r7, r4 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 -; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: rsbs r3, r5, #0 +; CHECK-NEXT: sbcs r2, r4 +; CHECK-NEXT: cset r2, lt +; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: csetm r2, ne ; CHECK-NEXT: vmov q1[2], q1[0], r2, r0 ; CHECK-NEXT: vand q0, q0, q1 @@ -633,33 +568,26 @@ ; CHECK-NEXT: mov.w r2, #0 ; CHECK-NEXT: sbcs.w r3, r2, r7 ; CHECK-NEXT: vmov q0[2], q0[0], r0, r6 -; CHECK-NEXT: mov.w r3, #0 +; CHECK-NEXT: cset r3, lt ; CHECK-NEXT: vmov q1[2], q1[0], r4, r5 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csetm r3, ne ; CHECK-NEXT: rsbs 
r7, r4, #0 ; CHECK-NEXT: sbcs.w r7, r2, r8 -; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cset r7, lt ; CHECK-NEXT: cmp r7, #0 ; CHECK-NEXT: csetm r7, ne ; CHECK-NEXT: rsbs r6, r6, #0 ; CHECK-NEXT: sbcs.w r6, r2, r9 ; CHECK-NEXT: vmov q3[2], q3[0], r7, r3 -; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: cset r6, lt ; CHECK-NEXT: vand q1, q1, q3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r6, #1 ; CHECK-NEXT: cmp r6, #0 ; CHECK-NEXT: csetm r6, ne ; CHECK-NEXT: rsbs r0, r0, #0 ; CHECK-NEXT: sbcs.w r0, r2, r1 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r2, #1 -; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: cset r0, lt +; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: vmov q2[2], q2[0], r0, r6 ; CHECK-NEXT: vand q0, q0, q2 @@ -698,52 +626,44 @@ ; CHECK-NEXT: vmov q0[2], q0[0], r0, r4 ; CHECK-NEXT: movw r4, #32767 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r5 -; CHECK-NEXT: adr.w r12, .LCPI9_0 -; CHECK-NEXT: vmov r1, r2, d1 -; CHECK-NEXT: vldrw.u32 q1, [r12] -; CHECK-NEXT: vmov r3, r5, d0 -; CHECK-NEXT: movw lr, #32768 -; CHECK-NEXT: movt lr, #65535 +; CHECK-NEXT: adr r5, .LCPI9_0 +; CHECK-NEXT: vmov r0, r1, d1 +; CHECK-NEXT: vldrw.u32 q1, [r5] +; CHECK-NEXT: vmov r2, r3, d0 ; CHECK-NEXT: mov.w r12, #-1 -; CHECK-NEXT: movs r0, #0 -; CHECK-NEXT: subs r1, r1, r4 -; CHECK-NEXT: sbcs r1, r2, #0 -; CHECK-NEXT: mov.w r1, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r1, #1 +; CHECK-NEXT: adr r5, .LCPI9_1 +; CHECK-NEXT: subs r0, r0, r4 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: cset r0, lt +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: subs r1, r2, r4 +; CHECK-NEXT: sbcs r1, r3, #0 +; CHECK-NEXT: movw r4, #32768 +; CHECK-NEXT: cset r1, lt +; CHECK-NEXT: movt r4, #65535 ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne -; CHECK-NEXT: subs r2, r3, r4 -; CHECK-NEXT: sbcs r2, r5, #0 -; CHECK-NEXT: adr r4, .LCPI9_1 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r2, #1 -; CHECK-NEXT: cmp r2, #0 
-; CHECK-NEXT: csetm r2, ne -; CHECK-NEXT: vmov q2[2], q2[0], r2, r1 -; CHECK-NEXT: vmov q2[3], q2[1], r2, r1 +; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 +; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vbic q1, q1, q2 ; CHECK-NEXT: vand q0, q0, q2 ; CHECK-NEXT: vorr q0, q0, q1 -; CHECK-NEXT: vldrw.u32 q1, [r4] -; CHECK-NEXT: vmov r1, r2, d1 -; CHECK-NEXT: vmov r3, r5, d0 -; CHECK-NEXT: subs.w r1, lr, r1 -; CHECK-NEXT: sbcs.w r1, r12, r2 -; CHECK-NEXT: mov.w r1, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r1, #1 -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne -; CHECK-NEXT: subs.w r2, lr, r3 -; CHECK-NEXT: sbcs.w r2, r12, r5 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: vldrw.u32 q1, [r5] +; CHECK-NEXT: vmov r0, r1, d1 +; CHECK-NEXT: vmov r2, r3, d0 +; CHECK-NEXT: subs r0, r4, r0 +; CHECK-NEXT: sbcs.w r0, r12, r1 +; CHECK-NEXT: cset r0, lt ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: vmov q2[2], q2[0], r0, r1 -; CHECK-NEXT: vmov q2[3], q2[1], r0, r1 +; CHECK-NEXT: subs r1, r4, r2 +; CHECK-NEXT: sbcs.w r1, r12, r3 +; CHECK-NEXT: cset r1, lt +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 +; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vbic q1, q1, q2 ; CHECK-NEXT: vand q0, q0, q2 ; CHECK-NEXT: vorr q0, q0, q1 @@ -786,24 +706,20 @@ ; CHECK-NEXT: vmov r0, r1, d8 ; CHECK-NEXT: bl __aeabi_d2ulz ; CHECK-NEXT: vmov q0[2], q0[0], r0, r4 -; CHECK-NEXT: movw r4, #65535 +; CHECK-NEXT: vmov.i64 q1, #0xffff ; CHECK-NEXT: vmov q0[3], q0[1], r1, r5 -; CHECK-NEXT: movs r5, #0 +; CHECK-NEXT: movw r5, #65535 ; CHECK-NEXT: vmov r0, r1, d1 -; CHECK-NEXT: vmov.i64 q1, #0xffff ; CHECK-NEXT: vmov r2, r3, d0 -; CHECK-NEXT: subs r0, r0, r4 +; CHECK-NEXT: subs r0, r0, r5 ; CHECK-NEXT: sbcs r0, r1, #0 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r0, #1 +; CHECK-NEXT: cset r0, lo ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: subs r1, r2, r4 +; 
CHECK-NEXT: subs r1, r2, r5 ; CHECK-NEXT: sbcs r1, r3, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r5, #1 -; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: cset r1, lo +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 ; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 @@ -835,48 +751,41 @@ ; CHECK-NEXT: vmov r0, r1, d8 ; CHECK-NEXT: bl __aeabi_d2lz ; CHECK-NEXT: vmov q0[2], q0[0], r0, r4 -; CHECK-NEXT: movw r4, #65535 -; CHECK-NEXT: vmov q0[3], q0[1], r1, r5 ; CHECK-NEXT: vmov.i64 q1, #0xffff -; CHECK-NEXT: vmov r1, r2, d1 -; CHECK-NEXT: movs r0, #0 -; CHECK-NEXT: vmov r3, r5, d0 -; CHECK-NEXT: subs r1, r1, r4 -; CHECK-NEXT: sbcs r1, r2, #0 -; CHECK-NEXT: mov.w r1, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r1, #1 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r5 +; CHECK-NEXT: movw r5, #65535 +; CHECK-NEXT: vmov r0, r1, d1 +; CHECK-NEXT: vmov r2, r3, d0 +; CHECK-NEXT: subs r0, r0, r5 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: cset r0, lt +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: subs r1, r2, r5 +; CHECK-NEXT: sbcs r1, r3, #0 +; CHECK-NEXT: mov.w r5, #0 +; CHECK-NEXT: cset r1, lt ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne -; CHECK-NEXT: subs r2, r3, r4 -; CHECK-NEXT: sbcs r2, r5, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r2, #1 -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne -; CHECK-NEXT: vmov q2[2], q2[0], r2, r1 -; CHECK-NEXT: vmov q2[3], q2[1], r2, r1 +; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 +; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vbic q1, q1, q2 ; CHECK-NEXT: vand q0, q0, q2 ; CHECK-NEXT: vorr q0, q0, q1 -; CHECK-NEXT: vmov r1, r2, d1 -; CHECK-NEXT: vmov r3, r5, d0 -; CHECK-NEXT: rsbs r1, r1, #0 -; CHECK-NEXT: sbcs.w r1, r0, r2 -; CHECK-NEXT: mov.w r1, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r1, #1 -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne -; CHECK-NEXT: rsbs r2, r3, #0 -; CHECK-NEXT: sbcs.w r2, r0, r5 -; CHECK-NEXT: it lt -; CHECK-NEXT: 
movlt r0, #1 +; CHECK-NEXT: vmov r0, r1, d1 +; CHECK-NEXT: vmov r2, r3, d0 +; CHECK-NEXT: rsbs r0, r0, #0 +; CHECK-NEXT: sbcs.w r0, r5, r1 +; CHECK-NEXT: cset r0, lt ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: vmov q1[2], q1[0], r0, r1 -; CHECK-NEXT: vmov q1[3], q1[1], r0, r1 +; CHECK-NEXT: rsbs r1, r2, #0 +; CHECK-NEXT: sbcs.w r1, r5, r3 +; CHECK-NEXT: cset r1, lt +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 +; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 ; CHECK-NEXT: vand q0, q0, q1 ; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: pop {r4, r5, r7, pc} @@ -1087,71 +996,62 @@ define arm_aapcs_vfpcc <2 x i64> @stest_f64i64(<2 x double> %x) { ; CHECK-LABEL: stest_f64i64: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} -; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} -; CHECK-NEXT: .pad #4 -; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, lr} ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: vmov q4, q0 -; CHECK-NEXT: movs r4, #0 ; CHECK-NEXT: vmov r0, r1, d9 -; CHECK-NEXT: mvn r9, #-2147483648 ; CHECK-NEXT: bl __fixdfti -; CHECK-NEXT: subs.w r7, r0, #-1 -; CHECK-NEXT: mov.w r5, #-1 -; CHECK-NEXT: sbcs.w r7, r1, r9 +; CHECK-NEXT: vmov r12, lr, d8 +; CHECK-NEXT: subs.w r4, r0, #-1 +; CHECK-NEXT: mvn r9, #-2147483648 +; CHECK-NEXT: sbcs.w r4, r1, r9 +; CHECK-NEXT: sbcs r4, r2, #0 +; CHECK-NEXT: mov.w r7, #-1 +; CHECK-NEXT: sbcs r4, r3, #0 ; CHECK-NEXT: mov.w r10, #-2147483648 -; CHECK-NEXT: sbcs r7, r2, #0 -; CHECK-NEXT: sbcs r7, r3, #0 -; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r11, r0, r5, ne -; CHECK-NEXT: csel r3, r3, r7, ne -; CHECK-NEXT: csel r2, r2, r7, ne +; CHECK-NEXT: cset r4, lt +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r3, r3, r4, ne +; CHECK-NEXT: csel r2, r2, 
r4, ne +; CHECK-NEXT: csel r4, r0, r7, ne ; CHECK-NEXT: csel r1, r1, r9, ne -; CHECK-NEXT: rsbs.w r0, r11, #0 -; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: rsbs r0, r4, #0 ; CHECK-NEXT: sbcs.w r0, r10, r1 -; CHECK-NEXT: sbcs.w r0, r5, r2 -; CHECK-NEXT: sbcs.w r0, r5, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 -; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: sbcs.w r0, r7, r2 +; CHECK-NEXT: sbcs.w r0, r7, r3 +; CHECK-NEXT: cset r5, lt +; CHECK-NEXT: cmp r5, #0 ; CHECK-NEXT: csel r8, r1, r10, ne -; CHECK-NEXT: vmov r0, r1, d8 +; CHECK-NEXT: mov r0, r12 +; CHECK-NEXT: mov r1, lr ; CHECK-NEXT: bl __fixdfti ; CHECK-NEXT: subs.w r6, r0, #-1 ; CHECK-NEXT: sbcs.w r6, r1, r9 ; CHECK-NEXT: sbcs r6, r2, #0 ; CHECK-NEXT: sbcs r6, r3, #0 -; CHECK-NEXT: mov.w r6, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r6, #1 +; CHECK-NEXT: cset r6, lt ; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r0, r0, r5, ne +; CHECK-NEXT: csel r0, r0, r7, ne ; CHECK-NEXT: csel r1, r1, r9, ne ; CHECK-NEXT: csel r3, r3, r6, ne ; CHECK-NEXT: csel r2, r2, r6, ne ; CHECK-NEXT: rsbs r6, r0, #0 ; CHECK-NEXT: sbcs.w r6, r10, r1 -; CHECK-NEXT: sbcs.w r2, r5, r2 -; CHECK-NEXT: sbcs.w r2, r5, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r4, #1 -; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: sbcs.w r2, r7, r2 +; CHECK-NEXT: sbcs.w r2, r7, r3 +; CHECK-NEXT: cset r2, lt +; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: csel r1, r1, r10, ne -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r2, r11, r7, ne -; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r0, r0, r4, ne -; CHECK-NEXT: vmov q0[2], q0[0], r0, r2 +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r3, r4, r5, ne +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: vmov q0[2], q0[0], r0, r3 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r8 ; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: add sp, #4 -; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, pc} entry: %conv = fptosi <2 x double> %x to <2 x i128> %0 = icmp slt <2 x 
i128> %conv, @@ -1165,39 +1065,38 @@ define arm_aapcs_vfpcc <2 x i64> @utest_f64i64(<2 x double> %x) { ; CHECK-LABEL: utest_f64i64: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr} -; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: .save {r4, r5, r6, r7, lr} +; CHECK-NEXT: push {r4, r5, r6, r7, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: vmov r0, r1, d9 ; CHECK-NEXT: bl __fixunsdfti -; CHECK-NEXT: mov r8, r1 +; CHECK-NEXT: mov r5, r1 ; CHECK-NEXT: vmov r4, r1, d8 ; CHECK-NEXT: subs r2, #1 -; CHECK-NEXT: mov.w r7, #0 ; CHECK-NEXT: sbcs r2, r3, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r7, #1 -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: mov.w r6, #0 -; CHECK-NEXT: csel r5, r0, r7, ne +; CHECK-NEXT: cset r6, lo +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r7, r0, r6, ne ; CHECK-NEXT: mov r0, r4 ; CHECK-NEXT: bl __fixunsdfti ; CHECK-NEXT: subs r2, #1 ; CHECK-NEXT: sbcs r2, r3, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r6, #1 -; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r0, r0, r6, ne -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r2, r8, r7, ne +; CHECK-NEXT: cset r2, lo +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r0, r0, r2, ne ; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r1, r1, r6, ne -; CHECK-NEXT: vmov q0[2], q0[0], r0, r5 -; CHECK-NEXT: vmov q0[3], q0[1], r1, r2 +; CHECK-NEXT: csel r3, r5, r6, ne +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r1, r1, r2, ne +; CHECK-NEXT: vmov q0[2], q0[0], r0, r7 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r3 ; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc} +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop {r4, r5, r6, r7, pc} entry: %conv = fptoui <2 x double> %x to <2 x i128> %0 = icmp ult <2 x i128> %conv, @@ -1219,54 +1118,47 @@ ; CHECK-NEXT: vmov r0, r1, d9 ; CHECK-NEXT: bl __fixdfti ; CHECK-NEXT: vmov r12, lr, d8 -; CHECK-NEXT: subs r5, r2, #1 -; CHECK-NEXT: sbcs 
r5, r3, #0 -; CHECK-NEXT: mov.w r6, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r6, #1 -; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: subs r4, r2, #1 +; CHECK-NEXT: sbcs r4, r3, #0 ; CHECK-NEXT: mov.w r8, #1 -; CHECK-NEXT: csel r0, r0, r6, ne -; CHECK-NEXT: csel r3, r3, r6, ne -; CHECK-NEXT: csel r5, r1, r6, ne +; CHECK-NEXT: cset r5, lt +; CHECK-NEXT: movs r7, #0 +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r0, r0, r5, ne +; CHECK-NEXT: csel r3, r3, r5, ne ; CHECK-NEXT: csel r2, r2, r8, ne +; CHECK-NEXT: csel r4, r1, r5, ne ; CHECK-NEXT: rsbs r1, r0, #0 -; CHECK-NEXT: mov.w r4, #0 -; CHECK-NEXT: sbcs.w r1, r4, r5 -; CHECK-NEXT: sbcs.w r1, r4, r2 -; CHECK-NEXT: mov.w r6, #0 -; CHECK-NEXT: sbcs.w r1, r4, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r6, #1 +; CHECK-NEXT: sbcs.w r1, r7, r4 +; CHECK-NEXT: sbcs.w r1, r7, r2 +; CHECK-NEXT: sbcs.w r1, r7, r3 +; CHECK-NEXT: cset r6, lt ; CHECK-NEXT: cmp r6, #0 ; CHECK-NEXT: csel r9, r0, r6, ne ; CHECK-NEXT: mov r0, r12 ; CHECK-NEXT: mov r1, lr ; CHECK-NEXT: bl __fixdfti -; CHECK-NEXT: subs r7, r2, #1 -; CHECK-NEXT: sbcs r7, r3, #0 -; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r0, r0, r7, ne +; CHECK-NEXT: subs r5, r2, #1 +; CHECK-NEXT: sbcs r5, r3, #0 +; CHECK-NEXT: cset r5, lt +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r0, r0, r5, ne ; CHECK-NEXT: csel r2, r2, r8, ne -; CHECK-NEXT: csel r3, r3, r7, ne -; CHECK-NEXT: csel r1, r1, r7, ne -; CHECK-NEXT: rsbs r7, r0, #0 -; CHECK-NEXT: sbcs.w r7, r4, r1 -; CHECK-NEXT: sbcs.w r2, r4, r2 -; CHECK-NEXT: sbcs.w r2, r4, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r4, #1 -; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r0, r0, r4, ne +; CHECK-NEXT: csel r3, r3, r5, ne +; CHECK-NEXT: csel r1, r1, r5, ne +; CHECK-NEXT: rsbs r5, r0, #0 +; CHECK-NEXT: sbcs.w r5, r7, r1 +; CHECK-NEXT: sbcs.w r2, r7, r2 +; CHECK-NEXT: sbcs.w r2, r7, r3 +; CHECK-NEXT: cset r2, lt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r0, r0, 
r2, ne ; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r2, r5, r6, ne -; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r1, r1, r4, ne +; CHECK-NEXT: csel r3, r4, r6, ne +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r1, r1, r2, ne ; CHECK-NEXT: vmov q0[2], q0[0], r0, r9 -; CHECK-NEXT: vmov q0[3], q0[1], r1, r2 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r3 ; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} @@ -1287,63 +1179,53 @@ ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 -; CHECK-NEXT: vmov r8, r0, d0 +; CHECK-NEXT: vmov r9, r0, d0 ; CHECK-NEXT: bl __fixsfti ; CHECK-NEXT: subs.w r7, r0, #-1 ; CHECK-NEXT: mvn r10, #-2147483648 ; CHECK-NEXT: sbcs.w r7, r1, r10 -; CHECK-NEXT: mov.w r11, #-2147483648 +; CHECK-NEXT: mov.w r4, #-1 ; CHECK-NEXT: sbcs r7, r2, #0 -; CHECK-NEXT: mov.w r4, #0 +; CHECK-NEXT: mov.w r11, #-2147483648 ; CHECK-NEXT: sbcs r7, r3, #0 -; CHECK-NEXT: mov.w r5, #0 -; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cset r7, lt ; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r5, r0, r4, ne ; CHECK-NEXT: csel r3, r3, r7, ne ; CHECK-NEXT: csel r2, r2, r7, ne -; CHECK-NEXT: mov.w r7, #-1 ; CHECK-NEXT: csel r1, r1, r10, ne -; CHECK-NEXT: csel r9, r0, r7, ne -; CHECK-NEXT: rsbs.w r0, r9, #0 +; CHECK-NEXT: rsbs r0, r5, #0 ; CHECK-NEXT: sbcs.w r0, r11, r1 -; CHECK-NEXT: sbcs.w r0, r7, r2 -; CHECK-NEXT: sbcs.w r0, r7, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r4, #1 -; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r0, r1, r11, ne -; CHECK-NEXT: str r0, [sp] @ 4-byte Spill -; CHECK-NEXT: mov r0, r8 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: subs.w r6, r0, #-1 -; CHECK-NEXT: sbcs.w r6, r1, r10 -; CHECK-NEXT: sbcs r6, r2, #0 -; CHECK-NEXT: sbcs r6, r3, #0 -; CHECK-NEXT: mov.w r6, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r6, #1 +; CHECK-NEXT: sbcs.w r0, r4, r2 +; CHECK-NEXT: sbcs.w r0, r4, r3 +; CHECK-NEXT: cset r6, lt +; 
CHECK-NEXT: mov r0, r9 ; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r0, r0, r7, ne +; CHECK-NEXT: csel r8, r1, r11, ne +; CHECK-NEXT: bl __fixsfti +; CHECK-NEXT: subs.w r7, r0, #-1 +; CHECK-NEXT: sbcs.w r7, r1, r10 +; CHECK-NEXT: sbcs r7, r2, #0 +; CHECK-NEXT: sbcs r7, r3, #0 +; CHECK-NEXT: cset r7, lt +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r0, r0, r4, ne ; CHECK-NEXT: csel r1, r1, r10, ne -; CHECK-NEXT: csel r3, r3, r6, ne -; CHECK-NEXT: csel r2, r2, r6, ne -; CHECK-NEXT: rsbs r6, r0, #0 -; CHECK-NEXT: sbcs.w r6, r11, r1 -; CHECK-NEXT: sbcs.w r2, r7, r2 -; CHECK-NEXT: sbcs.w r2, r7, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r5, #1 -; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r3, r3, r7, ne +; CHECK-NEXT: csel r2, r2, r7, ne +; CHECK-NEXT: rsbs r7, r0, #0 +; CHECK-NEXT: sbcs.w r7, r11, r1 +; CHECK-NEXT: sbcs.w r2, r4, r2 +; CHECK-NEXT: sbcs.w r2, r4, r3 +; CHECK-NEXT: cset r2, lt +; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: csel r1, r1, r11, ne -; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r2, r9, r4, ne -; CHECK-NEXT: cmp r5, #0 -; CHECK-NEXT: csel r0, r0, r5, ne -; CHECK-NEXT: vmov q0[2], q0[0], r0, r2 -; CHECK-NEXT: ldr r0, [sp] @ 4-byte Reload -; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r3, r5, r6, ne +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: vmov q0[2], q0[0], r0, r3 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r8 ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} entry: @@ -1359,34 +1241,33 @@ define arm_aapcs_vfpcc <2 x i64> @utest_f32i64(<2 x float> %x) { ; CHECK-LABEL: utest_f32i64: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr} -; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: .save {r4, r5, r6, r7, lr} +; CHECK-NEXT: push {r4, r5, r6, r7, lr} +; CHECK-NEXT: .pad #4 +; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: vmov r4, r0, d0 ; CHECK-NEXT: bl __fixunssfti -; CHECK-NEXT: mov r8, r1 +; CHECK-NEXT: mov r5, r1 ; 
CHECK-NEXT: subs r1, r2, #1 ; CHECK-NEXT: sbcs r1, r3, #0 -; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r7, #1 -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r5, r0, r7, ne +; CHECK-NEXT: cset r6, lo +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r7, r0, r6, ne ; CHECK-NEXT: mov r0, r4 -; CHECK-NEXT: movs r6, #0 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: subs r2, #1 ; CHECK-NEXT: sbcs r2, r3, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r6, #1 -; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r0, r0, r6, ne -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r2, r8, r7, ne +; CHECK-NEXT: cset r2, lo +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r0, r0, r2, ne ; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r1, r1, r6, ne -; CHECK-NEXT: vmov q0[2], q0[0], r0, r5 -; CHECK-NEXT: vmov q0[3], q0[1], r1, r2 -; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc} +; CHECK-NEXT: csel r3, r5, r6, ne +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r1, r1, r2, ne +; CHECK-NEXT: vmov q0[2], q0[0], r0, r7 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r3 +; CHECK-NEXT: add sp, #4 +; CHECK-NEXT: pop {r4, r5, r6, r7, pc} entry: %conv = fptoui <2 x float> %x to <2 x i128> %0 = icmp ult <2 x i128> %conv, @@ -1402,55 +1283,48 @@ ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 -; CHECK-NEXT: vmov r4, r0, d0 -; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: subs r7, r2, #1 -; CHECK-NEXT: mov.w r9, #1 -; CHECK-NEXT: sbcs r7, r3, #0 -; CHECK-NEXT: mov.w r5, #0 -; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r0, r0, r7, ne -; CHECK-NEXT: csel r3, r3, r7, ne -; CHECK-NEXT: csel r6, r1, r7, ne -; CHECK-NEXT: csel r2, r2, r9, ne -; CHECK-NEXT: rsbs r1, r0, #0 -; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: sbcs.w r1, r5, r6 -; CHECK-NEXT: sbcs.w r1, r5, r2 -; CHECK-NEXT: sbcs.w r1, r5, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r8, r0, 
r7, ne -; CHECK-NEXT: mov r0, r4 +; CHECK-NEXT: vmov r6, r0, d0 ; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: subs r4, r2, #1 -; CHECK-NEXT: sbcs r4, r3, #0 -; CHECK-NEXT: mov.w r4, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r4, #1 +; CHECK-NEXT: subs r5, r2, #1 +; CHECK-NEXT: mov.w r8, #1 +; CHECK-NEXT: sbcs r5, r3, #0 +; CHECK-NEXT: cset r4, lt ; CHECK-NEXT: cmp r4, #0 ; CHECK-NEXT: csel r0, r0, r4, ne -; CHECK-NEXT: csel r2, r2, r9, ne ; CHECK-NEXT: csel r3, r3, r4, ne -; CHECK-NEXT: csel r1, r1, r4, ne -; CHECK-NEXT: rsbs r4, r0, #0 -; CHECK-NEXT: sbcs.w r4, r5, r1 -; CHECK-NEXT: sbcs.w r2, r5, r2 -; CHECK-NEXT: sbcs.w r2, r5, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r5, #1 -; CHECK-NEXT: cmp r5, #0 -; CHECK-NEXT: csel r0, r0, r5, ne +; CHECK-NEXT: csel r5, r1, r4, ne +; CHECK-NEXT: csel r2, r2, r8, ne +; CHECK-NEXT: rsbs r1, r0, #0 +; CHECK-NEXT: mov.w r4, #0 +; CHECK-NEXT: sbcs.w r1, r4, r5 +; CHECK-NEXT: sbcs.w r1, r4, r2 +; CHECK-NEXT: sbcs.w r1, r4, r3 +; CHECK-NEXT: cset r7, lt ; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r2, r6, r7, ne -; CHECK-NEXT: cmp r5, #0 -; CHECK-NEXT: csel r1, r1, r5, ne -; CHECK-NEXT: vmov q0[2], q0[0], r0, r8 -; CHECK-NEXT: vmov q0[3], q0[1], r1, r2 +; CHECK-NEXT: csel r9, r0, r7, ne +; CHECK-NEXT: mov r0, r6 +; CHECK-NEXT: bl __fixsfti +; CHECK-NEXT: subs r6, r2, #1 +; CHECK-NEXT: sbcs r6, r3, #0 +; CHECK-NEXT: cset r6, lt +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: csel r2, r2, r8, ne +; CHECK-NEXT: csel r3, r3, r6, ne +; CHECK-NEXT: csel r1, r1, r6, ne +; CHECK-NEXT: rsbs r6, r0, #0 +; CHECK-NEXT: sbcs.w r6, r4, r1 +; CHECK-NEXT: sbcs.w r2, r4, r2 +; CHECK-NEXT: sbcs.w r2, r4, r3 +; CHECK-NEXT: cset r2, lt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r3, r5, r7, ne +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r1, r1, r2, ne +; CHECK-NEXT: vmov q0[2], q0[0], r0, r9 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r3 ; CHECK-NEXT: add sp, #4 
; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} entry: @@ -1466,10 +1340,8 @@ define arm_aapcs_vfpcc <2 x i64> @stest_f16i64(<2 x half> %x) { ; CHECK-LABEL: stest_f16i64: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} -; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} -; CHECK-NEXT: .pad #4 -; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, lr} ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: vmov.u16 r0, q0[1] @@ -1478,59 +1350,50 @@ ; CHECK-NEXT: subs.w r7, r0, #-1 ; CHECK-NEXT: mvn r9, #-2147483648 ; CHECK-NEXT: sbcs.w r7, r1, r9 -; CHECK-NEXT: mov.w r6, #-1 -; CHECK-NEXT: sbcs r7, r2, #0 ; CHECK-NEXT: mov.w r10, #-2147483648 +; CHECK-NEXT: sbcs r7, r2, #0 ; CHECK-NEXT: sbcs r7, r3, #0 -; CHECK-NEXT: mov.w r4, #0 -; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cset r7, lt ; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r11, r0, r6, ne ; CHECK-NEXT: csel r3, r3, r7, ne ; CHECK-NEXT: csel r2, r2, r7, ne +; CHECK-NEXT: mov.w r7, #-1 ; CHECK-NEXT: csel r1, r1, r9, ne -; CHECK-NEXT: rsbs.w r0, r11, #0 -; CHECK-NEXT: mov.w r7, #0 +; CHECK-NEXT: csel r4, r0, r7, ne +; CHECK-NEXT: rsbs r0, r4, #0 ; CHECK-NEXT: sbcs.w r0, r10, r1 -; CHECK-NEXT: sbcs.w r0, r6, r2 -; CHECK-NEXT: sbcs.w r0, r6, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 -; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: sbcs.w r0, r7, r2 +; CHECK-NEXT: sbcs.w r0, r7, r3 +; CHECK-NEXT: cset r5, lt ; CHECK-NEXT: vmov.u16 r0, q4[0] +; CHECK-NEXT: cmp r5, #0 ; CHECK-NEXT: csel r8, r1, r10, ne ; CHECK-NEXT: bl __fixhfti -; CHECK-NEXT: subs.w r5, r0, #-1 -; CHECK-NEXT: sbcs.w r5, r1, r9 -; CHECK-NEXT: sbcs r5, r2, #0 -; CHECK-NEXT: sbcs r5, r3, #0 -; CHECK-NEXT: mov.w r5, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r5, #1 -; CHECK-NEXT: cmp r5, #0 -; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: subs.w r6, r0, #-1 +; 
CHECK-NEXT: sbcs.w r6, r1, r9 +; CHECK-NEXT: sbcs r6, r2, #0 +; CHECK-NEXT: sbcs r6, r3, #0 +; CHECK-NEXT: cset r6, lt +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r0, r0, r7, ne ; CHECK-NEXT: csel r1, r1, r9, ne -; CHECK-NEXT: csel r3, r3, r5, ne -; CHECK-NEXT: csel r2, r2, r5, ne -; CHECK-NEXT: rsbs r5, r0, #0 -; CHECK-NEXT: sbcs.w r5, r10, r1 -; CHECK-NEXT: sbcs.w r2, r6, r2 -; CHECK-NEXT: sbcs.w r2, r6, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r4, #1 -; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r3, r3, r6, ne +; CHECK-NEXT: csel r2, r2, r6, ne +; CHECK-NEXT: rsbs r6, r0, #0 +; CHECK-NEXT: sbcs.w r6, r10, r1 +; CHECK-NEXT: sbcs.w r2, r7, r2 +; CHECK-NEXT: sbcs.w r2, r7, r3 +; CHECK-NEXT: cset r2, lt +; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: csel r1, r1, r10, ne -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r2, r11, r7, ne -; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r0, r0, r4, ne -; CHECK-NEXT: vmov q0[2], q0[0], r0, r2 +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r3, r4, r5, ne +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r0, r0, r2, ne +; CHECK-NEXT: vmov q0[2], q0[0], r0, r3 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r8 ; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: add sp, #4 -; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, pc} entry: %conv = fptosi <2 x half> %x to <2 x i128> %0 = icmp slt <2 x i128> %conv, @@ -1579,53 +1442,46 @@ ; CHECK-NEXT: vmov.u16 r0, q0[1] ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: bl __fixhfti -; CHECK-NEXT: subs r7, r2, #1 +; CHECK-NEXT: subs r5, r2, #1 ; CHECK-NEXT: mov.w r8, #1 -; CHECK-NEXT: sbcs r7, r3, #0 -; CHECK-NEXT: mov.w r4, #0 +; CHECK-NEXT: sbcs r5, r3, #0 ; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: mov.w r6, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r0, r0, r7, ne -; CHECK-NEXT: csel r3, r3, r7, ne +; CHECK-NEXT: cset r5, lt +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r0, r0, r5, ne +; CHECK-NEXT: csel r3, 
r3, r5, ne ; CHECK-NEXT: csel r2, r2, r8, ne -; CHECK-NEXT: csel r5, r1, r7, ne +; CHECK-NEXT: csel r4, r1, r5, ne ; CHECK-NEXT: rsbs r1, r0, #0 -; CHECK-NEXT: sbcs.w r1, r4, r5 -; CHECK-NEXT: sbcs.w r1, r4, r2 -; CHECK-NEXT: sbcs.w r1, r4, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r6, #1 +; CHECK-NEXT: sbcs.w r1, r7, r4 +; CHECK-NEXT: sbcs.w r1, r7, r2 +; CHECK-NEXT: sbcs.w r1, r7, r3 +; CHECK-NEXT: cset r6, lt ; CHECK-NEXT: cmp r6, #0 ; CHECK-NEXT: csel r9, r0, r6, ne ; CHECK-NEXT: vmov.u16 r0, q4[0] ; CHECK-NEXT: bl __fixhfti -; CHECK-NEXT: subs r7, r2, #1 -; CHECK-NEXT: sbcs r7, r3, #0 -; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r0, r0, r7, ne +; CHECK-NEXT: subs r5, r2, #1 +; CHECK-NEXT: sbcs r5, r3, #0 +; CHECK-NEXT: cset r5, lt +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r0, r0, r5, ne ; CHECK-NEXT: csel r2, r2, r8, ne -; CHECK-NEXT: csel r3, r3, r7, ne -; CHECK-NEXT: csel r1, r1, r7, ne -; CHECK-NEXT: rsbs r7, r0, #0 -; CHECK-NEXT: sbcs.w r7, r4, r1 -; CHECK-NEXT: sbcs.w r2, r4, r2 -; CHECK-NEXT: sbcs.w r2, r4, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r4, #1 -; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r0, r0, r4, ne +; CHECK-NEXT: csel r3, r3, r5, ne +; CHECK-NEXT: csel r1, r1, r5, ne +; CHECK-NEXT: rsbs r5, r0, #0 +; CHECK-NEXT: sbcs.w r5, r7, r1 +; CHECK-NEXT: sbcs.w r2, r7, r2 +; CHECK-NEXT: sbcs.w r2, r7, r3 +; CHECK-NEXT: cset r2, lt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r0, r0, r2, ne ; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r2, r5, r6, ne -; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r1, r1, r4, ne +; CHECK-NEXT: csel r3, r4, r6, ne +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r1, r1, r2, ne ; CHECK-NEXT: vmov q0[2], q0[0], r0, r9 -; CHECK-NEXT: vmov q0[3], q0[1], r1, r2 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r3 ; CHECK-NEXT: vpop {d8, d9} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} @@ -2731,10 +2587,8 @@ define 
arm_aapcs_vfpcc <2 x i64> @utest_f64i64_mm(<2 x double> %x) { ; CHECK-LABEL: utest_f64i64_mm: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} -; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} -; CHECK-NEXT: .pad #4 -; CHECK-NEXT: sub sp, #4 +; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr} ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: vmov q4, q0 @@ -2742,43 +2596,38 @@ ; CHECK-NEXT: bl __fixunsdfti ; CHECK-NEXT: mov r8, r1 ; CHECK-NEXT: vmov r4, r1, d8 -; CHECK-NEXT: eor r7, r2, #1 +; CHECK-NEXT: eor r6, r2, #1 ; CHECK-NEXT: subs r2, #1 ; CHECK-NEXT: sbcs r2, r3, #0 -; CHECK-NEXT: mov.w r5, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r5, #1 -; CHECK-NEXT: cmp r5, #0 -; CHECK-NEXT: orr.w r7, r7, r3 -; CHECK-NEXT: csel r0, r0, r5, ne +; CHECK-NEXT: orr.w r6, r6, r3 +; CHECK-NEXT: cset r7, lo ; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: mov.w r6, #0 -; CHECK-NEXT: csel r9, r0, r7, ne +; CHECK-NEXT: csel r0, r0, r7, ne +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r5, r0, r6, ne ; CHECK-NEXT: mov r0, r4 ; CHECK-NEXT: bl __fixunsdfti ; CHECK-NEXT: eor r4, r2, #1 ; CHECK-NEXT: subs r2, #1 ; CHECK-NEXT: sbcs r2, r3, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r6, #1 -; CHECK-NEXT: cmp r6, #0 ; CHECK-NEXT: orr.w r4, r4, r3 -; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: cset r2, lo +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r0, r0, r2, ne ; CHECK-NEXT: cmp r4, #0 ; CHECK-NEXT: csel r0, r0, r4, ne -; CHECK-NEXT: cmp r5, #0 -; CHECK-NEXT: csel r2, r8, r5, ne ; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r2, r2, r7, ne +; CHECK-NEXT: csel r3, r8, r7, ne ; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r1, r1, r6, ne +; CHECK-NEXT: csel r3, r3, r6, ne +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r1, r1, r2, ne ; CHECK-NEXT: cmp r4, #0 ; CHECK-NEXT: csel r1, r1, r4, ne -; CHECK-NEXT: vmov q0[2], q0[0], r0, r9 -; CHECK-NEXT: vmov q0[3], q0[1], r1, r2 +; CHECK-NEXT: vmov q0[2], 
q0[0], r0, r5 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r3 ; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: add sp, #4 -; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc} entry: %conv = fptoui <2 x double> %x to <2 x i128> %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> ) @@ -2802,77 +2651,70 @@ ; CHECK-NEXT: eor r7, r2, #1 ; CHECK-NEXT: sbcs r6, r3, #0 ; CHECK-NEXT: orr.w r7, r7, r3 -; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: cset r6, lt ; CHECK-NEXT: mov.w r10, #1 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r6, #1 ; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: mov.w r11, #0 ; CHECK-NEXT: csel r0, r0, r6, ne ; CHECK-NEXT: cmp r7, #0 ; CHECK-NEXT: csel r0, r0, r7, ne ; CHECK-NEXT: cmp r6, #0 ; CHECK-NEXT: csel r1, r1, r6, ne ; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r11, r1, r7, ne -; CHECK-NEXT: movs r4, #0 -; CHECK-NEXT: cmp.w r11, #0 -; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: csel r1, r0, r11, ne -; CHECK-NEXT: csel r6, r0, r1, eq +; CHECK-NEXT: csel r4, r1, r7, ne +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r1, r0, r4, ne +; CHECK-NEXT: csel r7, r0, r1, eq ; CHECK-NEXT: cmp r2, #1 ; CHECK-NEXT: csel r1, r2, r10, lo ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csel r2, r2, r10, mi ; CHECK-NEXT: csel r1, r1, r2, eq -; CHECK-NEXT: csel r2, r3, r4, mi +; CHECK-NEXT: csel r2, r3, r11, mi ; CHECK-NEXT: rsbs r3, r1, #0 -; CHECK-NEXT: sbcs.w r3, r4, r2 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r3, r0, r7, ne +; CHECK-NEXT: sbcs.w r3, r11, r2 +; CHECK-NEXT: cset r6, lt +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r3, r0, r6, ne ; CHECK-NEXT: orrs.w r9, r1, r2 ; CHECK-NEXT: vmov r0, r1, d8 -; CHECK-NEXT: csel r8, r6, r3, eq +; CHECK-NEXT: csel r8, r7, r3, eq ; CHECK-NEXT: bl __fixdfti ; CHECK-NEXT: subs r5, r2, #1 -; CHECK-NEXT: eor r6, r2, #1 +; CHECK-NEXT: eor r7, r2, #1 ; CHECK-NEXT: sbcs r5, r3, #0 -; CHECK-NEXT: orr.w r6, r6, r3 -; CHECK-NEXT: mov.w r5, 
#0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r5, #1 +; CHECK-NEXT: orr.w r7, r7, r3 +; CHECK-NEXT: cset r5, lt ; CHECK-NEXT: cmp r5, #0 ; CHECK-NEXT: csel r0, r0, r5, ne -; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r0, r0, r7, ne ; CHECK-NEXT: cmp r5, #0 ; CHECK-NEXT: csel r1, r1, r5, ne -; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r1, r1, r6, ne +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r1, r1, r7, ne ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r6, r0, r1, ne -; CHECK-NEXT: csel r6, r0, r6, eq +; CHECK-NEXT: csel r7, r0, r1, ne +; CHECK-NEXT: csel r7, r0, r7, eq ; CHECK-NEXT: cmp r2, #1 ; CHECK-NEXT: csel r5, r2, r10, lo ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csel r2, r2, r10, mi -; CHECK-NEXT: csel r3, r3, r4, mi +; CHECK-NEXT: csel r3, r3, r11, mi ; CHECK-NEXT: csel r2, r5, r2, eq ; CHECK-NEXT: rsbs r5, r2, #0 -; CHECK-NEXT: sbcs.w r5, r4, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r4, #1 -; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r0, r0, r4, ne +; CHECK-NEXT: sbcs.w r5, r11, r3 +; CHECK-NEXT: cset r5, lt +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r0, r0, r5, ne ; CHECK-NEXT: orrs r2, r3 -; CHECK-NEXT: csel r0, r6, r0, eq -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r3, r11, r7, ne +; CHECK-NEXT: csel r0, r7, r0, eq +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r3, r4, r6, ne ; CHECK-NEXT: cmp.w r9, #0 -; CHECK-NEXT: csel r3, r11, r3, eq -; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r7, r1, r4, ne +; CHECK-NEXT: csel r3, r4, r3, eq +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r7, r1, r5, ne ; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: csel r1, r1, r7, eq ; CHECK-NEXT: vmov q0[2], q0[0], r0, r8 @@ -3005,49 +2847,42 @@ define arm_aapcs_vfpcc <2 x i64> @utest_f32i64_mm(<2 x float> %x) { ; CHECK-LABEL: utest_f32i64_mm: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} -; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} -; CHECK-NEXT: .pad #4 -; CHECK-NEXT: sub sp, #4 +; 
CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr} +; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr} ; CHECK-NEXT: vmov r5, r0, d0 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: mov r8, r1 ; CHECK-NEXT: eor r1, r2, #1 -; CHECK-NEXT: orr.w r7, r1, r3 +; CHECK-NEXT: orr.w r6, r1, r3 ; CHECK-NEXT: subs r1, r2, #1 ; CHECK-NEXT: sbcs r1, r3, #0 -; CHECK-NEXT: mov.w r4, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r4, #1 -; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r0, r0, r4, ne +; CHECK-NEXT: cset r7, lo ; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r9, r0, r7, ne +; CHECK-NEXT: csel r0, r0, r7, ne +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r4, r0, r6, ne ; CHECK-NEXT: mov r0, r5 -; CHECK-NEXT: movs r6, #0 ; CHECK-NEXT: bl __fixunssfti ; CHECK-NEXT: eor r5, r2, #1 ; CHECK-NEXT: subs r2, #1 ; CHECK-NEXT: sbcs r2, r3, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r6, #1 -; CHECK-NEXT: cmp r6, #0 ; CHECK-NEXT: orr.w r5, r5, r3 -; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: cset r2, lo +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r0, r0, r2, ne ; CHECK-NEXT: cmp r5, #0 ; CHECK-NEXT: csel r0, r0, r5, ne -; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r2, r8, r4, ne ; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r2, r2, r7, ne +; CHECK-NEXT: csel r3, r8, r7, ne ; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r1, r1, r6, ne +; CHECK-NEXT: csel r3, r3, r6, ne +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csel r1, r1, r2, ne ; CHECK-NEXT: cmp r5, #0 ; CHECK-NEXT: csel r1, r1, r5, ne -; CHECK-NEXT: vmov q0[2], q0[0], r0, r9 -; CHECK-NEXT: vmov q0[3], q0[1], r1, r2 -; CHECK-NEXT: add sp, #4 -; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, pc} +; CHECK-NEXT: vmov q0[2], q0[0], r0, r4 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r3 +; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc} entry: %conv = fptoui <2 x float> %x to <2 x i128> %spec.store.select = call <2 x i128> @llvm.umin.v2i128(<2 x i128> %conv, <2 x i128> ) @@ -3062,61 +2897,55 @@ ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: 
.pad #4 ; CHECK-NEXT: sub sp, #4 -; CHECK-NEXT: vmov r9, r0, d0 +; CHECK-NEXT: vmov r10, r0, d0 ; CHECK-NEXT: bl __fixsfti ; CHECK-NEXT: subs r6, r2, #1 ; CHECK-NEXT: eor r7, r2, #1 ; CHECK-NEXT: sbcs r6, r3, #0 ; CHECK-NEXT: orr.w r7, r7, r3 -; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: cset r6, lt ; CHECK-NEXT: mov.w r11, #1 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r6, #1 ; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: mov.w r8, #0 ; CHECK-NEXT: csel r0, r0, r6, ne ; CHECK-NEXT: cmp r7, #0 ; CHECK-NEXT: csel r0, r0, r7, ne ; CHECK-NEXT: cmp r6, #0 ; CHECK-NEXT: csel r1, r1, r6, ne ; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r8, r1, r7, ne -; CHECK-NEXT: movs r5, #0 -; CHECK-NEXT: cmp.w r8, #0 -; CHECK-NEXT: csel r1, r0, r8, ne +; CHECK-NEXT: csel r5, r1, r7, ne +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r1, r0, r5, ne ; CHECK-NEXT: csel r1, r0, r1, eq ; CHECK-NEXT: cmp r2, #1 ; CHECK-NEXT: csel r7, r2, r11, lo ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csel r2, r2, r11, mi -; CHECK-NEXT: csel r3, r3, r5, mi +; CHECK-NEXT: csel r3, r3, r8, mi ; CHECK-NEXT: csel r2, r7, r2, eq ; CHECK-NEXT: rsbs r7, r2, #0 -; CHECK-NEXT: sbcs.w r7, r5, r3 -; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: sbcs.w r7, r8, r3 +; CHECK-NEXT: cset r7, lt ; CHECK-NEXT: cmp r7, #0 ; CHECK-NEXT: csel r0, r0, r7, ne -; CHECK-NEXT: orrs.w r10, r2, r3 +; CHECK-NEXT: orrs.w r9, r2, r3 ; CHECK-NEXT: csel r0, r1, r0, eq ; CHECK-NEXT: str r0, [sp] @ 4-byte Spill -; CHECK-NEXT: mov r0, r9 +; CHECK-NEXT: mov r0, r10 ; CHECK-NEXT: bl __fixsfti -; CHECK-NEXT: subs r6, r2, #1 -; CHECK-NEXT: eor r4, r2, #1 -; CHECK-NEXT: sbcs r6, r3, #0 -; CHECK-NEXT: orr.w r4, r4, r3 -; CHECK-NEXT: mov.w r6, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r6, #1 -; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: subs r4, r2, #1 +; CHECK-NEXT: eor r6, r2, #1 +; CHECK-NEXT: sbcs r4, r3, #0 +; CHECK-NEXT: orr.w r6, r6, r3 +; CHECK-NEXT: cset r4, lt ; CHECK-NEXT: cmp 
r4, #0 ; CHECK-NEXT: csel r0, r0, r4, ne ; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r1, r1, r6, ne +; CHECK-NEXT: csel r0, r0, r6, ne ; CHECK-NEXT: cmp r4, #0 ; CHECK-NEXT: csel r1, r1, r4, ne +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r1, r1, r6, ne ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csel r6, r0, r1, ne ; CHECK-NEXT: csel r6, r0, r6, eq @@ -3124,22 +2953,21 @@ ; CHECK-NEXT: csel r4, r2, r11, lo ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csel r2, r2, r11, mi -; CHECK-NEXT: csel r3, r3, r5, mi +; CHECK-NEXT: csel r3, r3, r8, mi ; CHECK-NEXT: csel r2, r4, r2, eq ; CHECK-NEXT: rsbs r4, r2, #0 -; CHECK-NEXT: sbcs.w r4, r5, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r5, #1 -; CHECK-NEXT: cmp r5, #0 -; CHECK-NEXT: csel r0, r0, r5, ne +; CHECK-NEXT: sbcs.w r4, r8, r3 +; CHECK-NEXT: cset r4, lt +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r0, r0, r4, ne ; CHECK-NEXT: orrs r2, r3 ; CHECK-NEXT: csel r0, r6, r0, eq ; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r3, r8, r7, ne -; CHECK-NEXT: cmp.w r10, #0 -; CHECK-NEXT: csel r3, r8, r3, eq -; CHECK-NEXT: cmp r5, #0 -; CHECK-NEXT: csel r7, r1, r5, ne +; CHECK-NEXT: csel r3, r5, r7, ne +; CHECK-NEXT: cmp.w r9, #0 +; CHECK-NEXT: csel r3, r5, r3, eq +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r7, r1, r4, ne ; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: ldr r2, [sp] @ 4-byte Reload ; CHECK-NEXT: csel r1, r1, r7, eq @@ -3317,77 +3145,70 @@ ; CHECK-NEXT: eor r7, r2, #1 ; CHECK-NEXT: sbcs r6, r3, #0 ; CHECK-NEXT: orr.w r7, r7, r3 -; CHECK-NEXT: mov.w r6, #0 +; CHECK-NEXT: cset r6, lt ; CHECK-NEXT: mov.w r10, #1 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r6, #1 ; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: mov.w r11, #0 ; CHECK-NEXT: csel r0, r0, r6, ne ; CHECK-NEXT: cmp r7, #0 ; CHECK-NEXT: csel r0, r0, r7, ne ; CHECK-NEXT: cmp r6, #0 ; CHECK-NEXT: csel r1, r1, r6, ne ; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r11, r1, r7, ne -; CHECK-NEXT: movs r4, #0 -; CHECK-NEXT: cmp.w r11, #0 -; CHECK-NEXT: csel r1, r0, r11, ne +; CHECK-NEXT: csel 
r4, r1, r7, ne +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csel r1, r0, r4, ne ; CHECK-NEXT: csel r1, r0, r1, eq ; CHECK-NEXT: cmp r2, #1 -; CHECK-NEXT: csel r7, r2, r10, lo +; CHECK-NEXT: csel r6, r2, r10, lo ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csel r2, r2, r10, mi -; CHECK-NEXT: csel r3, r3, r4, mi -; CHECK-NEXT: csel r2, r7, r2, eq -; CHECK-NEXT: rsbs r7, r2, #0 -; CHECK-NEXT: sbcs.w r7, r4, r3 -; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r0, r0, r7, ne +; CHECK-NEXT: csel r3, r3, r11, mi +; CHECK-NEXT: csel r2, r6, r2, eq +; CHECK-NEXT: rsbs r6, r2, #0 +; CHECK-NEXT: sbcs.w r6, r11, r3 +; CHECK-NEXT: cset r6, lt +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r0, r0, r6, ne ; CHECK-NEXT: orrs.w r9, r2, r3 ; CHECK-NEXT: csel r8, r1, r0, eq ; CHECK-NEXT: vmov.u16 r0, q4[0] ; CHECK-NEXT: bl __fixhfti ; CHECK-NEXT: subs r5, r2, #1 -; CHECK-NEXT: eor r6, r2, #1 +; CHECK-NEXT: eor r7, r2, #1 ; CHECK-NEXT: sbcs r5, r3, #0 -; CHECK-NEXT: orr.w r6, r6, r3 -; CHECK-NEXT: mov.w r5, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r5, #1 +; CHECK-NEXT: orr.w r7, r7, r3 +; CHECK-NEXT: cset r5, lt ; CHECK-NEXT: cmp r5, #0 ; CHECK-NEXT: csel r0, r0, r5, ne -; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r0, r0, r6, ne +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r0, r0, r7, ne ; CHECK-NEXT: cmp r5, #0 ; CHECK-NEXT: csel r1, r1, r5, ne -; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csel r1, r1, r6, ne +; CHECK-NEXT: cmp r7, #0 +; CHECK-NEXT: csel r1, r1, r7, ne ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csel r6, r0, r1, ne -; CHECK-NEXT: csel r6, r0, r6, eq +; CHECK-NEXT: csel r7, r0, r1, ne +; CHECK-NEXT: csel r7, r0, r7, eq ; CHECK-NEXT: cmp r2, #1 ; CHECK-NEXT: csel r5, r2, r10, lo ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csel r2, r2, r10, mi -; CHECK-NEXT: csel r3, r3, r4, mi +; CHECK-NEXT: csel r3, r3, r11, mi ; CHECK-NEXT: csel r2, r5, r2, eq ; CHECK-NEXT: rsbs r5, r2, #0 -; CHECK-NEXT: sbcs.w r5, r4, r3 -; 
CHECK-NEXT: it lt -; CHECK-NEXT: movlt r4, #1 -; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r0, r0, r4, ne +; CHECK-NEXT: sbcs.w r5, r11, r3 +; CHECK-NEXT: cset r5, lt +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r0, r0, r5, ne ; CHECK-NEXT: orrs r2, r3 -; CHECK-NEXT: csel r0, r6, r0, eq -; CHECK-NEXT: cmp r7, #0 -; CHECK-NEXT: csel r3, r11, r7, ne +; CHECK-NEXT: csel r0, r7, r0, eq +; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: csel r3, r4, r6, ne ; CHECK-NEXT: cmp.w r9, #0 -; CHECK-NEXT: csel r3, r11, r3, eq -; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r7, r1, r4, ne +; CHECK-NEXT: csel r3, r4, r3, eq +; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: csel r7, r1, r5, ne ; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: csel r1, r1, r7, eq ; CHECK-NEXT: vmov q0[2], q0[0], r0, r8 diff --git a/llvm/test/CodeGen/Thumb2/mve-laneinterleaving.ll b/llvm/test/CodeGen/Thumb2/mve-laneinterleaving.ll --- a/llvm/test/CodeGen/Thumb2/mve-laneinterleaving.ll +++ b/llvm/test/CodeGen/Thumb2/mve-laneinterleaving.ll @@ -346,99 +346,91 @@ ; CHECK-NEXT: vand q1, q1, q3 ; CHECK-NEXT: vmov.f32 s12, s2 ; CHECK-NEXT: vmov.f32 s2, s3 -; CHECK-NEXT: vmov r12, r2, d5 -; CHECK-NEXT: vmov r8, r9, d3 +; CHECK-NEXT: vmov r11, r2, d5 +; CHECK-NEXT: vmov r9, lr, d3 +; CHECK-NEXT: vmov r10, s12 ; CHECK-NEXT: vmov r1, s2 ; CHECK-NEXT: vmov.f32 s2, s1 -; CHECK-NEXT: vmov lr, s2 +; CHECK-NEXT: vmov r8, s2 ; CHECK-NEXT: str r1, [sp, #4] @ 4-byte Spill -; CHECK-NEXT: adds.w r4, r1, r12 +; CHECK-NEXT: adds.w r4, r1, r11 ; CHECK-NEXT: asr.w r0, r1, #31 -; CHECK-NEXT: adc.w r5, r0, r2 -; CHECK-NEXT: asrl r4, r5, r12 -; CHECK-NEXT: subs.w r0, r4, r12 -; CHECK-NEXT: sbc.w r2, r5, r2 -; CHECK-NEXT: umull r0, r4, r0, r12 -; CHECK-NEXT: adds.w r6, lr, r8 -; CHECK-NEXT: mla r3, r2, r12, r4 -; CHECK-NEXT: asr.w r5, lr, #31 -; CHECK-NEXT: adc.w r5, r5, r9 +; CHECK-NEXT: adc.w r3, r0, r2 +; CHECK-NEXT: asrl r4, r3, r11 +; CHECK-NEXT: subs.w r0, r4, r11 +; CHECK-NEXT: sbc.w r2, r3, r2 +; CHECK-NEXT: umull r0, r4, r0, r11 +; CHECK-NEXT: 
adds.w r6, r8, r9 +; CHECK-NEXT: mla r7, r2, r11, r4 +; CHECK-NEXT: asr.w r3, r8, #31 +; CHECK-NEXT: adc.w r3, r3, lr ; CHECK-NEXT: rsbs r2, r1, #0 -; CHECK-NEXT: asrl r6, r5, r8 -; CHECK-NEXT: lsll r0, r3, r2 -; CHECK-NEXT: subs.w r7, r6, r8 +; CHECK-NEXT: asrl r6, r3, r9 +; CHECK-NEXT: lsll r0, r7, r2 +; CHECK-NEXT: subs.w r5, r6, r9 ; CHECK-NEXT: vmov r6, r2, d4 -; CHECK-NEXT: sbc.w r10, r5, r9 -; CHECK-NEXT: vmov r5, s12 -; CHECK-NEXT: lsll r0, r3, r12 -; CHECK-NEXT: adds r4, r5, r6 -; CHECK-NEXT: asr.w r3, r5, #31 -; CHECK-NEXT: adcs r3, r2 -; CHECK-NEXT: asrl r4, r3, r6 +; CHECK-NEXT: lsll r0, r7, r11 +; CHECK-NEXT: sbc.w r3, r3, lr +; CHECK-NEXT: asr.w r7, r10, #31 +; CHECK-NEXT: adds.w r4, r10, r6 +; CHECK-NEXT: adcs r7, r2 +; CHECK-NEXT: asrl r4, r7, r6 ; CHECK-NEXT: subs r4, r4, r6 -; CHECK-NEXT: sbc.w r2, r3, r2 -; CHECK-NEXT: umull r4, r3, r4, r6 -; CHECK-NEXT: mla r3, r2, r6, r3 -; CHECK-NEXT: rsbs r2, r5, #0 -; CHECK-NEXT: lsll r4, r3, r2 -; CHECK-NEXT: lsll r4, r3, r6 -; CHECK-NEXT: eors r6, r5 -; CHECK-NEXT: vmov q3[2], q3[0], r4, r0 -; CHECK-NEXT: umull r2, r0, r7, r8 -; CHECK-NEXT: orr.w r6, r6, r5, asr #31 -; CHECK-NEXT: mul r3, r7, r9 -; CHECK-NEXT: vmov r7, s0 -; CHECK-NEXT: orrs r0, r3 +; CHECK-NEXT: sbcs r7, r2 +; CHECK-NEXT: umull r2, r4, r4, r6 +; CHECK-NEXT: mla r7, r7, r6, r4 +; CHECK-NEXT: rsb.w r4, r10, #0 +; CHECK-NEXT: lsll r2, r7, r4 +; CHECK-NEXT: lsll r2, r7, r6 +; CHECK-NEXT: vmov q3[2], q3[0], r2, r0 +; CHECK-NEXT: umull r12, r0, r5, r9 +; CHECK-NEXT: mul r7, r5, lr +; CHECK-NEXT: vmov r5, s0 +; CHECK-NEXT: orrs r0, r7 +; CHECK-NEXT: mla r7, r3, r9, r0 ; CHECK-NEXT: vmov r3, r4, d2 -; CHECK-NEXT: mla r11, r10, r8, r0 -; CHECK-NEXT: asr.w r9, r7, #31 -; CHECK-NEXT: adds r0, r7, r3 -; CHECK-NEXT: adc.w r9, r9, r4 -; CHECK-NEXT: asrl r0, r9, r3 -; CHECK-NEXT: subs.w r10, r0, r3 -; CHECK-NEXT: sbc.w r9, r9, r4 -; CHECK-NEXT: umull r0, r1, r10, r3 -; CHECK-NEXT: mul r4, r10, r4 -; CHECK-NEXT: orr.w r10, r1, r4 -; CHECK-NEXT: eor.w r1, 
lr, r8 -; CHECK-NEXT: orr.w r1, r1, lr, asr #31 -; CHECK-NEXT: eor.w r4, r7, r3 -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: orr.w r4, r4, r7, asr #31 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: rsbs r7, r7, #0 -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: asr.w lr, r5, #31 +; CHECK-NEXT: adds r0, r5, r3 +; CHECK-NEXT: adc.w r1, lr, r4 +; CHECK-NEXT: asrl r0, r1, r3 +; CHECK-NEXT: subs r0, r0, r3 +; CHECK-NEXT: sbc.w lr, r1, r4 +; CHECK-NEXT: umull r2, r1, r0, r3 +; CHECK-NEXT: muls r0, r4, r0 +; CHECK-NEXT: eor.w r4, r5, r3 +; CHECK-NEXT: orr.w r4, r4, r5, asr #31 +; CHECK-NEXT: orrs r1, r0 +; CHECK-NEXT: eor.w r0, r8, r9 +; CHECK-NEXT: orr.w r0, r0, r8, asr #31 +; CHECK-NEXT: mla r1, lr, r3, r1 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: cset r4, eq -; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csetm r4, ne +; CHECK-NEXT: csetm r4, eq ; CHECK-NEXT: vmov.32 q0[1], r4 -; CHECK-NEXT: vmov q0[2], q0[0], r4, r1 +; CHECK-NEXT: vmov q0[2], q0[0], r4, r0 ; CHECK-NEXT: ldr r4, [sp, #4] @ 4-byte Reload ; CHECK-NEXT: vbic q4, q1, q0 -; CHECK-NEXT: eor.w r1, r4, r12 -; CHECK-NEXT: orr.w r1, r1, r4, asr #31 -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne -; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: cset r6, eq -; CHECK-NEXT: cmp r6, #0 -; CHECK-NEXT: csetm r6, ne -; CHECK-NEXT: vmov.32 q5[1], r6 -; CHECK-NEXT: vmov q5[2], q5[0], r6, r1 -; CHECK-NEXT: mla r1, r9, r3, r10 -; CHECK-NEXT: rsb.w r6, lr, #0 +; CHECK-NEXT: eor.w r0, r4, r11 +; CHECK-NEXT: orr.w r0, r0, r4, asr #31 +; CHECK-NEXT: eor.w r4, r10, r6 +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: orr.w r4, r4, r10, asr #31 +; CHECK-NEXT: csetm r0, eq +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: csetm r4, eq +; CHECK-NEXT: vmov.32 q5[1], r4 +; CHECK-NEXT: vmov q5[2], q5[0], r4, r0 +; CHECK-NEXT: rsb.w r0, r8, #0 +; CHECK-NEXT: lsll r12, r7, r0 +; CHECK-NEXT: rsbs r0, r5, #0 +; CHECK-NEXT: lsll r2, r1, r0 ; 
CHECK-NEXT: vbic q1, q2, q5 -; CHECK-NEXT: lsll r2, r11, r6 -; CHECK-NEXT: lsll r0, r1, r7 ; CHECK-NEXT: vand q2, q3, q5 -; CHECK-NEXT: lsll r2, r11, r8 -; CHECK-NEXT: lsll r0, r1, r3 +; CHECK-NEXT: lsll r12, r7, r9 +; CHECK-NEXT: lsll r2, r1, r3 ; CHECK-NEXT: vorr q1, q2, q1 -; CHECK-NEXT: vmov q2[2], q2[0], r0, r2 +; CHECK-NEXT: vmov q2[2], q2[0], r2, r12 ; CHECK-NEXT: vand q0, q2, q0 ; CHECK-NEXT: vorr q0, q0, q4 ; CHECK-NEXT: vmov.f32 s1, s2 diff --git a/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll b/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll --- a/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll +++ b/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll @@ -93,61 +93,55 @@ ; CHECK-LE-NEXT: push {r4, r5, r7, lr} ; CHECK-LE-NEXT: .pad #4 ; CHECK-LE-NEXT: sub sp, #4 -; CHECK-LE-NEXT: ldrd lr, r5, [r1] -; CHECK-LE-NEXT: movs r3, #0 -; CHECK-LE-NEXT: @ implicit-def: $q0 -; CHECK-LE-NEXT: rsbs.w r1, lr, #0 -; CHECK-LE-NEXT: vmov q1[2], q1[0], lr, r5 -; CHECK-LE-NEXT: sbcs.w r1, r3, lr, asr #31 -; CHECK-LE-NEXT: mov.w r1, #0 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r1, #1 -; CHECK-LE-NEXT: rsbs r4, r5, #0 -; CHECK-LE-NEXT: sbcs.w r4, r3, r5, asr #31 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r3, #1 +; CHECK-LE-NEXT: ldrd lr, r1, [r1] +; CHECK-LE-NEXT: rsbs r3, r1, #0 +; CHECK-LE-NEXT: mov.w r12, #0 +; CHECK-LE-NEXT: sbcs.w r3, r12, r1, asr #31 +; CHECK-LE-NEXT: cset r3, lt +; CHECK-LE-NEXT: @ implicit-def: $q1 +; CHECK-LE-NEXT: vmov q0[2], q0[0], lr, r1 ; CHECK-LE-NEXT: cmp r3, #0 ; CHECK-LE-NEXT: it ne ; CHECK-LE-NEXT: mvnne r3, #1 -; CHECK-LE-NEXT: bfi r3, r1, #0, #1 -; CHECK-LE-NEXT: vmov r4, s4 +; CHECK-LE-NEXT: rsbs.w r4, lr, #0 +; CHECK-LE-NEXT: sbcs.w r4, r12, lr, asr #31 +; CHECK-LE-NEXT: mov.w lr, #0 +; CHECK-LE-NEXT: cset r4, lt +; CHECK-LE-NEXT: bfi r3, r4, #0, #1 ; CHECK-LE-NEXT: and r12, r3, #3 -; CHECK-LE-NEXT: lsls r1, r3, #31 +; CHECK-LE-NEXT: lsls r3, r3, #31 ; CHECK-LE-NEXT: itt ne -; CHECK-LE-NEXT: ldrne r1, [r2] -; CHECK-LE-NEXT: vmovne.32 q0[0], r1 
+; CHECK-LE-NEXT: ldrne r3, [r2] +; CHECK-LE-NEXT: vmovne.32 q1[0], r3 ; CHECK-LE-NEXT: lsls.w r1, r12, #30 ; CHECK-LE-NEXT: itt mi ; CHECK-LE-NEXT: ldrmi r1, [r2, #4] -; CHECK-LE-NEXT: vmovmi.32 q0[2], r1 -; CHECK-LE-NEXT: vmov r3, s0 -; CHECK-LE-NEXT: movs r2, #0 -; CHECK-LE-NEXT: vmov r1, s2 -; CHECK-LE-NEXT: vmov q0[2], q0[0], r3, r1 -; CHECK-LE-NEXT: rsbs r5, r4, #0 -; CHECK-LE-NEXT: asr.w lr, r3, #31 -; CHECK-LE-NEXT: vmov r3, s6 +; CHECK-LE-NEXT: vmovmi.32 q1[2], r1 +; CHECK-LE-NEXT: vmov r1, s6 +; CHECK-LE-NEXT: vmov r2, s2 +; CHECK-LE-NEXT: vmov r3, s4 +; CHECK-LE-NEXT: vmov q1[2], q1[0], r3, r1 +; CHECK-LE-NEXT: rsbs r5, r2, #0 ; CHECK-LE-NEXT: asr.w r12, r1, #31 -; CHECK-LE-NEXT: sbcs.w r1, r2, r4, asr #31 -; CHECK-LE-NEXT: mov.w r1, #0 -; CHECK-LE-NEXT: vmov q0[3], q0[1], lr, r12 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r1, #1 -; CHECK-LE-NEXT: rsbs r5, r3, #0 -; CHECK-LE-NEXT: sbcs.w r3, r2, r3, asr #31 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r2, #1 +; CHECK-LE-NEXT: sbcs.w r1, lr, r2, asr #31 +; CHECK-LE-NEXT: vmov r1, s0 +; CHECK-LE-NEXT: cset r2, lt ; CHECK-LE-NEXT: cmp r2, #0 +; CHECK-LE-NEXT: asr.w r4, r3, #31 ; CHECK-LE-NEXT: it ne ; CHECK-LE-NEXT: mvnne r2, #1 +; CHECK-LE-NEXT: vmov q1[3], q1[1], r4, r12 +; CHECK-LE-NEXT: rsbs r3, r1, #0 +; CHECK-LE-NEXT: sbcs.w r1, lr, r1, asr #31 +; CHECK-LE-NEXT: cset r1, lt ; CHECK-LE-NEXT: bfi r2, r1, #0, #1 ; CHECK-LE-NEXT: and r1, r2, #3 ; CHECK-LE-NEXT: lsls r2, r2, #31 ; CHECK-LE-NEXT: it ne -; CHECK-LE-NEXT: vstrne d0, [r0] +; CHECK-LE-NEXT: vstrne d2, [r0] ; CHECK-LE-NEXT: lsls r1, r1, #30 ; CHECK-LE-NEXT: it mi -; CHECK-LE-NEXT: vstrmi d1, [r0, #8] +; CHECK-LE-NEXT: vstrmi d3, [r0, #8] ; CHECK-LE-NEXT: add sp, #4 ; CHECK-LE-NEXT: pop {r4, r5, r7, pc} ; @@ -157,23 +151,20 @@ ; CHECK-BE-NEXT: push {r4, r5, r7, lr} ; CHECK-BE-NEXT: .pad #4 ; CHECK-BE-NEXT: sub sp, #4 -; CHECK-BE-NEXT: ldrd r12, lr, [r1] -; CHECK-BE-NEXT: rsbs.w r1, lr, #0 -; CHECK-BE-NEXT: mov.w r3, #0 -; CHECK-BE-NEXT: 
sbcs.w r1, r3, lr, asr #31 -; CHECK-BE-NEXT: vmov q0[3], q0[1], r12, lr -; CHECK-BE-NEXT: mov.w lr, #0 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt.w lr, #1 -; CHECK-BE-NEXT: rsbs.w r1, r12, #0 -; CHECK-BE-NEXT: sbcs.w r1, r3, r12, asr #31 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt r3, #1 +; CHECK-BE-NEXT: ldrd r3, lr, [r1] +; CHECK-BE-NEXT: mov.w r12, #0 +; CHECK-BE-NEXT: @ implicit-def: $q2 +; CHECK-BE-NEXT: rsbs r1, r3, #0 +; CHECK-BE-NEXT: vmov q0[3], q0[1], r3, lr +; CHECK-BE-NEXT: sbcs.w r1, r12, r3, asr #31 +; CHECK-BE-NEXT: cset r3, lt ; CHECK-BE-NEXT: cmp r3, #0 ; CHECK-BE-NEXT: it ne ; CHECK-BE-NEXT: mvnne r3, #1 -; CHECK-BE-NEXT: bfi r3, lr, #0, #1 -; CHECK-BE-NEXT: @ implicit-def: $q2 +; CHECK-BE-NEXT: rsbs.w r1, lr, #0 +; CHECK-BE-NEXT: sbcs.w r1, r12, lr, asr #31 +; CHECK-BE-NEXT: cset r1, lt +; CHECK-BE-NEXT: bfi r3, r1, #0, #1 ; CHECK-BE-NEXT: and r1, r3, #3 ; CHECK-BE-NEXT: lsls r3, r3, #30 ; CHECK-BE-NEXT: bpl .LBB5_2 @@ -193,31 +184,28 @@ ; CHECK-BE-NEXT: .LBB5_4: @ %else2 ; CHECK-BE-NEXT: vrev64.32 q0, q2 ; CHECK-BE-NEXT: vrev64.32 q2, q1 -; CHECK-BE-NEXT: vmov r2, s11 -; CHECK-BE-NEXT: movs r4, #0 ; CHECK-BE-NEXT: vmov r1, s3 +; CHECK-BE-NEXT: movs r4, #0 ; CHECK-BE-NEXT: vmov r3, s1 -; CHECK-BE-NEXT: rsbs r5, r2, #0 -; CHECK-BE-NEXT: sbcs.w r2, r4, r2, asr #31 ; CHECK-BE-NEXT: vmov r2, s9 ; CHECK-BE-NEXT: asr.w r12, r1, #31 ; CHECK-BE-NEXT: asr.w lr, r3, #31 +; CHECK-BE-NEXT: rsbs r5, r2, #0 ; CHECK-BE-NEXT: vmov q1[2], q1[0], lr, r12 +; CHECK-BE-NEXT: sbcs.w r2, r4, r2, asr #31 ; CHECK-BE-NEXT: vmov q1[3], q1[1], r3, r1 -; CHECK-BE-NEXT: mov.w r1, #0 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt r1, #1 +; CHECK-BE-NEXT: vmov r1, s11 +; CHECK-BE-NEXT: cset r2, lt ; CHECK-BE-NEXT: vrev64.32 q0, q1 -; CHECK-BE-NEXT: rsbs r3, r2, #0 -; CHECK-BE-NEXT: sbcs.w r2, r4, r2, asr #31 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt r4, #1 -; CHECK-BE-NEXT: cmp r4, #0 +; CHECK-BE-NEXT: cmp r2, #0 ; CHECK-BE-NEXT: it ne -; CHECK-BE-NEXT: mvnne 
r4, #1 -; CHECK-BE-NEXT: bfi r4, r1, #0, #1 -; CHECK-BE-NEXT: and r1, r4, #3 -; CHECK-BE-NEXT: lsls r2, r4, #30 +; CHECK-BE-NEXT: mvnne r2, #1 +; CHECK-BE-NEXT: rsbs r3, r1, #0 +; CHECK-BE-NEXT: sbcs.w r1, r4, r1, asr #31 +; CHECK-BE-NEXT: cset r1, lt +; CHECK-BE-NEXT: bfi r2, r1, #0, #1 +; CHECK-BE-NEXT: and r1, r2, #3 +; CHECK-BE-NEXT: lsls r2, r2, #30 ; CHECK-BE-NEXT: it mi ; CHECK-BE-NEXT: vstrmi d0, [r0] ; CHECK-BE-NEXT: lsls r1, r1, #31 @@ -241,53 +229,47 @@ ; CHECK-LE-NEXT: push {r4, r5, r7, lr} ; CHECK-LE-NEXT: .pad #4 ; CHECK-LE-NEXT: sub sp, #4 -; CHECK-LE-NEXT: ldrd lr, r5, [r1] -; CHECK-LE-NEXT: movs r3, #0 +; CHECK-LE-NEXT: ldrd lr, r1, [r1] +; CHECK-LE-NEXT: rsbs r3, r1, #0 +; CHECK-LE-NEXT: mov.w r12, #0 +; CHECK-LE-NEXT: sbcs.w r3, r12, r1, asr #31 +; CHECK-LE-NEXT: cset r3, lt ; CHECK-LE-NEXT: @ implicit-def: $q0 -; CHECK-LE-NEXT: rsbs.w r1, lr, #0 -; CHECK-LE-NEXT: vmov q1[2], q1[0], lr, r5 -; CHECK-LE-NEXT: sbcs.w r1, r3, lr, asr #31 -; CHECK-LE-NEXT: mov.w r1, #0 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r1, #1 -; CHECK-LE-NEXT: rsbs r4, r5, #0 -; CHECK-LE-NEXT: sbcs.w r4, r3, r5, asr #31 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r3, #1 +; CHECK-LE-NEXT: vmov q1[2], q1[0], lr, r1 ; CHECK-LE-NEXT: cmp r3, #0 ; CHECK-LE-NEXT: it ne ; CHECK-LE-NEXT: mvnne r3, #1 -; CHECK-LE-NEXT: bfi r3, r1, #0, #1 -; CHECK-LE-NEXT: vmov r4, s4 +; CHECK-LE-NEXT: rsbs.w r4, lr, #0 +; CHECK-LE-NEXT: sbcs.w r4, r12, lr, asr #31 +; CHECK-LE-NEXT: mov.w lr, #0 +; CHECK-LE-NEXT: cset r4, lt +; CHECK-LE-NEXT: bfi r3, r4, #0, #1 ; CHECK-LE-NEXT: and r12, r3, #3 -; CHECK-LE-NEXT: lsls r1, r3, #31 +; CHECK-LE-NEXT: lsls r3, r3, #31 ; CHECK-LE-NEXT: itt ne -; CHECK-LE-NEXT: ldrne r1, [r2] -; CHECK-LE-NEXT: vmovne.32 q0[0], r1 +; CHECK-LE-NEXT: ldrne r3, [r2] +; CHECK-LE-NEXT: vmovne.32 q0[0], r3 ; CHECK-LE-NEXT: lsls.w r1, r12, #30 ; CHECK-LE-NEXT: itt mi ; CHECK-LE-NEXT: ldrmi r1, [r2, #4] ; CHECK-LE-NEXT: vmovmi.32 q0[2], r1 -; CHECK-LE-NEXT: vmov r3, s0 -; 
CHECK-LE-NEXT: movs r2, #0 ; CHECK-LE-NEXT: vmov r1, s2 +; CHECK-LE-NEXT: vmov r2, s6 +; CHECK-LE-NEXT: vmov r3, s0 ; CHECK-LE-NEXT: vmov q0[2], q0[0], r3, r1 -; CHECK-LE-NEXT: rsbs r5, r4, #0 -; CHECK-LE-NEXT: asr.w lr, r3, #31 -; CHECK-LE-NEXT: vmov r3, s6 +; CHECK-LE-NEXT: rsbs r5, r2, #0 ; CHECK-LE-NEXT: asr.w r12, r1, #31 -; CHECK-LE-NEXT: sbcs.w r1, r2, r4, asr #31 -; CHECK-LE-NEXT: mov.w r1, #0 -; CHECK-LE-NEXT: vmov q0[3], q0[1], lr, r12 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r1, #1 -; CHECK-LE-NEXT: rsbs r5, r3, #0 -; CHECK-LE-NEXT: sbcs.w r3, r2, r3, asr #31 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r2, #1 +; CHECK-LE-NEXT: sbcs.w r1, lr, r2, asr #31 +; CHECK-LE-NEXT: vmov r1, s4 +; CHECK-LE-NEXT: cset r2, lt ; CHECK-LE-NEXT: cmp r2, #0 +; CHECK-LE-NEXT: asr.w r4, r3, #31 ; CHECK-LE-NEXT: it ne ; CHECK-LE-NEXT: mvnne r2, #1 +; CHECK-LE-NEXT: vmov q0[3], q0[1], r4, r12 +; CHECK-LE-NEXT: rsbs r3, r1, #0 +; CHECK-LE-NEXT: sbcs.w r1, lr, r1, asr #31 +; CHECK-LE-NEXT: cset r1, lt ; CHECK-LE-NEXT: bfi r2, r1, #0, #1 ; CHECK-LE-NEXT: and r1, r2, #3 ; CHECK-LE-NEXT: lsls r2, r2, #31 @@ -307,23 +289,20 @@ ; CHECK-BE-NEXT: push {r4, r5, r7, lr} ; CHECK-BE-NEXT: .pad #4 ; CHECK-BE-NEXT: sub sp, #4 -; CHECK-BE-NEXT: ldrd r12, lr, [r1] -; CHECK-BE-NEXT: rsbs.w r1, lr, #0 -; CHECK-BE-NEXT: mov.w r3, #0 -; CHECK-BE-NEXT: sbcs.w r1, r3, lr, asr #31 -; CHECK-BE-NEXT: vmov q0[3], q0[1], r12, lr -; CHECK-BE-NEXT: mov.w lr, #0 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt.w lr, #1 -; CHECK-BE-NEXT: rsbs.w r1, r12, #0 -; CHECK-BE-NEXT: sbcs.w r1, r3, r12, asr #31 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt r3, #1 +; CHECK-BE-NEXT: ldrd r3, lr, [r1] +; CHECK-BE-NEXT: mov.w r12, #0 +; CHECK-BE-NEXT: @ implicit-def: $q2 +; CHECK-BE-NEXT: rsbs r1, r3, #0 +; CHECK-BE-NEXT: vmov q0[3], q0[1], r3, lr +; CHECK-BE-NEXT: sbcs.w r1, r12, r3, asr #31 +; CHECK-BE-NEXT: cset r3, lt ; CHECK-BE-NEXT: cmp r3, #0 ; CHECK-BE-NEXT: it ne ; CHECK-BE-NEXT: mvnne r3, #1 -; 
CHECK-BE-NEXT: bfi r3, lr, #0, #1 -; CHECK-BE-NEXT: @ implicit-def: $q2 +; CHECK-BE-NEXT: rsbs.w r1, lr, #0 +; CHECK-BE-NEXT: sbcs.w r1, r12, lr, asr #31 +; CHECK-BE-NEXT: cset r1, lt +; CHECK-BE-NEXT: bfi r3, r1, #0, #1 ; CHECK-BE-NEXT: and r1, r3, #3 ; CHECK-BE-NEXT: lsls r3, r3, #30 ; CHECK-BE-NEXT: bpl .LBB6_2 @@ -343,31 +322,28 @@ ; CHECK-BE-NEXT: .LBB6_4: @ %else2 ; CHECK-BE-NEXT: vrev64.32 q0, q2 ; CHECK-BE-NEXT: vrev64.32 q2, q1 -; CHECK-BE-NEXT: vmov r2, s11 -; CHECK-BE-NEXT: movs r4, #0 ; CHECK-BE-NEXT: vmov r1, s3 +; CHECK-BE-NEXT: movs r4, #0 ; CHECK-BE-NEXT: vmov r3, s1 -; CHECK-BE-NEXT: rsbs r5, r2, #0 -; CHECK-BE-NEXT: sbcs.w r2, r4, r2, asr #31 ; CHECK-BE-NEXT: vmov r2, s9 ; CHECK-BE-NEXT: asr.w r12, r1, #31 ; CHECK-BE-NEXT: asr.w lr, r3, #31 +; CHECK-BE-NEXT: rsbs r5, r2, #0 ; CHECK-BE-NEXT: vmov q1[2], q1[0], lr, r12 +; CHECK-BE-NEXT: sbcs.w r2, r4, r2, asr #31 ; CHECK-BE-NEXT: vmov q1[3], q1[1], r3, r1 -; CHECK-BE-NEXT: mov.w r1, #0 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt r1, #1 +; CHECK-BE-NEXT: vmov r1, s11 +; CHECK-BE-NEXT: cset r2, lt ; CHECK-BE-NEXT: vrev64.32 q0, q1 -; CHECK-BE-NEXT: rsbs r3, r2, #0 -; CHECK-BE-NEXT: sbcs.w r2, r4, r2, asr #31 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt r4, #1 -; CHECK-BE-NEXT: cmp r4, #0 +; CHECK-BE-NEXT: cmp r2, #0 ; CHECK-BE-NEXT: it ne -; CHECK-BE-NEXT: mvnne r4, #1 -; CHECK-BE-NEXT: bfi r4, r1, #0, #1 -; CHECK-BE-NEXT: and r1, r4, #3 -; CHECK-BE-NEXT: lsls r2, r4, #30 +; CHECK-BE-NEXT: mvnne r2, #1 +; CHECK-BE-NEXT: rsbs r3, r1, #0 +; CHECK-BE-NEXT: sbcs.w r1, r4, r1, asr #31 +; CHECK-BE-NEXT: cset r1, lt +; CHECK-BE-NEXT: bfi r2, r1, #0, #1 +; CHECK-BE-NEXT: and r1, r2, #3 +; CHECK-BE-NEXT: lsls r2, r2, #30 ; CHECK-BE-NEXT: itt mi ; CHECK-BE-NEXT: vmovmi r2, r3, d0 ; CHECK-BE-NEXT: strdmi r3, r2, [r0] @@ -389,53 +365,47 @@ define void @foo_zext_v2i64_v2i32(<2 x i64> *%dest, <2 x i32> *%mask, <2 x i32> *%src) { ; CHECK-LE-LABEL: foo_zext_v2i64_v2i32: ; CHECK-LE: @ %bb.0: @ %entry -; 
CHECK-LE-NEXT: .save {r4, r5, r7, lr} -; CHECK-LE-NEXT: push {r4, r5, r7, lr} +; CHECK-LE-NEXT: .save {r4, lr} +; CHECK-LE-NEXT: push {r4, lr} ; CHECK-LE-NEXT: .pad #4 ; CHECK-LE-NEXT: sub sp, #4 -; CHECK-LE-NEXT: ldrd lr, r5, [r1] -; CHECK-LE-NEXT: movs r3, #0 +; CHECK-LE-NEXT: ldrd lr, r1, [r1] +; CHECK-LE-NEXT: rsbs r3, r1, #0 +; CHECK-LE-NEXT: mov.w r12, #0 +; CHECK-LE-NEXT: sbcs.w r3, r12, r1, asr #31 +; CHECK-LE-NEXT: cset r3, lt ; CHECK-LE-NEXT: @ implicit-def: $q0 -; CHECK-LE-NEXT: vmov.i64 q2, #0xffffffff -; CHECK-LE-NEXT: rsbs.w r1, lr, #0 -; CHECK-LE-NEXT: vmov q1[2], q1[0], lr, r5 -; CHECK-LE-NEXT: sbcs.w r1, r3, lr, asr #31 -; CHECK-LE-NEXT: mov.w r1, #0 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r1, #1 -; CHECK-LE-NEXT: rsbs r4, r5, #0 -; CHECK-LE-NEXT: sbcs.w r4, r3, r5, asr #31 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r3, #1 +; CHECK-LE-NEXT: vmov q1[2], q1[0], lr, r1 ; CHECK-LE-NEXT: cmp r3, #0 ; CHECK-LE-NEXT: it ne ; CHECK-LE-NEXT: mvnne r3, #1 -; CHECK-LE-NEXT: bfi r3, r1, #0, #1 +; CHECK-LE-NEXT: rsbs.w r4, lr, #0 +; CHECK-LE-NEXT: vmov.i64 q2, #0xffffffff +; CHECK-LE-NEXT: sbcs.w r4, r12, lr, asr #31 +; CHECK-LE-NEXT: cset r4, lt +; CHECK-LE-NEXT: bfi r3, r4, #0, #1 ; CHECK-LE-NEXT: and r12, r3, #3 -; CHECK-LE-NEXT: lsls r1, r3, #31 +; CHECK-LE-NEXT: lsls r3, r3, #31 ; CHECK-LE-NEXT: itt ne -; CHECK-LE-NEXT: ldrne r1, [r2] -; CHECK-LE-NEXT: vmovne.32 q0[0], r1 +; CHECK-LE-NEXT: ldrne r3, [r2] +; CHECK-LE-NEXT: vmovne.32 q0[0], r3 ; CHECK-LE-NEXT: lsls.w r1, r12, #30 ; CHECK-LE-NEXT: itt mi ; CHECK-LE-NEXT: ldrmi r1, [r2, #4] ; CHECK-LE-NEXT: vmovmi.32 q0[2], r1 -; CHECK-LE-NEXT: vmov r1, s4 -; CHECK-LE-NEXT: movs r2, #0 +; CHECK-LE-NEXT: vmov r2, s6 +; CHECK-LE-NEXT: movs r1, #0 ; CHECK-LE-NEXT: vand q0, q0, q2 -; CHECK-LE-NEXT: rsbs r3, r1, #0 -; CHECK-LE-NEXT: vmov r3, s6 -; CHECK-LE-NEXT: sbcs.w r1, r2, r1, asr #31 -; CHECK-LE-NEXT: mov.w r1, #0 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r1, #1 -; CHECK-LE-NEXT: rsbs r5, 
r3, #0 -; CHECK-LE-NEXT: sbcs.w r3, r2, r3, asr #31 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r2, #1 +; CHECK-LE-NEXT: rsbs r3, r2, #0 +; CHECK-LE-NEXT: vmov r3, s4 +; CHECK-LE-NEXT: sbcs.w r2, r1, r2, asr #31 +; CHECK-LE-NEXT: cset r2, lt ; CHECK-LE-NEXT: cmp r2, #0 ; CHECK-LE-NEXT: it ne ; CHECK-LE-NEXT: mvnne r2, #1 +; CHECK-LE-NEXT: rsbs r4, r3, #0 +; CHECK-LE-NEXT: sbcs.w r1, r1, r3, asr #31 +; CHECK-LE-NEXT: cset r1, lt ; CHECK-LE-NEXT: bfi r2, r1, #0, #1 ; CHECK-LE-NEXT: and r1, r2, #3 ; CHECK-LE-NEXT: lsls r2, r2, #31 @@ -445,7 +415,7 @@ ; CHECK-LE-NEXT: it mi ; CHECK-LE-NEXT: vstrmi d1, [r0, #8] ; CHECK-LE-NEXT: add sp, #4 -; CHECK-LE-NEXT: pop {r4, r5, r7, pc} +; CHECK-LE-NEXT: pop {r4, pc} ; ; CHECK-BE-LABEL: foo_zext_v2i64_v2i32: ; CHECK-BE: @ %bb.0: @ %entry @@ -453,59 +423,53 @@ ; CHECK-BE-NEXT: push {r7, lr} ; CHECK-BE-NEXT: .pad #4 ; CHECK-BE-NEXT: sub sp, #4 -; CHECK-BE-NEXT: ldrd r12, lr, [r1] -; CHECK-BE-NEXT: rsbs.w r1, lr, #0 -; CHECK-BE-NEXT: mov.w r3, #0 -; CHECK-BE-NEXT: sbcs.w r1, r3, lr, asr #31 -; CHECK-BE-NEXT: vmov q0[3], q0[1], r12, lr -; CHECK-BE-NEXT: mov.w lr, #0 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt.w lr, #1 -; CHECK-BE-NEXT: rsbs.w r1, r12, #0 -; CHECK-BE-NEXT: sbcs.w r1, r3, r12, asr #31 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt r3, #1 +; CHECK-BE-NEXT: ldrd r3, lr, [r1] +; CHECK-BE-NEXT: mov.w r12, #0 +; CHECK-BE-NEXT: @ implicit-def: $q0 +; CHECK-BE-NEXT: rsbs r1, r3, #0 +; CHECK-BE-NEXT: vmov q1[3], q1[1], r3, lr +; CHECK-BE-NEXT: sbcs.w r1, r12, r3, asr #31 +; CHECK-BE-NEXT: cset r3, lt ; CHECK-BE-NEXT: cmp r3, #0 ; CHECK-BE-NEXT: it ne ; CHECK-BE-NEXT: mvnne r3, #1 -; CHECK-BE-NEXT: bfi r3, lr, #0, #1 -; CHECK-BE-NEXT: @ implicit-def: $q1 +; CHECK-BE-NEXT: rsbs.w r1, lr, #0 +; CHECK-BE-NEXT: sbcs.w r1, r12, lr, asr #31 +; CHECK-BE-NEXT: cset r1, lt +; CHECK-BE-NEXT: bfi r3, r1, #0, #1 ; CHECK-BE-NEXT: and r1, r3, #3 ; CHECK-BE-NEXT: lsls r3, r3, #30 ; CHECK-BE-NEXT: bpl .LBB7_2 ; CHECK-BE-NEXT: @ %bb.1: 
@ %cond.load ; CHECK-BE-NEXT: ldr r3, [r2] ; CHECK-BE-NEXT: vmov.32 q2[1], r3 -; CHECK-BE-NEXT: vrev64.32 q1, q2 +; CHECK-BE-NEXT: vrev64.32 q0, q2 ; CHECK-BE-NEXT: .LBB7_2: @ %else -; CHECK-BE-NEXT: vrev64.32 q2, q0 +; CHECK-BE-NEXT: vrev64.32 q2, q1 ; CHECK-BE-NEXT: lsls r1, r1, #31 ; CHECK-BE-NEXT: beq .LBB7_4 ; CHECK-BE-NEXT: @ %bb.3: @ %cond.load1 ; CHECK-BE-NEXT: ldr r1, [r2, #4] -; CHECK-BE-NEXT: vrev64.32 q0, q1 -; CHECK-BE-NEXT: vmov.32 q0[3], r1 ; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: vmov.32 q1[3], r1 +; CHECK-BE-NEXT: vrev64.32 q0, q1 ; CHECK-BE-NEXT: .LBB7_4: @ %else2 ; CHECK-BE-NEXT: vrev64.32 q3, q2 -; CHECK-BE-NEXT: movs r2, #0 -; CHECK-BE-NEXT: vmov r1, s15 ; CHECK-BE-NEXT: mov.w r12, #0 -; CHECK-BE-NEXT: vmov.i64 q0, #0xffffffff -; CHECK-BE-NEXT: vand q0, q1, q0 -; CHECK-BE-NEXT: rsbs r3, r1, #0 -; CHECK-BE-NEXT: vmov r3, s13 -; CHECK-BE-NEXT: sbcs.w r1, r2, r1, asr #31 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt.w r12, #1 -; CHECK-BE-NEXT: rsbs r1, r3, #0 -; CHECK-BE-NEXT: sbcs.w r1, r2, r3, asr #31 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt r2, #1 +; CHECK-BE-NEXT: vmov r2, s13 +; CHECK-BE-NEXT: vmov.i64 q1, #0xffffffff +; CHECK-BE-NEXT: vand q0, q0, q1 +; CHECK-BE-NEXT: rsbs r3, r2, #0 +; CHECK-BE-NEXT: vmov r3, s15 +; CHECK-BE-NEXT: sbcs.w r2, r12, r2, asr #31 +; CHECK-BE-NEXT: cset r2, lt ; CHECK-BE-NEXT: cmp r2, #0 ; CHECK-BE-NEXT: it ne ; CHECK-BE-NEXT: mvnne r2, #1 -; CHECK-BE-NEXT: bfi r2, r12, #0, #1 +; CHECK-BE-NEXT: rsbs r1, r3, #0 +; CHECK-BE-NEXT: sbcs.w r1, r12, r3, asr #31 +; CHECK-BE-NEXT: cset r1, lt +; CHECK-BE-NEXT: bfi r2, r1, #0, #1 ; CHECK-BE-NEXT: and r1, r2, #3 ; CHECK-BE-NEXT: lsls r2, r2, #30 ; CHECK-BE-NEXT: it mi @@ -527,53 +491,47 @@ define void @foo_zext_v2i64_v2i32_unaligned(<2 x i64> *%dest, <2 x i32> *%mask, <2 x i32> *%src) { ; CHECK-LE-LABEL: foo_zext_v2i64_v2i32_unaligned: ; CHECK-LE: @ %bb.0: @ %entry -; CHECK-LE-NEXT: .save {r4, r5, r7, lr} -; CHECK-LE-NEXT: push {r4, r5, r7, lr} +; 
CHECK-LE-NEXT: .save {r4, lr} +; CHECK-LE-NEXT: push {r4, lr} ; CHECK-LE-NEXT: .pad #4 ; CHECK-LE-NEXT: sub sp, #4 -; CHECK-LE-NEXT: ldrd lr, r5, [r1] -; CHECK-LE-NEXT: movs r3, #0 +; CHECK-LE-NEXT: ldrd lr, r1, [r1] +; CHECK-LE-NEXT: rsbs r3, r1, #0 +; CHECK-LE-NEXT: mov.w r12, #0 +; CHECK-LE-NEXT: sbcs.w r3, r12, r1, asr #31 +; CHECK-LE-NEXT: cset r3, lt ; CHECK-LE-NEXT: @ implicit-def: $q0 -; CHECK-LE-NEXT: vmov.i64 q2, #0xffffffff -; CHECK-LE-NEXT: rsbs.w r1, lr, #0 -; CHECK-LE-NEXT: vmov q1[2], q1[0], lr, r5 -; CHECK-LE-NEXT: sbcs.w r1, r3, lr, asr #31 -; CHECK-LE-NEXT: mov.w r1, #0 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r1, #1 -; CHECK-LE-NEXT: rsbs r4, r5, #0 -; CHECK-LE-NEXT: sbcs.w r4, r3, r5, asr #31 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r3, #1 +; CHECK-LE-NEXT: vmov q1[2], q1[0], lr, r1 ; CHECK-LE-NEXT: cmp r3, #0 ; CHECK-LE-NEXT: it ne ; CHECK-LE-NEXT: mvnne r3, #1 -; CHECK-LE-NEXT: bfi r3, r1, #0, #1 +; CHECK-LE-NEXT: rsbs.w r4, lr, #0 +; CHECK-LE-NEXT: vmov.i64 q2, #0xffffffff +; CHECK-LE-NEXT: sbcs.w r4, r12, lr, asr #31 +; CHECK-LE-NEXT: cset r4, lt +; CHECK-LE-NEXT: bfi r3, r4, #0, #1 ; CHECK-LE-NEXT: and r12, r3, #3 -; CHECK-LE-NEXT: lsls r1, r3, #31 +; CHECK-LE-NEXT: lsls r3, r3, #31 ; CHECK-LE-NEXT: itt ne -; CHECK-LE-NEXT: ldrne r1, [r2] -; CHECK-LE-NEXT: vmovne.32 q0[0], r1 +; CHECK-LE-NEXT: ldrne r3, [r2] +; CHECK-LE-NEXT: vmovne.32 q0[0], r3 ; CHECK-LE-NEXT: lsls.w r1, r12, #30 ; CHECK-LE-NEXT: itt mi ; CHECK-LE-NEXT: ldrmi r1, [r2, #4] ; CHECK-LE-NEXT: vmovmi.32 q0[2], r1 -; CHECK-LE-NEXT: vmov r1, s4 -; CHECK-LE-NEXT: movs r2, #0 +; CHECK-LE-NEXT: vmov r2, s6 +; CHECK-LE-NEXT: movs r1, #0 ; CHECK-LE-NEXT: vand q0, q0, q2 -; CHECK-LE-NEXT: rsbs r3, r1, #0 -; CHECK-LE-NEXT: vmov r3, s6 -; CHECK-LE-NEXT: sbcs.w r1, r2, r1, asr #31 -; CHECK-LE-NEXT: mov.w r1, #0 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r1, #1 -; CHECK-LE-NEXT: rsbs r5, r3, #0 -; CHECK-LE-NEXT: sbcs.w r3, r2, r3, asr #31 -; CHECK-LE-NEXT: it lt -; 
CHECK-LE-NEXT: movlt r2, #1 +; CHECK-LE-NEXT: rsbs r3, r2, #0 +; CHECK-LE-NEXT: vmov r3, s4 +; CHECK-LE-NEXT: sbcs.w r2, r1, r2, asr #31 +; CHECK-LE-NEXT: cset r2, lt ; CHECK-LE-NEXT: cmp r2, #0 ; CHECK-LE-NEXT: it ne ; CHECK-LE-NEXT: mvnne r2, #1 +; CHECK-LE-NEXT: rsbs r4, r3, #0 +; CHECK-LE-NEXT: sbcs.w r1, r1, r3, asr #31 +; CHECK-LE-NEXT: cset r1, lt ; CHECK-LE-NEXT: bfi r2, r1, #0, #1 ; CHECK-LE-NEXT: and r1, r2, #3 ; CHECK-LE-NEXT: lsls r2, r2, #31 @@ -585,7 +543,7 @@ ; CHECK-LE-NEXT: vmovmi r1, r2, d1 ; CHECK-LE-NEXT: strdmi r1, r2, [r0, #8] ; CHECK-LE-NEXT: add sp, #4 -; CHECK-LE-NEXT: pop {r4, r5, r7, pc} +; CHECK-LE-NEXT: pop {r4, pc} ; ; CHECK-BE-LABEL: foo_zext_v2i64_v2i32_unaligned: ; CHECK-BE: @ %bb.0: @ %entry @@ -593,59 +551,53 @@ ; CHECK-BE-NEXT: push {r7, lr} ; CHECK-BE-NEXT: .pad #4 ; CHECK-BE-NEXT: sub sp, #4 -; CHECK-BE-NEXT: ldrd r12, lr, [r1] -; CHECK-BE-NEXT: rsbs.w r1, lr, #0 -; CHECK-BE-NEXT: mov.w r3, #0 -; CHECK-BE-NEXT: sbcs.w r1, r3, lr, asr #31 -; CHECK-BE-NEXT: vmov q0[3], q0[1], r12, lr -; CHECK-BE-NEXT: mov.w lr, #0 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt.w lr, #1 -; CHECK-BE-NEXT: rsbs.w r1, r12, #0 -; CHECK-BE-NEXT: sbcs.w r1, r3, r12, asr #31 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt r3, #1 +; CHECK-BE-NEXT: ldrd r3, lr, [r1] +; CHECK-BE-NEXT: mov.w r12, #0 +; CHECK-BE-NEXT: @ implicit-def: $q0 +; CHECK-BE-NEXT: rsbs r1, r3, #0 +; CHECK-BE-NEXT: vmov q1[3], q1[1], r3, lr +; CHECK-BE-NEXT: sbcs.w r1, r12, r3, asr #31 +; CHECK-BE-NEXT: cset r3, lt ; CHECK-BE-NEXT: cmp r3, #0 ; CHECK-BE-NEXT: it ne ; CHECK-BE-NEXT: mvnne r3, #1 -; CHECK-BE-NEXT: bfi r3, lr, #0, #1 -; CHECK-BE-NEXT: @ implicit-def: $q1 +; CHECK-BE-NEXT: rsbs.w r1, lr, #0 +; CHECK-BE-NEXT: sbcs.w r1, r12, lr, asr #31 +; CHECK-BE-NEXT: cset r1, lt +; CHECK-BE-NEXT: bfi r3, r1, #0, #1 ; CHECK-BE-NEXT: and r1, r3, #3 ; CHECK-BE-NEXT: lsls r3, r3, #30 ; CHECK-BE-NEXT: bpl .LBB8_2 ; CHECK-BE-NEXT: @ %bb.1: @ %cond.load ; CHECK-BE-NEXT: ldr r3, [r2] ; 
CHECK-BE-NEXT: vmov.32 q2[1], r3 -; CHECK-BE-NEXT: vrev64.32 q1, q2 +; CHECK-BE-NEXT: vrev64.32 q0, q2 ; CHECK-BE-NEXT: .LBB8_2: @ %else -; CHECK-BE-NEXT: vrev64.32 q2, q0 +; CHECK-BE-NEXT: vrev64.32 q2, q1 ; CHECK-BE-NEXT: lsls r1, r1, #31 ; CHECK-BE-NEXT: beq .LBB8_4 ; CHECK-BE-NEXT: @ %bb.3: @ %cond.load1 ; CHECK-BE-NEXT: ldr r1, [r2, #4] -; CHECK-BE-NEXT: vrev64.32 q0, q1 -; CHECK-BE-NEXT: vmov.32 q0[3], r1 ; CHECK-BE-NEXT: vrev64.32 q1, q0 +; CHECK-BE-NEXT: vmov.32 q1[3], r1 +; CHECK-BE-NEXT: vrev64.32 q0, q1 ; CHECK-BE-NEXT: .LBB8_4: @ %else2 ; CHECK-BE-NEXT: vrev64.32 q3, q2 -; CHECK-BE-NEXT: movs r2, #0 -; CHECK-BE-NEXT: vmov r1, s15 ; CHECK-BE-NEXT: mov.w r12, #0 -; CHECK-BE-NEXT: vmov.i64 q0, #0xffffffff -; CHECK-BE-NEXT: vand q0, q1, q0 -; CHECK-BE-NEXT: rsbs r3, r1, #0 -; CHECK-BE-NEXT: vmov r3, s13 -; CHECK-BE-NEXT: sbcs.w r1, r2, r1, asr #31 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt.w r12, #1 -; CHECK-BE-NEXT: rsbs r1, r3, #0 -; CHECK-BE-NEXT: sbcs.w r1, r2, r3, asr #31 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt r2, #1 +; CHECK-BE-NEXT: vmov r2, s13 +; CHECK-BE-NEXT: vmov.i64 q1, #0xffffffff +; CHECK-BE-NEXT: vand q0, q0, q1 +; CHECK-BE-NEXT: rsbs r3, r2, #0 +; CHECK-BE-NEXT: vmov r3, s15 +; CHECK-BE-NEXT: sbcs.w r2, r12, r2, asr #31 +; CHECK-BE-NEXT: cset r2, lt ; CHECK-BE-NEXT: cmp r2, #0 ; CHECK-BE-NEXT: it ne ; CHECK-BE-NEXT: mvnne r2, #1 -; CHECK-BE-NEXT: bfi r2, r12, #0, #1 +; CHECK-BE-NEXT: rsbs r1, r3, #0 +; CHECK-BE-NEXT: sbcs.w r1, r12, r3, asr #31 +; CHECK-BE-NEXT: cset r1, lt +; CHECK-BE-NEXT: bfi r2, r1, #0, #1 ; CHECK-BE-NEXT: and r1, r2, #3 ; CHECK-BE-NEXT: lsls r2, r2, #30 ; CHECK-BE-NEXT: itt mi diff --git a/llvm/test/CodeGen/Thumb2/mve-masked-load.ll b/llvm/test/CodeGen/Thumb2/mve-masked-load.ll --- a/llvm/test/CodeGen/Thumb2/mve-masked-load.ll +++ b/llvm/test/CodeGen/Thumb2/mve-masked-load.ll @@ -1754,24 +1754,21 @@ ; CHECK-LE-NEXT: push {r7, lr} ; CHECK-LE-NEXT: .pad #4 ; CHECK-LE-NEXT: sub sp, #4 -; CHECK-LE-NEXT: vmov 
r1, r2, d0 -; CHECK-LE-NEXT: movs r3, #0 -; CHECK-LE-NEXT: vmov lr, r12, d1 +; CHECK-LE-NEXT: vmov r1, r2, d1 +; CHECK-LE-NEXT: mov.w lr, #0 +; CHECK-LE-NEXT: vmov r3, r12, d0 ; CHECK-LE-NEXT: rsbs r1, r1, #0 -; CHECK-LE-NEXT: sbcs.w r1, r3, r2 -; CHECK-LE-NEXT: mov.w r1, #0 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r1, #1 -; CHECK-LE-NEXT: rsbs.w r2, lr, #0 -; CHECK-LE-NEXT: sbcs.w r2, r3, r12 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r3, #1 -; CHECK-LE-NEXT: cmp r3, #0 +; CHECK-LE-NEXT: sbcs.w r1, lr, r2 +; CHECK-LE-NEXT: cset r2, lt +; CHECK-LE-NEXT: cmp r2, #0 ; CHECK-LE-NEXT: it ne -; CHECK-LE-NEXT: mvnne r3, #1 -; CHECK-LE-NEXT: bfi r3, r1, #0, #1 -; CHECK-LE-NEXT: and r1, r3, #3 -; CHECK-LE-NEXT: lsls r2, r3, #31 +; CHECK-LE-NEXT: mvnne r2, #1 +; CHECK-LE-NEXT: rsbs r1, r3, #0 +; CHECK-LE-NEXT: sbcs.w r1, lr, r12 +; CHECK-LE-NEXT: cset r1, lt +; CHECK-LE-NEXT: bfi r2, r1, #0, #1 +; CHECK-LE-NEXT: and r1, r2, #3 +; CHECK-LE-NEXT: lsls r2, r2, #31 ; CHECK-LE-NEXT: beq .LBB49_2 ; CHECK-LE-NEXT: @ %bb.1: @ %cond.load ; CHECK-LE-NEXT: vldr d1, .LCPI49_0 @@ -1798,24 +1795,21 @@ ; CHECK-BE-NEXT: .pad #4 ; CHECK-BE-NEXT: sub sp, #4 ; CHECK-BE-NEXT: vrev64.32 q1, q0 -; CHECK-BE-NEXT: movs r3, #0 -; CHECK-BE-NEXT: vmov r1, r2, d3 -; CHECK-BE-NEXT: vmov r12, lr, d2 +; CHECK-BE-NEXT: mov.w lr, #0 +; CHECK-BE-NEXT: vmov r1, r2, d2 +; CHECK-BE-NEXT: vmov r12, r3, d3 ; CHECK-BE-NEXT: rsbs r2, r2, #0 -; CHECK-BE-NEXT: sbcs.w r1, r3, r1 -; CHECK-BE-NEXT: mov.w r1, #0 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt r1, #1 -; CHECK-BE-NEXT: rsbs.w r2, lr, #0 -; CHECK-BE-NEXT: sbcs.w r2, r3, r12 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt r3, #1 -; CHECK-BE-NEXT: cmp r3, #0 +; CHECK-BE-NEXT: sbcs.w r1, lr, r1 +; CHECK-BE-NEXT: cset r2, lt +; CHECK-BE-NEXT: cmp r2, #0 ; CHECK-BE-NEXT: it ne -; CHECK-BE-NEXT: mvnne r3, #1 -; CHECK-BE-NEXT: bfi r3, r1, #0, #1 -; CHECK-BE-NEXT: and r1, r3, #3 -; CHECK-BE-NEXT: lsls r2, r3, #30 +; CHECK-BE-NEXT: mvnne r2, #1 +; 
CHECK-BE-NEXT: rsbs r1, r3, #0 +; CHECK-BE-NEXT: sbcs.w r1, lr, r12 +; CHECK-BE-NEXT: cset r1, lt +; CHECK-BE-NEXT: bfi r2, r1, #0, #1 +; CHECK-BE-NEXT: and r1, r2, #3 +; CHECK-BE-NEXT: lsls r2, r2, #30 ; CHECK-BE-NEXT: bpl .LBB49_2 ; CHECK-BE-NEXT: @ %bb.1: @ %cond.load ; CHECK-BE-NEXT: vldr d1, .LCPI49_0 @@ -1847,24 +1841,21 @@ ; CHECK-LE-NEXT: push {r7, lr} ; CHECK-LE-NEXT: .pad #4 ; CHECK-LE-NEXT: sub sp, #4 -; CHECK-LE-NEXT: vmov r1, r2, d2 -; CHECK-LE-NEXT: movs r3, #0 -; CHECK-LE-NEXT: vmov lr, r12, d3 +; CHECK-LE-NEXT: vmov r1, r2, d3 +; CHECK-LE-NEXT: mov.w lr, #0 +; CHECK-LE-NEXT: vmov r3, r12, d2 ; CHECK-LE-NEXT: rsbs r1, r1, #0 -; CHECK-LE-NEXT: sbcs.w r1, r3, r2 -; CHECK-LE-NEXT: mov.w r1, #0 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r1, #1 -; CHECK-LE-NEXT: rsbs.w r2, lr, #0 -; CHECK-LE-NEXT: sbcs.w r2, r3, r12 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r3, #1 -; CHECK-LE-NEXT: cmp r3, #0 +; CHECK-LE-NEXT: sbcs.w r1, lr, r2 +; CHECK-LE-NEXT: cset r2, lt +; CHECK-LE-NEXT: cmp r2, #0 ; CHECK-LE-NEXT: it ne -; CHECK-LE-NEXT: mvnne r3, #1 -; CHECK-LE-NEXT: bfi r3, r1, #0, #1 -; CHECK-LE-NEXT: and r1, r3, #3 -; CHECK-LE-NEXT: lsls r2, r3, #31 +; CHECK-LE-NEXT: mvnne r2, #1 +; CHECK-LE-NEXT: rsbs r1, r3, #0 +; CHECK-LE-NEXT: sbcs.w r1, lr, r12 +; CHECK-LE-NEXT: cset r1, lt +; CHECK-LE-NEXT: bfi r2, r1, #0, #1 +; CHECK-LE-NEXT: and r1, r2, #3 +; CHECK-LE-NEXT: lsls r2, r2, #31 ; CHECK-LE-NEXT: beq .LBB50_2 ; CHECK-LE-NEXT: @ %bb.1: @ %cond.load ; CHECK-LE-NEXT: vldr d1, .LCPI50_0 @@ -1891,24 +1882,21 @@ ; CHECK-BE-NEXT: .pad #4 ; CHECK-BE-NEXT: sub sp, #4 ; CHECK-BE-NEXT: vrev64.32 q0, q1 -; CHECK-BE-NEXT: movs r3, #0 -; CHECK-BE-NEXT: vmov r1, r2, d1 -; CHECK-BE-NEXT: vmov r12, lr, d0 +; CHECK-BE-NEXT: mov.w lr, #0 +; CHECK-BE-NEXT: vmov r1, r2, d0 +; CHECK-BE-NEXT: vmov r12, r3, d1 ; CHECK-BE-NEXT: rsbs r2, r2, #0 -; CHECK-BE-NEXT: sbcs.w r1, r3, r1 -; CHECK-BE-NEXT: mov.w r1, #0 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt r1, #1 -; 
CHECK-BE-NEXT: rsbs.w r2, lr, #0 -; CHECK-BE-NEXT: sbcs.w r2, r3, r12 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt r3, #1 -; CHECK-BE-NEXT: cmp r3, #0 +; CHECK-BE-NEXT: sbcs.w r1, lr, r1 +; CHECK-BE-NEXT: cset r2, lt +; CHECK-BE-NEXT: cmp r2, #0 ; CHECK-BE-NEXT: it ne -; CHECK-BE-NEXT: mvnne r3, #1 -; CHECK-BE-NEXT: bfi r3, r1, #0, #1 -; CHECK-BE-NEXT: and r1, r3, #3 -; CHECK-BE-NEXT: lsls r2, r3, #30 +; CHECK-BE-NEXT: mvnne r2, #1 +; CHECK-BE-NEXT: rsbs r1, r3, #0 +; CHECK-BE-NEXT: sbcs.w r1, lr, r12 +; CHECK-BE-NEXT: cset r1, lt +; CHECK-BE-NEXT: bfi r2, r1, #0, #1 +; CHECK-BE-NEXT: and r1, r2, #3 +; CHECK-BE-NEXT: lsls r2, r2, #30 ; CHECK-BE-NEXT: bpl .LBB50_2 ; CHECK-BE-NEXT: @ %bb.1: @ %cond.load ; CHECK-BE-NEXT: vldr d1, .LCPI50_0 diff --git a/llvm/test/CodeGen/Thumb2/mve-masked-store.ll b/llvm/test/CodeGen/Thumb2/mve-masked-store.ll --- a/llvm/test/CodeGen/Thumb2/mve-masked-store.ll +++ b/llvm/test/CodeGen/Thumb2/mve-masked-store.ll @@ -939,24 +939,21 @@ ; CHECK-LE-NEXT: push {r7, lr} ; CHECK-LE-NEXT: .pad #4 ; CHECK-LE-NEXT: sub sp, #4 -; CHECK-LE-NEXT: vmov r1, r2, d0 -; CHECK-LE-NEXT: movs r3, #0 -; CHECK-LE-NEXT: vmov lr, r12, d1 +; CHECK-LE-NEXT: vmov r1, r2, d1 +; CHECK-LE-NEXT: mov.w lr, #0 +; CHECK-LE-NEXT: vmov r3, r12, d0 ; CHECK-LE-NEXT: rsbs r1, r1, #0 -; CHECK-LE-NEXT: sbcs.w r1, r3, r2 -; CHECK-LE-NEXT: mov.w r1, #0 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r1, #1 -; CHECK-LE-NEXT: rsbs.w r2, lr, #0 -; CHECK-LE-NEXT: sbcs.w r2, r3, r12 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r3, #1 -; CHECK-LE-NEXT: cmp r3, #0 +; CHECK-LE-NEXT: sbcs.w r1, lr, r2 +; CHECK-LE-NEXT: cset r2, lt +; CHECK-LE-NEXT: cmp r2, #0 ; CHECK-LE-NEXT: it ne -; CHECK-LE-NEXT: mvnne r3, #1 -; CHECK-LE-NEXT: bfi r3, r1, #0, #1 -; CHECK-LE-NEXT: and r1, r3, #3 -; CHECK-LE-NEXT: lsls r2, r3, #31 +; CHECK-LE-NEXT: mvnne r2, #1 +; CHECK-LE-NEXT: rsbs r1, r3, #0 +; CHECK-LE-NEXT: sbcs.w r1, lr, r12 +; CHECK-LE-NEXT: cset r1, lt +; CHECK-LE-NEXT: bfi r2, r1, #0, #1 +; 
CHECK-LE-NEXT: and r1, r2, #3 +; CHECK-LE-NEXT: lsls r2, r2, #31 ; CHECK-LE-NEXT: it ne ; CHECK-LE-NEXT: vstrne d0, [r0] ; CHECK-LE-NEXT: lsls r1, r1, #30 @@ -972,24 +969,21 @@ ; CHECK-BE-NEXT: .pad #4 ; CHECK-BE-NEXT: sub sp, #4 ; CHECK-BE-NEXT: vrev64.32 q1, q0 -; CHECK-BE-NEXT: movs r3, #0 -; CHECK-BE-NEXT: vmov r1, r2, d3 -; CHECK-BE-NEXT: vmov r12, lr, d2 +; CHECK-BE-NEXT: mov.w lr, #0 +; CHECK-BE-NEXT: vmov r1, r2, d2 +; CHECK-BE-NEXT: vmov r12, r3, d3 ; CHECK-BE-NEXT: rsbs r2, r2, #0 -; CHECK-BE-NEXT: sbcs.w r1, r3, r1 -; CHECK-BE-NEXT: mov.w r1, #0 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt r1, #1 -; CHECK-BE-NEXT: rsbs.w r2, lr, #0 -; CHECK-BE-NEXT: sbcs.w r2, r3, r12 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt r3, #1 -; CHECK-BE-NEXT: cmp r3, #0 +; CHECK-BE-NEXT: sbcs.w r1, lr, r1 +; CHECK-BE-NEXT: cset r2, lt +; CHECK-BE-NEXT: cmp r2, #0 ; CHECK-BE-NEXT: it ne -; CHECK-BE-NEXT: mvnne r3, #1 -; CHECK-BE-NEXT: bfi r3, r1, #0, #1 -; CHECK-BE-NEXT: and r1, r3, #3 -; CHECK-BE-NEXT: lsls r2, r3, #30 +; CHECK-BE-NEXT: mvnne r2, #1 +; CHECK-BE-NEXT: rsbs r1, r3, #0 +; CHECK-BE-NEXT: sbcs.w r1, lr, r12 +; CHECK-BE-NEXT: cset r1, lt +; CHECK-BE-NEXT: bfi r2, r1, #0, #1 +; CHECK-BE-NEXT: and r1, r2, #3 +; CHECK-BE-NEXT: lsls r2, r2, #30 ; CHECK-BE-NEXT: it mi ; CHECK-BE-NEXT: vstrmi d0, [r0] ; CHECK-BE-NEXT: lsls r1, r1, #31 @@ -1010,24 +1004,21 @@ ; CHECK-LE-NEXT: push {r7, lr} ; CHECK-LE-NEXT: .pad #4 ; CHECK-LE-NEXT: sub sp, #4 -; CHECK-LE-NEXT: vmov r1, r2, d2 -; CHECK-LE-NEXT: movs r3, #0 -; CHECK-LE-NEXT: vmov lr, r12, d3 +; CHECK-LE-NEXT: vmov r1, r2, d3 +; CHECK-LE-NEXT: mov.w lr, #0 +; CHECK-LE-NEXT: vmov r3, r12, d2 ; CHECK-LE-NEXT: rsbs r1, r1, #0 -; CHECK-LE-NEXT: sbcs.w r1, r3, r2 -; CHECK-LE-NEXT: mov.w r1, #0 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r1, #1 -; CHECK-LE-NEXT: rsbs.w r2, lr, #0 -; CHECK-LE-NEXT: sbcs.w r2, r3, r12 -; CHECK-LE-NEXT: it lt -; CHECK-LE-NEXT: movlt r3, #1 -; CHECK-LE-NEXT: cmp r3, #0 +; CHECK-LE-NEXT: sbcs.w 
r1, lr, r2 +; CHECK-LE-NEXT: cset r2, lt +; CHECK-LE-NEXT: cmp r2, #0 ; CHECK-LE-NEXT: it ne -; CHECK-LE-NEXT: mvnne r3, #1 -; CHECK-LE-NEXT: bfi r3, r1, #0, #1 -; CHECK-LE-NEXT: and r1, r3, #3 -; CHECK-LE-NEXT: lsls r2, r3, #31 +; CHECK-LE-NEXT: mvnne r2, #1 +; CHECK-LE-NEXT: rsbs r1, r3, #0 +; CHECK-LE-NEXT: sbcs.w r1, lr, r12 +; CHECK-LE-NEXT: cset r1, lt +; CHECK-LE-NEXT: bfi r2, r1, #0, #1 +; CHECK-LE-NEXT: and r1, r2, #3 +; CHECK-LE-NEXT: lsls r2, r2, #31 ; CHECK-LE-NEXT: it ne ; CHECK-LE-NEXT: vstrne d0, [r0] ; CHECK-LE-NEXT: lsls r1, r1, #30 @@ -1043,24 +1034,21 @@ ; CHECK-BE-NEXT: .pad #4 ; CHECK-BE-NEXT: sub sp, #4 ; CHECK-BE-NEXT: vrev64.32 q2, q1 -; CHECK-BE-NEXT: movs r3, #0 -; CHECK-BE-NEXT: vmov r1, r2, d5 -; CHECK-BE-NEXT: vmov r12, lr, d4 +; CHECK-BE-NEXT: mov.w lr, #0 +; CHECK-BE-NEXT: vmov r1, r2, d4 +; CHECK-BE-NEXT: vmov r12, r3, d5 ; CHECK-BE-NEXT: rsbs r2, r2, #0 -; CHECK-BE-NEXT: sbcs.w r1, r3, r1 -; CHECK-BE-NEXT: mov.w r1, #0 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt r1, #1 -; CHECK-BE-NEXT: rsbs.w r2, lr, #0 -; CHECK-BE-NEXT: sbcs.w r2, r3, r12 -; CHECK-BE-NEXT: it lt -; CHECK-BE-NEXT: movlt r3, #1 -; CHECK-BE-NEXT: cmp r3, #0 +; CHECK-BE-NEXT: sbcs.w r1, lr, r1 +; CHECK-BE-NEXT: cset r2, lt +; CHECK-BE-NEXT: cmp r2, #0 ; CHECK-BE-NEXT: it ne -; CHECK-BE-NEXT: mvnne r3, #1 -; CHECK-BE-NEXT: bfi r3, r1, #0, #1 -; CHECK-BE-NEXT: and r1, r3, #3 -; CHECK-BE-NEXT: lsls r2, r3, #30 +; CHECK-BE-NEXT: mvnne r2, #1 +; CHECK-BE-NEXT: rsbs r1, r3, #0 +; CHECK-BE-NEXT: sbcs.w r1, lr, r12 +; CHECK-BE-NEXT: cset r1, lt +; CHECK-BE-NEXT: bfi r2, r1, #0, #1 +; CHECK-BE-NEXT: and r1, r2, #3 +; CHECK-BE-NEXT: lsls r2, r2, #30 ; CHECK-BE-NEXT: it mi ; CHECK-BE-NEXT: vstrmi d0, [r0] ; CHECK-BE-NEXT: lsls r1, r1, #31 @@ -1224,42 +1212,34 @@ ; CHECK-LE-NEXT: .pad #4 ; CHECK-LE-NEXT: sub sp, #4 ; CHECK-LE-NEXT: vcmp.f32 s0, #0 -; CHECK-LE-NEXT: movs r1, #0 +; CHECK-LE-NEXT: vcvtb.f16.f32 s4, s0 ; CHECK-LE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-LE-NEXT: it 
gt -; CHECK-LE-NEXT: movgt r1, #1 +; CHECK-LE-NEXT: vcmp.f32 s1, #0 +; CHECK-LE-NEXT: vcvtt.f16.f32 s4, s1 +; CHECK-LE-NEXT: vcvtb.f16.f32 s6, s2 +; CHECK-LE-NEXT: vcvtt.f16.f32 s5, s3 +; CHECK-LE-NEXT: cset r1, gt ; CHECK-LE-NEXT: cmp r1, #0 ; CHECK-LE-NEXT: mov.w r1, #0 -; CHECK-LE-NEXT: csetm r3, ne -; CHECK-LE-NEXT: vcmp.f32 s1, #0 -; CHECK-LE-NEXT: bfi r1, r3, #0, #1 +; CHECK-LE-NEXT: csetm r2, ne ; CHECK-LE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-LE-NEXT: mov.w r3, #0 +; CHECK-LE-NEXT: bfi r1, r2, #0, #1 ; CHECK-LE-NEXT: vcmp.f32 s2, #0 -; CHECK-LE-NEXT: it gt -; CHECK-LE-NEXT: movgt r3, #1 -; CHECK-LE-NEXT: cmp r3, #0 -; CHECK-LE-NEXT: csetm r3, ne +; CHECK-LE-NEXT: cset r2, gt +; CHECK-LE-NEXT: cmp r2, #0 +; CHECK-LE-NEXT: csetm r2, ne ; CHECK-LE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-LE-NEXT: bfi r1, r3, #1, #1 -; CHECK-LE-NEXT: mov.w r3, #0 -; CHECK-LE-NEXT: it gt -; CHECK-LE-NEXT: movgt r3, #1 -; CHECK-LE-NEXT: cmp r3, #0 +; CHECK-LE-NEXT: bfi r1, r2, #1, #1 ; CHECK-LE-NEXT: vcmp.f32 s3, #0 -; CHECK-LE-NEXT: csetm r3, ne -; CHECK-LE-NEXT: movs r2, #0 +; CHECK-LE-NEXT: cset r2, gt +; CHECK-LE-NEXT: cmp r2, #0 +; CHECK-LE-NEXT: csetm r2, ne ; CHECK-LE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-LE-NEXT: it gt -; CHECK-LE-NEXT: movgt r2, #1 +; CHECK-LE-NEXT: bfi r1, r2, #2, #1 +; CHECK-LE-NEXT: cset r2, gt ; CHECK-LE-NEXT: cmp r2, #0 -; CHECK-LE-NEXT: bfi r1, r3, #2, #1 ; CHECK-LE-NEXT: csetm r2, ne ; CHECK-LE-NEXT: bfi r1, r2, #3, #1 -; CHECK-LE-NEXT: vcvtb.f16.f32 s4, s0 -; CHECK-LE-NEXT: vcvtt.f16.f32 s4, s1 -; CHECK-LE-NEXT: vcvtb.f16.f32 s6, s2 -; CHECK-LE-NEXT: vcvtt.f16.f32 s5, s3 ; CHECK-LE-NEXT: lsls r2, r1, #31 ; CHECK-LE-NEXT: bne .LBB25_5 ; CHECK-LE-NEXT: @ %bb.1: @ %else @@ -1298,43 +1278,35 @@ ; CHECK-BE-NEXT: .pad #4 ; CHECK-BE-NEXT: sub sp, #4 ; CHECK-BE-NEXT: vrev64.32 q1, q0 -; CHECK-BE-NEXT: movs r1, #0 ; CHECK-BE-NEXT: vcmp.f32 s7, #0 -; CHECK-BE-NEXT: movs r2, #0 +; CHECK-BE-NEXT: vcvtb.f16.f32 s0, s4 ; CHECK-BE-NEXT: vmrs APSR_nzcv, fpscr -; 
CHECK-BE-NEXT: it gt -; CHECK-BE-NEXT: movgt r1, #1 +; CHECK-BE-NEXT: vcmp.f32 s6, #0 +; CHECK-BE-NEXT: vcvtt.f16.f32 s0, s5 +; CHECK-BE-NEXT: vcvtb.f16.f32 s2, s6 +; CHECK-BE-NEXT: vcvtt.f16.f32 s1, s7 +; CHECK-BE-NEXT: cset r1, gt ; CHECK-BE-NEXT: cmp r1, #0 ; CHECK-BE-NEXT: mov.w r1, #0 -; CHECK-BE-NEXT: csetm r3, ne -; CHECK-BE-NEXT: vcmp.f32 s6, #0 -; CHECK-BE-NEXT: bfi r1, r3, #0, #1 +; CHECK-BE-NEXT: csetm r2, ne ; CHECK-BE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-BE-NEXT: mov.w r3, #0 +; CHECK-BE-NEXT: bfi r1, r2, #0, #1 ; CHECK-BE-NEXT: vcmp.f32 s5, #0 -; CHECK-BE-NEXT: it gt -; CHECK-BE-NEXT: movgt r3, #1 -; CHECK-BE-NEXT: cmp r3, #0 -; CHECK-BE-NEXT: csetm r3, ne +; CHECK-BE-NEXT: cset r2, gt +; CHECK-BE-NEXT: cmp r2, #0 +; CHECK-BE-NEXT: csetm r2, ne ; CHECK-BE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-BE-NEXT: bfi r1, r3, #1, #1 -; CHECK-BE-NEXT: mov.w r3, #0 -; CHECK-BE-NEXT: it gt -; CHECK-BE-NEXT: movgt r3, #1 -; CHECK-BE-NEXT: cmp r3, #0 +; CHECK-BE-NEXT: bfi r1, r2, #1, #1 ; CHECK-BE-NEXT: vcmp.f32 s4, #0 -; CHECK-BE-NEXT: csetm r3, ne +; CHECK-BE-NEXT: cset r2, gt +; CHECK-BE-NEXT: cmp r2, #0 +; CHECK-BE-NEXT: csetm r2, ne ; CHECK-BE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-BE-NEXT: it gt -; CHECK-BE-NEXT: movgt r2, #1 +; CHECK-BE-NEXT: bfi r1, r2, #2, #1 +; CHECK-BE-NEXT: cset r2, gt ; CHECK-BE-NEXT: cmp r2, #0 -; CHECK-BE-NEXT: bfi r1, r3, #2, #1 ; CHECK-BE-NEXT: csetm r2, ne -; CHECK-BE-NEXT: vcvtb.f16.f32 s0, s4 ; CHECK-BE-NEXT: bfi r1, r2, #3, #1 -; CHECK-BE-NEXT: vcvtt.f16.f32 s0, s5 -; CHECK-BE-NEXT: vcvtb.f16.f32 s2, s6 -; CHECK-BE-NEXT: vcvtt.f16.f32 s1, s7 ; CHECK-BE-NEXT: lsls r2, r1, #28 ; CHECK-BE-NEXT: bmi .LBB25_5 ; CHECK-BE-NEXT: @ %bb.1: @ %else @@ -1380,42 +1352,34 @@ ; CHECK-LE-NEXT: .pad #4 ; CHECK-LE-NEXT: sub sp, #4 ; CHECK-LE-NEXT: vcmp.f32 s0, #0 -; CHECK-LE-NEXT: movs r1, #0 +; CHECK-LE-NEXT: vcvtb.f16.f32 s4, s0 ; CHECK-LE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-LE-NEXT: it gt -; CHECK-LE-NEXT: movgt r1, #1 +; CHECK-LE-NEXT: vcmp.f32 
s1, #0 +; CHECK-LE-NEXT: vcvtt.f16.f32 s4, s1 +; CHECK-LE-NEXT: vcvtb.f16.f32 s6, s2 +; CHECK-LE-NEXT: vcvtt.f16.f32 s5, s3 +; CHECK-LE-NEXT: cset r1, gt ; CHECK-LE-NEXT: cmp r1, #0 ; CHECK-LE-NEXT: mov.w r1, #0 -; CHECK-LE-NEXT: csetm r3, ne -; CHECK-LE-NEXT: vcmp.f32 s1, #0 -; CHECK-LE-NEXT: bfi r1, r3, #0, #1 +; CHECK-LE-NEXT: csetm r2, ne ; CHECK-LE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-LE-NEXT: mov.w r3, #0 +; CHECK-LE-NEXT: bfi r1, r2, #0, #1 ; CHECK-LE-NEXT: vcmp.f32 s2, #0 -; CHECK-LE-NEXT: it gt -; CHECK-LE-NEXT: movgt r3, #1 -; CHECK-LE-NEXT: cmp r3, #0 -; CHECK-LE-NEXT: csetm r3, ne +; CHECK-LE-NEXT: cset r2, gt +; CHECK-LE-NEXT: cmp r2, #0 +; CHECK-LE-NEXT: csetm r2, ne ; CHECK-LE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-LE-NEXT: bfi r1, r3, #1, #1 -; CHECK-LE-NEXT: mov.w r3, #0 -; CHECK-LE-NEXT: it gt -; CHECK-LE-NEXT: movgt r3, #1 -; CHECK-LE-NEXT: cmp r3, #0 +; CHECK-LE-NEXT: bfi r1, r2, #1, #1 ; CHECK-LE-NEXT: vcmp.f32 s3, #0 -; CHECK-LE-NEXT: csetm r3, ne -; CHECK-LE-NEXT: movs r2, #0 +; CHECK-LE-NEXT: cset r2, gt +; CHECK-LE-NEXT: cmp r2, #0 +; CHECK-LE-NEXT: csetm r2, ne ; CHECK-LE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-LE-NEXT: it gt -; CHECK-LE-NEXT: movgt r2, #1 +; CHECK-LE-NEXT: bfi r1, r2, #2, #1 +; CHECK-LE-NEXT: cset r2, gt ; CHECK-LE-NEXT: cmp r2, #0 -; CHECK-LE-NEXT: bfi r1, r3, #2, #1 ; CHECK-LE-NEXT: csetm r2, ne ; CHECK-LE-NEXT: bfi r1, r2, #3, #1 -; CHECK-LE-NEXT: vcvtb.f16.f32 s4, s0 -; CHECK-LE-NEXT: vcvtt.f16.f32 s4, s1 -; CHECK-LE-NEXT: vcvtb.f16.f32 s6, s2 -; CHECK-LE-NEXT: vcvtt.f16.f32 s5, s3 ; CHECK-LE-NEXT: lsls r2, r1, #31 ; CHECK-LE-NEXT: bne .LBB26_5 ; CHECK-LE-NEXT: @ %bb.1: @ %else @@ -1454,43 +1418,35 @@ ; CHECK-BE-NEXT: .pad #4 ; CHECK-BE-NEXT: sub sp, #4 ; CHECK-BE-NEXT: vrev64.32 q1, q0 -; CHECK-BE-NEXT: movs r1, #0 ; CHECK-BE-NEXT: vcmp.f32 s7, #0 -; CHECK-BE-NEXT: movs r2, #0 +; CHECK-BE-NEXT: vcvtb.f16.f32 s0, s4 ; CHECK-BE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-BE-NEXT: it gt -; CHECK-BE-NEXT: movgt r1, #1 +; 
CHECK-BE-NEXT: vcmp.f32 s6, #0 +; CHECK-BE-NEXT: vcvtt.f16.f32 s0, s5 +; CHECK-BE-NEXT: vcvtb.f16.f32 s2, s6 +; CHECK-BE-NEXT: vcvtt.f16.f32 s1, s7 +; CHECK-BE-NEXT: cset r1, gt ; CHECK-BE-NEXT: cmp r1, #0 ; CHECK-BE-NEXT: mov.w r1, #0 -; CHECK-BE-NEXT: csetm r3, ne -; CHECK-BE-NEXT: vcmp.f32 s6, #0 -; CHECK-BE-NEXT: bfi r1, r3, #0, #1 +; CHECK-BE-NEXT: csetm r2, ne ; CHECK-BE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-BE-NEXT: mov.w r3, #0 +; CHECK-BE-NEXT: bfi r1, r2, #0, #1 ; CHECK-BE-NEXT: vcmp.f32 s5, #0 -; CHECK-BE-NEXT: it gt -; CHECK-BE-NEXT: movgt r3, #1 -; CHECK-BE-NEXT: cmp r3, #0 -; CHECK-BE-NEXT: csetm r3, ne +; CHECK-BE-NEXT: cset r2, gt +; CHECK-BE-NEXT: cmp r2, #0 +; CHECK-BE-NEXT: csetm r2, ne ; CHECK-BE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-BE-NEXT: bfi r1, r3, #1, #1 -; CHECK-BE-NEXT: mov.w r3, #0 -; CHECK-BE-NEXT: it gt -; CHECK-BE-NEXT: movgt r3, #1 -; CHECK-BE-NEXT: cmp r3, #0 +; CHECK-BE-NEXT: bfi r1, r2, #1, #1 ; CHECK-BE-NEXT: vcmp.f32 s4, #0 -; CHECK-BE-NEXT: csetm r3, ne +; CHECK-BE-NEXT: cset r2, gt +; CHECK-BE-NEXT: cmp r2, #0 +; CHECK-BE-NEXT: csetm r2, ne ; CHECK-BE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-BE-NEXT: it gt -; CHECK-BE-NEXT: movgt r2, #1 +; CHECK-BE-NEXT: bfi r1, r2, #2, #1 +; CHECK-BE-NEXT: cset r2, gt ; CHECK-BE-NEXT: cmp r2, #0 -; CHECK-BE-NEXT: bfi r1, r3, #2, #1 ; CHECK-BE-NEXT: csetm r2, ne -; CHECK-BE-NEXT: vcvtb.f16.f32 s0, s4 ; CHECK-BE-NEXT: bfi r1, r2, #3, #1 -; CHECK-BE-NEXT: vcvtt.f16.f32 s0, s5 -; CHECK-BE-NEXT: vcvtb.f16.f32 s2, s6 -; CHECK-BE-NEXT: vcvtt.f16.f32 s1, s7 ; CHECK-BE-NEXT: lsls r2, r1, #28 ; CHECK-BE-NEXT: bmi .LBB26_5 ; CHECK-BE-NEXT: @ %bb.1: @ %else @@ -1536,42 +1492,34 @@ ; CHECK-LE-NEXT: .pad #20 ; CHECK-LE-NEXT: sub sp, #20 ; CHECK-LE-NEXT: vcmp.f32 s0, #0 -; CHECK-LE-NEXT: movs r1, #0 +; CHECK-LE-NEXT: vcvtb.f16.f32 s4, s0 ; CHECK-LE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-LE-NEXT: it gt -; CHECK-LE-NEXT: movgt r1, #1 +; CHECK-LE-NEXT: vcmp.f32 s1, #0 +; CHECK-LE-NEXT: vcvtt.f16.f32 s4, s1 +; 
CHECK-LE-NEXT: vcvtb.f16.f32 s6, s2 +; CHECK-LE-NEXT: vcvtt.f16.f32 s5, s3 +; CHECK-LE-NEXT: cset r1, gt ; CHECK-LE-NEXT: cmp r1, #0 ; CHECK-LE-NEXT: mov.w r1, #0 -; CHECK-LE-NEXT: csetm r3, ne -; CHECK-LE-NEXT: vcmp.f32 s1, #0 -; CHECK-LE-NEXT: bfi r1, r3, #0, #1 +; CHECK-LE-NEXT: csetm r2, ne ; CHECK-LE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-LE-NEXT: mov.w r3, #0 +; CHECK-LE-NEXT: bfi r1, r2, #0, #1 ; CHECK-LE-NEXT: vcmp.f32 s2, #0 -; CHECK-LE-NEXT: it gt -; CHECK-LE-NEXT: movgt r3, #1 -; CHECK-LE-NEXT: cmp r3, #0 -; CHECK-LE-NEXT: csetm r3, ne +; CHECK-LE-NEXT: cset r2, gt +; CHECK-LE-NEXT: cmp r2, #0 +; CHECK-LE-NEXT: csetm r2, ne ; CHECK-LE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-LE-NEXT: bfi r1, r3, #1, #1 -; CHECK-LE-NEXT: mov.w r3, #0 -; CHECK-LE-NEXT: it gt -; CHECK-LE-NEXT: movgt r3, #1 -; CHECK-LE-NEXT: cmp r3, #0 +; CHECK-LE-NEXT: bfi r1, r2, #1, #1 ; CHECK-LE-NEXT: vcmp.f32 s3, #0 -; CHECK-LE-NEXT: csetm r3, ne -; CHECK-LE-NEXT: movs r2, #0 +; CHECK-LE-NEXT: cset r2, gt +; CHECK-LE-NEXT: cmp r2, #0 +; CHECK-LE-NEXT: csetm r2, ne ; CHECK-LE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-LE-NEXT: it gt -; CHECK-LE-NEXT: movgt r2, #1 +; CHECK-LE-NEXT: bfi r1, r2, #2, #1 +; CHECK-LE-NEXT: cset r2, gt ; CHECK-LE-NEXT: cmp r2, #0 -; CHECK-LE-NEXT: bfi r1, r3, #2, #1 ; CHECK-LE-NEXT: csetm r2, ne ; CHECK-LE-NEXT: bfi r1, r2, #3, #1 -; CHECK-LE-NEXT: vcvtb.f16.f32 s4, s0 -; CHECK-LE-NEXT: vcvtt.f16.f32 s4, s1 -; CHECK-LE-NEXT: vcvtb.f16.f32 s6, s2 -; CHECK-LE-NEXT: vcvtt.f16.f32 s5, s3 ; CHECK-LE-NEXT: lsls r2, r1, #31 ; CHECK-LE-NEXT: bne .LBB27_5 ; CHECK-LE-NEXT: @ %bb.1: @ %else @@ -1618,43 +1566,35 @@ ; CHECK-BE-NEXT: .pad #20 ; CHECK-BE-NEXT: sub sp, #20 ; CHECK-BE-NEXT: vrev64.32 q1, q0 -; CHECK-BE-NEXT: movs r1, #0 ; CHECK-BE-NEXT: vcmp.f32 s7, #0 -; CHECK-BE-NEXT: movs r2, #0 +; CHECK-BE-NEXT: vcvtb.f16.f32 s0, s4 ; CHECK-BE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-BE-NEXT: it gt -; CHECK-BE-NEXT: movgt r1, #1 +; CHECK-BE-NEXT: vcmp.f32 s6, #0 +; CHECK-BE-NEXT: 
vcvtt.f16.f32 s0, s5 +; CHECK-BE-NEXT: vcvtb.f16.f32 s2, s6 +; CHECK-BE-NEXT: vcvtt.f16.f32 s1, s7 +; CHECK-BE-NEXT: cset r1, gt ; CHECK-BE-NEXT: cmp r1, #0 ; CHECK-BE-NEXT: mov.w r1, #0 -; CHECK-BE-NEXT: csetm r3, ne -; CHECK-BE-NEXT: vcmp.f32 s6, #0 -; CHECK-BE-NEXT: bfi r1, r3, #0, #1 +; CHECK-BE-NEXT: csetm r2, ne ; CHECK-BE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-BE-NEXT: mov.w r3, #0 +; CHECK-BE-NEXT: bfi r1, r2, #0, #1 ; CHECK-BE-NEXT: vcmp.f32 s5, #0 -; CHECK-BE-NEXT: it gt -; CHECK-BE-NEXT: movgt r3, #1 -; CHECK-BE-NEXT: cmp r3, #0 -; CHECK-BE-NEXT: csetm r3, ne +; CHECK-BE-NEXT: cset r2, gt +; CHECK-BE-NEXT: cmp r2, #0 +; CHECK-BE-NEXT: csetm r2, ne ; CHECK-BE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-BE-NEXT: bfi r1, r3, #1, #1 -; CHECK-BE-NEXT: mov.w r3, #0 -; CHECK-BE-NEXT: it gt -; CHECK-BE-NEXT: movgt r3, #1 -; CHECK-BE-NEXT: cmp r3, #0 +; CHECK-BE-NEXT: bfi r1, r2, #1, #1 ; CHECK-BE-NEXT: vcmp.f32 s4, #0 -; CHECK-BE-NEXT: csetm r3, ne +; CHECK-BE-NEXT: cset r2, gt +; CHECK-BE-NEXT: cmp r2, #0 +; CHECK-BE-NEXT: csetm r2, ne ; CHECK-BE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-BE-NEXT: it gt -; CHECK-BE-NEXT: movgt r2, #1 +; CHECK-BE-NEXT: bfi r1, r2, #2, #1 +; CHECK-BE-NEXT: cset r2, gt ; CHECK-BE-NEXT: cmp r2, #0 -; CHECK-BE-NEXT: bfi r1, r3, #2, #1 ; CHECK-BE-NEXT: csetm r2, ne -; CHECK-BE-NEXT: vcvtb.f16.f32 s0, s4 ; CHECK-BE-NEXT: bfi r1, r2, #3, #1 -; CHECK-BE-NEXT: vcvtt.f16.f32 s0, s5 -; CHECK-BE-NEXT: vcvtb.f16.f32 s2, s6 -; CHECK-BE-NEXT: vcvtt.f16.f32 s1, s7 ; CHECK-BE-NEXT: lsls r2, r1, #28 ; CHECK-BE-NEXT: bmi .LBB27_5 ; CHECK-BE-NEXT: @ %bb.1: @ %else diff --git a/llvm/test/CodeGen/Thumb2/mve-minmax.ll b/llvm/test/CodeGen/Thumb2/mve-minmax.ll --- a/llvm/test/CodeGen/Thumb2/mve-minmax.ll +++ b/llvm/test/CodeGen/Thumb2/mve-minmax.ll @@ -38,32 +38,28 @@ define arm_aapcs_vfpcc <2 x i64> @smin_v2i64(<2 x i64> %s1, <2 x i64> %s2) { ; CHECK-LABEL: smin_v2i64: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r6, lr} -; CHECK-NEXT: push {r4, r5, r6, 
lr} +; CHECK-NEXT: .save {r4, r5, r7, lr} +; CHECK-NEXT: push {r4, r5, r7, lr} ; CHECK-NEXT: vmov r0, r1, d3 -; CHECK-NEXT: movs r6, #0 ; CHECK-NEXT: vmov r2, r3, d1 ; CHECK-NEXT: vmov r12, lr, d2 ; CHECK-NEXT: vmov r4, r5, d0 ; CHECK-NEXT: subs r0, r2, r0 ; CHECK-NEXT: sbcs.w r0, r3, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cset r0, lt ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: subs.w r1, r4, r12 ; CHECK-NEXT: sbcs.w r1, r5, lr -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r6, #1 -; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: cset r1, lt +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 ; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vbic q1, q1, q2 ; CHECK-NEXT: vand q0, q0, q2 ; CHECK-NEXT: vorr q0, q0, q1 -; CHECK-NEXT: pop {r4, r5, r6, pc} +; CHECK-NEXT: pop {r4, r5, r7, pc} entry: %0 = icmp slt <2 x i64> %s1, %s2 %1 = select <2 x i1> %0, <2 x i64> %s1, <2 x i64> %s2 @@ -106,32 +102,28 @@ define arm_aapcs_vfpcc <2 x i64> @umin_v2i64(<2 x i64> %s1, <2 x i64> %s2) { ; CHECK-LABEL: umin_v2i64: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r6, lr} -; CHECK-NEXT: push {r4, r5, r6, lr} +; CHECK-NEXT: .save {r4, r5, r7, lr} +; CHECK-NEXT: push {r4, r5, r7, lr} ; CHECK-NEXT: vmov r0, r1, d3 -; CHECK-NEXT: movs r6, #0 ; CHECK-NEXT: vmov r2, r3, d1 ; CHECK-NEXT: vmov r12, lr, d2 ; CHECK-NEXT: vmov r4, r5, d0 ; CHECK-NEXT: subs r0, r2, r0 ; CHECK-NEXT: sbcs.w r0, r3, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r0, #1 +; CHECK-NEXT: cset r0, lo ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: subs.w r1, r4, r12 ; CHECK-NEXT: sbcs.w r1, r5, lr -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r6, #1 -; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: cset r1, lo +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 ; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vbic q1, q1, q2 
; CHECK-NEXT: vand q0, q0, q2 ; CHECK-NEXT: vorr q0, q0, q1 -; CHECK-NEXT: pop {r4, r5, r6, pc} +; CHECK-NEXT: pop {r4, r5, r7, pc} entry: %0 = icmp ult <2 x i64> %s1, %s2 %1 = select <2 x i1> %0, <2 x i64> %s1, <2 x i64> %s2 @@ -175,32 +167,28 @@ define arm_aapcs_vfpcc <2 x i64> @smax_v2i64(<2 x i64> %s1, <2 x i64> %s2) { ; CHECK-LABEL: smax_v2i64: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r6, lr} -; CHECK-NEXT: push {r4, r5, r6, lr} +; CHECK-NEXT: .save {r4, r5, r7, lr} +; CHECK-NEXT: push {r4, r5, r7, lr} ; CHECK-NEXT: vmov r0, r1, d1 -; CHECK-NEXT: movs r6, #0 ; CHECK-NEXT: vmov r2, r3, d3 ; CHECK-NEXT: vmov r12, lr, d0 ; CHECK-NEXT: vmov r4, r5, d2 ; CHECK-NEXT: subs r0, r2, r0 ; CHECK-NEXT: sbcs.w r0, r3, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cset r0, lt ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: subs.w r1, r4, r12 ; CHECK-NEXT: sbcs.w r1, r5, lr -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r6, #1 -; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: cset r1, lt +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 ; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vbic q1, q1, q2 ; CHECK-NEXT: vand q0, q0, q2 ; CHECK-NEXT: vorr q0, q0, q1 -; CHECK-NEXT: pop {r4, r5, r6, pc} +; CHECK-NEXT: pop {r4, r5, r7, pc} entry: %0 = icmp sgt <2 x i64> %s1, %s2 %1 = select <2 x i1> %0, <2 x i64> %s1, <2 x i64> %s2 @@ -243,32 +231,28 @@ define arm_aapcs_vfpcc <2 x i64> @umax_v2i64(<2 x i64> %s1, <2 x i64> %s2) { ; CHECK-LABEL: umax_v2i64: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r6, lr} -; CHECK-NEXT: push {r4, r5, r6, lr} +; CHECK-NEXT: .save {r4, r5, r7, lr} +; CHECK-NEXT: push {r4, r5, r7, lr} ; CHECK-NEXT: vmov r0, r1, d1 -; CHECK-NEXT: movs r6, #0 ; CHECK-NEXT: vmov r2, r3, d3 ; CHECK-NEXT: vmov r12, lr, d0 ; CHECK-NEXT: vmov r4, r5, d2 ; CHECK-NEXT: subs r0, r2, r0 ; CHECK-NEXT: sbcs.w r0, r3, r1 -; CHECK-NEXT: mov.w r0, #0 -; 
CHECK-NEXT: it lo -; CHECK-NEXT: movlo r0, #1 +; CHECK-NEXT: cset r0, lo ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: subs.w r1, r4, r12 ; CHECK-NEXT: sbcs.w r1, r5, lr -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r6, #1 -; CHECK-NEXT: cmp r6, #0 +; CHECK-NEXT: cset r1, lo +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 ; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vbic q1, q1, q2 ; CHECK-NEXT: vand q0, q0, q2 ; CHECK-NEXT: vorr q0, q0, q1 -; CHECK-NEXT: pop {r4, r5, r6, pc} +; CHECK-NEXT: pop {r4, r5, r7, pc} entry: %0 = icmp ugt <2 x i64> %s1, %s2 %1 = select <2 x i1> %0, <2 x i64> %s1, <2 x i64> %s2 diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-and.ll b/llvm/test/CodeGen/Thumb2/mve-pred-and.ll --- a/llvm/test/CodeGen/Thumb2/mve-pred-and.ll +++ b/llvm/test/CodeGen/Thumb2/mve-pred-and.ll @@ -578,13 +578,9 @@ ; CHECK-NEXT: vmov r0, r1, d5 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d4 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 ; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vbic q1, q1, q2 @@ -609,27 +605,19 @@ ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r12, r2, d4 ; CHECK-NEXT: vmov r3, r1, d2 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: eors r1, r2 ; CHECK-NEXT: eor.w r2, r3, r12 ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 ; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; 
CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q3[2], q3[0], r1, r0 ; CHECK-NEXT: vmov q3[3], q3[1], r1, r0 ; CHECK-NEXT: vand q2, q3, q2 @@ -653,27 +641,19 @@ ; CHECK-NEXT: eors r2, r0 ; CHECK-NEXT: orrs r2, r3 ; CHECK-NEXT: vmov r12, r3, d2 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: eors r1, r3 ; CHECK-NEXT: eor.w r0, r0, r12 ; CHECK-NEXT: orrs r0, r1 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: vmov q2[2], q2[0], r0, r2 ; CHECK-NEXT: vmov q2[3], q2[1], r0, r2 ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q3[2], q3[0], r1, r0 ; CHECK-NEXT: vmov q3[3], q3[1], r1, r0 ; CHECK-NEXT: vand q2, q3, q2 diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-ext.ll b/llvm/test/CodeGen/Thumb2/mve-pred-ext.ll --- a/llvm/test/CodeGen/Thumb2/mve-pred-ext.ll +++ b/llvm/test/CodeGen/Thumb2/mve-pred-ext.ll @@ -75,20 +75,17 @@ ; CHECK-LABEL: sext_v2i1_v2i64: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmov r0, r1, d1 -; CHECK-NEXT: movs r3, #0 -; CHECK-NEXT: vmov r2, r12, d0 +; CHECK-NEXT: mov.w r12, #0 +; CHECK-NEXT: vmov r2, r3, d0 ; CHECK-NEXT: rsbs r0, r0, #0 -; CHECK-NEXT: sbcs.w r0, r3, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: sbcs.w r0, r12, r1 +; CHECK-NEXT: cset r0, lt ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: rsbs r1, r2, #0 -; CHECK-NEXT: sbcs.w r1, r3, r12 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 -; CHECK-NEXT: 
cmp r3, #0 +; CHECK-NEXT: sbcs.w r1, r12, r3 +; CHECK-NEXT: cset r1, lt +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 @@ -216,22 +213,19 @@ ; CHECK-NEXT: .save {r7, lr} ; CHECK-NEXT: push {r7, lr} ; CHECK-NEXT: vmov r0, r1, d1 -; CHECK-NEXT: movs r3, #0 -; CHECK-NEXT: vmov lr, r12, d0 -; CHECK-NEXT: adr r2, .LCPI12_0 -; CHECK-NEXT: vldrw.u32 q0, [r2] +; CHECK-NEXT: mov.w r12, #0 +; CHECK-NEXT: vmov r2, lr, d0 +; CHECK-NEXT: adr r3, .LCPI12_0 +; CHECK-NEXT: vldrw.u32 q0, [r3] ; CHECK-NEXT: rsbs r0, r0, #0 -; CHECK-NEXT: sbcs.w r0, r3, r1 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: sbcs.w r0, r12, r1 +; CHECK-NEXT: cset r0, lt ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: rsbs.w r1, lr, #0 -; CHECK-NEXT: sbcs.w r1, r3, r12 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 -; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: rsbs r1, r2, #0 +; CHECK-NEXT: sbcs.w r1, r12, lr +; CHECK-NEXT: cset r1, lt +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vand q0, q1, q0 @@ -418,8 +412,6 @@ ret <4 x float> %s } - - define arm_aapcs_vfpcc <8 x half> @uitofp_v8i1_v8f16(<8 x i16> %src) { ; CHECK-LABEL: uitofp_v8i1_v8f16: ; CHECK: @ %bb.0: @ %entry @@ -475,3 +467,162 @@ %s = select <8 x i1> %0, <8 x half> , <8 x half> zeroinitializer ret <8 x half> %s } + + +define arm_aapcs_vfpcc <2 x double> @uitofp_v2i1_v2f64(<2 x i64> %src) { +; CHECK-LABEL: uitofp_v2i1_v2f64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov r0, r1, d1 +; CHECK-NEXT: mov.w r12, #0 +; CHECK-NEXT: vmov r2, lr, d0 +; CHECK-NEXT: adr r3, .LCPI26_0 +; CHECK-NEXT: vldrw.u32 q0, [r3] +; CHECK-NEXT: rsbs r0, r0, #0 +; CHECK-NEXT: sbcs.w r0, r12, r1 +; CHECK-NEXT: cset r0, lt +; CHECK-NEXT: cmp r0, #0 
+; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: rsbs r1, r2, #0 +; CHECK-NEXT: sbcs.w r1, r12, lr +; CHECK-NEXT: cset r1, lt +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 +; CHECK-NEXT: vand q4, q1, q0 +; CHECK-NEXT: vmov r0, r1, d9 +; CHECK-NEXT: bl __aeabi_ul2d +; CHECK-NEXT: vmov r2, r3, d8 +; CHECK-NEXT: vmov d9, r0, r1 +; CHECK-NEXT: mov r0, r2 +; CHECK-NEXT: mov r1, r3 +; CHECK-NEXT: bl __aeabi_ul2d +; CHECK-NEXT: vmov d8, r0, r1 +; CHECK-NEXT: vmov q0, q4 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: pop {r7, pc} +; CHECK-NEXT: .p2align 4 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI26_0: +; CHECK-NEXT: .long 1 @ 0x1 +; CHECK-NEXT: .long 0 @ 0x0 +; CHECK-NEXT: .long 1 @ 0x1 +; CHECK-NEXT: .long 0 @ 0x0 +entry: + %c = icmp sgt <2 x i64> %src, zeroinitializer + %0 = uitofp <2 x i1> %c to <2 x double> + ret <2 x double> %0 +} + +define arm_aapcs_vfpcc <2 x double> @sitofp_v2i1_v2f64(<2 x i64> %src) { +; CHECK-LABEL: sitofp_v2i1_v2f64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, lr} +; CHECK-NEXT: push {r4, lr} +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov r0, r1, d0 +; CHECK-NEXT: mov.w r12, #0 +; CHECK-NEXT: vmov r2, r3, d1 +; CHECK-NEXT: rsbs r0, r0, #0 +; CHECK-NEXT: sbcs.w r0, r12, r1 +; CHECK-NEXT: cset r4, lt +; CHECK-NEXT: rsbs r0, r2, #0 +; CHECK-NEXT: sbcs.w r0, r12, r3 +; CHECK-NEXT: cset r0, lt +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: mov r1, r0 +; CHECK-NEXT: bl __aeabi_l2d +; CHECK-NEXT: cmp r4, #0 +; CHECK-NEXT: vmov d9, r0, r1 +; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: mov r0, r2 +; CHECK-NEXT: mov r1, r2 +; CHECK-NEXT: bl __aeabi_l2d +; CHECK-NEXT: vmov d8, r0, r1 +; CHECK-NEXT: vmov q0, q4 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: pop {r4, pc} +entry: + %c = icmp sgt <2 x i64> %src, zeroinitializer + %0 = sitofp <2 x i1> %c to <2 x double> + ret <2 x double> %0 +} + +define arm_aapcs_vfpcc <2 x double> @fptoui_v2i1_v2f64(<2 x 
double> %src) { +; CHECK-LABEL: fptoui_v2i1_v2f64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r7, lr} +; CHECK-NEXT: push {r4, r5, r7, lr} +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: vmov r0, r1, d9 +; CHECK-NEXT: bl __aeabi_d2ulz +; CHECK-NEXT: mov r4, r0 +; CHECK-NEXT: mov r5, r1 +; CHECK-NEXT: vmov r0, r1, d8 +; CHECK-NEXT: bl __aeabi_d2ulz +; CHECK-NEXT: vmov q1[2], q1[0], r0, r4 +; CHECK-NEXT: adr r2, .LCPI28_0 +; CHECK-NEXT: vmov q1[3], q1[1], r1, r5 +; CHECK-NEXT: vldrw.u32 q0, [r2] +; CHECK-NEXT: vmov r0, s6 +; CHECK-NEXT: vmov r1, s4 +; CHECK-NEXT: rsbs r0, r0, #0 +; CHECK-NEXT: rsbs r1, r1, #0 +; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 +; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 +; CHECK-NEXT: vand q0, q0, q1 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: pop {r4, r5, r7, pc} +; CHECK-NEXT: .p2align 4 +; CHECK-NEXT: @ %bb.1: +; CHECK-NEXT: .LCPI28_0: +; CHECK-NEXT: .long 0 @ double 1 +; CHECK-NEXT: .long 1072693248 +; CHECK-NEXT: .long 0 @ double 1 +; CHECK-NEXT: .long 1072693248 +entry: + %0 = fptoui <2 x double> %src to <2 x i1> + %s = select <2 x i1> %0, <2 x double> , <2 x double> zeroinitializer + ret <2 x double> %s +} + +define arm_aapcs_vfpcc <2 x double> @fptosi_v2i1_v2f64(<2 x double> %src) { +; CHECK-LABEL: fptosi_v2i1_v2f64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: .save {r4, r5, r7, lr} +; CHECK-NEXT: push {r4, r5, r7, lr} +; CHECK-NEXT: .vsave {d8, d9} +; CHECK-NEXT: vpush {d8, d9} +; CHECK-NEXT: vmov q4, q0 +; CHECK-NEXT: vmov r0, r1, d9 +; CHECK-NEXT: bl __aeabi_d2lz +; CHECK-NEXT: mov r4, r0 +; CHECK-NEXT: mov r5, r1 +; CHECK-NEXT: vmov r0, r1, d8 +; CHECK-NEXT: bl __aeabi_d2lz +; CHECK-NEXT: adr r2, .LCPI29_0 +; CHECK-NEXT: vmov q1[2], q1[0], r0, r4 +; CHECK-NEXT: vldrw.u32 q0, [r2] +; CHECK-NEXT: vmov q1[3], q1[1], r1, r5 +; CHECK-NEXT: vand q0, q0, q1 +; CHECK-NEXT: vpop {d8, d9} +; CHECK-NEXT: pop {r4, r5, r7, pc} +; CHECK-NEXT: .p2align 4 +; CHECK-NEXT: @ %bb.1: +; 
CHECK-NEXT: .LCPI29_0: +; CHECK-NEXT: .long 0 @ double 1 +; CHECK-NEXT: .long 1072693248 +; CHECK-NEXT: .long 0 @ double 1 +; CHECK-NEXT: .long 1072693248 +entry: + %0 = fptosi <2 x double> %src to <2 x i1> + %s = select <2 x i1> %0, <2 x double> , <2 x double> zeroinitializer + ret <2 x double> %s +} diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-not.ll b/llvm/test/CodeGen/Thumb2/mve-pred-not.ll --- a/llvm/test/CodeGen/Thumb2/mve-pred-not.ll +++ b/llvm/test/CodeGen/Thumb2/mve-pred-not.ll @@ -326,13 +326,9 @@ ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 ; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vbic q0, q0, q2 @@ -352,13 +348,9 @@ ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 ; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vbic q0, q0, q2 diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-or.ll b/llvm/test/CodeGen/Thumb2/mve-pred-or.ll --- a/llvm/test/CodeGen/Thumb2/mve-pred-or.ll +++ b/llvm/test/CodeGen/Thumb2/mve-pred-or.ll @@ -380,25 +380,17 @@ ; CHECK-NEXT: vmov r0, r1, d3 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d2 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 ; 
CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q3[2], q3[0], r1, r0 ; CHECK-NEXT: vmov q3[3], q3[1], r1, r0 ; CHECK-NEXT: vorr q2, q3, q2 @@ -424,27 +416,19 @@ ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r12, r2, d4 ; CHECK-NEXT: vmov r3, r1, d2 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: eors r1, r2 ; CHECK-NEXT: eor.w r2, r3, r12 ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 ; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q3[2], q3[0], r1, r0 ; CHECK-NEXT: vmov q3[3], q3[1], r1, r0 ; CHECK-NEXT: vorr q2, q3, q2 diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-shuffle.ll b/llvm/test/CodeGen/Thumb2/mve-pred-shuffle.ll --- a/llvm/test/CodeGen/Thumb2/mve-pred-shuffle.ll +++ b/llvm/test/CodeGen/Thumb2/mve-pred-shuffle.ll @@ -1,6 +1,36 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s +define <2 x i64> @shuffle1_v2i64(<2 x i64> %src, <2 x i64> %a, <2 x i64> %b) { +; CHECK-LABEL: shuffle1_v2i64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: orrs r0, r1 +; 
CHECK-NEXT: cset r0, eq +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: orrs.w r1, r2, r3 +; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 +; CHECK-NEXT: add r0, sp, #16 +; CHECK-NEXT: vldrw.u32 q1, [r0] +; CHECK-NEXT: mov r0, sp +; CHECK-NEXT: vldrw.u32 q2, [r0] +; CHECK-NEXT: vbic q1, q1, q0 +; CHECK-NEXT: vand q0, q2, q0 +; CHECK-NEXT: vorr q0, q0, q1 +; CHECK-NEXT: vmov r0, r1, d0 +; CHECK-NEXT: vmov r2, r3, d1 +; CHECK-NEXT: bx lr +entry: + %c = icmp eq <2 x i64> %src, zeroinitializer + %sh = shufflevector <2 x i1> %c, <2 x i1> undef, <2 x i32> + %s = select <2 x i1> %sh, <2 x i64> %a, <2 x i64> %b + ret <2 x i64> %s +} + define <4 x i32> @shuffle1_v4i32(<4 x i32> %src, <4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: shuffle1_v4i32: ; CHECK: @ %bb.0: @ %entry @@ -76,6 +106,32 @@ ret <16 x i8> %s } +define <2 x i64> @shuffle2_v2i64(<2 x i64> %src, <2 x i64> %a, <2 x i64> %b) { +; CHECK-LABEL: shuffle2_v2i64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: orrs r2, r3 +; CHECK-NEXT: csetm r2, eq +; CHECK-NEXT: orrs r0, r1 +; CHECK-NEXT: csetm r0, eq +; CHECK-NEXT: vmov q0[2], q0[0], r0, r2 +; CHECK-NEXT: vmov q0[3], q0[1], r0, r2 +; CHECK-NEXT: add r0, sp, #16 +; CHECK-NEXT: vldrw.u32 q1, [r0] +; CHECK-NEXT: mov r0, sp +; CHECK-NEXT: vldrw.u32 q2, [r0] +; CHECK-NEXT: vbic q1, q1, q0 +; CHECK-NEXT: vand q0, q2, q0 +; CHECK-NEXT: vorr q0, q0, q1 +; CHECK-NEXT: vmov r0, r1, d0 +; CHECK-NEXT: vmov r2, r3, d1 +; CHECK-NEXT: bx lr +entry: + %c = icmp eq <2 x i64> %src, zeroinitializer + %sh = shufflevector <2 x i1> %c, <2 x i1> undef, <2 x i32> + %s = select <2 x i1> %sh, <2 x i64> %a, <2 x i64> %b + ret <2 x i64> %s +} + define <4 x i32> @shuffle2_v4i32(<4 x i32> %src, <4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: shuffle2_v4i32: ; CHECK: @ %bb.0: @ %entry @@ -139,6 +195,31 @@ ret <16 x i8> %s } +define <2 x i64> @shuffle3_v2i64(<2 x i64> 
%src, <2 x i64> %a, <2 x i64> %b) { +; CHECK-LABEL: shuffle3_v2i64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: orrs r0, r1 +; CHECK-NEXT: cset r0, eq +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: vdup.32 q0, r0 +; CHECK-NEXT: add r0, sp, #16 +; CHECK-NEXT: vldrw.u32 q1, [r0] +; CHECK-NEXT: mov r0, sp +; CHECK-NEXT: vldrw.u32 q2, [r0] +; CHECK-NEXT: vbic q1, q1, q0 +; CHECK-NEXT: vand q0, q2, q0 +; CHECK-NEXT: vorr q0, q0, q1 +; CHECK-NEXT: vmov r0, r1, d0 +; CHECK-NEXT: vmov r2, r3, d1 +; CHECK-NEXT: bx lr +entry: + %c = icmp eq <2 x i64> %src, zeroinitializer + %sh = shufflevector <2 x i1> %c, <2 x i1> undef, <2 x i32> + %s = select <2 x i1> %sh, <2 x i64> %a, <2 x i64> %b + ret <2 x i64> %s +} + define <4 x i32> @shuffle3_v4i32(<4 x i32> %src, <4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: shuffle3_v4i32: ; CHECK: @ %bb.0: @ %entry @@ -220,6 +301,31 @@ ret <16 x i8> %s } +define <2 x i64> @shuffle4_v2i64(<2 x i64> %src, <2 x i64> %a, <2 x i64> %b) { +; CHECK-LABEL: shuffle4_v2i64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: orrs.w r0, r2, r3 +; CHECK-NEXT: cset r0, eq +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: vdup.32 q0, r0 +; CHECK-NEXT: add r0, sp, #16 +; CHECK-NEXT: vldrw.u32 q1, [r0] +; CHECK-NEXT: mov r0, sp +; CHECK-NEXT: vldrw.u32 q2, [r0] +; CHECK-NEXT: vbic q1, q1, q0 +; CHECK-NEXT: vand q0, q2, q0 +; CHECK-NEXT: vorr q0, q0, q1 +; CHECK-NEXT: vmov r0, r1, d0 +; CHECK-NEXT: vmov r2, r3, d1 +; CHECK-NEXT: bx lr +entry: + %c = icmp eq <2 x i64> %src, zeroinitializer + %sh = shufflevector <2 x i1> %c, <2 x i1> undef, <2 x i32> + %s = select <2 x i1> %sh, <2 x i64> %a, <2 x i64> %b + ret <2 x i64> %s +} + define <4 x i32> @shuffle4_v4i32(<4 x i32> %src, <4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: shuffle4_v4i32: ; CHECK: @ %bb.0: @ %entry @@ -306,6 +412,66 @@ ret <16 x i8> %s } +define <2 x i64> @shuffle5_b_v2i64(<4 x i32> %src, <2 x i64> %a, <2 x i64> %b) { +; CHECK-LABEL: shuffle5_b_v2i64: +; CHECK: @ %bb.0: 
@ %entry +; CHECK-NEXT: vmov d1, r2, r3 +; CHECK-NEXT: vmov d0, r0, r1 +; CHECK-NEXT: vcmp.i32 eq, q0, zr +; CHECK-NEXT: vmrs r0, p0 +; CHECK-NEXT: ubfx r1, r0, #4, #1 +; CHECK-NEXT: and r0, r0, #1 +; CHECK-NEXT: rsbs r1, r1, #0 +; CHECK-NEXT: rsbs r0, r0, #0 +; CHECK-NEXT: vmov q0[2], q0[0], r0, r1 +; CHECK-NEXT: vmov q0[3], q0[1], r0, r1 +; CHECK-NEXT: add r0, sp, #16 +; CHECK-NEXT: vldrw.u32 q1, [r0] +; CHECK-NEXT: mov r0, sp +; CHECK-NEXT: vldrw.u32 q2, [r0] +; CHECK-NEXT: vbic q1, q1, q0 +; CHECK-NEXT: vand q0, q2, q0 +; CHECK-NEXT: vorr q0, q0, q1 +; CHECK-NEXT: vmov r0, r1, d0 +; CHECK-NEXT: vmov r2, r3, d1 +; CHECK-NEXT: bx lr +entry: + %c = icmp eq <4 x i32> %src, zeroinitializer + %sh = shufflevector <4 x i1> %c, <4 x i1> undef, <2 x i32> + %s = select <2 x i1> %sh, <2 x i64> %a, <2 x i64> %b + ret <2 x i64> %s +} + +define <2 x i64> @shuffle5_t_v2i64(<4 x i32> %src, <2 x i64> %a, <2 x i64> %b) { +; CHECK-LABEL: shuffle5_t_v2i64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov d1, r2, r3 +; CHECK-NEXT: vmov d0, r0, r1 +; CHECK-NEXT: vcmp.i32 eq, q0, zr +; CHECK-NEXT: vmrs r0, p0 +; CHECK-NEXT: ubfx r1, r0, #12, #1 +; CHECK-NEXT: ubfx r0, r0, #8, #1 +; CHECK-NEXT: rsbs r1, r1, #0 +; CHECK-NEXT: rsbs r0, r0, #0 +; CHECK-NEXT: vmov q0[2], q0[0], r0, r1 +; CHECK-NEXT: vmov q0[3], q0[1], r0, r1 +; CHECK-NEXT: add r0, sp, #16 +; CHECK-NEXT: vldrw.u32 q1, [r0] +; CHECK-NEXT: mov r0, sp +; CHECK-NEXT: vldrw.u32 q2, [r0] +; CHECK-NEXT: vbic q1, q1, q0 +; CHECK-NEXT: vand q0, q2, q0 +; CHECK-NEXT: vorr q0, q0, q1 +; CHECK-NEXT: vmov r0, r1, d0 +; CHECK-NEXT: vmov r2, r3, d1 +; CHECK-NEXT: bx lr +entry: + %c = icmp eq <4 x i32> %src, zeroinitializer + %sh = shufflevector <4 x i1> %c, <4 x i1> undef, <2 x i32> + %s = select <2 x i1> %sh, <2 x i64> %a, <2 x i64> %b + ret <2 x i64> %s +} + define <4 x i32> @shuffle5_b_v4i32(<8 x i16> %src, <4 x i32> %a, <4 x i32> %b) { ; CHECK-LABEL: shuffle5_b_v4i32: ; CHECK: @ %bb.0: @ %entry @@ -450,6 +616,53 @@ ret <8 x i16> %s } 
+define <4 x i32> @shuffle6_v2i64(<2 x i64> %src1, <2 x i64> %src2, <4 x i32> %a, <4 x i32> %b) { +; CHECK-LABEL: shuffle6_v2i64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: orrs r2, r3 +; CHECK-NEXT: csetm r2, eq +; CHECK-NEXT: orrs r0, r1 +; CHECK-NEXT: csetm r0, eq +; CHECK-NEXT: movs r1, #0 +; CHECK-NEXT: vmov q0[2], q0[0], r0, r2 +; CHECK-NEXT: vmov r0, s0 +; CHECK-NEXT: and r0, r0, #1 +; CHECK-NEXT: rsbs r0, r0, #0 +; CHECK-NEXT: bfi r1, r0, #0, #4 +; CHECK-NEXT: and r0, r2, #1 +; CHECK-NEXT: rsbs r0, r0, #0 +; CHECK-NEXT: bfi r1, r0, #4, #4 +; CHECK-NEXT: mov r0, sp +; CHECK-NEXT: vldrw.u32 q0, [r0] +; CHECK-NEXT: vmov r0, r2, d0 +; CHECK-NEXT: orrs r0, r2 +; CHECK-NEXT: csetm r0, eq +; CHECK-NEXT: and r0, r0, #1 +; CHECK-NEXT: rsbs r0, r0, #0 +; CHECK-NEXT: bfi r1, r0, #8, #4 +; CHECK-NEXT: vmov r0, r2, d1 +; CHECK-NEXT: orrs r0, r2 +; CHECK-NEXT: csetm r0, eq +; CHECK-NEXT: and r0, r0, #1 +; CHECK-NEXT: rsbs r0, r0, #0 +; CHECK-NEXT: bfi r1, r0, #12, #4 +; CHECK-NEXT: add r0, sp, #32 +; CHECK-NEXT: vldrw.u32 q0, [r0] +; CHECK-NEXT: add r0, sp, #16 +; CHECK-NEXT: vldrw.u32 q1, [r0] +; CHECK-NEXT: vmsr p0, r1 +; CHECK-NEXT: vpsel q0, q1, q0 +; CHECK-NEXT: vmov r0, r1, d0 +; CHECK-NEXT: vmov r2, r3, d1 +; CHECK-NEXT: bx lr +entry: + %c1 = icmp eq <2 x i64> %src1, zeroinitializer + %c2 = icmp eq <2 x i64> %src2, zeroinitializer + %sh = shufflevector <2 x i1> %c1, <2 x i1> %c2, <4 x i32> + %s = select <4 x i1> %sh, <4 x i32> %a, <4 x i32> %b + ret <4 x i32> %s +} + define <8 x i16> @shuffle6_v4i32(<4 x i32> %src1, <4 x i32> %src2, <8 x i16> %a, <8 x i16> %b) { ; CHECK-LABEL: shuffle6_v4i32: ; CHECK: @ %bb.0: @ %entry diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-spill.ll b/llvm/test/CodeGen/Thumb2/mve-pred-spill.ll --- a/llvm/test/CodeGen/Thumb2/mve-pred-spill.ll +++ b/llvm/test/CodeGen/Thumb2/mve-pred-spill.ll @@ -2,10 +2,67 @@ ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-LE ; 
RUN: llc -mtriple=thumbebv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-BE +declare arm_aapcs_vfpcc <2 x i64> @ext_i64(<2 x i64> %c) declare arm_aapcs_vfpcc <4 x i32> @ext_i32(<4 x i32> %c) declare arm_aapcs_vfpcc <8 x i16> @ext_i16(<8 x i16> %c) declare arm_aapcs_vfpcc <16 x i8> @ext_i8(<16 x i8> %c) +define arm_aapcs_vfpcc <2 x i64> @shuffle1_v2i64(<2 x i64> %src, <2 x i64> %a) { +; CHECK-LE-LABEL: shuffle1_v2i64: +; CHECK-LE: @ %bb.0: @ %entry +; CHECK-LE-NEXT: .save {r7, lr} +; CHECK-LE-NEXT: push {r7, lr} +; CHECK-LE-NEXT: .vsave {d8, d9, d10, d11} +; CHECK-LE-NEXT: vpush {d8, d9, d10, d11} +; CHECK-LE-NEXT: vmov r0, r1, d1 +; CHECK-LE-NEXT: orrs r0, r1 +; CHECK-LE-NEXT: vmov r1, r2, d0 +; CHECK-LE-NEXT: csetm r0, eq +; CHECK-LE-NEXT: orrs r1, r2 +; CHECK-LE-NEXT: csetm r1, eq +; CHECK-LE-NEXT: vmov q5[2], q5[0], r1, r0 +; CHECK-LE-NEXT: vmov q5[3], q5[1], r1, r0 +; CHECK-LE-NEXT: vand q4, q1, q5 +; CHECK-LE-NEXT: vmov q0, q4 +; CHECK-LE-NEXT: bl ext_i64 +; CHECK-LE-NEXT: vbic q0, q0, q5 +; CHECK-LE-NEXT: vorr q0, q4, q0 +; CHECK-LE-NEXT: vpop {d8, d9, d10, d11} +; CHECK-LE-NEXT: pop {r7, pc} +; +; CHECK-BE-LABEL: shuffle1_v2i64: +; CHECK-BE: @ %bb.0: @ %entry +; CHECK-BE-NEXT: .save {r7, lr} +; CHECK-BE-NEXT: push {r7, lr} +; CHECK-BE-NEXT: .vsave {d8, d9, d10, d11} +; CHECK-BE-NEXT: vpush {d8, d9, d10, d11} +; CHECK-BE-NEXT: vrev64.32 q2, q0 +; CHECK-BE-NEXT: vmov r0, r1, d5 +; CHECK-BE-NEXT: orrs r0, r1 +; CHECK-BE-NEXT: vmov r1, r2, d4 +; CHECK-BE-NEXT: csetm r0, eq +; CHECK-BE-NEXT: orrs r1, r2 +; CHECK-BE-NEXT: csetm r1, eq +; CHECK-BE-NEXT: vmov q0[2], q0[0], r1, r0 +; CHECK-BE-NEXT: vmov q0[3], q0[1], r1, r0 +; CHECK-BE-NEXT: vrev64.32 q2, q0 +; CHECK-BE-NEXT: vmov.i8 q0, #0xff +; CHECK-BE-NEXT: vand q4, q1, q2 +; CHECK-BE-NEXT: veor q5, q2, q0 +; CHECK-BE-NEXT: vmov q0, q4 +; CHECK-BE-NEXT: bl ext_i64 +; CHECK-BE-NEXT: vand q0, q0, q5 +; CHECK-BE-NEXT: vorr q0, q4, q0 +; CHECK-BE-NEXT: vpop {d8, 
d9, d10, d11} +; CHECK-BE-NEXT: pop {r7, pc} +entry: + %c = icmp eq <2 x i64> %src, zeroinitializer + %s1 = select <2 x i1> %c, <2 x i64> %a, <2 x i64> zeroinitializer + %ext = call arm_aapcs_vfpcc <2 x i64> @ext_i64(<2 x i64> %s1) + %s = select <2 x i1> %c, <2 x i64> %a, <2 x i64> %ext + ret <2 x i64> %s +} + define arm_aapcs_vfpcc <4 x i32> @shuffle1_v4i32(<4 x i32> %src, <4 x i32> %a) { ; CHECK-LE-LABEL: shuffle1_v4i32: ; CHECK-LE: @ %bb.0: @ %entry diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-vselect.ll b/llvm/test/CodeGen/Thumb2/mve-pred-vselect.ll --- a/llvm/test/CodeGen/Thumb2/mve-pred-vselect.ll +++ b/llvm/test/CodeGen/Thumb2/mve-pred-vselect.ll @@ -75,38 +75,26 @@ ; CHECK-NEXT: vmov r0, r1, d5 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d4 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 ; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vmov r0, r1, d3 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d2 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q3[2], q3[0], r1, r0 ; CHECK-NEXT: vmov q3[3], q3[1], r1, r0 ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: vbic q3, q3, q2 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q4[2], q4[0], r1, r0 ; CHECK-NEXT: vmov q4[3], q4[1], r1, r0 ; CHECK-NEXT: vand q2, q4, q2 @@ -199,24 +187,16 @@ ; 
CHECK-NEXT: vmov r0, r1, d5 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d4 -; CHECK-NEXT: cset r0, ne -; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, ne -; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 ; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vmov r0, r1, d3 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d2 -; CHECK-NEXT: cset r0, ne -; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, ne -; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q3[2], q3[0], r1, r0 ; CHECK-NEXT: vmov q3[3], q3[1], r1, r0 @@ -224,12 +204,8 @@ ; CHECK-NEXT: vbic q3, q3, q2 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d0 -; CHECK-NEXT: cset r0, ne -; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, ne -; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q4[2], q4[0], r1, r0 ; CHECK-NEXT: vmov q4[3], q4[1], r1, r0 @@ -431,33 +407,25 @@ ; CHECK-NEXT: push {r4, lr} ; CHECK-NEXT: vmov r2, r3, d3 ; CHECK-NEXT: orrs r2, r3 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: vmov r2, r3, d2 -; CHECK-NEXT: csetm r12, ne +; CHECK-NEXT: csetm lr, eq ; CHECK-NEXT: orrs r2, r3 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: vmov r2, r3, d1 -; CHECK-NEXT: csetm r4, ne +; CHECK-NEXT: csetm r4, eq ; CHECK-NEXT: orrs r2, r3 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: vmov r2, r3, d0 -; CHECK-NEXT: csetm lr, ne +; CHECK-NEXT: csetm r12, eq ; CHECK-NEXT: orrs r2, r3 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: beq .LBB15_2 ; CHECK-NEXT: @ %bb.1: @ %select.false -; CHECK-NEXT: vmov q2[2], q2[0], r4, r12 -; CHECK-NEXT: vmov q2[3], q2[1], r4, r12 +; CHECK-NEXT: vmov q2[2], q2[0], 
r4, lr +; CHECK-NEXT: vmov q2[3], q2[1], r4, lr ; CHECK-NEXT: b .LBB15_3 ; CHECK-NEXT: .LBB15_2: -; CHECK-NEXT: vmov q2[2], q2[0], r2, lr -; CHECK-NEXT: vmov q2[3], q2[1], r2, lr +; CHECK-NEXT: vmov q2[2], q2[0], r2, r12 +; CHECK-NEXT: vmov q2[3], q2[1], r2, r12 ; CHECK-NEXT: .LBB15_3: @ %select.end ; CHECK-NEXT: vmov r0, s10 ; CHECK-NEXT: vmov r1, s8 diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-xor.ll b/llvm/test/CodeGen/Thumb2/mve-pred-xor.ll --- a/llvm/test/CodeGen/Thumb2/mve-pred-xor.ll +++ b/llvm/test/CodeGen/Thumb2/mve-pred-xor.ll @@ -460,25 +460,17 @@ ; CHECK-NEXT: vmov r0, r1, d3 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d2 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 ; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q3[2], q3[0], r1, r0 ; CHECK-NEXT: vmov q3[3], q3[1], r1, r0 ; CHECK-NEXT: veor q2, q3, q2 @@ -504,27 +496,19 @@ ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r12, r2, d4 ; CHECK-NEXT: vmov r3, r1, d2 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: eors r1, r2 ; CHECK-NEXT: eor.w r2, r3, r12 ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 ; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d0 -; 
CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q3[2], q3[0], r1, r0 ; CHECK-NEXT: vmov q3[3], q3[1], r1, r0 ; CHECK-NEXT: veor q2, q3, q2 diff --git a/llvm/test/CodeGen/Thumb2/mve-satmul-loops.ll b/llvm/test/CodeGen/Thumb2/mve-satmul-loops.ll --- a/llvm/test/CodeGen/Thumb2/mve-satmul-loops.ll +++ b/llvm/test/CodeGen/Thumb2/mve-satmul-loops.ll @@ -46,21 +46,17 @@ ; CHECK-NEXT: smull r6, r5, r8, r5 ; CHECK-NEXT: rsbs.w r9, r4, #-2147483648 ; CHECK-NEXT: sbcs r3, r7 -; CHECK-NEXT: mov.w r3, #0 +; CHECK-NEXT: cset r3, lt ; CHECK-NEXT: asrl r6, r5, #31 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 ; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: vmov q4[2], q4[0], r6, r4 ; CHECK-NEXT: csetm r9, ne ; CHECK-NEXT: rsbs.w r3, r6, #-2147483648 ; CHECK-NEXT: mov.w r3, #-1 -; CHECK-NEXT: vmov q4[2], q4[0], r6, r4 -; CHECK-NEXT: sbcs r3, r5 ; CHECK-NEXT: vmov q4[3], q4[1], r5, r7 -; CHECK-NEXT: mov.w r3, #0 +; CHECK-NEXT: sbcs r3, r5 ; CHECK-NEXT: mvn r6, #-2147483648 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 +; CHECK-NEXT: cset r3, lt ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csetm r3, ne ; CHECK-NEXT: vmov q2[2], q2[0], r3, r9 @@ -72,17 +68,13 @@ ; CHECK-NEXT: subs r3, r3, r6 ; CHECK-NEXT: sbcs r3, r4, #0 ; CHECK-NEXT: vmov r4, r5, d5 -; CHECK-NEXT: mov.w r3, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 +; CHECK-NEXT: cset r3, lt ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csetm r3, ne ; CHECK-NEXT: vmov.32 q3[1], r3 ; CHECK-NEXT: subs r4, r4, r6 ; CHECK-NEXT: sbcs r4, r5, #0 -; CHECK-NEXT: mov.w r4, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r4, #1 +; CHECK-NEXT: cset r4, lt ; CHECK-NEXT: cmp r4, #0 ; CHECK-NEXT: csetm r4, ne ; CHECK-NEXT: vmov q3[2], q3[0], r3, r4 @@ -110,9 +102,7 @@ ; CHECK-NEXT: asrl r4, r3, #31 ; CHECK-NEXT: subs r5, r1, r4 ; CHECK-NEXT: 
sbcs.w r5, r0, r3 -; CHECK-NEXT: mov.w r5, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r5, #1 +; CHECK-NEXT: cset r5, lt ; CHECK-NEXT: cmp r5, #0 ; CHECK-NEXT: csel r4, r4, r1, ne ; CHECK-NEXT: csel r3, r3, r0, ne @@ -223,10 +213,10 @@ ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: beq.w .LBB1_8 ; CHECK-NEXT: @ %bb.1: @ %for.body.preheader -; CHECK-NEXT: movs r7, #0 ; CHECK-NEXT: cmp r3, #3 ; CHECK-NEXT: bhi .LBB1_3 ; CHECK-NEXT: @ %bb.2: +; CHECK-NEXT: movs r7, #0 ; CHECK-NEXT: mov r12, r0 ; CHECK-NEXT: mov r9, r1 ; CHECK-NEXT: mov r11, r2 @@ -263,20 +253,16 @@ ; CHECK-NEXT: vmov r6, s12 ; CHECK-NEXT: rsbs.w r5, r4, #-2147483648 ; CHECK-NEXT: sbcs.w r5, r2, r7 -; CHECK-NEXT: mov.w r5, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r5, #1 +; CHECK-NEXT: cset r5, lt ; CHECK-NEXT: cmp r5, #0 ; CHECK-NEXT: vmov r10, r5, d12 -; CHECK-NEXT: csetm r8, ne ; CHECK-NEXT: asrl r10, r5, #31 +; CHECK-NEXT: csetm r8, ne ; CHECK-NEXT: rsbs.w r3, r10, #-2147483648 ; CHECK-NEXT: vmov q6[2], q6[0], r10, r4 ; CHECK-NEXT: sbcs.w r3, r2, r5 ; CHECK-NEXT: vmov q6[3], q6[1], r5, r7 -; CHECK-NEXT: mov.w r3, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 +; CHECK-NEXT: cset r3, lt ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csetm r3, ne ; CHECK-NEXT: vmov q4[2], q4[0], r3, r8 @@ -289,18 +275,14 @@ ; CHECK-NEXT: subs.w r3, r3, r8 ; CHECK-NEXT: sbcs r3, r4, #0 ; CHECK-NEXT: vmov r4, r5, d9 -; CHECK-NEXT: mov.w r3, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 +; CHECK-NEXT: cset r3, lt ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csetm r3, ne ; CHECK-NEXT: vmov.32 q5[1], r3 ; CHECK-NEXT: subs.w r4, r4, r8 ; CHECK-NEXT: sbcs r4, r5, #0 ; CHECK-NEXT: vmov r5, s8 -; CHECK-NEXT: mov.w r4, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r4, #1 +; CHECK-NEXT: cset r4, lt ; CHECK-NEXT: cmp r4, #0 ; CHECK-NEXT: csetm r4, ne ; CHECK-NEXT: vmov q5[2], q5[0], r3, r4 @@ -318,16 +300,12 @@ ; CHECK-NEXT: vmov q5[2], q5[0], r6, r4 ; CHECK-NEXT: sbcs.w r3, r2, r7 ; CHECK-NEXT: vmov q5[3], q5[1], r5, r7 -; CHECK-NEXT: 
mov.w r3, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 +; CHECK-NEXT: cset r3, lt ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csetm r3, ne ; CHECK-NEXT: rsbs.w r1, r6, #-2147483648 ; CHECK-NEXT: sbcs.w r1, r2, r5 -; CHECK-NEXT: mov.w r1, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r1, #1 +; CHECK-NEXT: cset r1, lt ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q2[2], q2[0], r1, r3 @@ -339,18 +317,14 @@ ; CHECK-NEXT: vmov r4, r3, d4 ; CHECK-NEXT: subs.w r4, r4, r8 ; CHECK-NEXT: sbcs r3, r3, #0 -; CHECK-NEXT: mov.w r3, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 +; CHECK-NEXT: cset r3, lt ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: vmov r3, r4, d5 ; CHECK-NEXT: csetm r5, ne ; CHECK-NEXT: vmov.32 q3[1], r5 ; CHECK-NEXT: subs.w r3, r3, r8 ; CHECK-NEXT: sbcs r3, r4, #0 -; CHECK-NEXT: mov.w r3, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 +; CHECK-NEXT: cset r3, lt ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csetm r3, ne ; CHECK-NEXT: vmov q3[2], q3[0], r5, r3 @@ -379,9 +353,7 @@ ; CHECK-NEXT: asrl r4, r1, #31 ; CHECK-NEXT: subs r5, r3, r4 ; CHECK-NEXT: sbcs.w r5, r0, r1 -; CHECK-NEXT: mov.w r5, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r5, #1 +; CHECK-NEXT: cset r5, lt ; CHECK-NEXT: cmp r5, #0 ; CHECK-NEXT: csel r4, r4, r3, ne ; CHECK-NEXT: csel r1, r1, r0, ne @@ -532,21 +504,17 @@ ; CHECK-NEXT: asrl r6, r5, #31 ; CHECK-NEXT: rsbs.w r7, r6, #-2147483648 ; CHECK-NEXT: sbcs.w r7, r12, r5 -; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cset r7, lt ; CHECK-NEXT: cmp r7, #0 ; CHECK-NEXT: vmov r4, r7, d0 -; CHECK-NEXT: csetm r10, ne ; CHECK-NEXT: asrl r4, r7, #31 +; CHECK-NEXT: csetm r10, ne ; CHECK-NEXT: rsbs.w r3, r4, #-2147483648 ; CHECK-NEXT: vmov q7[2], q7[0], r4, r6 ; CHECK-NEXT: sbcs.w r3, r12, r7 ; CHECK-NEXT: vmov q7[3], q7[1], r7, r5 -; CHECK-NEXT: mov.w r3, #0 +; CHECK-NEXT: cset r3, lt ; CHECK-NEXT: vmov r7, s20 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 ; CHECK-NEXT: cmp r3, #0 ; 
CHECK-NEXT: csetm r3, ne ; CHECK-NEXT: vmov q0[2], q0[0], r3, r10 @@ -558,17 +526,13 @@ ; CHECK-NEXT: subs.w r3, r3, r8 ; CHECK-NEXT: sbcs r3, r4, #0 ; CHECK-NEXT: vmov r4, r5, d13 -; CHECK-NEXT: mov.w r3, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 +; CHECK-NEXT: cset r3, lt ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csetm r3, ne ; CHECK-NEXT: vmov.32 q0[1], r3 ; CHECK-NEXT: subs.w r4, r4, r8 ; CHECK-NEXT: sbcs r4, r5, #0 -; CHECK-NEXT: mov.w r4, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r4, #1 +; CHECK-NEXT: cset r4, lt ; CHECK-NEXT: cmp r4, #0 ; CHECK-NEXT: csetm r4, ne ; CHECK-NEXT: vmov q0[2], q0[0], r3, r4 @@ -584,9 +548,7 @@ ; CHECK-NEXT: asrl r6, r5, #31 ; CHECK-NEXT: rsbs.w r3, r6, #-2147483648 ; CHECK-NEXT: sbcs.w r3, r12, r5 -; CHECK-NEXT: mov.w r3, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 +; CHECK-NEXT: cset r3, lt ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csetm r10, ne ; CHECK-NEXT: smull r4, r7, r7, r4 @@ -595,9 +557,7 @@ ; CHECK-NEXT: vmov q5[2], q5[0], r4, r6 ; CHECK-NEXT: sbcs.w r3, r12, r7 ; CHECK-NEXT: vmov q5[3], q5[1], r7, r5 -; CHECK-NEXT: mov.w r3, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 +; CHECK-NEXT: cset r3, lt ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csetm r3, ne ; CHECK-NEXT: vmov q0[2], q0[0], r3, r10 @@ -609,17 +569,13 @@ ; CHECK-NEXT: subs.w r3, r3, r8 ; CHECK-NEXT: sbcs r3, r4, #0 ; CHECK-NEXT: vmov r4, r5, d9 -; CHECK-NEXT: mov.w r3, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 +; CHECK-NEXT: cset r3, lt ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csetm r3, ne ; CHECK-NEXT: vmov.32 q0[1], r3 ; CHECK-NEXT: subs.w r4, r4, r8 ; CHECK-NEXT: sbcs r4, r5, #0 -; CHECK-NEXT: mov.w r4, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r4, #1 +; CHECK-NEXT: cset r4, lt ; CHECK-NEXT: cmp r4, #0 ; CHECK-NEXT: csetm r4, ne ; CHECK-NEXT: vmov q0[2], q0[0], r3, r4 @@ -733,9 +689,7 @@ ; CHECK-NEXT: subs.w r6, r4, #-1 ; CHECK-NEXT: umull r6, r7, r10, r7 ; CHECK-NEXT: sbcs r5, r5, #0 -; CHECK-NEXT: mov.w r5, #0 -; CHECK-NEXT: it 
lo -; CHECK-NEXT: movlo r5, #1 +; CHECK-NEXT: cset r5, lo ; CHECK-NEXT: cmp r5, #0 ; CHECK-NEXT: lsrl r6, r7, #31 ; CHECK-NEXT: csetm r9, ne @@ -743,9 +697,7 @@ ; CHECK-NEXT: vmov.32 q0[1], r9 ; CHECK-NEXT: sbcs r5, r7, #0 ; CHECK-NEXT: vmov q1[2], q1[0], r4, r6 -; CHECK-NEXT: mov.w r5, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r5, #1 +; CHECK-NEXT: cset r5, lo ; CHECK-NEXT: cmp r5, #0 ; CHECK-NEXT: csetm r5, ne ; CHECK-NEXT: vmov q0[2], q0[0], r9, r5 @@ -860,10 +812,10 @@ ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: beq.w .LBB4_8 ; CHECK-NEXT: @ %bb.1: @ %for.body.preheader -; CHECK-NEXT: mov.w r8, #0 ; CHECK-NEXT: cmp r3, #3 ; CHECK-NEXT: bhi .LBB4_3 ; CHECK-NEXT: @ %bb.2: +; CHECK-NEXT: mov.w r8, #0 ; CHECK-NEXT: mov r12, r0 ; CHECK-NEXT: mov r9, r1 ; CHECK-NEXT: mov r10, r2 @@ -892,19 +844,15 @@ ; CHECK-NEXT: subs.w r6, r4, #-1 ; CHECK-NEXT: sbcs r5, r5, #0 ; CHECK-NEXT: vmov r6, r7, d9 -; CHECK-NEXT: mov.w r5, #0 +; CHECK-NEXT: cset r5, lo ; CHECK-NEXT: lsrl r6, r7, #31 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r5, #1 ; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: vmov q3[2], q3[0], r4, r6 ; CHECK-NEXT: csetm r11, ne ; CHECK-NEXT: subs.w r5, r6, #-1 ; CHECK-NEXT: sbcs r5, r7, #0 ; CHECK-NEXT: vmov.32 q1[1], r11 -; CHECK-NEXT: mov.w r5, #0 -; CHECK-NEXT: vmov q3[2], q3[0], r4, r6 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r5, #1 +; CHECK-NEXT: cset r5, lo ; CHECK-NEXT: cmp r5, #0 ; CHECK-NEXT: csetm r5, ne ; CHECK-NEXT: vmov q1[2], q1[0], r11, r5 @@ -916,19 +864,15 @@ ; CHECK-NEXT: subs.w r6, r4, #-1 ; CHECK-NEXT: sbcs r5, r5, #0 ; CHECK-NEXT: vmov r6, r7, d7 -; CHECK-NEXT: mov.w r5, #0 +; CHECK-NEXT: cset r5, lo ; CHECK-NEXT: lsrl r6, r7, #31 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r5, #1 ; CHECK-NEXT: cmp r5, #0 +; CHECK-NEXT: vmov q2[2], q2[0], r4, r6 ; CHECK-NEXT: csetm r11, ne ; CHECK-NEXT: subs.w r5, r6, #-1 ; CHECK-NEXT: sbcs r5, r7, #0 ; CHECK-NEXT: vmov.32 q0[1], r11 -; CHECK-NEXT: mov.w r5, #0 -; CHECK-NEXT: vmov q2[2], q2[0], r4, r6 -; CHECK-NEXT: it lo -; 
CHECK-NEXT: movlo r5, #1 +; CHECK-NEXT: cset r5, lo ; CHECK-NEXT: cmp r5, #0 ; CHECK-NEXT: csetm r5, ne ; CHECK-NEXT: vmov q0[2], q0[0], r11, r5 diff --git a/llvm/test/CodeGen/Thumb2/mve-vcmp.ll b/llvm/test/CodeGen/Thumb2/mve-vcmp.ll --- a/llvm/test/CodeGen/Thumb2/mve-vcmp.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vcmp.ll @@ -374,13 +374,38 @@ ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r12, r2, d2 ; CHECK-NEXT: vmov r3, r1, d0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: eors r1, r2 ; CHECK-NEXT: eor.w r2, r3, r12 ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: csetm r1, eq +; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 +; CHECK-NEXT: vbic q1, q3, q0 +; CHECK-NEXT: vand q0, q2, q0 +; CHECK-NEXT: vorr q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %c = icmp eq <2 x i64> %src, %srcb + %s = select <2 x i1> %c, <2 x i64> %a, <2 x i64> %b + ret <2 x i64> %s +} + +define arm_aapcs_vfpcc <2 x i64> @vcmp_slt_v2i64(<2 x i64> %src, <2 x i64> %srcb, <2 x i64> %a, <2 x i64> %b) { +; CHECK-LABEL: vcmp_slt_v2i64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov r0, r1, d3 +; CHECK-NEXT: vmov r2, r3, d1 +; CHECK-NEXT: subs r0, r2, r0 +; CHECK-NEXT: sbcs.w r0, r3, r1 +; CHECK-NEXT: vmov r1, r12, d2 +; CHECK-NEXT: vmov r3, r2, d0 +; CHECK-NEXT: cset r0, lt +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: subs r1, r3, r1 +; CHECK-NEXT: sbcs.w r1, r2, r12 +; CHECK-NEXT: cset r1, lt ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 @@ -390,7 +415,7 @@ ; CHECK-NEXT: vorr q0, q0, q1 ; CHECK-NEXT: bx lr entry: - %c = icmp eq <2 x i64> %src, %srcb + %c = icmp slt <2 x i64> %src, %srcb %s = select <2 x i1> %c, <2 x i64> %a, <2 x i64> %b ret <2 x i64> %s } @@ -405,15 +430,11 @@ ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r12, r2, d2 ; CHECK-NEXT: vmov r3, r1, d0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: 
cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: eors r1, r2 ; CHECK-NEXT: eor.w r2, r3, r12 ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 ; CHECK-NEXT: vbic q1, q3, q0 @@ -429,64 +450,48 @@ define arm_aapcs_vfpcc <2 x i32> @vcmp_multi_v2i32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) { ; CHECK-LABEL: vcmp_multi_v2i32: ; CHECK: @ %bb.0: -; CHECK-NEXT: .save {r4, lr} -; CHECK-NEXT: push {r4, lr} +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: vmov r0, r1, d1 -; CHECK-NEXT: movs r3, #0 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 ; CHECK-NEXT: vmov r2, s10 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 ; CHECK-NEXT: vbic q0, q2, q0 ; CHECK-NEXT: vmov r0, s2 -; CHECK-NEXT: subs r1, r0, r2 -; CHECK-NEXT: asr.w r12, r0, #31 -; CHECK-NEXT: sbcs.w r1, r12, r2, asr #31 -; CHECK-NEXT: mov.w r1, #0 +; CHECK-NEXT: subs r3, r0, r2 +; CHECK-NEXT: asr.w r1, r0, #31 +; CHECK-NEXT: vmov r3, s8 +; CHECK-NEXT: sbcs.w r1, r1, r2, asr #31 ; CHECK-NEXT: vmov r2, s0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r1, #1 +; CHECK-NEXT: cset r1, lt ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: vmov r1, s8 ; CHECK-NEXT: csetm lr, ne +; CHECK-NEXT: subs r1, r2, r3 ; CHECK-NEXT: asr.w r12, r2, #31 -; CHECK-NEXT: subs r4, r2, r1 -; CHECK-NEXT: sbcs.w r1, r12, r1, asr #31 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 -; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: sbcs.w r1, r12, r3, asr #31 +; CHECK-NEXT: cset r1, lt +; CHECK-NEXT: cmp r1, #0 ; 
CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: cset r0, ne ; CHECK-NEXT: vmov q3[2], q3[0], r1, lr -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: vmov q3[3], q3[1], r1, lr ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: cset r1, ne -; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: vmov q3[3], q3[1], r1, lr ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q4[2], q4[0], r1, r0 ; CHECK-NEXT: vmov q4[3], q4[1], r1, r0 ; CHECK-NEXT: vmov r0, s6 ; CHECK-NEXT: vmov r1, s4 ; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: cset r0, ne -; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, ne -; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 @@ -496,7 +501,7 @@ ; CHECK-NEXT: vand q1, q2, q1 ; CHECK-NEXT: vorr q0, q1, q0 ; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: pop {r4, pc} +; CHECK-NEXT: pop {r7, pc} %a4 = icmp eq <2 x i64> %a, zeroinitializer %a5 = select <2 x i1> %a4, <2 x i32> zeroinitializer, <2 x i32> %c %a6 = icmp ne <2 x i32> %b, zeroinitializer diff --git a/llvm/test/CodeGen/Thumb2/mve-vcmpf.ll b/llvm/test/CodeGen/Thumb2/mve-vcmpf.ll --- a/llvm/test/CodeGen/Thumb2/mve-vcmpf.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vcmpf.ll @@ -6,40 +6,24 @@ ; CHECK-MVE-LABEL: vcmp_oeq_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: 
vcmp.f32 s2, s6 -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, eq +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -58,50 +42,34 @@ ; CHECK-MVE-LABEL: vcmp_one_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r2, #1 ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 +; CHECK-MVE-NEXT: cset r1, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: movgt r1, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r3, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: vcmp.f32 
s2, s6 -; CHECK-MVE-NEXT: cset r3, ne +; CHECK-MVE-NEXT: cset r2, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r2, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 +; CHECK-MVE-NEXT: cset r3, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: movgt r3, #1 +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -121,40 +89,24 @@ ; CHECK-MVE-LABEL: vcmp_ogt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, gt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, gt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, gt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, gt +; CHECK-MVE-NEXT: cmp r2, #0 
; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -173,40 +125,24 @@ ; CHECK-MVE-LABEL: vcmp_oge_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, ge ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, ge ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, ge ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, ge +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -225,40 +161,24 @@ ; CHECK-MVE-LABEL: vcmp_olt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; 
CHECK-MVE-NEXT: vcmp.f32 s1, s5 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, mi +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -277,40 +197,24 @@ ; CHECK-MVE-LABEL: vcmp_ole_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, ls ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r2, #1 -; 
CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, ls ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, ls ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, ls +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -329,50 +233,34 @@ ; CHECK-MVE-LABEL: vcmp_ueq_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r2, #1 ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 +; CHECK-MVE-NEXT: cset r1, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: movvs r1, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq 
-; CHECK-MVE-NEXT: moveq r3, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 -; CHECK-MVE-NEXT: cset r3, ne +; CHECK-MVE-NEXT: cset r2, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r2, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 +; CHECK-MVE-NEXT: cset r3, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: movvs r3, #1 +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -392,40 +280,24 @@ ; CHECK-MVE-LABEL: vcmp_une_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; 
CHECK-MVE-NEXT: movne r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, ne +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -444,40 +316,24 @@ ; CHECK-MVE-LABEL: vcmp_ugt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, hi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, hi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, hi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, hi +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; 
CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -496,40 +352,24 @@ ; CHECK-MVE-LABEL: vcmp_uge_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, pl ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, pl ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, pl ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, pl +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -548,40 +388,24 @@ ; CHECK-MVE-LABEL: vcmp_ult_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: 
cset r0, lt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, lt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, lt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, lt +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -600,40 +424,24 @@ ; CHECK-MVE-LABEL: vcmp_ule_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, le ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, le ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 
-; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, le ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, le +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -652,40 +460,24 @@ ; CHECK-MVE-LABEL: vcmp_ord_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, vc ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, vc ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, vc ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, vc +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: 
vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -705,40 +497,24 @@ ; CHECK-MVE-LABEL: vcmp_uno_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s5 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, vs ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s7 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, vs ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s6 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, vs ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, vs +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -764,92 +540,60 @@ ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; 
CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s16, s8 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 +; CHECK-MVE-NEXT: vmovx.f16 s4, s5 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s4, s5 -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s16 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 -; CHECK-MVE-NEXT: vins.f16 s0, s16 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; 
CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s6, s3 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s6, s3 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -875,109 +619,77 @@ ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cmp r1, #0 -; 
CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s4, s5 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: vmovx.f16 s4, s5 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s16 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 -; CHECK-MVE-NEXT: vins.f16 s0, s16 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; 
CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s6, s3 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s6, s3 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: it gt ; CHECK-MVE-NEXT: movgt r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; 
CHECK-MVE-NEXT: vins.f16 s3, s4 ; CHECK-MVE-NEXT: vpop {d8, d9} @@ -1003,92 +715,60 @@ ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s16, s8 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 +; CHECK-MVE-NEXT: vmovx.f16 s4, s5 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s4, s5 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s16 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, 
s13, s9 -; CHECK-MVE-NEXT: vins.f16 s0, s16 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s6, s3 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s6, s3 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, gt ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -1114,92 +794,60 @@ ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr 
-; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s16, s8 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 +; CHECK-MVE-NEXT: vmovx.f16 s4, s5 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s4, s5 -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s16 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 -; CHECK-MVE-NEXT: vins.f16 s0, s16 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp 
r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s6, s3 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s6, s3 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, ge ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -1225,92 +873,60 @@ ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s16, s8 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 +; CHECK-MVE-NEXT: vmovx.f16 s4, s5 +; 
CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s4, s5 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s16 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 -; CHECK-MVE-NEXT: vins.f16 s0, s16 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s6, s3 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: 
vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s6, s3 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -1336,92 +952,60 @@ ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s16, s8 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 +; CHECK-MVE-NEXT: vmovx.f16 s4, s5 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s4, s5 -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, 
#0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s16 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 -; CHECK-MVE-NEXT: vins.f16 s0, s16 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s6, s3 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s6, s3 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset 
r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, ls ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -1447,109 +1031,77 @@ ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s4, s5 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: vmovx.f16 s4, s5 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; 
CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s16 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 -; CHECK-MVE-NEXT: vins.f16 s0, s16 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s6, s3 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, 
fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s6, s3 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: it vs ; CHECK-MVE-NEXT: movvs r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s4 ; CHECK-MVE-NEXT: vpop {d8, d9} @@ -1575,91 +1127,59 @@ ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s16, s8 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 +; CHECK-MVE-NEXT: vmovx.f16 s4, s5 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 
; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s4, s5 -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s16 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 -; CHECK-MVE-NEXT: vins.f16 s0, s16 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s6, s3 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; 
CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s6, s3 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 @@ -1686,92 +1206,60 @@ ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s16, s8 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 +; CHECK-MVE-NEXT: vmovx.f16 s4, s5 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s4, s5 -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; 
CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s16 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 -; CHECK-MVE-NEXT: vins.f16 s0, s16 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s6, s3 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s6, s3 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: 
vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, hi ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -1797,92 +1285,60 @@ ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s16, s8 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 +; CHECK-MVE-NEXT: vmovx.f16 s4, s5 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s4, s5 -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 
-; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s16 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 -; CHECK-MVE-NEXT: vins.f16 s0, s16 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s6, s3 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s6, s3 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: 
vcmp.f16 s3, s7 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, pl ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -1908,92 +1364,60 @@ ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s16, s8 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 +; CHECK-MVE-NEXT: vmovx.f16 s4, s5 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s4, s5 -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s16 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; 
CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 -; CHECK-MVE-NEXT: vins.f16 s0, s16 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s6, s3 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s6, s3 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: 
cset r0, ne +; CHECK-MVE-NEXT: cset r0, lt ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -2019,92 +1443,60 @@ ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s16, s8 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 +; CHECK-MVE-NEXT: vmovx.f16 s4, s5 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s4, s5 -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s16 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; 
CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 -; CHECK-MVE-NEXT: vins.f16 s0, s16 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s6, s3 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s6, s3 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, le ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -2130,92 +1522,60 @@ ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 -; 
CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s16, s8 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 +; CHECK-MVE-NEXT: vmovx.f16 s4, s5 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s4, s5 -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s16 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 -; CHECK-MVE-NEXT: vins.f16 s0, s16 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 ; 
CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s6, s3 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s6, s3 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, vc ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -2242,92 +1602,60 @@ ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 ; CHECK-MVE-NEXT: vcmp.f16 s18, s16 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 
s16, s8 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 +; CHECK-MVE-NEXT: vmovx.f16 s4, s5 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s4, s5 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s1 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, s5 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s16 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s2 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 -; CHECK-MVE-NEXT: vins.f16 s0, s16 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vcmp.f16 s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s6 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; 
CHECK-MVE-NEXT: vmovx.f16 s6, s3 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s6, s3 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s6, s15 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s3, s7 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, vs ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s4 diff --git a/llvm/test/CodeGen/Thumb2/mve-vcmpfr.ll b/llvm/test/CodeGen/Thumb2/mve-vcmpfr.ll --- a/llvm/test/CodeGen/Thumb2/mve-vcmpfr.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vcmpfr.ll @@ -6,40 +6,24 @@ ; CHECK-MVE-LABEL: vcmp_oeq_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s4 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s4 -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r2, 
#1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s4 -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, eq +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -61,50 +45,34 @@ ; CHECK-MVE-LABEL: vcmp_one_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s4 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f32 s1, s4 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r2, #1 ; CHECK-MVE-NEXT: vcmp.f32 s3, s4 +; CHECK-MVE-NEXT: cset r1, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: movgt r1, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: 
it mi -; CHECK-MVE-NEXT: movmi r3, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s4 -; CHECK-MVE-NEXT: cset r3, ne +; CHECK-MVE-NEXT: cset r2, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r2, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 +; CHECK-MVE-NEXT: cset r3, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: movgt r3, #1 +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -127,40 +95,24 @@ ; CHECK-MVE-LABEL: vcmp_ogt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s4 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, gt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s4 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, gt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s4 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, gt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt 
-; CHECK-MVE-NEXT: movgt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, gt +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -182,40 +134,24 @@ ; CHECK-MVE-LABEL: vcmp_oge_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s4 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, ge ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s4 -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, ge ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s4 -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, ge ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, ge +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; 
CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -237,40 +173,24 @@ ; CHECK-MVE-LABEL: vcmp_olt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s4 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s4 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s4 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, mi +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -292,40 +212,24 @@ ; CHECK-MVE-LABEL: vcmp_ole_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s4 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: 
cset r0, ls ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s4 -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, ls ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s4 -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, ls ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, ls +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -347,50 +251,34 @@ ; CHECK-MVE-LABEL: vcmp_ueq_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s4 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f32 s1, s4 -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r2, #1 ; CHECK-MVE-NEXT: vcmp.f32 s3, s4 +; CHECK-MVE-NEXT: cset r1, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r2, #1 -; 
CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: movvs r1, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r3, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s4 -; CHECK-MVE-NEXT: cset r3, ne +; CHECK-MVE-NEXT: cset r2, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r2, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 +; CHECK-MVE-NEXT: cset r3, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: movvs r3, #1 +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -413,40 +301,24 @@ ; CHECK-MVE-LABEL: vcmp_une_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s4 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s4 -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s4 -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r3, #1 -; 
CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, ne +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -468,40 +340,24 @@ ; CHECK-MVE-LABEL: vcmp_ugt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s4 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, hi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s4 -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, hi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s4 -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, hi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, hi +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: 
cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -523,40 +379,24 @@ ; CHECK-MVE-LABEL: vcmp_uge_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s4 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, pl ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s4 -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, pl ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s4 -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, pl ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, pl +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -578,40 +418,24 @@ ; CHECK-MVE-LABEL: vcmp_ult_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s4 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs 
APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, lt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s4 -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, lt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s4 -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, lt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, lt +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -633,40 +457,24 @@ ; CHECK-MVE-LABEL: vcmp_ule_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s4 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, le ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s4 -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, le ; 
CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s4 -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, le ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, le +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -688,40 +496,24 @@ ; CHECK-MVE-LABEL: vcmp_ord_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s4 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, vc ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s4 -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, vc ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s4 -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, vc ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: 
cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, vc +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -744,40 +536,24 @@ ; CHECK-MVE-LABEL: vcmp_uno_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s4 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s4 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, vs ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s4 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, vs ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s4 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, vs ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, vs +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -802,27 
+578,17 @@ ; CHECK-MVE-LABEL: vcmp_oeq_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -830,62 +596,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 
s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -909,31 +653,21 @@ ; CHECK-MVE-LABEL: vcmp_one_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; 
CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -941,75 +675,53 @@ ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, s4 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi 
r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s2, s4 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s3, s4 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: it gt ; CHECK-MVE-NEXT: movgt r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 ; CHECK-MVE-NEXT: bx lr @@ -1033,27 +745,17 @@ ; CHECK-MVE-LABEL: vcmp_ogt_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s6, 
s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -1061,62 +763,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s4 -; CHECK-MVE-NEXT: cset r1, ne -; 
CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, gt ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -1140,27 +820,17 @@ ; CHECK-MVE-LABEL: vcmp_oge_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; 
CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -1168,62 +838,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 
s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, ge ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -1247,27 +895,17 @@ ; CHECK-MVE-LABEL: vcmp_olt_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -1275,62 +913,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it 
mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, 
fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -1354,27 +970,17 @@ ; CHECK-MVE-LABEL: vcmp_ole_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -1382,62 +988,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp 
r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, ls ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -1461,31 +1045,21 @@ ; CHECK-MVE-LABEL: vcmp_ueq_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: 
vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -1493,75 +1067,53 @@ ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, s4 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, 
#0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s2, s4 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s3, s4 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: it vs ; CHECK-MVE-NEXT: movvs r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; 
CHECK-MVE-NEXT: vins.f16 s3, s6 ; CHECK-MVE-NEXT: bx lr @@ -1585,27 +1137,17 @@ ; CHECK-MVE-LABEL: vcmp_une_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -1613,61 +1155,39 @@ ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: 
vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 @@ -1692,27 +1212,17 @@ ; CHECK-MVE-LABEL: vcmp_ugt_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; 
CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -1720,62 +1230,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: 
cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, hi ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -1799,27 +1287,17 @@ ; CHECK-MVE-LABEL: vcmp_uge_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, 
s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -1827,62 +1305,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, 
#0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, pl ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -1906,27 +1362,17 @@ ; CHECK-MVE-LABEL: vcmp_ult_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -1934,62 +1380,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: 
vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, lt ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ 
-2013,27 +1437,17 @@ ; CHECK-MVE-LABEL: vcmp_ule_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -2041,62 +1455,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; 
CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, le ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -2120,27 +1512,17 @@ ; CHECK-MVE-LABEL: vcmp_ord_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; 
CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -2148,62 +1530,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: 
cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, vc ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -2228,27 +1588,17 @@ ; CHECK-MVE-LABEL: vcmp_uno_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, 
s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -2256,62 +1606,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, 
#0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, vs ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -2339,40 +1667,24 @@ ; CHECK-MVE-LABEL: vcmp_r_oeq_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s4, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s3 -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s2 -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, eq +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 
s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -2394,50 +1706,34 @@ ; CHECK-MVE-LABEL: vcmp_r_one_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s4, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f32 s4, s1 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s0 -; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r2, #1 ; CHECK-MVE-NEXT: vcmp.f32 s4, s3 +; CHECK-MVE-NEXT: cset r1, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: movgt r1, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r3, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s2 -; CHECK-MVE-NEXT: cset r3, ne +; CHECK-MVE-NEXT: cset r2, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r2, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 +; CHECK-MVE-NEXT: cset r3, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: movgt r3, #1 +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; 
CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -2460,40 +1756,24 @@ ; CHECK-MVE-LABEL: vcmp_r_ogt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s4, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, gt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s3 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, gt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s2 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, gt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, gt +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -2515,40 +1795,24 @@ ; CHECK-MVE-LABEL: vcmp_r_oge_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s4, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, ge ; 
CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s3 -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, ge ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s2 -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, ge ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, ge +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -2570,40 +1834,24 @@ ; CHECK-MVE-LABEL: vcmp_r_olt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s4, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s3 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s2 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; 
CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, mi +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -2625,40 +1873,24 @@ ; CHECK-MVE-LABEL: vcmp_r_ole_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s4, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, ls ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s3 -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, ls ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s2 -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, ls ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, ls +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; 
CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -2680,50 +1912,34 @@ ; CHECK-MVE-LABEL: vcmp_r_ueq_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s4, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f32 s4, s1 -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s0 -; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r2, #1 ; CHECK-MVE-NEXT: vcmp.f32 s4, s3 +; CHECK-MVE-NEXT: cset r1, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: movvs r1, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r3, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s2 -; CHECK-MVE-NEXT: cset r3, ne +; CHECK-MVE-NEXT: cset r2, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r2, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 +; CHECK-MVE-NEXT: cset r3, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: movvs r3, #1 +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; 
CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -2746,40 +1962,24 @@ ; CHECK-MVE-LABEL: vcmp_r_une_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s4, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s3 -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s2 -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, ne +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -2801,40 +2001,24 @@ ; CHECK-MVE-LABEL: vcmp_r_ugt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s4, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; 
CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, hi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s3 -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, hi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s2 -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, hi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, hi +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -2856,40 +2040,24 @@ ; CHECK-MVE-LABEL: vcmp_r_uge_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s4, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, pl ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s3 -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; 
CHECK-MVE-NEXT: cset r1, pl ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s2 -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, pl ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, pl +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -2911,40 +2079,24 @@ ; CHECK-MVE-LABEL: vcmp_r_ult_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s4, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, lt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s3 -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, lt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s2 -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, lt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; 
CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, lt +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -2966,40 +2118,24 @@ ; CHECK-MVE-LABEL: vcmp_r_ule_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s4, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, le ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s3 -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, le ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s2 -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, le ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, le +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, 
s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -3021,40 +2157,24 @@ ; CHECK-MVE-LABEL: vcmp_r_ord_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s4, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, vc ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s3 -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, vc ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s2 -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, vc ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, vc +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -3077,40 +2197,24 @@ ; CHECK-MVE-LABEL: vcmp_r_uno_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s4, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, vs ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; 
CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s3 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, vs ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s4, s2 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, vs ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, vs +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s15, s11 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s14, s10 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s13, s9 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s12, s8 ; CHECK-MVE-NEXT: bx lr ; @@ -3135,27 +2239,17 @@ ; CHECK-MVE-LABEL: vcmp_r_oeq_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s4, s0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; 
CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -3163,62 +2257,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s1 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s2 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 
; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s3 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -3242,31 +2314,21 @@ ; CHECK-MVE-LABEL: vcmp_r_one_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -3274,75 +2336,53 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; 
CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s4, s1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s4, s2 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, 
s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s4, s3 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: it gt ; CHECK-MVE-NEXT: movgt r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 ; CHECK-MVE-NEXT: bx lr @@ -3366,27 +2406,17 @@ ; CHECK-MVE-LABEL: vcmp_r_ogt_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s4, s0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ 
-3394,62 +2424,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s1 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s2 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s3 -; CHECK-MVE-NEXT: cset r1, ne -; 
CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, gt ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -3473,27 +2481,17 @@ ; CHECK-MVE-LABEL: vcmp_r_oge_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s4, s0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -3501,62 +2499,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s1 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; 
CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s2 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s3 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, ge ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -3580,27 +2556,17 @@ ; CHECK-MVE-LABEL: vcmp_r_olt_v8f16: ; CHECK-MVE: @ 
%bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s4, s0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -3608,62 +2574,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s1 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; 
CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s2 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s3 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -3687,27 +2631,17 @@ ; CHECK-MVE-LABEL: vcmp_r_ole_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s4, s0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; 
CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -3715,62 +2649,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s1 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s2 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: 
vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s3 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, ls ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -3794,31 +2706,21 @@ ; CHECK-MVE-LABEL: vcmp_r_ueq_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; 
CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -3826,75 +2728,53 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s4, s1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s4, s2 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 +; CHECK-MVE-NEXT: 
cset r0, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s4, s3 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: it vs ; CHECK-MVE-NEXT: movvs r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 ; CHECK-MVE-NEXT: bx lr @@ -3918,27 +2798,17 @@ ; CHECK-MVE-LABEL: vcmp_r_une_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s4, s0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; 
CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -3946,61 +2816,39 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s1 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s2 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: 
vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s3 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 @@ -4025,27 +2873,17 @@ ; CHECK-MVE-LABEL: vcmp_r_ugt_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s4, s0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -4053,62 +2891,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 
s4, s1 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s2 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s3 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r0, #1 -; CHECK-MVE-NEXT: cmp r0, 
#0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, hi ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -4132,27 +2948,17 @@ ; CHECK-MVE-LABEL: vcmp_r_uge_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s4, s0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -4160,62 +2966,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s1 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; 
CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s2 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s3 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, pl ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -4239,27 +3023,17 @@ ; CHECK-MVE-LABEL: vcmp_r_ult_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: 
it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s4, s0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -4267,62 +3041,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s1 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s2 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; 
CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s3 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, lt ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -4346,27 +3098,17 @@ ; CHECK-MVE-LABEL: vcmp_r_ule_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s4, s0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; 
CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -4374,62 +3116,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s1 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s2 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; 
CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s3 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, le ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -4453,27 +3173,17 @@ ; CHECK-MVE-LABEL: vcmp_r_ord_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s4, s0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -4481,62 +3191,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; 
CHECK-MVE-NEXT: vcmp.f16 s4, s1 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s2 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s3 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r0, #1 -; 
CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, vc ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -4561,27 +3249,17 @@ ; CHECK-MVE-LABEL: vcmp_r_uno_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s4, s0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -4589,62 +3267,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s1 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: 
cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s2 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s4, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s4, s3 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, vs ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 @@ -4671,27 +3327,17 @@ ; CHECK-MVE-LABEL: vcmp_oeq_v8f16_bc: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s6, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s5, s12 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s8 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; 
CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s5, s12 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s4 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s5, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s12, s8 ; CHECK-MVE-NEXT: vmovx.f16 s8, s13 ; CHECK-MVE-NEXT: vins.f16 s0, s6 @@ -4699,62 +3345,40 @@ ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s9 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s14 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s13, s9 ; CHECK-MVE-NEXT: vins.f16 s1, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s2 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s10 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, 
eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s15 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s14, s10 ; CHECK-MVE-NEXT: vins.f16 s2, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s3 ; CHECK-MVE-NEXT: vcmp.f16 s6, s4 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s4 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s6, s8, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s15, s11 ; CHECK-MVE-NEXT: vins.f16 s3, s6 diff --git a/llvm/test/CodeGen/Thumb2/mve-vcmpfz.ll b/llvm/test/CodeGen/Thumb2/mve-vcmpfz.ll --- a/llvm/test/CodeGen/Thumb2/mve-vcmpfz.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vcmpfz.ll @@ -6,40 +6,24 @@ ; CHECK-MVE-LABEL: vcmp_oeq_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, eq ; 
CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, eq +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -58,50 +42,34 @@ ; CHECK-MVE-LABEL: vcmp_one_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r2, #1 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 +; CHECK-MVE-NEXT: cset r1, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: movgt r1, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r3, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r3, #1 -; 
CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: cset r3, ne +; CHECK-MVE-NEXT: cset r2, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r2, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 +; CHECK-MVE-NEXT: cset r3, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: movgt r3, #1 +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -121,40 +89,24 @@ ; CHECK-MVE-LABEL: vcmp_ogt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, gt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, gt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, gt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; 
CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, gt +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -173,40 +125,24 @@ ; CHECK-MVE-LABEL: vcmp_oge_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, ge ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, ge ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, ge ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, ge +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ 
-225,40 +161,24 @@ ; CHECK-MVE-LABEL: vcmp_olt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, mi +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -277,40 +197,24 @@ ; CHECK-MVE-LABEL: vcmp_ole_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, ls ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: 
vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, ls ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, ls ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, ls +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -329,50 +233,34 @@ ; CHECK-MVE-LABEL: vcmp_ueq_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r2, #1 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 +; CHECK-MVE-NEXT: cset r1, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: movvs 
r1, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r3, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: cset r3, ne +; CHECK-MVE-NEXT: cset r2, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r2, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 +; CHECK-MVE-NEXT: cset r3, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: movvs r3, #1 +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -392,40 +280,24 @@ ; CHECK-MVE-LABEL: vcmp_une_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, ne ; 
CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, ne +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -444,40 +316,24 @@ ; CHECK-MVE-LABEL: vcmp_ugt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, hi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, hi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, hi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, hi +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; 
CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -496,40 +352,24 @@ ; CHECK-MVE-LABEL: vcmp_uge_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, pl ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, pl ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, pl ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, pl +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -548,40 +388,24 @@ ; CHECK-MVE-LABEL: vcmp_ult_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: 
vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, lt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, lt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, lt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, lt +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -600,40 +424,24 @@ ; CHECK-MVE-LABEL: vcmp_ule_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, le ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, le ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it le 
-; CHECK-MVE-NEXT: movle r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, le ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, le +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -652,40 +460,24 @@ ; CHECK-MVE-LABEL: vcmp_ord_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, vc ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s3 -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, vc ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s2 -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, vc ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, vc +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: 
cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -705,40 +497,24 @@ ; CHECK-MVE-LABEL: vcmp_uno_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, vs ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s3 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, vs ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s2 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, vs ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, vs +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -760,71 +536,45 @@ ; CHECK-MVE-LABEL: vcmp_oeq_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; 
CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: 
moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -832,18 +582,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -864,83 +608,57 @@ ; CHECK-MVE-LABEL: vcmp_one_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: 
cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; 
CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -948,23 +666,17 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: it gt ; CHECK-MVE-NEXT: movgt r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 ; CHECK-MVE-NEXT: bx lr @@ -985,71 +697,45 @@ ; CHECK-MVE-LABEL: vcmp_ogt_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; 
CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne 
-; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -1057,18 +743,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, gt ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -1089,71 +769,45 @@ ; CHECK-MVE-LABEL: vcmp_oge_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; 
CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: 
vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -1161,18 +815,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, ge ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -1193,71 +841,45 @@ ; CHECK-MVE-LABEL: vcmp_olt_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: 
movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -1265,18 +887,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; 
CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -1297,71 +913,45 @@ ; CHECK-MVE-LABEL: vcmp_ole_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 
-; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -1369,18 +959,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, ls ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -1401,83 +985,57 @@ ; CHECK-MVE-LABEL: vcmp_ueq_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; 
CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: 
cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -1485,23 +1043,17 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: 
it vs ; CHECK-MVE-NEXT: movvs r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 ; CHECK-MVE-NEXT: bx lr @@ -1522,71 +1074,45 @@ ; CHECK-MVE-LABEL: vcmp_une_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; 
CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -1594,17 +1120,11 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 @@ -1626,71 +1146,45 @@ ; CHECK-MVE-LABEL: vcmp_ugt_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr 
-; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; 
CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -1698,18 +1192,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, hi ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -1730,71 +1218,45 @@ ; CHECK-MVE-LABEL: vcmp_uge_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; 
CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; 
CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -1802,18 +1264,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, pl ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -1834,71 +1290,45 @@ ; CHECK-MVE-LABEL: vcmp_ult_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; 
CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -1906,18 +1336,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: 
vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, lt ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -1938,71 +1362,45 @@ ; CHECK-MVE-LABEL: vcmp_ule_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; 
CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -2010,18 +1408,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, le ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -2042,71 +1434,45 @@ ; CHECK-MVE-LABEL: vcmp_ord_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; 
CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, s12 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, s1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; 
CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s2 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -2114,18 +1480,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s3 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, vc ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -2147,71 +1507,45 @@ ; CHECK-MVE-LABEL: vcmp_uno_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, s12 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, 
vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, s1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s2 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, 
ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -2219,18 +1553,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s3 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, vs ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -2255,40 +1583,24 @@ ; CHECK-MVE-LABEL: vcmp_r_oeq_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; 
CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, eq +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -2307,50 +1619,34 @@ ; CHECK-MVE-LABEL: vcmp_r_one_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r2, #1 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 +; CHECK-MVE-NEXT: cset r1, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: movgt r1, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r3, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: cset r3, ne +; CHECK-MVE-NEXT: cset r2, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r2, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 +; CHECK-MVE-NEXT: cset r3, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r0, #1 -; 
CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: movgt r3, #1 +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -2370,40 +1666,24 @@ ; CHECK-MVE-LABEL: vcmp_r_ogt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, mi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, mi +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; 
CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -2422,40 +1702,24 @@ ; CHECK-MVE-LABEL: vcmp_r_oge_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, ls ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, ls ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, ls ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, ls +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -2474,40 +1738,24 @@ ; CHECK-MVE-LABEL: vcmp_r_olt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, gt ; CHECK-MVE-NEXT: 
vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, gt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, gt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, gt +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -2526,40 +1774,24 @@ ; CHECK-MVE-LABEL: vcmp_r_ole_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, ge ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, ge ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne 
-; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, ge ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, ge +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -2578,50 +1810,34 @@ ; CHECK-MVE-LABEL: vcmp_r_ueq_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r2, #1 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 +; CHECK-MVE-NEXT: cset r1, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: movvs r1, #1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r3, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: cset r3, ne +; CHECK-MVE-NEXT: cset r2, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r2, #1 ; 
CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 +; CHECK-MVE-NEXT: cset r3, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: movvs r3, #1 +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -2641,40 +1857,24 @@ ; CHECK-MVE-LABEL: vcmp_r_une_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, ne +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: 
vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -2693,40 +1893,24 @@ ; CHECK-MVE-LABEL: vcmp_r_ugt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, lt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, lt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, lt ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, lt +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -2745,40 +1929,24 @@ ; CHECK-MVE-LABEL: vcmp_r_uge_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; 
CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, le ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, le ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, le ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, le +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -2797,40 +1965,24 @@ ; CHECK-MVE-LABEL: vcmp_r_ult_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, hi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, hi ; CHECK-MVE-NEXT: vmrs 
APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, hi ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, hi +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -2849,40 +2001,24 @@ ; CHECK-MVE-LABEL: vcmp_r_ule_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, #0 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, #0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, pl ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, #0 -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, pl ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, #0 -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, pl ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; 
CHECK-MVE-NEXT: cset r3, pl +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -2901,40 +2037,24 @@ ; CHECK-MVE-LABEL: vcmp_r_ord_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, vc ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s3 -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, vc ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s2 -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, vc ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, vc +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -2954,40 +2074,24 @@ ; 
CHECK-MVE-LABEL: vcmp_r_uno_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vcmp.f32 s1, s1 -; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f32 s0, s0 -; CHECK-MVE-NEXT: cset r1, ne +; CHECK-MVE-NEXT: cset r0, vs ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r2, #0 ; CHECK-MVE-NEXT: vcmp.f32 s3, s3 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r2, #1 -; CHECK-MVE-NEXT: cmp r2, #0 -; CHECK-MVE-NEXT: cset r2, ne +; CHECK-MVE-NEXT: cset r1, vs ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: mov.w r3, #0 ; CHECK-MVE-NEXT: vcmp.f32 s2, s2 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r3, #1 -; CHECK-MVE-NEXT: cmp r3, #0 -; CHECK-MVE-NEXT: cset r3, ne -; CHECK-MVE-NEXT: movs r0, #0 +; CHECK-MVE-NEXT: cset r2, vs ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r3, #0 +; CHECK-MVE-NEXT: cset r3, vs +; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: vseleq.f32 s3, s11, s7 -; CHECK-MVE-NEXT: cmp r0, #0 +; CHECK-MVE-NEXT: cmp r3, #0 ; CHECK-MVE-NEXT: vseleq.f32 s2, s10, s6 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f32 s1, s9, s5 -; CHECK-MVE-NEXT: cmp r2, #0 +; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vseleq.f32 s0, s8, s4 ; CHECK-MVE-NEXT: bx lr ; @@ -3009,71 +2113,45 @@ ; CHECK-MVE-LABEL: vcmp_r_oeq_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: 
vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; 
CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -3081,18 +2159,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -3113,83 +2185,57 @@ ; CHECK-MVE-LABEL: vcmp_r_one_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: it gt -; 
CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, 
#0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -3197,23 +2243,17 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: it gt +; CHECK-MVE-NEXT: movgt r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: it gt ; CHECK-MVE-NEXT: movgt r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 ; CHECK-MVE-NEXT: bx lr @@ -3234,71 +2274,45 @@ ; CHECK-MVE-LABEL: vcmp_r_ogt_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; 
CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; 
CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -3306,18 +2320,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, mi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it mi -; CHECK-MVE-NEXT: movmi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, mi ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -3338,71 +2346,45 @@ ; CHECK-MVE-LABEL: vcmp_r_oge_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; 
CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -3410,18 +2392,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r1, #1 -; 
CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, ls +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ls -; CHECK-MVE-NEXT: movls r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, ls ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -3442,71 +2418,45 @@ ; CHECK-MVE-LABEL: vcmp_r_olt_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, gt +; 
CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -3514,18 +2464,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, gt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it gt -; CHECK-MVE-NEXT: movgt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, gt ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: 
vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -3546,71 +2490,45 @@ ; CHECK-MVE-LABEL: vcmp_r_ole_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; 
CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -3618,18 +2536,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, ge +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ge -; CHECK-MVE-NEXT: movge r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, ge ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -3650,83 +2562,57 @@ ; CHECK-MVE-LABEL: vcmp_r_ueq_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: 
movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; 
CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -3734,23 +2620,17 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r1, #1 -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, eq +; CHECK-MVE-NEXT: it vs +; CHECK-MVE-NEXT: movvs r0, #1 +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it eq -; CHECK-MVE-NEXT: moveq r0, #1 +; CHECK-MVE-NEXT: cset r0, eq ; CHECK-MVE-NEXT: it vs ; CHECK-MVE-NEXT: movvs r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 ; CHECK-MVE-NEXT: bx lr @@ -3771,71 +2651,45 @@ ; CHECK-MVE-LABEL: 
vcmp_r_une_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; 
CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -3843,17 +2697,11 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it ne -; CHECK-MVE-NEXT: movne r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 @@ -3875,71 +2723,45 @@ ; CHECK-MVE-LABEL: vcmp_r_ugt_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: 
cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: 
cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -3947,18 +2769,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, lt +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it lt -; CHECK-MVE-NEXT: movlt r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, lt ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -3979,71 +2795,45 @@ ; CHECK-MVE-LABEL: vcmp_r_uge_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 
s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -4051,18 +2841,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 
; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, le +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it le -; CHECK-MVE-NEXT: movle r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, le ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -4083,71 +2867,45 @@ ; CHECK-MVE-LABEL: vcmp_r_ult_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; 
CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -4155,18 +2913,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, hi +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it hi -; CHECK-MVE-NEXT: movhi r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, hi ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; 
CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -4187,71 +2939,45 @@ ; CHECK-MVE-LABEL: vcmp_r_ule_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, 
s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -4259,18 +2985,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, #0 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, pl +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it pl -; CHECK-MVE-NEXT: movpl r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, pl ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -4291,71 +3011,45 @@ ; CHECK-MVE-LABEL: vcmp_r_ord_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, s12 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; 
CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, s1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s2 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; 
CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -4363,18 +3057,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s3 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, vc +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vc -; CHECK-MVE-NEXT: movvc r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, vc ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 @@ -4396,71 +3084,45 @@ ; CHECK-MVE-LABEL: vcmp_r_uno_v8f16: ; CHECK-MVE: @ %bb.0: @ %entry ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 -; CHECK-MVE-NEXT: movs r1, #0 +; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: vcmp.f16 s12, s12 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s0, s0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s14, s12 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: movs r0, #0 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: 
mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s0, s8, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s1 ; CHECK-MVE-NEXT: vcmp.f16 s4, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s5 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vmovx.f16 s8, s9 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vcmp.f16 s1, s1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: vins.f16 s0, s12 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s8, s10 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s1, s9, s5 -; CHECK-MVE-NEXT: vins.f16 s0, s12 ; CHECK-MVE-NEXT: vins.f16 s1, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s2 ; CHECK-MVE-NEXT: vcmp.f16 s4, s4 ; CHECK-MVE-NEXT: vmovx.f16 s4, s6 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s2, s2 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: mov.w r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s2, s10, s6 ; CHECK-MVE-NEXT: vmovx.f16 s6, s11 ; CHECK-MVE-NEXT: vins.f16 s2, s4 @@ -4468,18 +3130,12 @@ ; CHECK-MVE-NEXT: vcmp.f16 s4, s4 ; CHECK-MVE-NEXT: 
vmovx.f16 s4, s7 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r1, #1 -; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: vcmp.f16 s3, s3 -; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: cmp r1, #0 +; CHECK-MVE-NEXT: cset r0, vs +; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s4, s6, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr -; CHECK-MVE-NEXT: it vs -; CHECK-MVE-NEXT: movvs r0, #1 -; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: cset r0, ne +; CHECK-MVE-NEXT: cset r0, vs ; CHECK-MVE-NEXT: cmp r0, #0 ; CHECK-MVE-NEXT: vseleq.f16 s3, s11, s7 ; CHECK-MVE-NEXT: vins.f16 s3, s4 diff --git a/llvm/test/CodeGen/Thumb2/mve-vcmpr.ll b/llvm/test/CodeGen/Thumb2/mve-vcmpr.ll --- a/llvm/test/CodeGen/Thumb2/mve-vcmpr.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vcmpr.ll @@ -438,15 +438,11 @@ ; CHECK-NEXT: eors r2, r0 ; CHECK-NEXT: orrs r2, r3 ; CHECK-NEXT: vmov r12, r3, d0 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: eors r1, r3 ; CHECK-NEXT: eor.w r0, r0, r12 ; CHECK-NEXT: orrs r0, r1 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: vmov q0[2], q0[0], r0, r2 ; CHECK-NEXT: vmov q0[3], q0[1], r0, r2 ; CHECK-NEXT: vbic q2, q2, q0 @@ -469,15 +465,11 @@ ; CHECK-NEXT: eors r2, r0 ; CHECK-NEXT: orrs r2, r3 ; CHECK-NEXT: vmov r12, r3, d0 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: eors r1, r3 ; CHECK-NEXT: eor.w r0, r0, r12 ; CHECK-NEXT: orrs r0, r1 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: vmov q0[2], q0[0], r0, r2 ; CHECK-NEXT: vmov q0[3], q0[1], r0, r2 ; CHECK-NEXT: vbic q2, q2, q0 @@ -495,64 +487,48 @@ define arm_aapcs_vfpcc <2 x i32> @vcmp_multi_v2i32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) { ; CHECK-LABEL: vcmp_multi_v2i32: ; 
CHECK: @ %bb.0: -; CHECK-NEXT: .save {r4, lr} -; CHECK-NEXT: push {r4, lr} +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: vmov r0, r1, d1 -; CHECK-NEXT: movs r3, #0 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 ; CHECK-NEXT: vmov r2, s10 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 ; CHECK-NEXT: vbic q0, q2, q0 ; CHECK-NEXT: vmov r0, s2 -; CHECK-NEXT: subs r1, r0, r2 -; CHECK-NEXT: asr.w r12, r0, #31 -; CHECK-NEXT: sbcs.w r1, r12, r2, asr #31 -; CHECK-NEXT: mov.w r1, #0 +; CHECK-NEXT: subs r3, r0, r2 +; CHECK-NEXT: asr.w r1, r0, #31 +; CHECK-NEXT: vmov r3, s8 +; CHECK-NEXT: sbcs.w r1, r1, r2, asr #31 ; CHECK-NEXT: vmov r2, s0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r1, #1 +; CHECK-NEXT: cset r1, lt ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: vmov r1, s8 ; CHECK-NEXT: csetm lr, ne +; CHECK-NEXT: subs r1, r2, r3 ; CHECK-NEXT: asr.w r12, r2, #31 -; CHECK-NEXT: subs r4, r2, r1 -; CHECK-NEXT: sbcs.w r1, r12, r1, asr #31 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 -; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: sbcs.w r1, r12, r3, asr #31 +; CHECK-NEXT: cset r1, lt +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: cset r0, ne ; CHECK-NEXT: vmov q3[2], q3[0], r1, lr -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: vmov q3[3], q3[1], r1, lr ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: cset r1, ne -; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: vmov q3[3], q3[1], r1, lr ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q4[2], q4[0], r1, r0 ; CHECK-NEXT: vmov q4[3], q4[1], r1, r0 ; CHECK-NEXT: vmov r0, s6 ; CHECK-NEXT: vmov r1, s4 ; CHECK-NEXT: cmp r0, #0 -; 
CHECK-NEXT: cset r0, ne -; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, ne -; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 @@ -562,7 +538,7 @@ ; CHECK-NEXT: vand q1, q2, q1 ; CHECK-NEXT: vorr q0, q1, q0 ; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: pop {r4, pc} +; CHECK-NEXT: pop {r7, pc} %a4 = icmp eq <2 x i64> %a, zeroinitializer %a5 = select <2 x i1> %a4, <2 x i32> zeroinitializer, <2 x i32> %c %a6 = icmp ne <2 x i32> %b, zeroinitializer @@ -1013,15 +989,11 @@ ; CHECK-NEXT: eors r2, r0 ; CHECK-NEXT: orrs r2, r3 ; CHECK-NEXT: vmov r12, r3, d0 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: eors r1, r3 ; CHECK-NEXT: eor.w r0, r0, r12 ; CHECK-NEXT: orrs r0, r1 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: vmov q0[2], q0[0], r0, r2 ; CHECK-NEXT: vmov q0[3], q0[1], r0, r2 ; CHECK-NEXT: vbic q2, q2, q0 @@ -1044,15 +1016,11 @@ ; CHECK-NEXT: eors r2, r0 ; CHECK-NEXT: orrs r2, r3 ; CHECK-NEXT: vmov r12, r3, d0 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: eors r1, r3 ; CHECK-NEXT: eor.w r0, r0, r12 ; CHECK-NEXT: orrs r0, r1 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: vmov q0[2], q0[0], r0, r2 ; CHECK-NEXT: vmov q0[3], q0[1], r0, r2 ; CHECK-NEXT: vbic q2, q2, q0 @@ -1070,64 +1038,48 @@ define arm_aapcs_vfpcc <2 x i32> @vcmp_r_multi_v2i32(<2 x i64> %a, <2 x i32> %b, <2 x i32> %c) { ; CHECK-LABEL: vcmp_r_multi_v2i32: ; CHECK: @ %bb.0: -; CHECK-NEXT: .save {r4, lr} -; CHECK-NEXT: push {r4, lr} +; CHECK-NEXT: .save {r7, lr} +; CHECK-NEXT: push {r7, lr} ; CHECK-NEXT: .vsave {d8, d9} ; CHECK-NEXT: vpush {d8, d9} ; CHECK-NEXT: vmov r0, r1, d1 
-; CHECK-NEXT: movs r3, #0 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 ; CHECK-NEXT: vmov r2, s10 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 ; CHECK-NEXT: vbic q0, q2, q0 ; CHECK-NEXT: vmov r0, s2 -; CHECK-NEXT: subs r1, r0, r2 -; CHECK-NEXT: asr.w r12, r0, #31 -; CHECK-NEXT: sbcs.w r1, r12, r2, asr #31 -; CHECK-NEXT: mov.w r1, #0 +; CHECK-NEXT: subs r3, r0, r2 +; CHECK-NEXT: asr.w r1, r0, #31 +; CHECK-NEXT: vmov r3, s8 +; CHECK-NEXT: sbcs.w r1, r1, r2, asr #31 ; CHECK-NEXT: vmov r2, s0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r1, #1 +; CHECK-NEXT: cset r1, lt ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: vmov r1, s8 ; CHECK-NEXT: csetm lr, ne +; CHECK-NEXT: subs r1, r2, r3 ; CHECK-NEXT: asr.w r12, r2, #31 -; CHECK-NEXT: subs r4, r2, r1 -; CHECK-NEXT: sbcs.w r1, r12, r1, asr #31 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 -; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: sbcs.w r1, r12, r3, asr #31 +; CHECK-NEXT: cset r1, lt +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: cset r0, ne ; CHECK-NEXT: vmov q3[2], q3[0], r1, lr -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: vmov q3[3], q3[1], r1, lr ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: cset r1, ne -; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: vmov q3[3], q3[1], r1, lr ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q4[2], q4[0], r1, r0 ; CHECK-NEXT: vmov q4[3], q4[1], r1, r0 ; CHECK-NEXT: vmov r0, s6 ; CHECK-NEXT: vmov r1, s4 ; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: cset r0, ne -; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, ne -; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; 
CHECK-NEXT: vmov q1[3], q1[1], r1, r0 @@ -1137,7 +1089,7 @@ ; CHECK-NEXT: vand q1, q2, q1 ; CHECK-NEXT: vorr q0, q1, q0 ; CHECK-NEXT: vpop {d8, d9} -; CHECK-NEXT: pop {r4, pc} +; CHECK-NEXT: pop {r7, pc} %a4 = icmp eq <2 x i64> %a, zeroinitializer %a5 = select <2 x i1> %a4, <2 x i32> zeroinitializer, <2 x i32> %c %a6 = icmp ne <2 x i32> %b, zeroinitializer diff --git a/llvm/test/CodeGen/Thumb2/mve-vcmpz.ll b/llvm/test/CodeGen/Thumb2/mve-vcmpz.ll --- a/llvm/test/CodeGen/Thumb2/mve-vcmpz.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vcmpz.ll @@ -364,13 +364,9 @@ ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 ; CHECK-NEXT: vbic q2, q2, q0 @@ -389,13 +385,9 @@ ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 ; CHECK-NEXT: vbic q2, q2, q0 @@ -774,13 +766,9 @@ ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 ; CHECK-NEXT: vbic q2, q2, q0 @@ -799,13 +787,9 @@ ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: orrs r0, r1 ; 
CHECK-NEXT: vmov r1, r2, d0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 ; CHECK-NEXT: vbic q2, q2, q0 diff --git a/llvm/test/CodeGen/Thumb2/mve-vctp.ll b/llvm/test/CodeGen/Thumb2/mve-vctp.ll --- a/llvm/test/CodeGen/Thumb2/mve-vctp.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vctp.ll @@ -178,16 +178,13 @@ ; CHECK-NEXT: rsbs.w r3, r0, #1 ; CHECK-NEXT: mov.w r2, #0 ; CHECK-NEXT: sbcs.w r3, r2, r1 -; CHECK-NEXT: mov.w r3, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r3, #1 +; CHECK-NEXT: cset r3, lo ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: csetm r3, ne ; CHECK-NEXT: rsbs r0, r0, #0 ; CHECK-NEXT: sbcs.w r0, r2, r1 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r2, #1 -; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: cset r0, lo +; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: vmov q2[2], q2[0], r0, r3 ; CHECK-NEXT: vmov q2[3], q2[1], r0, r3 @@ -207,15 +204,13 @@ ; CHECK-LABEL: vcmp_uge_v2i64: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: subs r0, #1 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: sbcs r0, r1, #0 -; CHECK-NEXT: it hs -; CHECK-NEXT: movhs r2, #1 -; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: vldr s8, .LCPI11_0 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: cset r0, hs +; CHECK-NEXT: vmov.f32 s9, s8 +; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: vmov s10, r0 -; CHECK-NEXT: vmov.f32 s9, s8 ; CHECK-NEXT: vmov.f32 s11, s10 ; CHECK-NEXT: vbic q1, q1, q2 ; CHECK-NEXT: vand q0, q0, q2 diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll --- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll @@ -48,13 +48,9 @@ ; CHECK-NEXT: vmov r0, s6 ; CHECK-NEXT: vmov r1, s4 ; CHECK-NEXT: 
cmp r0, #0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vand q0, q0, q1 ; CHECK-NEXT: vmov.i64 q1, #0xffffffff @@ -84,13 +80,9 @@ ; CHECK-NEXT: vmov r0, s6 ; CHECK-NEXT: vmov r1, s4 ; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 ; CHECK-NEXT: vand q0, q0, q1 @@ -429,13 +421,9 @@ ; CHECK-NEXT: vmov r0, s6 ; CHECK-NEXT: vmov r1, s4 ; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vand q0, q0, q1 ; CHECK-NEXT: vand q0, q0, q2 @@ -459,13 +447,9 @@ ; CHECK-NEXT: vmov r0, s6 ; CHECK-NEXT: vmov r1, s4 ; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 ; CHECK-NEXT: vmov r0, s2 @@ -1386,13 +1370,9 @@ ; CHECK-NEXT: vmov r0, s6 ; CHECK-NEXT: vmov r1, s4 ; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; 
CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vand q0, q0, q1 ; CHECK-NEXT: vand q0, q0, q2 @@ -1416,13 +1396,9 @@ ; CHECK-NEXT: vmov r0, s6 ; CHECK-NEXT: vmov r1, s4 ; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 ; CHECK-NEXT: vmov r0, s2 @@ -1453,13 +1429,9 @@ ; CHECK-NEXT: vmov r0, r1, d3 ; CHECK-NEXT: orrs r0, r1 ; CHECK-NEXT: vmov r1, r2, d2 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 ; CHECK-NEXT: vand q0, q0, q1 @@ -1527,13 +1499,9 @@ ; CHECK-NEXT: vmov r2, s6 ; CHECK-NEXT: vmov r3, s4 ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: csetm r3, eq ; CHECK-NEXT: vmov q1[2], q1[0], r3, r2 ; CHECK-NEXT: vand q0, q0, q1 ; CHECK-NEXT: vmov.i64 q1, #0xffffffff @@ -1568,13 +1536,9 @@ ; CHECK-NEXT: vmov r2, s6 ; CHECK-NEXT: vmov r3, s4 ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: csetm r3, eq ; CHECK-NEXT: vmov q1[2], q1[0], r3, r2 ; CHECK-NEXT: vmov q1[3], q1[1], r3, r2 ; CHECK-NEXT: vand q0, q0, q1 @@ -1899,13 +1863,9 @@ ; 
CHECK-NEXT: vmov r2, s6 ; CHECK-NEXT: vmov r3, s4 ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: cset r3, eq +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: csetm r3, eq ; CHECK-NEXT: vmov q1[2], q1[0], r3, r2 ; CHECK-NEXT: vand q0, q0, q1 ; CHECK-NEXT: vand q0, q0, q2 @@ -1934,13 +1894,9 @@ ; CHECK-NEXT: vmov r2, s6 ; CHECK-NEXT: vmov r3, s4 ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: cset r3, eq -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: csetm r3, eq ; CHECK-NEXT: vmov q1[2], q1[0], r3, r2 ; CHECK-NEXT: vmov q1[3], q1[1], r3, r2 ; CHECK-NEXT: vmov r2, s2 @@ -2595,13 +2551,9 @@ ; CHECK-NEXT: vmov r2, s6 ; CHECK-NEXT: vmov r3, s4 ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: cset r3, eq -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: csetm r3, eq ; CHECK-NEXT: vmov q1[2], q1[0], r3, r2 ; CHECK-NEXT: vand q0, q0, q1 ; CHECK-NEXT: vand q0, q0, q2 @@ -2630,13 +2582,9 @@ ; CHECK-NEXT: vmov r2, s6 ; CHECK-NEXT: vmov r3, s4 ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: cset r3, eq -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: csetm r3, eq ; CHECK-NEXT: vmov q1[2], q1[0], r3, r2 ; CHECK-NEXT: vmov q1[3], q1[1], r3, r2 ; CHECK-NEXT: vmov r2, s2 @@ -2671,14 +2619,10 @@ ; CHECK-NEXT: push {r7, lr} ; CHECK-NEXT: vmov r2, r3, d3 ; CHECK-NEXT: orrs r2, r3 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: vmov r3, r2, d2 -; CHECK-NEXT: csetm r12, ne +; CHECK-NEXT: csetm 
r12, eq ; CHECK-NEXT: orrs r2, r3 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: vmov q1[2], q1[0], r2, r12 ; CHECK-NEXT: vmov q1[3], q1[1], r2, r12 ; CHECK-NEXT: vand q0, q0, q1 diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll --- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll @@ -54,13 +54,9 @@ ; CHECK-NEXT: vmullb.u32 q3, q0, q1 ; CHECK-NEXT: vmov r1, s8 ; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 ; CHECK-NEXT: vand q0, q3, q0 @@ -86,13 +82,9 @@ ; CHECK-NEXT: vmullb.s32 q3, q0, q1 ; CHECK-NEXT: vmov r1, s8 ; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 ; CHECK-NEXT: vand q0, q3, q0 @@ -368,13 +360,9 @@ ; CHECK-NEXT: vmov q0[3], q0[1], r3, r1 ; CHECK-NEXT: vmov r1, s4 ; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 ; CHECK-NEXT: vand q0, q0, q1 @@ -405,14 +393,10 @@ ; CHECK-NEXT: sxth r2, r2 ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: sxth r3, r3 -; CHECK-NEXT: cset r0, eq -; 
CHECK-NEXT: smull r2, r3, r3, r2 -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq +; CHECK-NEXT: smull r2, r3, r3, r2 ; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 ; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vmov r0, s6 @@ -1585,13 +1569,9 @@ ; CHECK-NEXT: vmov q0[3], q0[1], r3, r1 ; CHECK-NEXT: vmov r1, s4 ; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 ; CHECK-NEXT: vand q0, q0, q1 @@ -1622,14 +1602,10 @@ ; CHECK-NEXT: sxtb r2, r2 ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: sxtb r3, r3 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: smull r2, r3, r3, r2 -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: cset r1, eq +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: csetm r1, eq +; CHECK-NEXT: smull r2, r3, r3, r2 ; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 ; CHECK-NEXT: vmov q2[3], q2[1], r1, r0 ; CHECK-NEXT: vmov r0, s6 @@ -1660,31 +1636,27 @@ ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, lr} -; CHECK-NEXT: vmov r0, r12, d3 -; CHECK-NEXT: vmov r2, lr, d1 +; CHECK-NEXT: vmov r0, r1, d5 ; CHECK-NEXT: vmov r4, r9, d2 ; CHECK-NEXT: vmov r6, r7, d0 -; CHECK-NEXT: umull r1, r8, r2, r0 +; CHECK-NEXT: orrs r0, r1 +; CHECK-NEXT: vmov r1, r2, d4 +; CHECK-NEXT: csetm r0, eq ; CHECK-NEXT: umull r3, r5, r6, r4 +; CHECK-NEXT: orrs r1, r2 +; CHECK-NEXT: vmov r2, lr, d1 +; CHECK-NEXT: csetm r1, eq +; CHECK-NEXT: vmov q2[2], q2[0], r1, r0 +; CHECK-NEXT: vmov q2[3], 
q2[1], r1, r0 +; CHECK-NEXT: vmov r0, r12, d3 +; CHECK-NEXT: umull r1, r8, r2, r0 ; CHECK-NEXT: vmov q0[2], q0[0], r3, r1 ; CHECK-NEXT: mla r1, r2, r12, r8 ; CHECK-NEXT: mla r0, lr, r0, r1 ; CHECK-NEXT: mla r1, r6, r9, r5 ; CHECK-NEXT: mla r1, r7, r4, r1 ; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 -; CHECK-NEXT: vmov r0, r1, d5 -; CHECK-NEXT: orrs r0, r1 -; CHECK-NEXT: vmov r1, r2, d4 -; CHECK-NEXT: cset r0, eq -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: orrs r1, r2 -; CHECK-NEXT: cset r1, eq -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne -; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 -; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 -; CHECK-NEXT: vand q0, q0, q1 +; CHECK-NEXT: vand q0, q0, q2 ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: vmov r2, r3, d0 ; CHECK-NEXT: adds r0, r0, r2 @@ -1756,13 +1728,9 @@ ; CHECK-NEXT: vmullb.u32 q3, q0, q1 ; CHECK-NEXT: vmov r3, s8 ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: cset r3, eq -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: csetm r3, eq ; CHECK-NEXT: vmov q0[2], q0[0], r3, r2 ; CHECK-NEXT: vmov q0[3], q0[1], r3, r2 ; CHECK-NEXT: vand q0, q3, q0 @@ -1793,13 +1761,9 @@ ; CHECK-NEXT: vmullb.s32 q3, q0, q1 ; CHECK-NEXT: vmov r3, s8 ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: cset r3, eq -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: csetm r3, eq ; CHECK-NEXT: vmov q0[2], q0[0], r3, r2 ; CHECK-NEXT: vmov q0[3], q0[1], r3, r2 ; CHECK-NEXT: vand q0, q3, q0 @@ -2018,13 +1982,9 @@ ; CHECK-NEXT: vmov q0[3], q0[1], r3, r12 ; CHECK-NEXT: vmov r3, s4 ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: 
cset r3, eq -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: csetm r3, eq ; CHECK-NEXT: vmov q1[2], q1[0], r3, r2 ; CHECK-NEXT: vmov q1[3], q1[1], r3, r2 ; CHECK-NEXT: vand q0, q0, q1 @@ -2056,13 +2016,9 @@ ; CHECK-NEXT: vmov r2, s10 ; CHECK-NEXT: vmov r3, s8 ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: cset r3, eq -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: csetm r3, eq ; CHECK-NEXT: vmov q2[2], q2[0], r3, r2 ; CHECK-NEXT: vmov q2[3], q2[1], r3, r2 ; CHECK-NEXT: vmov r2, s6 @@ -2936,13 +2892,9 @@ ; CHECK-NEXT: vmov q0[3], q0[1], r3, r12 ; CHECK-NEXT: vmov r3, s4 ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: cset r3, eq -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: csetm r3, eq ; CHECK-NEXT: vmov q1[2], q1[0], r3, r2 ; CHECK-NEXT: vmov q1[3], q1[1], r3, r2 ; CHECK-NEXT: vand q0, q0, q1 @@ -2974,13 +2926,9 @@ ; CHECK-NEXT: vmov r2, s10 ; CHECK-NEXT: vmov r3, s8 ; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: csetm r2, eq ; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: cset r3, eq -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csetm r3, ne +; CHECK-NEXT: csetm r3, eq ; CHECK-NEXT: vmov q2[2], q2[0], r3, r2 ; CHECK-NEXT: vmov q2[3], q2[1], r3, r2 ; CHECK-NEXT: vmov r2, s6 @@ -3019,31 +2967,27 @@ ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} -; CHECK-NEXT: vmov r2, r12, d3 -; CHECK-NEXT: vmov r3, lr, d1 +; CHECK-NEXT: vmov r2, r3, d5 ; CHECK-NEXT: vmov r6, r9, d2 ; CHECK-NEXT: vmov r5, r11, d0 -; CHECK-NEXT: umull r10, r8, r3, r2 +; CHECK-NEXT: orrs r2, r3 +; CHECK-NEXT: vmov r3, r2, d4 +; 
CHECK-NEXT: csetm r12, eq ; CHECK-NEXT: umull r4, r7, r5, r6 +; CHECK-NEXT: orrs r2, r3 +; CHECK-NEXT: vmov r3, lr, d1 +; CHECK-NEXT: csetm r2, eq +; CHECK-NEXT: vmov q2[2], q2[0], r2, r12 +; CHECK-NEXT: vmov q2[3], q2[1], r2, r12 +; CHECK-NEXT: vmov r2, r12, d3 +; CHECK-NEXT: umull r10, r8, r3, r2 ; CHECK-NEXT: mla r3, r3, r12, r8 ; CHECK-NEXT: vmov q0[2], q0[0], r4, r10 ; CHECK-NEXT: mla r2, lr, r2, r3 ; CHECK-NEXT: mla r3, r5, r9, r7 ; CHECK-NEXT: mla r3, r11, r6, r3 ; CHECK-NEXT: vmov q0[3], q0[1], r3, r2 -; CHECK-NEXT: vmov r2, r3, d5 -; CHECK-NEXT: orrs r2, r3 -; CHECK-NEXT: vmov r3, r7, d4 -; CHECK-NEXT: cset r2, eq -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne -; CHECK-NEXT: orrs r3, r7 -; CHECK-NEXT: cset r3, eq -; CHECK-NEXT: cmp r3, #0 -; CHECK-NEXT: csetm r3, ne -; CHECK-NEXT: vmov q1[2], q1[0], r3, r2 -; CHECK-NEXT: vmov q1[3], q1[1], r3, r2 -; CHECK-NEXT: vand q0, q0, q1 +; CHECK-NEXT: vand q0, q0, q2 ; CHECK-NEXT: vmov r2, r3, d1 ; CHECK-NEXT: vmov r7, r6, d0 ; CHECK-NEXT: adds r2, r2, r7 diff --git a/llvm/test/CodeGen/Thumb2/mve-vmaxv-vminv-scalar.ll b/llvm/test/CodeGen/Thumb2/mve-vmaxv-vminv-scalar.ll --- a/llvm/test/CodeGen/Thumb2/mve-vmaxv-vminv-scalar.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vmaxv-vminv-scalar.ll @@ -493,8 +493,8 @@ define arm_aapcs_vfpcc i64 @uminv2i64(<2 x i64> %vec, i64 %min) { ; CHECK-LABEL: uminv2i64: ; CHECK: @ %bb.0: -; CHECK-NEXT: .save {r4, r5, r7, lr} -; CHECK-NEXT: push {r4, r5, r7, lr} +; CHECK-NEXT: .save {r4, lr} +; CHECK-NEXT: push {r4, lr} ; CHECK-NEXT: vmov r2, r12, d1 ; CHECK-NEXT: vmov r3, lr, d0 ; CHECK-NEXT: cmp r3, r2 @@ -502,16 +502,14 @@ ; CHECK-NEXT: cmp lr, r12 ; CHECK-NEXT: csel r2, r3, r2, lo ; CHECK-NEXT: csel r3, lr, r12, lo -; CHECK-NEXT: csel r5, r4, r2, eq -; CHECK-NEXT: movs r4, #0 -; CHECK-NEXT: subs r2, r5, r0 -; CHECK-NEXT: sbcs.w r2, r3, r1 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r4, #1 +; CHECK-NEXT: csel r2, r4, r2, eq +; CHECK-NEXT: subs r4, r2, r0 +; CHECK-NEXT: sbcs.w r4, r3, r1 
+; CHECK-NEXT: cset r4, lo ; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r0, r5, r0, ne +; CHECK-NEXT: csel r0, r2, r0, ne ; CHECK-NEXT: csel r1, r3, r1, ne -; CHECK-NEXT: pop {r4, r5, r7, pc} +; CHECK-NEXT: pop {r4, pc} %x = call i64 @llvm.vector.reduce.umin.v2i64(<2 x i64> %vec) %cmp = icmp ult i64 %x, %min %1 = select i1 %cmp, i64 %x, i64 %min @@ -521,8 +519,8 @@ define arm_aapcs_vfpcc i64 @sminv2i64(<2 x i64> %vec, i64 %min) { ; CHECK-LABEL: sminv2i64: ; CHECK: @ %bb.0: -; CHECK-NEXT: .save {r4, r5, r7, lr} -; CHECK-NEXT: push {r4, r5, r7, lr} +; CHECK-NEXT: .save {r4, lr} +; CHECK-NEXT: push {r4, lr} ; CHECK-NEXT: vmov r2, r12, d1 ; CHECK-NEXT: vmov r3, lr, d0 ; CHECK-NEXT: cmp r3, r2 @@ -530,16 +528,14 @@ ; CHECK-NEXT: cmp lr, r12 ; CHECK-NEXT: csel r2, r3, r2, lt ; CHECK-NEXT: csel r3, lr, r12, lt -; CHECK-NEXT: csel r5, r4, r2, eq -; CHECK-NEXT: movs r4, #0 -; CHECK-NEXT: subs r2, r5, r0 -; CHECK-NEXT: sbcs.w r2, r3, r1 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r4, #1 +; CHECK-NEXT: csel r2, r4, r2, eq +; CHECK-NEXT: subs r4, r2, r0 +; CHECK-NEXT: sbcs.w r4, r3, r1 +; CHECK-NEXT: cset r4, lt ; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r0, r5, r0, ne +; CHECK-NEXT: csel r0, r2, r0, ne ; CHECK-NEXT: csel r1, r3, r1, ne -; CHECK-NEXT: pop {r4, r5, r7, pc} +; CHECK-NEXT: pop {r4, pc} %x = call i64 @llvm.vector.reduce.smin.v2i64(<2 x i64> %vec) %cmp = icmp slt i64 %x, %min %1 = select i1 %cmp, i64 %x, i64 %min @@ -549,8 +545,8 @@ define arm_aapcs_vfpcc i64 @umaxv2i64(<2 x i64> %vec, i64 %max) { ; CHECK-LABEL: umaxv2i64: ; CHECK: @ %bb.0: -; CHECK-NEXT: .save {r4, r5, r7, lr} -; CHECK-NEXT: push {r4, r5, r7, lr} +; CHECK-NEXT: .save {r4, lr} +; CHECK-NEXT: push {r4, lr} ; CHECK-NEXT: vmov r2, r12, d1 ; CHECK-NEXT: vmov r3, lr, d0 ; CHECK-NEXT: cmp r3, r2 @@ -558,16 +554,14 @@ ; CHECK-NEXT: cmp lr, r12 ; CHECK-NEXT: csel r2, r3, r2, hi ; CHECK-NEXT: csel r3, lr, r12, hi -; CHECK-NEXT: csel r5, r4, r2, eq -; CHECK-NEXT: movs r4, #0 -; CHECK-NEXT: subs r2, r0, r5 -; 
CHECK-NEXT: sbcs.w r2, r1, r3 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r4, #1 +; CHECK-NEXT: csel r2, r4, r2, eq +; CHECK-NEXT: subs r4, r0, r2 +; CHECK-NEXT: sbcs.w r4, r1, r3 +; CHECK-NEXT: cset r4, lo ; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r0, r5, r0, ne +; CHECK-NEXT: csel r0, r2, r0, ne ; CHECK-NEXT: csel r1, r3, r1, ne -; CHECK-NEXT: pop {r4, r5, r7, pc} +; CHECK-NEXT: pop {r4, pc} %x = call i64 @llvm.vector.reduce.umax.v2i64(<2 x i64> %vec) %cmp = icmp ugt i64 %x, %max %1 = select i1 %cmp, i64 %x, i64 %max @@ -577,8 +571,8 @@ define arm_aapcs_vfpcc i64 @smaxv2i64(<2 x i64> %vec, i64 %max) { ; CHECK-LABEL: smaxv2i64: ; CHECK: @ %bb.0: -; CHECK-NEXT: .save {r4, r5, r7, lr} -; CHECK-NEXT: push {r4, r5, r7, lr} +; CHECK-NEXT: .save {r4, lr} +; CHECK-NEXT: push {r4, lr} ; CHECK-NEXT: vmov r2, r12, d1 ; CHECK-NEXT: vmov r3, lr, d0 ; CHECK-NEXT: cmp r3, r2 @@ -586,16 +580,14 @@ ; CHECK-NEXT: cmp lr, r12 ; CHECK-NEXT: csel r2, r3, r2, gt ; CHECK-NEXT: csel r3, lr, r12, gt -; CHECK-NEXT: csel r5, r4, r2, eq -; CHECK-NEXT: movs r4, #0 -; CHECK-NEXT: subs r2, r0, r5 -; CHECK-NEXT: sbcs.w r2, r1, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r4, #1 +; CHECK-NEXT: csel r2, r4, r2, eq +; CHECK-NEXT: subs r4, r0, r2 +; CHECK-NEXT: sbcs.w r4, r1, r3 +; CHECK-NEXT: cset r4, lt ; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r0, r5, r0, ne +; CHECK-NEXT: csel r0, r2, r0, ne ; CHECK-NEXT: csel r1, r3, r1, ne -; CHECK-NEXT: pop {r4, r5, r7, pc} +; CHECK-NEXT: pop {r4, pc} %x = call i64 @llvm.vector.reduce.smax.v2i64(<2 x i64> %vec) %cmp = icmp sgt i64 %x, %max %1 = select i1 %cmp, i64 %x, i64 %max diff --git a/llvm/test/CodeGen/Thumb2/mve-vmovimm.ll b/llvm/test/CodeGen/Thumb2/mve-vmovimm.ll --- a/llvm/test/CodeGen/Thumb2/mve-vmovimm.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vmovimm.ll @@ -507,3 +507,91 @@ %s = select <4 x i1> %l699, <4 x i32> %a, <4 x i32> %b ret <4 x i32> %s } + +define arm_aapcs_vfpcc <2 x i64> @v2i1and_vmov(<2 x i64> %a, <2 x i64> %b, i32 %c) { +; 
CHECKBE-LABEL: v2i1and_vmov: +; CHECKBE: @ %bb.0: @ %entry +; CHECKBE-NEXT: .vsave {d8, d9} +; CHECKBE-NEXT: vpush {d8, d9} +; CHECKBE-NEXT: cmp r0, #0 +; CHECKBE-NEXT: adr r1, .LCPI37_0 +; CHECKBE-NEXT: cset r0, eq +; CHECKBE-NEXT: vldrw.u32 q3, [r1] +; CHECKBE-NEXT: vmov.32 q4[3], r0 +; CHECKBE-NEXT: rsbs r0, r0, #0 +; CHECKBE-NEXT: vand q3, q4, q3 +; CHECKBE-NEXT: vmov.i8 q2, #0xff +; CHECKBE-NEXT: vmov r1, s15 +; CHECKBE-NEXT: vmov q3[2], q3[0], r0, r1 +; CHECKBE-NEXT: vmov q3[3], q3[1], r0, r1 +; CHECKBE-NEXT: vrev64.32 q4, q3 +; CHECKBE-NEXT: veor q2, q4, q2 +; CHECKBE-NEXT: vand q0, q0, q4 +; CHECKBE-NEXT: vand q1, q1, q2 +; CHECKBE-NEXT: vorr q0, q0, q1 +; CHECKBE-NEXT: vpop {d8, d9} +; CHECKBE-NEXT: bx lr +; CHECKBE-NEXT: .p2align 4 +; CHECKBE-NEXT: @ %bb.1: +; CHECKBE-NEXT: .LCPI37_0: +; CHECKBE-NEXT: .zero 4 +; CHECKBE-NEXT: .long 1 @ 0x1 +; CHECKBE-NEXT: .zero 4 +; CHECKBE-NEXT: .long 0 @ 0x0 +entry: + %c1 = icmp eq i32 %c, zeroinitializer + %broadcast.splatinsert1967 = insertelement <2 x i1> undef, i1 %c1, i32 0 + %broadcast.splat1968 = shufflevector <2 x i1> %broadcast.splatinsert1967, <2 x i1> undef, <2 x i32> zeroinitializer + %l699 = and <2 x i1> %broadcast.splat1968, + %s = select <2 x i1> %l699, <2 x i64> %a, <2 x i64> %b + ret <2 x i64> %s +} + +define arm_aapcs_vfpcc <2 x i64> @v2i1or_vmov(<2 x i64> %a, <2 x i64> %b, i32 %c) { +; CHECKLE-LABEL: v2i1or_vmov: +; CHECKLE: @ %bb.0: @ %entry +; CHECKLE-NEXT: cmp r0, #0 +; CHECKLE-NEXT: vldr s8, .LCPI38_0 +; CHECKLE-NEXT: csetm r0, eq +; CHECKLE-NEXT: vmov s10, r0 +; CHECKLE-NEXT: vmov.f32 s9, s8 +; CHECKLE-NEXT: vmov.f32 s11, s10 +; CHECKLE-NEXT: vbic q1, q1, q2 +; CHECKLE-NEXT: vand q0, q0, q2 +; CHECKLE-NEXT: vorr q0, q0, q1 +; CHECKLE-NEXT: bx lr +; CHECKLE-NEXT: .p2align 2 +; CHECKLE-NEXT: @ %bb.1: +; CHECKLE-NEXT: .LCPI38_0: +; CHECKLE-NEXT: .long 0xffffffff @ float NaN +; +; CHECKBE-LABEL: v2i1or_vmov: +; CHECKBE: @ %bb.0: @ %entry +; CHECKBE-NEXT: .vsave {d8, d9} +; CHECKBE-NEXT: vpush {d8, 
d9} +; CHECKBE-NEXT: cmp r0, #0 +; CHECKBE-NEXT: vldr s8, .LCPI38_0 +; CHECKBE-NEXT: csetm r0, eq +; CHECKBE-NEXT: vmov.i8 q3, #0xff +; CHECKBE-NEXT: vmov s10, r0 +; CHECKBE-NEXT: vmov.f32 s9, s8 +; CHECKBE-NEXT: vmov.f32 s11, s10 +; CHECKBE-NEXT: vrev64.32 q4, q2 +; CHECKBE-NEXT: veor q2, q4, q3 +; CHECKBE-NEXT: vand q0, q0, q4 +; CHECKBE-NEXT: vand q1, q1, q2 +; CHECKBE-NEXT: vorr q0, q0, q1 +; CHECKBE-NEXT: vpop {d8, d9} +; CHECKBE-NEXT: bx lr +; CHECKBE-NEXT: .p2align 2 +; CHECKBE-NEXT: @ %bb.1: +; CHECKBE-NEXT: .LCPI38_0: +; CHECKBE-NEXT: .long 0xffffffff @ float NaN +entry: + %c1 = icmp eq i32 %c, zeroinitializer + %broadcast.splatinsert1967 = insertelement <2 x i1> undef, i1 %c1, i32 0 + %broadcast.splat1968 = shufflevector <2 x i1> %broadcast.splatinsert1967, <2 x i1> undef, <2 x i32> zeroinitializer + %l699 = or <2 x i1> %broadcast.splat1968, + %s = select <2 x i1> %l699, <2 x i64> %a, <2 x i64> %b + ret <2 x i64> %s +} diff --git a/llvm/test/CodeGen/Thumb2/mve-vpsel.ll b/llvm/test/CodeGen/Thumb2/mve-vpsel.ll --- a/llvm/test/CodeGen/Thumb2/mve-vpsel.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vpsel.ll @@ -37,6 +37,27 @@ ret <4 x i32> %1 } +define arm_aapcs_vfpcc <2 x i64> @vpsel_i64(<2 x i64> %mask, <2 x i64> %src1, <2 x i64> %src2) { +; CHECK-LABEL: vpsel_i64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov r0, r1, d1 +; CHECK-NEXT: vmov r2, r3, d0 +; CHECK-NEXT: orrs r0, r1 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: orrs.w r1, r2, r3 +; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 +; CHECK-NEXT: vbic q2, q2, q0 +; CHECK-NEXT: vand q0, q1, q0 +; CHECK-NEXT: vorr q0, q0, q2 +; CHECK-NEXT: bx lr +entry: + %0 = icmp ne <2 x i64> %mask, zeroinitializer + %1 = select <2 x i1> %0, <2 x i64> %src1, <2 x i64> %src2 + ret <2 x i64> %1 +} + define arm_aapcs_vfpcc <8 x half> @vpsel_f16(<8 x i16> %mask, <8 x half> %src1, <8 x half> %src2) { ; CHECK-LABEL: vpsel_f16: ; CHECK: @ %bb.0: @ %entry @@ -61,6 +82,27 @@ 
ret <4 x float> %1 } +define arm_aapcs_vfpcc <2 x double> @vpsel_f64(<2 x i64> %mask, <2 x double> %src1, <2 x double> %src2) { +; CHECK-LABEL: vpsel_f64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmov r0, r1, d1 +; CHECK-NEXT: vmov r2, r3, d0 +; CHECK-NEXT: orrs r0, r1 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: orrs.w r1, r2, r3 +; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: vmov q0[2], q0[0], r1, r0 +; CHECK-NEXT: vmov q0[3], q0[1], r1, r0 +; CHECK-NEXT: vbic q2, q2, q0 +; CHECK-NEXT: vand q0, q1, q0 +; CHECK-NEXT: vorr q0, q0, q2 +; CHECK-NEXT: bx lr +entry: + %0 = icmp ne <2 x i64> %mask, zeroinitializer + %1 = select <2 x i1> %0, <2 x double> %src1, <2 x double> %src2 + ret <2 x double> %1 +} + define arm_aapcs_vfpcc <4 x i32> @foo(<4 x i32> %vec.ind) { ; CHECK-LABEL: foo: ; CHECK: @ %bb.0: diff --git a/llvm/test/CodeGen/Thumb2/mve-vqdmulh.ll b/llvm/test/CodeGen/Thumb2/mve-vqdmulh.ll --- a/llvm/test/CodeGen/Thumb2/mve-vqdmulh.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vqdmulh.ll @@ -513,9 +513,7 @@ ; CHECK-NEXT: mov.w r9, #1 ; CHECK-NEXT: sbcs r7, r3, #0 ; CHECK-NEXT: mov.w r4, #0 -; CHECK-NEXT: mov.w r7, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r7, #1 +; CHECK-NEXT: cset r7, lt ; CHECK-NEXT: cmp r7, #0 ; CHECK-NEXT: csel r0, r0, r7, ne ; CHECK-NEXT: csel r3, r3, r7, ne @@ -525,9 +523,7 @@ ; CHECK-NEXT: sbcs.w r7, r4, r1 ; CHECK-NEXT: sbcs.w r2, r4, r2 ; CHECK-NEXT: sbcs.w r2, r4, r3 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r2, #1 +; CHECK-NEXT: cset r2, lt ; CHECK-NEXT: cmp r2, #0 ; CHECK-NEXT: csel r6, r0, r2, ne ; CHECK-NEXT: csel r7, r1, r2, ne @@ -536,9 +532,7 @@ ; CHECK-NEXT: bl __fixdfti ; CHECK-NEXT: subs r5, r2, #1 ; CHECK-NEXT: sbcs r5, r3, #0 -; CHECK-NEXT: mov.w r5, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r5, #1 +; CHECK-NEXT: cset r5, lt ; CHECK-NEXT: cmp r5, #0 ; CHECK-NEXT: csel r0, r0, r5, ne ; CHECK-NEXT: csel r3, r3, r5, ne @@ -548,11 +542,10 @@ ; CHECK-NEXT: sbcs.w r5, r4, r1 ; CHECK-NEXT: sbcs.w r2, r4, r2 ; 
CHECK-NEXT: sbcs.w r2, r4, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r4, #1 -; CHECK-NEXT: cmp r4, #0 -; CHECK-NEXT: csel r2, r0, r4, ne -; CHECK-NEXT: csel r3, r1, r4, ne +; CHECK-NEXT: cset r3, lt +; CHECK-NEXT: cmp r3, #0 +; CHECK-NEXT: csel r2, r0, r3, ne +; CHECK-NEXT: csel r3, r1, r3, ne ; CHECK-NEXT: mov r0, r6 ; CHECK-NEXT: mov r1, r7 ; CHECK-NEXT: add sp, #4 diff --git a/llvm/test/CodeGen/Thumb2/mve-vqmovn.ll b/llvm/test/CodeGen/Thumb2/mve-vqmovn.ll --- a/llvm/test/CodeGen/Thumb2/mve-vqmovn.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vqmovn.ll @@ -164,49 +164,41 @@ define arm_aapcs_vfpcc <2 x i64> @vqmovni64_smaxmin(<2 x i64> %s0) { ; CHECK-LABEL: vqmovni64_smaxmin: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmov r1, r2, d1 -; CHECK-NEXT: mvn r12, #-2147483648 -; CHECK-NEXT: movs r0, #0 -; CHECK-NEXT: subs.w r1, r1, r12 -; CHECK-NEXT: sbcs r1, r2, #0 -; CHECK-NEXT: vmov r2, r3, d0 -; CHECK-NEXT: mov.w r1, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r1, #1 +; CHECK-NEXT: vmov r0, r1, d1 +; CHECK-NEXT: mvn r2, #-2147483648 +; CHECK-NEXT: subs r0, r0, r2 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: vmov r1, r3, d0 +; CHECK-NEXT: cset r0, lt +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne +; CHECK-NEXT: subs r1, r1, r2 +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: sbcs r1, r3, #0 +; CHECK-NEXT: cset r1, lt ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne -; CHECK-NEXT: subs.w r2, r2, r12 -; CHECK-NEXT: mov.w r12, #-1 -; CHECK-NEXT: sbcs r2, r3, #0 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r2, #1 -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne -; CHECK-NEXT: vmov q1[2], q1[0], r2, r1 -; CHECK-NEXT: vmov q1[3], q1[1], r2, r1 -; CHECK-NEXT: adr r1, .LCPI12_0 -; CHECK-NEXT: vldrw.u32 q2, [r1] +; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 +; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 +; CHECK-NEXT: adr r0, .LCPI12_0 +; CHECK-NEXT: vldrw.u32 q2, [r0] ; CHECK-NEXT: vand q0, q0, q1 ; CHECK-NEXT: vbic q2, q2, q1 ; CHECK-NEXT: vorr q0, q0, 
q2 -; CHECK-NEXT: vmov r1, r2, d1 +; CHECK-NEXT: vmov r0, r1, d1 +; CHECK-NEXT: rsbs.w r0, r0, #-2147483648 +; CHECK-NEXT: sbcs.w r0, r2, r1 +; CHECK-NEXT: vmov r1, r3, d0 +; CHECK-NEXT: cset r0, lt +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: rsbs.w r1, r1, #-2147483648 -; CHECK-NEXT: sbcs.w r1, r12, r2 -; CHECK-NEXT: vmov r2, r3, d0 -; CHECK-NEXT: mov.w r1, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r1, #1 +; CHECK-NEXT: sbcs.w r1, r2, r3 +; CHECK-NEXT: cset r1, lt ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne -; CHECK-NEXT: rsbs.w r2, r2, #-2147483648 -; CHECK-NEXT: sbcs.w r2, r12, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 -; CHECK-NEXT: cmp r0, #0 -; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: vmov q1[2], q1[0], r0, r1 -; CHECK-NEXT: vmov q1[3], q1[1], r0, r1 +; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 +; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 ; CHECK-NEXT: adr r0, .LCPI12_1 ; CHECK-NEXT: vldrw.u32 q2, [r0] ; CHECK-NEXT: vand q0, q0, q1 @@ -236,49 +228,41 @@ define arm_aapcs_vfpcc <2 x i64> @vqmovni64_sminmax(<2 x i64> %s0) { ; CHECK-LABEL: vqmovni64_sminmax: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmov r1, r2, d1 -; CHECK-NEXT: mov.w r12, #-1 -; CHECK-NEXT: movs r0, #0 +; CHECK-NEXT: vmov r0, r1, d1 +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: rsbs.w r0, r0, #-2147483648 +; CHECK-NEXT: sbcs.w r0, r2, r1 +; CHECK-NEXT: vmov r1, r3, d0 +; CHECK-NEXT: cset r0, lt +; CHECK-NEXT: cmp r0, #0 +; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: rsbs.w r1, r1, #-2147483648 -; CHECK-NEXT: sbcs.w r1, r12, r2 -; CHECK-NEXT: vmov r2, r3, d0 -; CHECK-NEXT: mov.w r1, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r1, #1 +; CHECK-NEXT: sbcs.w r1, r2, r3 +; CHECK-NEXT: mvn r2, #-2147483648 +; CHECK-NEXT: cset r1, lt ; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne -; CHECK-NEXT: rsbs.w r2, r2, #-2147483648 -; CHECK-NEXT: sbcs.w r2, r12, r3 -; CHECK-NEXT: mvn r12, #-2147483648 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r2, #1 
-; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne -; CHECK-NEXT: vmov q1[2], q1[0], r2, r1 -; CHECK-NEXT: vmov q1[3], q1[1], r2, r1 -; CHECK-NEXT: adr r1, .LCPI13_0 -; CHECK-NEXT: vldrw.u32 q2, [r1] +; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 +; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 +; CHECK-NEXT: adr r0, .LCPI13_0 +; CHECK-NEXT: vldrw.u32 q2, [r0] ; CHECK-NEXT: vand q0, q0, q1 ; CHECK-NEXT: vbic q2, q2, q1 ; CHECK-NEXT: vorr q0, q0, q2 -; CHECK-NEXT: vmov r1, r2, d1 -; CHECK-NEXT: subs.w r1, r1, r12 -; CHECK-NEXT: sbcs r1, r2, #0 -; CHECK-NEXT: vmov r2, r3, d0 -; CHECK-NEXT: mov.w r1, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r1, #1 -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: csetm r1, ne -; CHECK-NEXT: subs.w r2, r2, r12 -; CHECK-NEXT: sbcs r2, r3, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: vmov r0, r1, d1 +; CHECK-NEXT: subs r0, r0, r2 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: vmov r1, r3, d0 +; CHECK-NEXT: cset r0, lt ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: vmov q1[2], q1[0], r0, r1 -; CHECK-NEXT: vmov q1[3], q1[1], r0, r1 +; CHECK-NEXT: subs r1, r1, r2 +; CHECK-NEXT: sbcs r1, r3, #0 +; CHECK-NEXT: cset r1, lt +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 +; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 ; CHECK-NEXT: adr r0, .LCPI13_1 ; CHECK-NEXT: vldrw.u32 q2, [r0] ; CHECK-NEXT: vand q0, q0, q1 @@ -309,21 +293,17 @@ ; CHECK-LABEL: vqmovni64_umaxmin: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmov r0, r1, d1 -; CHECK-NEXT: movs r2, #0 ; CHECK-NEXT: vmov.i64 q2, #0xffffffff ; CHECK-NEXT: subs.w r0, r0, #-1 ; CHECK-NEXT: sbcs r0, r1, #0 -; CHECK-NEXT: vmov r1, r3, d0 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r0, #1 +; CHECK-NEXT: vmov r1, r2, d0 +; CHECK-NEXT: cset r0, lo ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: subs.w r1, r1, #-1 -; CHECK-NEXT: sbcs r1, r3, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r2, #1 -; 
CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: sbcs r1, r2, #0 +; CHECK-NEXT: cset r1, lo +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 @@ -341,21 +321,17 @@ ; CHECK-LABEL: vqmovni64_uminmax: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: vmov r0, r1, d1 -; CHECK-NEXT: movs r2, #0 ; CHECK-NEXT: vmov.i64 q2, #0xffffffff ; CHECK-NEXT: subs.w r0, r0, #-1 ; CHECK-NEXT: sbcs r0, r1, #0 -; CHECK-NEXT: vmov r1, r3, d0 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r0, #1 +; CHECK-NEXT: vmov r1, r2, d0 +; CHECK-NEXT: cset r0, lo ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: subs.w r1, r1, #-1 -; CHECK-NEXT: sbcs r1, r3, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r2, #1 -; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: sbcs r1, r2, #0 +; CHECK-NEXT: cset r1, lo +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 diff --git a/llvm/test/CodeGen/Thumb2/mve-vqshrn.ll b/llvm/test/CodeGen/Thumb2/mve-vqshrn.ll --- a/llvm/test/CodeGen/Thumb2/mve-vqshrn.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vqshrn.ll @@ -180,52 +180,42 @@ define arm_aapcs_vfpcc <2 x i64> @vqshrni64_smaxmin(<2 x i64> %so) { ; CHECK-LABEL: vqshrni64_smaxmin: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r7, lr} -; CHECK-NEXT: push {r7, lr} -; CHECK-NEXT: vmov r2, r3, d1 -; CHECK-NEXT: mvn lr, #-2147483648 -; CHECK-NEXT: vmov r0, r1, d0 -; CHECK-NEXT: asrl r2, r3, #3 +; CHECK-NEXT: vmov r0, r1, d1 +; CHECK-NEXT: mvn r12, #-2147483648 +; CHECK-NEXT: vmov r2, r3, d0 ; CHECK-NEXT: asrl r0, r1, #3 -; CHECK-NEXT: mov.w r12, #0 -; CHECK-NEXT: vmov q0[2], q0[0], r0, r2 -; CHECK-NEXT: subs.w r2, r2, lr -; CHECK-NEXT: sbcs r2, r3, #0 -; CHECK-NEXT: vmov q0[3], q0[1], r1, r3 -; CHECK-NEXT: mov.w r2, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r2, #1 -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: csetm r2, ne -; CHECK-NEXT: subs.w r0, r0, lr +; 
CHECK-NEXT: asrl r2, r3, #3 +; CHECK-NEXT: vmov q0[2], q0[0], r2, r0 +; CHECK-NEXT: subs.w r0, r0, r12 ; CHECK-NEXT: sbcs r0, r1, #0 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: vmov q0[3], q0[1], r3, r1 +; CHECK-NEXT: cset r0, lt ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: vmov q1[2], q1[0], r0, r2 -; CHECK-NEXT: vmov q1[3], q1[1], r0, r2 +; CHECK-NEXT: subs.w r1, r2, r12 +; CHECK-NEXT: sbcs r1, r3, #0 +; CHECK-NEXT: mov.w r2, #-1 +; CHECK-NEXT: cset r1, lt +; CHECK-NEXT: cmp r1, #0 +; CHECK-NEXT: csetm r1, ne +; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 +; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 ; CHECK-NEXT: adr r0, .LCPI12_0 ; CHECK-NEXT: vldrw.u32 q2, [r0] ; CHECK-NEXT: vand q0, q0, q1 -; CHECK-NEXT: mov.w r2, #-1 ; CHECK-NEXT: vbic q1, q2, q1 ; CHECK-NEXT: vorr q0, q0, q1 ; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: rsbs.w r0, r0, #-2147483648 ; CHECK-NEXT: sbcs.w r0, r2, r1 ; CHECK-NEXT: vmov r1, r3, d0 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cset r0, lt ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: rsbs.w r1, r1, #-2147483648 ; CHECK-NEXT: sbcs.w r1, r2, r3 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w r12, #1 -; CHECK-NEXT: cmp.w r12, #0 +; CHECK-NEXT: cset r1, lt +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 @@ -234,7 +224,7 @@ ; CHECK-NEXT: vand q0, q0, q1 ; CHECK-NEXT: vbic q2, q2, q1 ; CHECK-NEXT: vorr q0, q0, q2 -; CHECK-NEXT: pop {r7, pc} +; CHECK-NEXT: bx lr ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI12_0: @@ -259,35 +249,30 @@ define arm_aapcs_vfpcc <2 x i64> @vqshrni64_sminmax(<2 x i64> %so) { ; CHECK-LABEL: vqshrni64_sminmax: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .save {r4, r5, r7, lr} -; CHECK-NEXT: push {r4, r5, r7, lr} -; CHECK-NEXT: vmov r2, r1, d1 +; CHECK-NEXT: .save {r4, lr} +; CHECK-NEXT: push 
{r4, lr} +; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: mov.w r12, #-1 -; CHECK-NEXT: asrl r2, r1, #3 -; CHECK-NEXT: mov.w lr, #0 -; CHECK-NEXT: rsbs.w r3, r2, #-2147483648 +; CHECK-NEXT: asrl r0, r1, #3 +; CHECK-NEXT: rsbs.w r3, r0, #-2147483648 ; CHECK-NEXT: sbcs.w r3, r12, r1 -; CHECK-NEXT: mov.w r3, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r3, #1 +; CHECK-NEXT: cset r3, lt ; CHECK-NEXT: cmp r3, #0 ; CHECK-NEXT: vmov r4, r3, d0 -; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: asrl r4, r3, #3 -; CHECK-NEXT: rsbs.w r5, r4, #-2147483648 -; CHECK-NEXT: vmov q2[2], q2[0], r4, r2 -; CHECK-NEXT: sbcs.w r5, r12, r3 +; CHECK-NEXT: csetm lr, ne +; CHECK-NEXT: rsbs.w r2, r4, #-2147483648 +; CHECK-NEXT: vmov q2[2], q2[0], r4, r0 +; CHECK-NEXT: sbcs.w r2, r12, r3 ; CHECK-NEXT: vmov q2[3], q2[1], r3, r1 -; CHECK-NEXT: mov.w r5, #0 +; CHECK-NEXT: cset r2, lt +; CHECK-NEXT: cmp r2, #0 +; CHECK-NEXT: csetm r2, ne +; CHECK-NEXT: vmov q0[2], q0[0], r2, lr +; CHECK-NEXT: vmov q0[3], q0[1], r2, lr +; CHECK-NEXT: adr r2, .LCPI13_0 +; CHECK-NEXT: vldrw.u32 q1, [r2] ; CHECK-NEXT: mvn r2, #-2147483648 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r5, #1 -; CHECK-NEXT: cmp r5, #0 -; CHECK-NEXT: csetm r5, ne -; CHECK-NEXT: vmov q0[2], q0[0], r5, r0 -; CHECK-NEXT: vmov q0[3], q0[1], r5, r0 -; CHECK-NEXT: adr r0, .LCPI13_0 -; CHECK-NEXT: vldrw.u32 q1, [r0] ; CHECK-NEXT: vbic q1, q1, q0 ; CHECK-NEXT: vand q0, q2, q0 ; CHECK-NEXT: vorr q0, q0, q1 @@ -295,16 +280,13 @@ ; CHECK-NEXT: subs r0, r0, r2 ; CHECK-NEXT: sbcs r0, r1, #0 ; CHECK-NEXT: vmov r1, r3, d0 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt r0, #1 +; CHECK-NEXT: cset r0, lt ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne ; CHECK-NEXT: subs r1, r1, r2 ; CHECK-NEXT: sbcs r1, r3, #0 -; CHECK-NEXT: it lt -; CHECK-NEXT: movlt.w lr, #1 -; CHECK-NEXT: cmp.w lr, #0 +; CHECK-NEXT: cset r1, lt +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vmov q1[3], q1[1], r1, 
r0 @@ -313,7 +295,7 @@ ; CHECK-NEXT: vand q0, q0, q1 ; CHECK-NEXT: vbic q2, q2, q1 ; CHECK-NEXT: vorr q0, q0, q2 -; CHECK-NEXT: pop {r4, r5, r7, pc} +; CHECK-NEXT: pop {r4, pc} ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.1: ; CHECK-NEXT: .LCPI13_0: @@ -338,26 +320,22 @@ define arm_aapcs_vfpcc <2 x i64> @vqshrni64_umaxmin(<2 x i64> %so) { ; CHECK-LABEL: vqshrni64_umaxmin: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmov r0, r3, d1 -; CHECK-NEXT: mov.w r12, #0 -; CHECK-NEXT: vmov r2, r1, d0 -; CHECK-NEXT: lsrl r0, r3, #3 -; CHECK-NEXT: lsrl r2, r1, #3 +; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: vmov.i64 q2, #0xffffffff +; CHECK-NEXT: vmov r2, r3, d0 +; CHECK-NEXT: lsrl r0, r1, #3 +; CHECK-NEXT: lsrl r2, r3, #3 ; CHECK-NEXT: vmov q0[2], q0[0], r2, r0 ; CHECK-NEXT: subs.w r0, r0, #-1 -; CHECK-NEXT: sbcs r0, r3, #0 -; CHECK-NEXT: vmov q0[3], q0[1], r1, r3 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r0, #1 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: vmov q0[3], q0[1], r3, r1 +; CHECK-NEXT: cset r0, lo ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: subs.w r2, r2, #-1 -; CHECK-NEXT: sbcs r1, r1, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo.w r12, #1 -; CHECK-NEXT: cmp.w r12, #0 +; CHECK-NEXT: subs.w r1, r2, #-1 +; CHECK-NEXT: sbcs r1, r3, #0 +; CHECK-NEXT: cset r1, lo +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 @@ -375,26 +353,22 @@ define arm_aapcs_vfpcc <2 x i64> @vqshrni64_uminmax(<2 x i64> %so) { ; CHECK-LABEL: vqshrni64_uminmax: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmov r0, r3, d1 -; CHECK-NEXT: mov.w r12, #0 -; CHECK-NEXT: vmov r2, r1, d0 -; CHECK-NEXT: lsrl r0, r3, #3 -; CHECK-NEXT: lsrl r2, r1, #3 +; CHECK-NEXT: vmov r0, r1, d1 ; CHECK-NEXT: vmov.i64 q2, #0xffffffff +; CHECK-NEXT: vmov r2, r3, d0 +; CHECK-NEXT: lsrl r0, r1, #3 +; CHECK-NEXT: lsrl r2, r3, #3 ; CHECK-NEXT: vmov q0[2], q0[0], r2, r0 ; CHECK-NEXT: subs.w r0, r0, 
#-1 -; CHECK-NEXT: sbcs r0, r3, #0 -; CHECK-NEXT: vmov q0[3], q0[1], r1, r3 -; CHECK-NEXT: mov.w r0, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo r0, #1 +; CHECK-NEXT: sbcs r0, r1, #0 +; CHECK-NEXT: vmov q0[3], q0[1], r3, r1 +; CHECK-NEXT: cset r0, lo ; CHECK-NEXT: cmp r0, #0 ; CHECK-NEXT: csetm r0, ne -; CHECK-NEXT: subs.w r2, r2, #-1 -; CHECK-NEXT: sbcs r1, r1, #0 -; CHECK-NEXT: it lo -; CHECK-NEXT: movlo.w r12, #1 -; CHECK-NEXT: cmp.w r12, #0 +; CHECK-NEXT: subs.w r1, r2, #-1 +; CHECK-NEXT: sbcs r1, r3, #0 +; CHECK-NEXT: cset r1, lo +; CHECK-NEXT: cmp r1, #0 ; CHECK-NEXT: csetm r1, ne ; CHECK-NEXT: vmov q1[2], q1[0], r1, r0 ; CHECK-NEXT: vmov q1[3], q1[1], r1, r0 diff --git a/llvm/test/DebugInfo/Generic/empty.ll b/llvm/test/DebugInfo/Generic/empty.ll --- a/llvm/test/DebugInfo/Generic/empty.ll +++ b/llvm/test/DebugInfo/Generic/empty.ll @@ -1,3 +1,4 @@ +; UNSUPPORTED: -aix ; RUN: %llc_dwarf < %s -filetype=obj | llvm-dwarfdump -v - | FileCheck %s ; RUN: %llc_dwarf -split-dwarf-file=foo.dwo < %s -filetype=obj | llvm-dwarfdump -v - | FileCheck --check-prefix=FISSION %s diff --git a/llvm/test/Feature/OperandBundles/adce.ll b/llvm/test/Feature/OperandBundles/adce.ll --- a/llvm/test/Feature/OperandBundles/adce.ll +++ b/llvm/test/Feature/OperandBundles/adce.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -adce < %s | FileCheck %s +; RUN: opt -S -passes=adce < %s | FileCheck %s ; While it is normally okay to DCE out calls to @readonly_function and ; @readnone_function, we cannot do that if they're carrying operand diff --git a/llvm/test/Feature/OperandBundles/basic-aa-argmemonly.ll b/llvm/test/Feature/OperandBundles/basic-aa-argmemonly.ll --- a/llvm/test/Feature/OperandBundles/basic-aa-argmemonly.ll +++ b/llvm/test/Feature/OperandBundles/basic-aa-argmemonly.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -basic-aa -gvn < %s | FileCheck %s +; RUN: opt -S -aa-pipeline=basic-aa -passes=gvn < %s | FileCheck %s declare void @argmemonly_function(i32 *) argmemonly diff --git 
a/llvm/test/Feature/OperandBundles/dse.ll b/llvm/test/Feature/OperandBundles/dse.ll --- a/llvm/test/Feature/OperandBundles/dse.ll +++ b/llvm/test/Feature/OperandBundles/dse.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -dse < %s | FileCheck %s +; RUN: opt -S -passes=dse < %s | FileCheck %s declare void @f() declare noalias i8* @malloc(i32) nounwind diff --git a/llvm/test/Feature/OperandBundles/early-cse.ll b/llvm/test/Feature/OperandBundles/early-cse.ll --- a/llvm/test/Feature/OperandBundles/early-cse.ll +++ b/llvm/test/Feature/OperandBundles/early-cse.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -early-cse -earlycse-debug-hash < %s | FileCheck %s +; RUN: opt -S -passes=early-cse -earlycse-debug-hash < %s | FileCheck %s ; While it is normally okay to do memory optimizations over calls to ; @readonly_function and @readnone_function, we cannot do that if diff --git a/llvm/test/Other/debugcounter-predicateinfo.ll b/llvm/test/Other/debugcounter-predicateinfo.ll --- a/llvm/test/Other/debugcounter-predicateinfo.ll +++ b/llvm/test/Other/debugcounter-predicateinfo.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; REQUIRES: asserts -; RUN: opt -debug-counter=predicateinfo-rename-skip=1,predicateinfo-rename-count=1 -print-predicateinfo < %s 2>&1 | FileCheck %s +; RUN: opt -debug-counter=predicateinfo-rename-skip=1,predicateinfo-rename-count=1 -passes=print-predicateinfo < %s 2>&1 | FileCheck %s ;; Test that, with debug counters on, we don't rename the first info, only the second define fastcc void @barney() { ; CHECK-LABEL: @barney( diff --git a/llvm/test/Transforms/ConstantMerge/2002-09-23-CPR-Update.ll b/llvm/test/Transforms/ConstantMerge/2002-09-23-CPR-Update.ll --- a/llvm/test/Transforms/ConstantMerge/2002-09-23-CPR-Update.ll +++ b/llvm/test/Transforms/ConstantMerge/2002-09-23-CPR-Update.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -constmerge > /dev/null +; RUN: opt < %s -passes=constmerge > /dev/null @foo.upgrd.1 = internal constant { i32 } { i32 7 } ; <{ 
i32 }*> [#uses=1] @bar = internal constant { i32 } { i32 7 } ; <{ i32 }*> [#uses=1] diff --git a/llvm/test/Transforms/ConstantMerge/2003-10-28-MergeExternalConstants.ll b/llvm/test/Transforms/ConstantMerge/2003-10-28-MergeExternalConstants.ll --- a/llvm/test/Transforms/ConstantMerge/2003-10-28-MergeExternalConstants.ll +++ b/llvm/test/Transforms/ConstantMerge/2003-10-28-MergeExternalConstants.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -constmerge < %s | FileCheck %s +; RUN: opt -S -passes=constmerge < %s | FileCheck %s ; CHECK: @foo = constant i32 6 ; CHECK: @bar = constant i32 6 diff --git a/llvm/test/Transforms/ConstantMerge/2011-01-15-EitherOrder.ll b/llvm/test/Transforms/ConstantMerge/2011-01-15-EitherOrder.ll --- a/llvm/test/Transforms/ConstantMerge/2011-01-15-EitherOrder.ll +++ b/llvm/test/Transforms/ConstantMerge/2011-01-15-EitherOrder.ll @@ -1,4 +1,4 @@ -; RUN: opt -constmerge -S < %s | FileCheck %s +; RUN: opt -passes=constmerge -S < %s | FileCheck %s ; PR8978 declare i32 @zed(%struct.foobar*, %struct.foobar*) diff --git a/llvm/test/Transforms/ConstantMerge/align.ll b/llvm/test/Transforms/ConstantMerge/align.ll --- a/llvm/test/Transforms/ConstantMerge/align.ll +++ b/llvm/test/Transforms/ConstantMerge/align.ll @@ -1,4 +1,4 @@ -; RUN: opt -constmerge -S < %s | FileCheck %s +; RUN: opt -passes=constmerge -S < %s | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" diff --git a/llvm/test/Transforms/ConstantMerge/dont-merge.ll b/llvm/test/Transforms/ConstantMerge/dont-merge.ll --- a/llvm/test/Transforms/ConstantMerge/dont-merge.ll +++ b/llvm/test/Transforms/ConstantMerge/dont-merge.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -constmerge -S | FileCheck %s +; RUN: opt < %s -passes=constmerge -S | FileCheck %s ; Don't merge constants with specified sections. 
diff --git a/llvm/test/Transforms/ConstantMerge/merge-dbg.ll b/llvm/test/Transforms/ConstantMerge/merge-dbg.ll --- a/llvm/test/Transforms/ConstantMerge/merge-dbg.ll +++ b/llvm/test/Transforms/ConstantMerge/merge-dbg.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -constmerge -S | FileCheck %s +; RUN: opt < %s -passes=constmerge -S | FileCheck %s ; CHECK: = constant i32 1, !dbg [[A:![0-9]+]], !dbg [[B:![0-9]+]] @a = internal constant i32 1, !dbg !0 diff --git a/llvm/test/Transforms/ConstantMerge/unnamed-addr.ll b/llvm/test/Transforms/ConstantMerge/unnamed-addr.ll --- a/llvm/test/Transforms/ConstantMerge/unnamed-addr.ll +++ b/llvm/test/Transforms/ConstantMerge/unnamed-addr.ll @@ -1,4 +1,4 @@ -; RUN: opt -constmerge -S < %s | FileCheck %s +; RUN: opt -passes=constmerge -S < %s | FileCheck %s ; Test which corresponding x and y are merged and that unnamed_addr ; is correctly set. diff --git a/llvm/test/Transforms/ConstraintElimination/add-nuw.ll b/llvm/test/Transforms/ConstraintElimination/add-nuw.ll --- a/llvm/test/Transforms/ConstraintElimination/add-nuw.ll +++ b/llvm/test/Transforms/ConstraintElimination/add-nuw.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s define void @test.not.uge.ult(i8 %start, i8 %low, i8 %high) { ; CHECK-LABEL: @test.not.uge.ult( diff --git a/llvm/test/Transforms/ConstraintElimination/add.ll b/llvm/test/Transforms/ConstraintElimination/add.ll --- a/llvm/test/Transforms/ConstraintElimination/add.ll +++ b/llvm/test/Transforms/ConstraintElimination/add.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s define void @test.not.uge.ult(i8 %start, i8 %low, i8 %high) { ; CHECK-LABEL: @test.not.uge.ult( diff --git 
a/llvm/test/Transforms/ConstraintElimination/and.ll b/llvm/test/Transforms/ConstraintElimination/and.ll --- a/llvm/test/Transforms/ConstraintElimination/and.ll +++ b/llvm/test/Transforms/ConstraintElimination/and.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/ConstraintElimination/assumes.ll b/llvm/test/Transforms/ConstraintElimination/assumes.ll --- a/llvm/test/Transforms/ConstraintElimination/assumes.ll +++ b/llvm/test/Transforms/ConstraintElimination/assumes.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s declare void @llvm.assume(i1) diff --git a/llvm/test/Transforms/ConstraintElimination/decompose-with-temporary-indices.ll b/llvm/test/Transforms/ConstraintElimination/decompose-with-temporary-indices.ll --- a/llvm/test/Transforms/ConstraintElimination/decompose-with-temporary-indices.ll +++ b/llvm/test/Transforms/ConstraintElimination/decompose-with-temporary-indices.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/ConstraintElimination/dom.ll b/llvm/test/Transforms/ConstraintElimination/dom.ll --- a/llvm/test/Transforms/ConstraintElimination/dom.ll +++ b/llvm/test/Transforms/ConstraintElimination/dom.ll @@ -1,5 +1,4 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s ; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s ; Test cases where both the true and false successors 
reach the same block, diff --git a/llvm/test/Transforms/ConstraintElimination/empty-constraint.ll b/llvm/test/Transforms/ConstraintElimination/empty-constraint.ll --- a/llvm/test/Transforms/ConstraintElimination/empty-constraint.ll +++ b/llvm/test/Transforms/ConstraintElimination/empty-constraint.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s ; Make sure constraints where all variable coefficients are 0 are handled ; properly. diff --git a/llvm/test/Transforms/ConstraintElimination/eq.ll b/llvm/test/Transforms/ConstraintElimination/eq.ll --- a/llvm/test/Transforms/ConstraintElimination/eq.ll +++ b/llvm/test/Transforms/ConstraintElimination/eq.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s define i1 @test_eq_1(i8 %a, i8 %b) { ; CHECK-LABEL: @test_eq_1( diff --git a/llvm/test/Transforms/ConstraintElimination/gep-arithmetic.ll b/llvm/test/Transforms/ConstraintElimination/gep-arithmetic.ll --- a/llvm/test/Transforms/ConstraintElimination/gep-arithmetic.ll +++ b/llvm/test/Transforms/ConstraintElimination/gep-arithmetic.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s declare void @llvm.assume(i1) diff --git a/llvm/test/Transforms/ConstraintElimination/geps-inbounds-precondition.ll b/llvm/test/Transforms/ConstraintElimination/geps-inbounds-precondition.ll --- a/llvm/test/Transforms/ConstraintElimination/geps-inbounds-precondition.ll +++ b/llvm/test/Transforms/ConstraintElimination/geps-inbounds-precondition.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by 
utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s ; Tests for using inbounds information from GEPs. diff --git a/llvm/test/Transforms/ConstraintElimination/geps-pointers-to-arrays.ll b/llvm/test/Transforms/ConstraintElimination/geps-pointers-to-arrays.ll --- a/llvm/test/Transforms/ConstraintElimination/geps-pointers-to-arrays.ll +++ b/llvm/test/Transforms/ConstraintElimination/geps-pointers-to-arrays.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s define void @pointer.to.array.test.ult.true.due.to.first.dimension([10 x i8]* %start, i8* %high) { ; CHECK-LABEL: @pointer.to.array.test.ult.true.due.to.first.dimension( diff --git a/llvm/test/Transforms/ConstraintElimination/geps-pointers-to-structs.ll b/llvm/test/Transforms/ConstraintElimination/geps-pointers-to-structs.ll --- a/llvm/test/Transforms/ConstraintElimination/geps-pointers-to-structs.ll +++ b/llvm/test/Transforms/ConstraintElimination/geps-pointers-to-structs.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s %struct.1 = type { i32, i64, i8 } diff --git a/llvm/test/Transforms/ConstraintElimination/geps-precondition-overflow-check.ll b/llvm/test/Transforms/ConstraintElimination/geps-precondition-overflow-check.ll --- a/llvm/test/Transforms/ConstraintElimination/geps-precondition-overflow-check.ll +++ b/llvm/test/Transforms/ConstraintElimination/geps-precondition-overflow-check.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s ; 
Tests for cases with explicit checks that %ptr + x >= %ptr. The information can ; be used to determine that certain GEPs do not overflow. diff --git a/llvm/test/Transforms/ConstraintElimination/geps.ll b/llvm/test/Transforms/ConstraintElimination/geps.ll --- a/llvm/test/Transforms/ConstraintElimination/geps.ll +++ b/llvm/test/Transforms/ConstraintElimination/geps.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s define i32 @test.ult(i32* readonly %src, i32* readnone %min, i32* readnone %max) { ; CHECK-LABEL: @test.ult( diff --git a/llvm/test/Transforms/ConstraintElimination/i128.ll b/llvm/test/Transforms/ConstraintElimination/i128.ll --- a/llvm/test/Transforms/ConstraintElimination/i128.ll +++ b/llvm/test/Transforms/ConstraintElimination/i128.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/ConstraintElimination/large-system-growth.ll b/llvm/test/Transforms/ConstraintElimination/large-system-growth.ll --- a/llvm/test/Transforms/ConstraintElimination/large-system-growth.ll +++ b/llvm/test/Transforms/ConstraintElimination/large-system-growth.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s ; The system for the function below grows quite large. Check to make sure ; we can handle that scenario. 
diff --git a/llvm/test/Transforms/ConstraintElimination/loops-bottom-tested-base.ll b/llvm/test/Transforms/ConstraintElimination/loops-bottom-tested-base.ll --- a/llvm/test/Transforms/ConstraintElimination/loops-bottom-tested-base.ll +++ b/llvm/test/Transforms/ConstraintElimination/loops-bottom-tested-base.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/ConstraintElimination/loops-bottom-tested-pointer-cmps.ll b/llvm/test/Transforms/ConstraintElimination/loops-bottom-tested-pointer-cmps.ll --- a/llvm/test/Transforms/ConstraintElimination/loops-bottom-tested-pointer-cmps.ll +++ b/llvm/test/Transforms/ConstraintElimination/loops-bottom-tested-pointer-cmps.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s declare void @use(i1) define void @checks_in_loops_removable(i8* %ptr, i8* %lower, i8* %upper, i8 %n) { diff --git a/llvm/test/Transforms/ConstraintElimination/loops-header-tested-base.ll b/llvm/test/Transforms/ConstraintElimination/loops-header-tested-base.ll --- a/llvm/test/Transforms/ConstraintElimination/loops-header-tested-base.ll +++ b/llvm/test/Transforms/ConstraintElimination/loops-header-tested-base.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/ConstraintElimination/loops-header-tested-pointer-cmps.ll b/llvm/test/Transforms/ConstraintElimination/loops-header-tested-pointer-cmps.ll --- 
a/llvm/test/Transforms/ConstraintElimination/loops-header-tested-pointer-cmps.ll +++ b/llvm/test/Transforms/ConstraintElimination/loops-header-tested-pointer-cmps.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/ConstraintElimination/loops-header-tested-pointer-iv.ll b/llvm/test/Transforms/ConstraintElimination/loops-header-tested-pointer-iv.ll --- a/llvm/test/Transforms/ConstraintElimination/loops-header-tested-pointer-iv.ll +++ b/llvm/test/Transforms/ConstraintElimination/loops-header-tested-pointer-iv.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/ConstraintElimination/loops.ll b/llvm/test/Transforms/ConstraintElimination/loops.ll --- a/llvm/test/Transforms/ConstraintElimination/loops.ll +++ b/llvm/test/Transforms/ConstraintElimination/loops.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/ConstraintElimination/mixed-signed-unsigned-predicates.ll b/llvm/test/Transforms/ConstraintElimination/mixed-signed-unsigned-predicates.ll --- a/llvm/test/Transforms/ConstraintElimination/mixed-signed-unsigned-predicates.ll +++ b/llvm/test/Transforms/ConstraintElimination/mixed-signed-unsigned-predicates.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s define i1 
@test_add_nuw(i8 %start, i8 %low, i8 %high) { ; CHECK-LABEL: @test_add_nuw( diff --git a/llvm/test/Transforms/ConstraintElimination/mixed.ll b/llvm/test/Transforms/ConstraintElimination/mixed.ll --- a/llvm/test/Transforms/ConstraintElimination/mixed.ll +++ b/llvm/test/Transforms/ConstraintElimination/mixed.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s ; Make sure we do not incorrectly add variables to the system. diff --git a/llvm/test/Transforms/ConstraintElimination/ne.ll b/llvm/test/Transforms/ConstraintElimination/ne.ll --- a/llvm/test/Transforms/ConstraintElimination/ne.ll +++ b/llvm/test/Transforms/ConstraintElimination/ne.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s define i1 @test_eq_ne_0(i8 %a, i8 %b) { ; CHECK-LABEL: @test_eq_ne_0( diff --git a/llvm/test/Transforms/ConstraintElimination/or.ll b/llvm/test/Transforms/ConstraintElimination/or.ll --- a/llvm/test/Transforms/ConstraintElimination/or.ll +++ b/llvm/test/Transforms/ConstraintElimination/or.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/ConstraintElimination/pointercast.ll b/llvm/test/Transforms/ConstraintElimination/pointercast.ll --- a/llvm/test/Transforms/ConstraintElimination/pointercast.ll +++ b/llvm/test/Transforms/ConstraintElimination/pointercast.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt 
-passes=constraint-elimination -S %s | FileCheck %s define i1 @bitcast_and_cmp(i32* readonly %src, i32* readnone %min, i32* readnone %max) { ; CHECK-LABEL: @bitcast_and_cmp( diff --git a/llvm/test/Transforms/ConstraintElimination/sge.ll b/llvm/test/Transforms/ConstraintElimination/sge.ll --- a/llvm/test/Transforms/ConstraintElimination/sge.ll +++ b/llvm/test/Transforms/ConstraintElimination/sge.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/ConstraintElimination/sub-nuw.ll b/llvm/test/Transforms/ConstraintElimination/sub-nuw.ll --- a/llvm/test/Transforms/ConstraintElimination/sub-nuw.ll +++ b/llvm/test/Transforms/ConstraintElimination/sub-nuw.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s define void @test.not.uge.ult(i8 %start, i8 %low, i8 %high) { ; CHECK-LABEL: @test.not.uge.ult( diff --git a/llvm/test/Transforms/ConstraintElimination/sub.ll b/llvm/test/Transforms/ConstraintElimination/sub.ll --- a/llvm/test/Transforms/ConstraintElimination/sub.ll +++ b/llvm/test/Transforms/ConstraintElimination/sub.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s define void @test.not.uge.ult(i8 %start, i8 %low, i8 %high) { ; CHECK-LABEL: @test.not.uge.ult( diff --git a/llvm/test/Transforms/ConstraintElimination/uge.ll b/llvm/test/Transforms/ConstraintElimination/uge.ll --- a/llvm/test/Transforms/ConstraintElimination/uge.ll +++ b/llvm/test/Transforms/ConstraintElimination/uge.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been 
autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/ConstraintElimination/ugt-ule.ll b/llvm/test/Transforms/ConstraintElimination/ugt-ule.ll --- a/llvm/test/Transforms/ConstraintElimination/ugt-ule.ll +++ b/llvm/test/Transforms/ConstraintElimination/ugt-ule.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/ConstraintElimination/ule.ll b/llvm/test/Transforms/ConstraintElimination/ule.ll --- a/llvm/test/Transforms/ConstraintElimination/ule.ll +++ b/llvm/test/Transforms/ConstraintElimination/ule.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/ConstraintElimination/vector-compares.ll b/llvm/test/Transforms/ConstraintElimination/vector-compares.ll --- a/llvm/test/Transforms/ConstraintElimination/vector-compares.ll +++ b/llvm/test/Transforms/ConstraintElimination/vector-compares.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/ConstraintElimination/wrapping-math.ll b/llvm/test/Transforms/ConstraintElimination/wrapping-math.ll --- a/llvm/test/Transforms/ConstraintElimination/wrapping-math.ll +++ b/llvm/test/Transforms/ConstraintElimination/wrapping-math.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: 
opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s define i1 @wrapping_add_unknown_1(i8 %a) { ; CHECK-LABEL: @wrapping_add_unknown_1( diff --git a/llvm/test/Transforms/ConstraintElimination/zext.ll b/llvm/test/Transforms/ConstraintElimination/zext.ll --- a/llvm/test/Transforms/ConstraintElimination/zext.ll +++ b/llvm/test/Transforms/ConstraintElimination/zext.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -constraint-elimination -S %s | FileCheck %s +; RUN: opt -passes=constraint-elimination -S %s | FileCheck %s define i1 @uge_zext(i8 %x, i16 %y) { ; CHECK-LABEL: @uge_zext( diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/2010-09-02-Trunc.ll b/llvm/test/Transforms/CorrelatedValuePropagation/2010-09-02-Trunc.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/2010-09-02-Trunc.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/2010-09-02-Trunc.ll @@ -1,4 +1,4 @@ -; RUN: opt -S < %s -correlated-propagation | FileCheck %s +; RUN: opt -S < %s -passes=correlated-propagation | FileCheck %s ; CHECK-LABEL: @test( define i16 @test(i32 %a, i1 %b) { diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/abs.ll b/llvm/test/Transforms/CorrelatedValuePropagation/abs.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/abs.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/abs.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -correlated-propagation -S | FileCheck %s +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s declare void @llvm.assume(i1) declare i8 @llvm.abs(i8, i1) diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/add.ll b/llvm/test/Transforms/CorrelatedValuePropagation/add.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/add.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/add.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s 
-correlated-propagation -S | FileCheck %s +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s ; CHECK-LABEL: @test0( define void @test0(i32 %a) { diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/alloca.ll b/llvm/test/Transforms/CorrelatedValuePropagation/alloca.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/alloca.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/alloca.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -correlated-propagation -debug-only=lazy-value-info <%s 2>&1 | FileCheck %s +; RUN: opt -S -passes=correlated-propagation -debug-only=lazy-value-info <%s 2>&1 | FileCheck %s ; REQUIRES: asserts ; ; Shortcut in Correlated Value Propagation ensures not to take Lazy Value Info diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/and.ll b/llvm/test/Transforms/CorrelatedValuePropagation/and.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/and.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/and.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -correlated-propagation -S | FileCheck %s +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s define i32 @test(i32 %a) { ; CHECK-LABEL: @test( diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/ashr.ll b/llvm/test/Transforms/CorrelatedValuePropagation/ashr.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/ashr.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/ashr.ll @@ -1,8 +1,8 @@ -; RUN: opt < %s -correlated-propagation -S | FileCheck %s +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s ; Check that debug locations are preserved. 
For more info see: ; https://llvm.org/docs/SourceLevelDebugging.html#fixing-errors -; RUN: opt < %s -enable-debugify -correlated-propagation -S 2>&1 | \ +; RUN: opt < %s -enable-debugify -passes=correlated-propagation -S 2>&1 | \ ; RUN: FileCheck %s -check-prefix=DEBUG ; DEBUG: CheckModuleDebugify: PASS diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll b/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/basic.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -correlated-propagation -S | FileCheck %s +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s ; PR2581 define i32 @test1(i1 %C) { diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/conflict.ll b/llvm/test/Transforms/CorrelatedValuePropagation/conflict.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/conflict.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/conflict.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -correlated-propagation -S < %s | FileCheck %s +; RUN: opt -passes=correlated-propagation -S < %s | FileCheck %s ; Checks that we don't crash on conflicting facts about a value ; (i.e. 
unreachable code) diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/crash.ll b/llvm/test/Transforms/CorrelatedValuePropagation/crash.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/crash.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/crash.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -correlated-propagation +; RUN: opt < %s -passes=correlated-propagation ; PR8161 define void @test1() nounwind ssp { diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/deopt.ll b/llvm/test/Transforms/CorrelatedValuePropagation/deopt.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/deopt.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/deopt.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -correlated-propagation -S < %s | FileCheck %s +; RUN: opt -passes=correlated-propagation -S < %s | FileCheck %s declare void @use() declare void @use_ptr(i8*) diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/guards.ll b/llvm/test/Transforms/CorrelatedValuePropagation/guards.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/guards.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/guards.ll @@ -1,4 +1,4 @@ -; RUN: opt -correlated-propagation -S < %s | FileCheck %s +; RUN: opt -passes=correlated-propagation -S < %s | FileCheck %s declare void @llvm.experimental.guard(i1,...) 
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/icmp.ll b/llvm/test/Transforms/CorrelatedValuePropagation/icmp.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/icmp.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/icmp.ll @@ -1,5 +1,4 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -correlated-propagation -S %s | FileCheck %s ; RUN: opt -passes=correlated-propagation -S %s | FileCheck %s target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/merge-range-and-undef.ll b/llvm/test/Transforms/CorrelatedValuePropagation/merge-range-and-undef.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/merge-range-and-undef.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/merge-range-and-undef.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -correlated-propagation %s | FileCheck %s +; RUN: opt -S -passes=correlated-propagation %s | FileCheck %s ; Test case for PR44949. 
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/min-max.ll b/llvm/test/Transforms/CorrelatedValuePropagation/min-max.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/min-max.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/min-max.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -correlated-propagation -S | FileCheck %s +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s declare void @llvm.assume(i1) declare i8 @llvm.umin(i8, i8) diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/minmaxabs.ll b/llvm/test/Transforms/CorrelatedValuePropagation/minmaxabs.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/minmaxabs.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/minmaxabs.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -correlated-propagation < %s | FileCheck %s +; RUN: opt -S -passes=correlated-propagation < %s | FileCheck %s declare i32 @llvm.umin.i32(i32, i32) declare i32 @llvm.umax.i32(i32, i32) diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/mul.ll b/llvm/test/Transforms/CorrelatedValuePropagation/mul.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/mul.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/mul.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -correlated-propagation -S | FileCheck %s +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s define i8 @test0(i8 %a) { ; CHECK-LABEL: @test0( diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/non-null.ll b/llvm/test/Transforms/CorrelatedValuePropagation/non-null.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/non-null.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/non-null.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s 
-correlated-propagation -S | FileCheck %s +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s define void @test1(i8* %ptr) { ; CHECK-LABEL: @test1( diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/overflow_predicate.ll b/llvm/test/Transforms/CorrelatedValuePropagation/overflow_predicate.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/overflow_predicate.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/overflow_predicate.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -correlated-propagation < %s | FileCheck %s +; RUN: opt -S -passes=correlated-propagation < %s | FileCheck %s declare void @llvm.trap() declare {i8, i1} @llvm.uadd.with.overflow(i8, i8) diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/overflows.ll b/llvm/test/Transforms/CorrelatedValuePropagation/overflows.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/overflows.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/overflows.ll @@ -1,9 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -correlated-propagation < %s | FileCheck %s +; RUN: opt -S -passes=correlated-propagation < %s | FileCheck %s ; Check that debug locations are preserved. 
For more info see: ; https://llvm.org/docs/SourceLevelDebugging.html#fixing-errors -; RUN: opt < %s -enable-debugify -correlated-propagation -S 2>&1 | \ +; RUN: opt < %s -enable-debugify -passes=correlated-propagation -S 2>&1 | \ ; RUN: FileCheck %s -check-prefix=DEBUG ; DEBUG: CheckModuleDebugify: PASS diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/phi-common-val.ll b/llvm/test/Transforms/CorrelatedValuePropagation/phi-common-val.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/phi-common-val.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/phi-common-val.ll @@ -1,6 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -correlated-propagation -S | FileCheck %s -; RUN: opt < %s -passes="correlated-propagation" -S | FileCheck %s +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s define i8* @simplify_phi_common_value_op0(i8* %ptr, i32* %b) { ; CHECK-LABEL: @simplify_phi_common_value_op0( diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/pointer.ll b/llvm/test/Transforms/CorrelatedValuePropagation/pointer.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/pointer.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/pointer.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -correlated-propagation -S -o - %s | FileCheck %s +; RUN: opt -passes=correlated-propagation -S -o - %s | FileCheck %s ; Testcase that checks that we don't end in a neverending recursion resulting in ; a segmentation fault. The checks below verify that nothing is changed. 
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/pr35807.ll b/llvm/test/Transforms/CorrelatedValuePropagation/pr35807.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/pr35807.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/pr35807.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -correlated-propagation -S %s | FileCheck %s +; RUN: opt -passes=correlated-propagation -S %s | FileCheck %s target triple = "x86_64-apple-darwin17.4.0" diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/profmd.ll b/llvm/test/Transforms/CorrelatedValuePropagation/profmd.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/profmd.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/profmd.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -correlated-propagation -S | FileCheck %s +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s ; Removed several cases from switch. define i32 @switch1(i32 %s) { diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/range.ll b/llvm/test/Transforms/CorrelatedValuePropagation/range.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/range.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/range.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -correlated-propagation -S < %s | FileCheck %s +; RUN: opt -passes=correlated-propagation -S < %s | FileCheck %s declare i32 @foo() diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/sdiv.ll b/llvm/test/Transforms/CorrelatedValuePropagation/sdiv.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/sdiv.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/sdiv.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -correlated-propagation -S | FileCheck %s +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s target datalayout = 
"e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/select.ll b/llvm/test/Transforms/CorrelatedValuePropagation/select.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/select.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/select.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -correlated-propagation -S | FileCheck %s +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s define i8 @simple(i1) { ; CHECK-LABEL: @simple( diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/sext.ll b/llvm/test/Transforms/CorrelatedValuePropagation/sext.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/sext.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/sext.ll @@ -1,9 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -correlated-propagation -S | FileCheck %s +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s ; Check that debug locations are preserved. 
For more info see: ; https://llvm.org/docs/SourceLevelDebugging.html#fixing-errors -; RUN: opt < %s -enable-debugify -correlated-propagation -S 2>&1 | \ +; RUN: opt < %s -enable-debugify -passes=correlated-propagation -S 2>&1 | \ ; RUN: FileCheck %s -check-prefix=DEBUG ; DEBUG: CheckModuleDebugify: PASS diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/shl.ll b/llvm/test/Transforms/CorrelatedValuePropagation/shl.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/shl.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/shl.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -correlated-propagation -S | FileCheck %s +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s define i8 @test0(i8 %a, i8 %b) { ; CHECK-LABEL: @test0( diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/srem.ll b/llvm/test/Transforms/CorrelatedValuePropagation/srem.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/srem.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/srem.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -correlated-propagation -S | FileCheck %s +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64" target triple = "thumbv7m-arm-none-eabi" diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/sub.ll b/llvm/test/Transforms/CorrelatedValuePropagation/sub.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/sub.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/sub.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -correlated-propagation -S | FileCheck %s +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s define void @test0(i32 %a) { ; CHECK-LABEL: @test0( diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/udiv.ll 
b/llvm/test/Transforms/CorrelatedValuePropagation/udiv.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/udiv.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/udiv.ll @@ -1,8 +1,8 @@ -; RUN: opt < %s -correlated-propagation -S | FileCheck %s +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s ; Check that debug locations are preserved. For more info see: ; https://llvm.org/docs/SourceLevelDebugging.html#fixing-errors -; RUN: opt < %s -enable-debugify -correlated-propagation -S 2>&1 | \ +; RUN: opt < %s -enable-debugify -passes=correlated-propagation -S 2>&1 | \ ; RUN: FileCheck %s -check-prefix=DEBUG ; DEBUG: CheckModuleDebugify: PASS diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/urem.ll b/llvm/test/Transforms/CorrelatedValuePropagation/urem.ll --- a/llvm/test/Transforms/CorrelatedValuePropagation/urem.ll +++ b/llvm/test/Transforms/CorrelatedValuePropagation/urem.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -correlated-propagation -S | FileCheck %s +; RUN: opt < %s -passes=correlated-propagation -S | FileCheck %s define void @test_nop(i32 %n) { ; CHECK-LABEL: @test_nop( diff --git a/llvm/test/Transforms/GlobalDCE/2002-07-17-CastRef.ll b/llvm/test/Transforms/GlobalDCE/2002-07-17-CastRef.ll --- a/llvm/test/Transforms/GlobalDCE/2002-07-17-CastRef.ll +++ b/llvm/test/Transforms/GlobalDCE/2002-07-17-CastRef.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globaldce +; RUN: opt < %s -passes=globaldce ; define internal void @func() { ret void diff --git a/llvm/test/Transforms/GlobalDCE/2002-07-17-ConstantRef.ll b/llvm/test/Transforms/GlobalDCE/2002-07-17-ConstantRef.ll --- a/llvm/test/Transforms/GlobalDCE/2002-07-17-ConstantRef.ll +++ b/llvm/test/Transforms/GlobalDCE/2002-07-17-ConstantRef.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globaldce +; RUN: opt < %s -passes=globaldce ; @X = global void ()* @func ; [#uses=0] diff --git 
a/llvm/test/Transforms/GlobalDCE/2002-08-17-FunctionDGE.ll b/llvm/test/Transforms/GlobalDCE/2002-08-17-FunctionDGE.ll --- a/llvm/test/Transforms/GlobalDCE/2002-08-17-FunctionDGE.ll +++ b/llvm/test/Transforms/GlobalDCE/2002-08-17-FunctionDGE.ll @@ -1,7 +1,7 @@ ; Make sure that functions are removed successfully if they are referred to by ; a global that is dead. Make sure any globals they refer to die as well. -; RUN: opt < %s -globaldce -S | FileCheck %s +; RUN: opt < %s -passes=globaldce -S | FileCheck %s ; CHECK-NOT: foo ;; Unused, kills %foo diff --git a/llvm/test/Transforms/GlobalDCE/2002-08-17-WorkListTest.ll b/llvm/test/Transforms/GlobalDCE/2002-08-17-WorkListTest.ll --- a/llvm/test/Transforms/GlobalDCE/2002-08-17-WorkListTest.ll +++ b/llvm/test/Transforms/GlobalDCE/2002-08-17-WorkListTest.ll @@ -1,7 +1,7 @@ ; This testcase tests that a worklist is being used, and that globals can be ; removed if they are the subject of a constexpr and ConstantPointerRef -; RUN: opt < %s -globaldce -S | FileCheck %s +; RUN: opt < %s -passes=globaldce -S | FileCheck %s ; CHECK-NOT: global diff --git a/llvm/test/Transforms/GlobalDCE/2002-09-12-Redeletion.ll b/llvm/test/Transforms/GlobalDCE/2002-09-12-Redeletion.ll --- a/llvm/test/Transforms/GlobalDCE/2002-09-12-Redeletion.ll +++ b/llvm/test/Transforms/GlobalDCE/2002-09-12-Redeletion.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globaldce +; RUN: opt < %s -passes=globaldce ;; Should die when function %foo is killed @foo.upgrd.1 = internal global i32 7 ; [#uses=3] diff --git a/llvm/test/Transforms/GlobalDCE/2003-07-01-SelfReference.ll b/llvm/test/Transforms/GlobalDCE/2003-07-01-SelfReference.ll --- a/llvm/test/Transforms/GlobalDCE/2003-07-01-SelfReference.ll +++ b/llvm/test/Transforms/GlobalDCE/2003-07-01-SelfReference.ll @@ -1,5 +1,5 @@ ; distilled from 255.vortex -; RUN: opt < %s -globaldce -S | FileCheck %s +; RUN: opt < %s -passes=globaldce -S | FileCheck %s ; CHECK-NOT: testfunc diff --git 
a/llvm/test/Transforms/GlobalDCE/2003-10-09-PreserveWeakGlobals.ll b/llvm/test/Transforms/GlobalDCE/2003-10-09-PreserveWeakGlobals.ll --- a/llvm/test/Transforms/GlobalDCE/2003-10-09-PreserveWeakGlobals.ll +++ b/llvm/test/Transforms/GlobalDCE/2003-10-09-PreserveWeakGlobals.ll @@ -1,6 +1,6 @@ ; Weak variables should be preserved by global DCE! -; RUN: opt < %s -globaldce -S | FileCheck %s +; RUN: opt < %s -passes=globaldce -S | FileCheck %s ; CHECK: @A @A = weak global i32 54 diff --git a/llvm/test/Transforms/GlobalDCE/2009-01-05-DeadAliases.ll b/llvm/test/Transforms/GlobalDCE/2009-01-05-DeadAliases.ll --- a/llvm/test/Transforms/GlobalDCE/2009-01-05-DeadAliases.ll +++ b/llvm/test/Transforms/GlobalDCE/2009-01-05-DeadAliases.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globaldce -S > %t +; RUN: opt < %s -passes=globaldce -S > %t ; RUN: FileCheck %s < %t ; RUN: FileCheck --check-prefix=DEAD %s < %t diff --git a/llvm/test/Transforms/GlobalDCE/2009-02-17-AliasUsesAliasee.ll b/llvm/test/Transforms/GlobalDCE/2009-02-17-AliasUsesAliasee.ll --- a/llvm/test/Transforms/GlobalDCE/2009-02-17-AliasUsesAliasee.ll +++ b/llvm/test/Transforms/GlobalDCE/2009-02-17-AliasUsesAliasee.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globaldce +; RUN: opt < %s -passes=globaldce @A = internal alias void (), void ()* @F define internal void @F() { ret void } diff --git a/llvm/test/Transforms/GlobalDCE/call-with-ptrtoint.ll b/llvm/test/Transforms/GlobalDCE/call-with-ptrtoint.ll --- a/llvm/test/Transforms/GlobalDCE/call-with-ptrtoint.ll +++ b/llvm/test/Transforms/GlobalDCE/call-with-ptrtoint.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globaldce -S +; RUN: opt < %s -passes=globaldce -S target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" diff --git a/llvm/test/Transforms/GlobalDCE/comdats.ll b/llvm/test/Transforms/GlobalDCE/comdats.ll --- a/llvm/test/Transforms/GlobalDCE/comdats.ll +++ b/llvm/test/Transforms/GlobalDCE/comdats.ll @@ -1,6 +1,6 @@ ; Test the behavior of GlobalDCE in conjunction with comdats. 
; -; RUN: opt < %s -globaldce -S | FileCheck %s +; RUN: opt < %s -passes=globaldce -S | FileCheck %s ; First test checks that if one function in a comdat group is used, both other ; functions and other globals even if unused will be preserved. diff --git a/llvm/test/Transforms/GlobalDCE/externally_available.ll b/llvm/test/Transforms/GlobalDCE/externally_available.ll --- a/llvm/test/Transforms/GlobalDCE/externally_available.ll +++ b/llvm/test/Transforms/GlobalDCE/externally_available.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globaldce -S | FileCheck %s +; RUN: opt < %s -passes=globaldce -S | FileCheck %s ; test_global should not be emitted to the .s file. ; CHECK-NOT: @test_global = diff --git a/llvm/test/Transforms/GlobalDCE/global-ifunc.ll b/llvm/test/Transforms/GlobalDCE/global-ifunc.ll --- a/llvm/test/Transforms/GlobalDCE/global-ifunc.ll +++ b/llvm/test/Transforms/GlobalDCE/global-ifunc.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -globaldce < %s | FileCheck %s +; RUN: opt -S -passes=globaldce < %s | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/GlobalDCE/global_ctors.ll b/llvm/test/Transforms/GlobalDCE/global_ctors.ll --- a/llvm/test/Transforms/GlobalDCE/global_ctors.ll +++ b/llvm/test/Transforms/GlobalDCE/global_ctors.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -globaldce < %s | FileCheck %s +; RUN: opt -S -passes=globaldce < %s | FileCheck %s ; Test that the presence of debug intrinsics isn't affecting GlobalDCE. 
; CHECK: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @_notremovable, i8* null }] diff --git a/llvm/test/Transforms/GlobalDCE/indirectbr.ll b/llvm/test/Transforms/GlobalDCE/indirectbr.ll --- a/llvm/test/Transforms/GlobalDCE/indirectbr.ll +++ b/llvm/test/Transforms/GlobalDCE/indirectbr.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -globaldce < %s | FileCheck %s +; RUN: opt -S -passes=globaldce < %s | FileCheck %s @L = internal unnamed_addr constant [3 x i8*] [i8* blockaddress(@test1, %L1), i8* blockaddress(@test1, %L2), i8* null], align 16 diff --git a/llvm/test/Transforms/GlobalDCE/virtual-functions-base-call.ll b/llvm/test/Transforms/GlobalDCE/virtual-functions-base-call.ll --- a/llvm/test/Transforms/GlobalDCE/virtual-functions-base-call.ll +++ b/llvm/test/Transforms/GlobalDCE/virtual-functions-base-call.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globaldce -S | FileCheck %s +; RUN: opt < %s -passes=globaldce -S | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" diff --git a/llvm/test/Transforms/GlobalDCE/virtual-functions-base-pointer-call.ll b/llvm/test/Transforms/GlobalDCE/virtual-functions-base-pointer-call.ll --- a/llvm/test/Transforms/GlobalDCE/virtual-functions-base-pointer-call.ll +++ b/llvm/test/Transforms/GlobalDCE/virtual-functions-base-pointer-call.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globaldce -S | FileCheck %s +; RUN: opt < %s -passes=globaldce -S | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" diff --git a/llvm/test/Transforms/GlobalDCE/virtual-functions-derived-call.ll b/llvm/test/Transforms/GlobalDCE/virtual-functions-derived-call.ll --- a/llvm/test/Transforms/GlobalDCE/virtual-functions-derived-call.ll +++ b/llvm/test/Transforms/GlobalDCE/virtual-functions-derived-call.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globaldce -S | FileCheck %s +; RUN: opt < %s -passes=globaldce -S | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" diff 
--git a/llvm/test/Transforms/GlobalDCE/virtual-functions-derived-pointer-call.ll b/llvm/test/Transforms/GlobalDCE/virtual-functions-derived-pointer-call.ll --- a/llvm/test/Transforms/GlobalDCE/virtual-functions-derived-pointer-call.ll +++ b/llvm/test/Transforms/GlobalDCE/virtual-functions-derived-pointer-call.ll @@ -1,5 +1,5 @@ -; RUN: opt < %s -globaldce -S | FileCheck %s +; RUN: opt < %s -passes=globaldce -S | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" diff --git a/llvm/test/Transforms/GlobalDCE/virtual-functions-novfe.ll b/llvm/test/Transforms/GlobalDCE/virtual-functions-novfe.ll --- a/llvm/test/Transforms/GlobalDCE/virtual-functions-novfe.ll +++ b/llvm/test/Transforms/GlobalDCE/virtual-functions-novfe.ll @@ -1,6 +1,6 @@ ; Tests that VFE is not performed when the Virtual Function Elim metadata set ; to 0. This is the same as virtual-functions.ll otherwise. -; RUN: opt < %s -globaldce -S | FileCheck %s +; RUN: opt < %s -passes=globaldce -S | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" diff --git a/llvm/test/Transforms/GlobalDCE/virtual-functions-relative-pointers-bad.ll b/llvm/test/Transforms/GlobalDCE/virtual-functions-relative-pointers-bad.ll --- a/llvm/test/Transforms/GlobalDCE/virtual-functions-relative-pointers-bad.ll +++ b/llvm/test/Transforms/GlobalDCE/virtual-functions-relative-pointers-bad.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globaldce -S | FileCheck %s +; RUN: opt < %s -passes=globaldce -S | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" diff --git a/llvm/test/Transforms/GlobalDCE/virtual-functions-relative-pointers.ll b/llvm/test/Transforms/GlobalDCE/virtual-functions-relative-pointers.ll --- a/llvm/test/Transforms/GlobalDCE/virtual-functions-relative-pointers.ll +++ b/llvm/test/Transforms/GlobalDCE/virtual-functions-relative-pointers.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globaldce -S | FileCheck %s +; RUN: opt < %s -passes=globaldce -S | FileCheck %s target datalayout 
= "e-m:e-i64:64-f80:128-n8:16:32:64-S128" diff --git a/llvm/test/Transforms/GlobalDCE/virtual-functions-visibility-post-lto.ll b/llvm/test/Transforms/GlobalDCE/virtual-functions-visibility-post-lto.ll --- a/llvm/test/Transforms/GlobalDCE/virtual-functions-visibility-post-lto.ll +++ b/llvm/test/Transforms/GlobalDCE/virtual-functions-visibility-post-lto.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globaldce -S | FileCheck %s +; RUN: opt < %s -passes=globaldce -S | FileCheck %s ; structs A, B and C have vcall_visibility of public, linkage-unit and ; translation-unit respectively. This test is run after LTO linking (the diff --git a/llvm/test/Transforms/GlobalDCE/virtual-functions-visibility-pre-lto.ll b/llvm/test/Transforms/GlobalDCE/virtual-functions-visibility-pre-lto.ll --- a/llvm/test/Transforms/GlobalDCE/virtual-functions-visibility-pre-lto.ll +++ b/llvm/test/Transforms/GlobalDCE/virtual-functions-visibility-pre-lto.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globaldce -S | FileCheck %s +; RUN: opt < %s -passes=globaldce -S | FileCheck %s ; structs A, B and C have vcall_visibility of public, linkage-unit and ; translation-unit respectively. 
This test is run before LTO linking occurs diff --git a/llvm/test/Transforms/GlobalDCE/virtual-functions.ll b/llvm/test/Transforms/GlobalDCE/virtual-functions.ll --- a/llvm/test/Transforms/GlobalDCE/virtual-functions.ll +++ b/llvm/test/Transforms/GlobalDCE/virtual-functions.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globaldce -S | FileCheck %s +; RUN: opt < %s -passes=globaldce -S | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" diff --git a/llvm/test/Transforms/GlobalDCE/vtable-rtti.ll b/llvm/test/Transforms/GlobalDCE/vtable-rtti.ll --- a/llvm/test/Transforms/GlobalDCE/vtable-rtti.ll +++ b/llvm/test/Transforms/GlobalDCE/vtable-rtti.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globaldce -S | FileCheck %s +; RUN: opt < %s -passes=globaldce -S | FileCheck %s ; We currently only use llvm.type.checked.load for virtual function pointers, ; not any other part of the vtable, so we can't remove the RTTI pointer even if diff --git a/llvm/test/Transforms/GlobalOpt/2005-06-15-LocalizeConstExprCrash.ll b/llvm/test/Transforms/GlobalOpt/2005-06-15-LocalizeConstExprCrash.ll --- a/llvm/test/Transforms/GlobalOpt/2005-06-15-LocalizeConstExprCrash.ll +++ b/llvm/test/Transforms/GlobalOpt/2005-06-15-LocalizeConstExprCrash.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -disable-output +; RUN: opt < %s -passes=globalopt -disable-output ; PR579 @g_40507551 = internal global i16 31038 ; [#uses=1] diff --git a/llvm/test/Transforms/GlobalOpt/2005-09-27-Crash.ll b/llvm/test/Transforms/GlobalOpt/2005-09-27-Crash.ll --- a/llvm/test/Transforms/GlobalOpt/2005-09-27-Crash.ll +++ b/llvm/test/Transforms/GlobalOpt/2005-09-27-Crash.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -disable-output +; RUN: opt < %s -passes=globalopt -disable-output %RPyString = type { i32, %arraytype.Char } %arraytype.Char = type { i32, [0 x i8] } %arraytype.Signed = type { i32, [0 x i32] } diff --git a/llvm/test/Transforms/GlobalOpt/2006-07-07-InlineAsmCrash.ll 
b/llvm/test/Transforms/GlobalOpt/2006-07-07-InlineAsmCrash.ll --- a/llvm/test/Transforms/GlobalOpt/2006-07-07-InlineAsmCrash.ll +++ b/llvm/test/Transforms/GlobalOpt/2006-07-07-InlineAsmCrash.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -disable-output +; RUN: opt < %s -passes=globalopt -disable-output ; PR820 target datalayout = "e-p:32:32" target triple = "i686-pc-linux-gnu" diff --git a/llvm/test/Transforms/GlobalOpt/2006-11-01-ShrinkGlobalPhiCrash.ll b/llvm/test/Transforms/GlobalOpt/2006-11-01-ShrinkGlobalPhiCrash.ll --- a/llvm/test/Transforms/GlobalOpt/2006-11-01-ShrinkGlobalPhiCrash.ll +++ b/llvm/test/Transforms/GlobalOpt/2006-11-01-ShrinkGlobalPhiCrash.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -disable-output +; RUN: opt < %s -passes=globalopt -disable-output %struct._list = type { i32*, %struct._list* } %struct._play = type { i32, i32*, %struct._list*, %struct._play* } diff --git a/llvm/test/Transforms/GlobalOpt/2007-04-05-Crash.ll b/llvm/test/Transforms/GlobalOpt/2007-04-05-Crash.ll --- a/llvm/test/Transforms/GlobalOpt/2007-04-05-Crash.ll +++ b/llvm/test/Transforms/GlobalOpt/2007-04-05-Crash.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -disable-output +; RUN: opt < %s -passes=globalopt -disable-output target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:64:64-v128:128:128-a0:0:32" target triple = "thumb-apple-darwin8" diff --git a/llvm/test/Transforms/GlobalOpt/2007-05-13-Crash.ll b/llvm/test/Transforms/GlobalOpt/2007-05-13-Crash.ll --- a/llvm/test/Transforms/GlobalOpt/2007-05-13-Crash.ll +++ b/llvm/test/Transforms/GlobalOpt/2007-05-13-Crash.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -disable-output +; RUN: opt < %s -passes=globalopt -disable-output target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64" target triple = "i686-apple-darwin8" diff --git a/llvm/test/Transforms/GlobalOpt/2007-06-04-PackedStruct.ll 
b/llvm/test/Transforms/GlobalOpt/2007-06-04-PackedStruct.ll --- a/llvm/test/Transforms/GlobalOpt/2007-06-04-PackedStruct.ll +++ b/llvm/test/Transforms/GlobalOpt/2007-06-04-PackedStruct.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -disable-output +; RUN: opt < %s -passes=globalopt -disable-output ; PR1491 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64" diff --git a/llvm/test/Transforms/GlobalOpt/2007-11-09-GEP-GEP-Crash.ll b/llvm/test/Transforms/GlobalOpt/2007-11-09-GEP-GEP-Crash.ll --- a/llvm/test/Transforms/GlobalOpt/2007-11-09-GEP-GEP-Crash.ll +++ b/llvm/test/Transforms/GlobalOpt/2007-11-09-GEP-GEP-Crash.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -disable-output +; RUN: opt < %s -passes=globalopt -disable-output target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128" target triple = "powerpc-unknown-linux-gnu" %struct.empty0 = type { } diff --git a/llvm/test/Transforms/GlobalOpt/2008-01-03-Crash.ll b/llvm/test/Transforms/GlobalOpt/2008-01-03-Crash.ll --- a/llvm/test/Transforms/GlobalOpt/2008-01-03-Crash.ll +++ b/llvm/test/Transforms/GlobalOpt/2008-01-03-Crash.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt | llvm-dis +; RUN: opt < %s -passes=globalopt | llvm-dis ; PR1896 @indirect1 = internal global void (i32)* null ; [#uses=2] diff --git a/llvm/test/Transforms/GlobalOpt/2008-01-13-OutOfRangeSROA.ll b/llvm/test/Transforms/GlobalOpt/2008-01-13-OutOfRangeSROA.ll --- a/llvm/test/Transforms/GlobalOpt/2008-01-13-OutOfRangeSROA.ll +++ b/llvm/test/Transforms/GlobalOpt/2008-01-13-OutOfRangeSROA.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; The 'X' indices could be larger than 31. Do not SROA the outer ; indices of this array. 
diff --git a/llvm/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll b/llvm/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll --- a/llvm/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll +++ b/llvm/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; CHECK: load volatile @t0.1441 = internal global double 0x3FD5555555555555, align 8 ; [#uses=1] diff --git a/llvm/test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll b/llvm/test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll --- a/llvm/test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll +++ b/llvm/test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll @@ -2,7 +2,7 @@ ; alignments. Elements 0 and 2 must be 16-byte aligned, and element ; 1 must be at least 8 byte aligned (but could be more). -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; CHECK: @G.0 = internal unnamed_addr global {{.*}}align 16 ; CHECK: @G.1 = internal unnamed_addr global {{.*}}align 8 ; CHECK: @G.2 = internal unnamed_addr global {{.*}}align 16 diff --git a/llvm/test/Transforms/GlobalOpt/2008-07-17-addrspace.ll b/llvm/test/Transforms/GlobalOpt/2008-07-17-addrspace.ll --- a/llvm/test/Transforms/GlobalOpt/2008-07-17-addrspace.ll +++ b/llvm/test/Transforms/GlobalOpt/2008-07-17-addrspace.ll @@ -2,7 +2,7 @@ ; values. This used to crash, because globalopt forgot to put the new var in the ; same address space as the old one. 
-; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; Check that the new global values still have their address space ; CHECK: addrspace(1) global diff --git a/llvm/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash-2.ll b/llvm/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash-2.ll --- a/llvm/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash-2.ll +++ b/llvm/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash-2.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt | llvm-dis +; RUN: opt < %s -passes=globalopt | llvm-dis target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128" target triple = "i386-apple-darwin7" %struct.foo = type { i32, i32 } diff --git a/llvm/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash.ll b/llvm/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash.ll --- a/llvm/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash.ll +++ b/llvm/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt | llvm-dis +; RUN: opt < %s -passes=globalopt | llvm-dis target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128" target triple = "i386-apple-darwin7" %struct.foo = type { i32, i32 } diff --git a/llvm/test/Transforms/GlobalOpt/2009-01-13-phi-user.ll b/llvm/test/Transforms/GlobalOpt/2009-01-13-phi-user.ll --- a/llvm/test/Transforms/GlobalOpt/2009-01-13-phi-user.ll +++ b/llvm/test/Transforms/GlobalOpt/2009-01-13-phi-user.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; CHECK: phi{{.*}}@head ; PR3321 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128" diff --git a/llvm/test/Transforms/GlobalOpt/2009-02-15-ResolveAlias.ll 
b/llvm/test/Transforms/GlobalOpt/2009-02-15-ResolveAlias.ll --- a/llvm/test/Transforms/GlobalOpt/2009-02-15-ResolveAlias.ll +++ b/llvm/test/Transforms/GlobalOpt/2009-02-15-ResolveAlias.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s define internal void @f() { ; CHECK-NOT: @f( diff --git a/llvm/test/Transforms/GlobalOpt/2009-03-05-dbg.ll b/llvm/test/Transforms/GlobalOpt/2009-03-05-dbg.ll --- a/llvm/test/Transforms/GlobalOpt/2009-03-05-dbg.ll +++ b/llvm/test/Transforms/GlobalOpt/2009-03-05-dbg.ll @@ -1,5 +1,5 @@ ; REQUIRES: asserts -; RUN: opt < %s -globalopt -stats -disable-output 2>&1 | FileCheck %s +; RUN: opt < %s -passes=globalopt -stats -disable-output 2>&1 | FileCheck %s ; CHECK: 1 globalopt - Number of global vars shrunk to booleans source_filename = "test/Transforms/GlobalOpt/2009-03-05-dbg.ll" diff --git a/llvm/test/Transforms/GlobalOpt/2009-03-06-Anonymous.ll b/llvm/test/Transforms/GlobalOpt/2009-03-06-Anonymous.ll --- a/llvm/test/Transforms/GlobalOpt/2009-03-06-Anonymous.ll +++ b/llvm/test/Transforms/GlobalOpt/2009-03-06-Anonymous.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s @0 = global i32 0 ; CHECK-DAG: @0 = internal global i32 0 diff --git a/llvm/test/Transforms/GlobalOpt/2009-03-07-PromotePtrToBool.ll b/llvm/test/Transforms/GlobalOpt/2009-03-07-PromotePtrToBool.ll --- a/llvm/test/Transforms/GlobalOpt/2009-03-07-PromotePtrToBool.ll +++ b/llvm/test/Transforms/GlobalOpt/2009-03-07-PromotePtrToBool.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128" target triple = "i386-apple-darwin7" diff --git a/llvm/test/Transforms/GlobalOpt/2009-11-16-BrokenPerformHeapAllocSRoA.ll 
b/llvm/test/Transforms/GlobalOpt/2009-11-16-BrokenPerformHeapAllocSRoA.ll --- a/llvm/test/Transforms/GlobalOpt/2009-11-16-BrokenPerformHeapAllocSRoA.ll +++ b/llvm/test/Transforms/GlobalOpt/2009-11-16-BrokenPerformHeapAllocSRoA.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128" target triple = "x86_64-apple-darwin10.0" diff --git a/llvm/test/Transforms/GlobalOpt/2009-11-16-MallocSingleStoreToGlobalVar.ll b/llvm/test/Transforms/GlobalOpt/2009-11-16-MallocSingleStoreToGlobalVar.ll --- a/llvm/test/Transforms/GlobalOpt/2009-11-16-MallocSingleStoreToGlobalVar.ll +++ b/llvm/test/Transforms/GlobalOpt/2009-11-16-MallocSingleStoreToGlobalVar.ll @@ -2,7 +2,7 @@ ; GlobalOpt was treating a non-optimizable array malloc as a non-array malloc ; and optimizing the global object that the malloc was stored to as a single ; element global. The global object @TOP in this test should not be optimized. 
-; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128" target triple = "x86_64-apple-darwin10.0" diff --git a/llvm/test/Transforms/GlobalOpt/2010-02-25-MallocPromote.ll b/llvm/test/Transforms/GlobalOpt/2010-02-25-MallocPromote.ll --- a/llvm/test/Transforms/GlobalOpt/2010-02-25-MallocPromote.ll +++ b/llvm/test/Transforms/GlobalOpt/2010-02-25-MallocPromote.ll @@ -1,5 +1,5 @@ ; PR6422 -; RUN: opt -globalopt -S < %s +; RUN: opt -passes=globalopt -S < %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/GlobalOpt/2010-02-26-MallocSROA.ll b/llvm/test/Transforms/GlobalOpt/2010-02-26-MallocSROA.ll --- a/llvm/test/Transforms/GlobalOpt/2010-02-26-MallocSROA.ll +++ b/llvm/test/Transforms/GlobalOpt/2010-02-26-MallocSROA.ll @@ -1,4 +1,4 @@ -; RUN: opt -globalopt -S < %s +; RUN: opt -passes=globalopt -S < %s ; PR6435 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/GlobalOpt/2010-10-19-WeakOdr.ll b/llvm/test/Transforms/GlobalOpt/2010-10-19-WeakOdr.ll --- a/llvm/test/Transforms/GlobalOpt/2010-10-19-WeakOdr.ll +++ b/llvm/test/Transforms/GlobalOpt/2010-10-19-WeakOdr.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; PR8389: Globals with weak_odr linkage type must not be modified diff --git a/llvm/test/Transforms/GlobalOpt/2011-04-09-EmptyGlobalCtors.ll b/llvm/test/Transforms/GlobalOpt/2011-04-09-EmptyGlobalCtors.ll --- 
a/llvm/test/Transforms/GlobalOpt/2011-04-09-EmptyGlobalCtors.ll +++ b/llvm/test/Transforms/GlobalOpt/2011-04-09-EmptyGlobalCtors.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -disable-output +; RUN: opt < %s -passes=globalopt -disable-output %0 = type { i32, void ()*, i8* } @llvm.global_ctors = appending global [0 x %0] zeroinitializer diff --git a/llvm/test/Transforms/GlobalOpt/2012-05-11-blockaddress.ll b/llvm/test/Transforms/GlobalOpt/2012-05-11-blockaddress.ll --- a/llvm/test/Transforms/GlobalOpt/2012-05-11-blockaddress.ll +++ b/llvm/test/Transforms/GlobalOpt/2012-05-11-blockaddress.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; Check that the mere presence of a blockaddress doesn't prevent -globalopt ; from promoting @f to fastcc. diff --git a/llvm/test/Transforms/GlobalOpt/2021-08-02-CastStoreOnceP2I.ll b/llvm/test/Transforms/GlobalOpt/2021-08-02-CastStoreOnceP2I.ll --- a/llvm/test/Transforms/GlobalOpt/2021-08-02-CastStoreOnceP2I.ll +++ b/llvm/test/Transforms/GlobalOpt/2021-08-02-CastStoreOnceP2I.ll @@ -1,4 +1,4 @@ -; RUN: opt -globalopt -S < %s | FileCheck %s +; RUN: opt -passes=globalopt -S < %s | FileCheck %s ; RUN: opt -passes=globalopt -S < %s | FileCheck %s ; This tests the assignemnt of non-pointer to global address diff --git a/llvm/test/Transforms/GlobalOpt/2021-08-03-StoreOnceLoadMultiCasts.ll b/llvm/test/Transforms/GlobalOpt/2021-08-03-StoreOnceLoadMultiCasts.ll --- a/llvm/test/Transforms/GlobalOpt/2021-08-03-StoreOnceLoadMultiCasts.ll +++ b/llvm/test/Transforms/GlobalOpt/2021-08-03-StoreOnceLoadMultiCasts.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -globalopt -S < %s | FileCheck %s +; RUN: opt -passes=globalopt -S < %s | FileCheck %s ; RUN: opt -passes=globalopt -S < %s | FileCheck %s @g = internal global i32* null, align 8 diff --git a/llvm/test/Transforms/GlobalOpt/GSROA-section.ll 
b/llvm/test/Transforms/GlobalOpt/GSROA-section.ll --- a/llvm/test/Transforms/GlobalOpt/GSROA-section.ll +++ b/llvm/test/Transforms/GlobalOpt/GSROA-section.ll @@ -1,7 +1,7 @@ ; This test lets globalopt split the global struct and array into different ; values. The pass needs to preserve section attribute. -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; Check that the new global values still have their section assignment. ; CHECK: @struct ; CHECK: section ".foo" diff --git a/llvm/test/Transforms/GlobalOpt/MallocSROA-section-no-null-opt.ll b/llvm/test/Transforms/GlobalOpt/MallocSROA-section-no-null-opt.ll --- a/llvm/test/Transforms/GlobalOpt/MallocSROA-section-no-null-opt.ll +++ b/llvm/test/Transforms/GlobalOpt/MallocSROA-section-no-null-opt.ll @@ -1,4 +1,4 @@ -; RUN: opt -globalopt -S < %s | FileCheck %s +; RUN: opt -passes=globalopt -S < %s | FileCheck %s ; CHECK: @Y ; CHECK: section ".foo" diff --git a/llvm/test/Transforms/GlobalOpt/MallocSROA-section.ll b/llvm/test/Transforms/GlobalOpt/MallocSROA-section.ll --- a/llvm/test/Transforms/GlobalOpt/MallocSROA-section.ll +++ b/llvm/test/Transforms/GlobalOpt/MallocSROA-section.ll @@ -1,4 +1,4 @@ -; RUN: opt -globalopt -S < %s | FileCheck %s +; RUN: opt -passes=globalopt -S < %s | FileCheck %s ; CHECK: @Y = {{.*}} section ".foo" %struct.xyz = type { double, i32 } diff --git a/llvm/test/Transforms/GlobalOpt/PowerPC/coldcc_coldsites.ll b/llvm/test/Transforms/GlobalOpt/PowerPC/coldcc_coldsites.ll --- a/llvm/test/Transforms/GlobalOpt/PowerPC/coldcc_coldsites.ll +++ b/llvm/test/Transforms/GlobalOpt/PowerPC/coldcc_coldsites.ll @@ -1,5 +1,5 @@ -; RUN: opt -globalopt -mtriple=powerpc64le-unknown-linux-gnu -ppc-enable-coldcc -S < %s | FileCheck %s -check-prefix=COLDCC -; RUN: opt -globalopt -S < %s | FileCheck %s -check-prefix=CHECK +; RUN: opt -passes=globalopt -mtriple=powerpc64le-unknown-linux-gnu -ppc-enable-coldcc -S < %s | FileCheck %s -check-prefix=COLDCC +; RUN: opt 
-passes=globalopt -S < %s | FileCheck %s -check-prefix=CHECK define signext i32 @caller(i32 signext %a, i32 signext %b, i32 signext %lim, i32 signext %i) local_unnamed_addr #0 !prof !30 { entry: diff --git a/llvm/test/Transforms/GlobalOpt/SROA-section.ll b/llvm/test/Transforms/GlobalOpt/SROA-section.ll --- a/llvm/test/Transforms/GlobalOpt/SROA-section.ll +++ b/llvm/test/Transforms/GlobalOpt/SROA-section.ll @@ -1,5 +1,5 @@ ; Verify that section assignment is copied during SROA -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; CHECK: @G.0 ; CHECK: section ".foo" ; CHECK: @G.1 diff --git a/llvm/test/Transforms/GlobalOpt/alias-resolve.ll b/llvm/test/Transforms/GlobalOpt/alias-resolve.ll --- a/llvm/test/Transforms/GlobalOpt/alias-resolve.ll +++ b/llvm/test/Transforms/GlobalOpt/alias-resolve.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s @foo1 = alias void (), void ()* @foo2 ; CHECK: @foo1 = alias void (), void ()* @bar2 diff --git a/llvm/test/Transforms/GlobalOpt/alias-used-address-space.ll b/llvm/test/Transforms/GlobalOpt/alias-used-address-space.ll --- a/llvm/test/Transforms/GlobalOpt/alias-used-address-space.ll +++ b/llvm/test/Transforms/GlobalOpt/alias-used-address-space.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -globalopt < %s | FileCheck %s +; RUN: opt -S -passes=globalopt < %s | FileCheck %s target datalayout = "p:32:32:32-p1:16:16:16" diff --git a/llvm/test/Transforms/GlobalOpt/alias-used-section.ll b/llvm/test/Transforms/GlobalOpt/alias-used-section.ll --- a/llvm/test/Transforms/GlobalOpt/alias-used-section.ll +++ b/llvm/test/Transforms/GlobalOpt/alias-used-section.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -globalopt < %s | FileCheck %s +; RUN: opt -S -passes=globalopt < %s | FileCheck %s @_Z17in_custom_section = internal global i8 42, section "CUSTOM" @in_custom_section = internal dllexport alias i8, i8* @_Z17in_custom_section diff --git 
a/llvm/test/Transforms/GlobalOpt/alias-used.ll b/llvm/test/Transforms/GlobalOpt/alias-used.ll --- a/llvm/test/Transforms/GlobalOpt/alias-used.ll +++ b/llvm/test/Transforms/GlobalOpt/alias-used.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s @c = global i8 42 diff --git a/llvm/test/Transforms/GlobalOpt/amdgcn-ctor-alloca.ll b/llvm/test/Transforms/GlobalOpt/amdgcn-ctor-alloca.ll --- a/llvm/test/Transforms/GlobalOpt/amdgcn-ctor-alloca.ll +++ b/llvm/test/Transforms/GlobalOpt/amdgcn-ctor-alloca.ll @@ -1,4 +1,4 @@ -; RUN: opt -data-layout=A5 -globalopt %s -S -o - | FileCheck %s +; RUN: opt -data-layout=A5 -passes=globalopt %s -S -o - | FileCheck %s ; CHECK-NOT: @g @g = internal addrspace(1) global i32* zeroinitializer diff --git a/llvm/test/Transforms/GlobalOpt/array-elem-refs.ll b/llvm/test/Transforms/GlobalOpt/array-elem-refs.ll --- a/llvm/test/Transforms/GlobalOpt/array-elem-refs.ll +++ b/llvm/test/Transforms/GlobalOpt/array-elem-refs.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -S -globalopt | FileCheck %s +; RUN: opt < %s -S -passes=globalopt | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/GlobalOpt/assume.ll b/llvm/test/Transforms/GlobalOpt/assume.ll --- a/llvm/test/Transforms/GlobalOpt/assume.ll +++ b/llvm/test/Transforms/GlobalOpt/assume.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -globalopt < %s | FileCheck %s +; RUN: opt -S -passes=globalopt < %s | FileCheck %s ; CHECK: @tmp = local_unnamed_addr global i32 42 diff --git a/llvm/test/Transforms/GlobalOpt/atexit.ll b/llvm/test/Transforms/GlobalOpt/atexit.ll --- a/llvm/test/Transforms/GlobalOpt/atexit.ll +++ b/llvm/test/Transforms/GlobalOpt/atexit.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | 
FileCheck %s ; CHECK: ModuleID define internal i32 @__cxa_atexit(void (i8*)* nocapture %func, i8* nocapture %arg, i8* nocapture %dso_handle) nounwind readnone optsize noimplicitfloat { diff --git a/llvm/test/Transforms/GlobalOpt/atomic.ll b/llvm/test/Transforms/GlobalOpt/atomic.ll --- a/llvm/test/Transforms/GlobalOpt/atomic.ll +++ b/llvm/test/Transforms/GlobalOpt/atomic.ll @@ -1,4 +1,4 @@ -; RUN: opt -globalopt < %s -S -o - | FileCheck %s +; RUN: opt -passes=globalopt < %s -S -o - | FileCheck %s @GV1 = internal global i64 1, align 8 @GV2 = internal global i32 0, align 4 diff --git a/llvm/test/Transforms/GlobalOpt/available_externally_global_ctors.ll b/llvm/test/Transforms/GlobalOpt/available_externally_global_ctors.ll --- a/llvm/test/Transforms/GlobalOpt/available_externally_global_ctors.ll +++ b/llvm/test/Transforms/GlobalOpt/available_externally_global_ctors.ll @@ -1,7 +1,7 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-apple-macosx10.11.0" -; RUN: opt -S -globalopt < %s | FileCheck %s +; RUN: opt -S -passes=globalopt < %s | FileCheck %s ; Verify that the initialization of the available_externally global is not eliminated ; CHECK: @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @foo_static_init, i8* null }] diff --git a/llvm/test/Transforms/GlobalOpt/basictest.ll b/llvm/test/Transforms/GlobalOpt/basictest.ll --- a/llvm/test/Transforms/GlobalOpt/basictest.ll +++ b/llvm/test/Transforms/GlobalOpt/basictest.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; CHECK-NOT: global diff --git a/llvm/test/Transforms/GlobalOpt/blockaddress.ll b/llvm/test/Transforms/GlobalOpt/blockaddress.ll --- a/llvm/test/Transforms/GlobalOpt/blockaddress.ll +++ b/llvm/test/Transforms/GlobalOpt/blockaddress.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck 
%s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s @x = internal global i8* zeroinitializer diff --git a/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users.ll b/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users.ll --- a/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users.ll +++ b/llvm/test/Transforms/GlobalOpt/cleanup-pointer-root-users.ll @@ -1,4 +1,4 @@ -; RUN: opt -globalopt -S -o - < %s | FileCheck %s +; RUN: opt -passes=globalopt -S -o - < %s | FileCheck %s @glbl = internal global i8* null diff --git a/llvm/test/Transforms/GlobalOpt/coldcc_stress_test.ll b/llvm/test/Transforms/GlobalOpt/coldcc_stress_test.ll --- a/llvm/test/Transforms/GlobalOpt/coldcc_stress_test.ll +++ b/llvm/test/Transforms/GlobalOpt/coldcc_stress_test.ll @@ -1,5 +1,5 @@ -; RUN: opt < %s -globalopt -S -enable-coldcc-stress-test -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s -check-prefix=COLDCC -; RUN: opt < %s -globalopt -S | FileCheck %s -check-prefix=CHECK +; RUN: opt < %s -passes=globalopt -S -enable-coldcc-stress-test -mtriple=powerpc64le-unknown-linux-gnu | FileCheck %s -check-prefix=COLDCC +; RUN: opt < %s -passes=globalopt -S | FileCheck %s -check-prefix=CHECK define internal i32 @callee_default(i32* %m) { ; COLDCC-LABEL: define internal coldcc i32 @callee_default diff --git a/llvm/test/Transforms/GlobalOpt/compiler-used.ll b/llvm/test/Transforms/GlobalOpt/compiler-used.ll --- a/llvm/test/Transforms/GlobalOpt/compiler-used.ll +++ b/llvm/test/Transforms/GlobalOpt/compiler-used.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; Test that when all members of llvm.compiler.used are found to be redundant ; we delete it instead of crashing. 
diff --git a/llvm/test/Transforms/GlobalOpt/const-return-status-atomic.ll b/llvm/test/Transforms/GlobalOpt/const-return-status-atomic.ll --- a/llvm/test/Transforms/GlobalOpt/const-return-status-atomic.ll +++ b/llvm/test/Transforms/GlobalOpt/const-return-status-atomic.ll @@ -1,4 +1,4 @@ -; RUN: opt -globalopt < %s -S -o - | FileCheck %s +; RUN: opt -passes=globalopt < %s -S -o - | FileCheck %s ; When simplifying users of a global variable, the pass could incorrectly ; return false if there were still some uses left, and no further optimizations diff --git a/llvm/test/Transforms/GlobalOpt/const-return-status.ll b/llvm/test/Transforms/GlobalOpt/const-return-status.ll --- a/llvm/test/Transforms/GlobalOpt/const-return-status.ll +++ b/llvm/test/Transforms/GlobalOpt/const-return-status.ll @@ -1,4 +1,4 @@ -; RUN: opt -globalopt < %s -S -o - | FileCheck %s +; RUN: opt -passes=globalopt < %s -S -o - | FileCheck %s ; When simplifying users of a global variable, the pass could incorrectly ; return false if there were still some uses left, and no further optimizations diff --git a/llvm/test/Transforms/GlobalOpt/constantfold-initializers.ll b/llvm/test/Transforms/GlobalOpt/constantfold-initializers.ll --- a/llvm/test/Transforms/GlobalOpt/constantfold-initializers.ll +++ b/llvm/test/Transforms/GlobalOpt/constantfold-initializers.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -S -globalopt | FileCheck %s +; RUN: opt < %s -S -passes=globalopt | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128" diff --git a/llvm/test/Transforms/GlobalOpt/crash-2.ll b/llvm/test/Transforms/GlobalOpt/crash-2.ll --- a/llvm/test/Transforms/GlobalOpt/crash-2.ll +++ b/llvm/test/Transforms/GlobalOpt/crash-2.ll @@ -1,4 +1,4 @@ -; RUN: llvm-as < %s | opt -globalopt -disable-output +; RUN: llvm-as < %s | opt -passes=globalopt -disable-output ; NOTE: This needs to run through 'llvm-as' first to reproduce the 
error! ; PR15440 diff --git a/llvm/test/Transforms/GlobalOpt/crash.ll b/llvm/test/Transforms/GlobalOpt/crash.ll --- a/llvm/test/Transforms/GlobalOpt/crash.ll +++ b/llvm/test/Transforms/GlobalOpt/crash.ll @@ -1,4 +1,4 @@ -; RUN: opt -globalopt -disable-output < %s +; RUN: opt -passes=globalopt -disable-output < %s target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32" target triple = "i386-apple-darwin9.8" diff --git a/llvm/test/Transforms/GlobalOpt/ctor-list-opt-constexpr.ll b/llvm/test/Transforms/GlobalOpt/ctor-list-opt-constexpr.ll --- a/llvm/test/Transforms/GlobalOpt/ctor-list-opt-constexpr.ll +++ b/llvm/test/Transforms/GlobalOpt/ctor-list-opt-constexpr.ll @@ -1,4 +1,4 @@ -; RUN: opt -globalopt -S < %s | FileCheck %s +; RUN: opt -passes=globalopt -S < %s | FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" target triple = "x86_64-apple-darwin10.0.0" diff --git a/llvm/test/Transforms/GlobalOpt/ctor-list-opt-inbounds.ll b/llvm/test/Transforms/GlobalOpt/ctor-list-opt-inbounds.ll --- a/llvm/test/Transforms/GlobalOpt/ctor-list-opt-inbounds.ll +++ b/llvm/test/Transforms/GlobalOpt/ctor-list-opt-inbounds.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; Don't get fooled by the inbounds keyword; it doesn't change ; the computed address. 
diff --git a/llvm/test/Transforms/GlobalOpt/ctor-list-opt.ll b/llvm/test/Transforms/GlobalOpt/ctor-list-opt.ll --- a/llvm/test/Transforms/GlobalOpt/ctor-list-opt.ll +++ b/llvm/test/Transforms/GlobalOpt/ctor-list-opt.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; CHECK-NOT: CTOR %ini = type { i32, void()*, i8* } @llvm.global_ctors = appending global [11 x %ini] [ diff --git a/llvm/test/Transforms/GlobalOpt/dead-store-status.ll b/llvm/test/Transforms/GlobalOpt/dead-store-status.ll --- a/llvm/test/Transforms/GlobalOpt/dead-store-status.ll +++ b/llvm/test/Transforms/GlobalOpt/dead-store-status.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; When removing the store to @global in @foo, the pass would incorrectly return ; false. This was caught by the pass return status check that is hidden under diff --git a/llvm/test/Transforms/GlobalOpt/deaddeclaration.ll b/llvm/test/Transforms/GlobalOpt/deaddeclaration.ll --- a/llvm/test/Transforms/GlobalOpt/deaddeclaration.ll +++ b/llvm/test/Transforms/GlobalOpt/deaddeclaration.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; CHECK-NOT: aa ; CHECK-NOT: bb diff --git a/llvm/test/Transforms/GlobalOpt/deadfunction.ll b/llvm/test/Transforms/GlobalOpt/deadfunction.ll --- a/llvm/test/Transforms/GlobalOpt/deadfunction.ll +++ b/llvm/test/Transforms/GlobalOpt/deadfunction.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; CHECK-NOT: test diff --git a/llvm/test/Transforms/GlobalOpt/deadglobal-2.ll b/llvm/test/Transforms/GlobalOpt/deadglobal-2.ll --- a/llvm/test/Transforms/GlobalOpt/deadglobal-2.ll +++ b/llvm/test/Transforms/GlobalOpt/deadglobal-2.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; 
This is a harder case to delete as the GEP has a variable index. ; CHECK-NOT: internal diff --git a/llvm/test/Transforms/GlobalOpt/deadglobal-diarglist-use.ll b/llvm/test/Transforms/GlobalOpt/deadglobal-diarglist-use.ll --- a/llvm/test/Transforms/GlobalOpt/deadglobal-diarglist-use.ll +++ b/llvm/test/Transforms/GlobalOpt/deadglobal-diarglist-use.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | llvm-as | llvm-dis | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | llvm-as | llvm-dis | FileCheck %s ; The %struct.S type would not get emitted after @s was removed, resulting in ; llvm-as failing to parse the dbg.value intrinsic using that type. The diff --git a/llvm/test/Transforms/GlobalOpt/deadglobal.ll b/llvm/test/Transforms/GlobalOpt/deadglobal.ll --- a/llvm/test/Transforms/GlobalOpt/deadglobal.ll +++ b/llvm/test/Transforms/GlobalOpt/deadglobal.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s @G1 = internal global i32 123 ; [#uses=1] @A1 = internal alias i32, i32* @G1 diff --git a/llvm/test/Transforms/GlobalOpt/evaluate-bitcast-2.ll b/llvm/test/Transforms/GlobalOpt/evaluate-bitcast-2.ll --- a/llvm/test/Transforms/GlobalOpt/evaluate-bitcast-2.ll +++ b/llvm/test/Transforms/GlobalOpt/evaluate-bitcast-2.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; Test the evaluation of a store and a load via a bitcast, and check ; that globals are constant folded to the correct value. diff --git a/llvm/test/Transforms/GlobalOpt/evaluate-bitcast-3.ll b/llvm/test/Transforms/GlobalOpt/evaluate-bitcast-3.ll --- a/llvm/test/Transforms/GlobalOpt/evaluate-bitcast-3.ll +++ b/llvm/test/Transforms/GlobalOpt/evaluate-bitcast-3.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; Test the evaluation of a load via a bitcast and a store via a GEP. 
; Check that globals are constant folded to the correct value. diff --git a/llvm/test/Transforms/GlobalOpt/evaluate-bitcast-4.ll b/llvm/test/Transforms/GlobalOpt/evaluate-bitcast-4.ll --- a/llvm/test/Transforms/GlobalOpt/evaluate-bitcast-4.ll +++ b/llvm/test/Transforms/GlobalOpt/evaluate-bitcast-4.ll @@ -1,5 +1,5 @@ ; PR48055. Check that this does not crash. -; RUN: opt -globalopt %s -disable-output +; RUN: opt -passes=globalopt %s -disable-output target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/GlobalOpt/externally-initialized-aggregate.ll b/llvm/test/Transforms/GlobalOpt/externally-initialized-aggregate.ll --- a/llvm/test/Transforms/GlobalOpt/externally-initialized-aggregate.ll +++ b/llvm/test/Transforms/GlobalOpt/externally-initialized-aggregate.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -S -globalopt | FileCheck %s +; RUN: opt < %s -S -passes=globalopt | FileCheck %s ; This global is externally_initialized, so if we split it into scalars we ; should keep that flag set on all of the new globals. This will prevent the diff --git a/llvm/test/Transforms/GlobalOpt/externally-initialized-global-ctr.ll b/llvm/test/Transforms/GlobalOpt/externally-initialized-global-ctr.ll --- a/llvm/test/Transforms/GlobalOpt/externally-initialized-global-ctr.ll +++ b/llvm/test/Transforms/GlobalOpt/externally-initialized-global-ctr.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; rdar://12580965. ; ObjC++ test case. 
diff --git a/llvm/test/Transforms/GlobalOpt/externally-initialized.ll b/llvm/test/Transforms/GlobalOpt/externally-initialized.ll --- a/llvm/test/Transforms/GlobalOpt/externally-initialized.ll +++ b/llvm/test/Transforms/GlobalOpt/externally-initialized.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -S -globalopt | FileCheck %s +; RUN: opt < %s -S -passes=globalopt | FileCheck %s ; This global is externally_initialized, which may modify the value between ; it's static initializer and any code in this module being run, so the only diff --git a/llvm/test/Transforms/GlobalOpt/fastcc.ll b/llvm/test/Transforms/GlobalOpt/fastcc.ll --- a/llvm/test/Transforms/GlobalOpt/fastcc.ll +++ b/llvm/test/Transforms/GlobalOpt/fastcc.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s declare token @llvm.call.preallocated.setup(i32) declare i8* @llvm.call.preallocated.arg(token, i32) diff --git a/llvm/test/Transforms/GlobalOpt/global-demotion.ll b/llvm/test/Transforms/GlobalOpt/global-demotion.ll --- a/llvm/test/Transforms/GlobalOpt/global-demotion.ll +++ b/llvm/test/Transforms/GlobalOpt/global-demotion.ll @@ -1,4 +1,4 @@ -; RUN: opt -globalopt -S < %s | FileCheck %s +; RUN: opt -passes=globalopt -S < %s | FileCheck %s @G1 = internal global i32 5 @G2 = internal global i32 5 diff --git a/llvm/test/Transforms/GlobalOpt/globalsra-align.ll b/llvm/test/Transforms/GlobalOpt/globalsra-align.ll --- a/llvm/test/Transforms/GlobalOpt/globalsra-align.ll +++ b/llvm/test/Transforms/GlobalOpt/globalsra-align.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s target datalayout = "p:16:32:64" ; 16-bit pointers with 32-bit ABI alignment and 64-bit preferred alignmentt diff --git a/llvm/test/Transforms/GlobalOpt/globalsra-multigep.ll b/llvm/test/Transforms/GlobalOpt/globalsra-multigep.ll 
--- a/llvm/test/Transforms/GlobalOpt/globalsra-multigep.ll +++ b/llvm/test/Transforms/GlobalOpt/globalsra-multigep.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/GlobalOpt/globalsra-partial.ll b/llvm/test/Transforms/GlobalOpt/globalsra-partial.ll --- a/llvm/test/Transforms/GlobalOpt/globalsra-partial.ll +++ b/llvm/test/Transforms/GlobalOpt/globalsra-partial.ll @@ -1,6 +1,6 @@ ; In this case, the global cannot be merged as i may be out of range -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" @G = internal global { i32, [4 x float] } zeroinitializer ; <{ i32, [4 x float] }*> [#uses=3] diff --git a/llvm/test/Transforms/GlobalOpt/globalsra-unknown-index.ll b/llvm/test/Transforms/GlobalOpt/globalsra-unknown-index.ll --- a/llvm/test/Transforms/GlobalOpt/globalsra-unknown-index.ll +++ b/llvm/test/Transforms/GlobalOpt/globalsra-unknown-index.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; globalopt should not sra the global, because it can't see the index. 
diff --git a/llvm/test/Transforms/GlobalOpt/globalsra.ll b/llvm/test/Transforms/GlobalOpt/globalsra.ll --- a/llvm/test/Transforms/GlobalOpt/globalsra.ll +++ b/llvm/test/Transforms/GlobalOpt/globalsra.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; CHECK-NOT: global target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" diff --git a/llvm/test/Transforms/GlobalOpt/heap-sra-1-no-null-opt.ll b/llvm/test/Transforms/GlobalOpt/heap-sra-1-no-null-opt.ll --- a/llvm/test/Transforms/GlobalOpt/heap-sra-1-no-null-opt.ll +++ b/llvm/test/Transforms/GlobalOpt/heap-sra-1-no-null-opt.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" %struct.foo = type { i32, i32 } diff --git a/llvm/test/Transforms/GlobalOpt/heap-sra-1.ll b/llvm/test/Transforms/GlobalOpt/heap-sra-1.ll --- a/llvm/test/Transforms/GlobalOpt/heap-sra-1.ll +++ b/llvm/test/Transforms/GlobalOpt/heap-sra-1.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" ;; Heap SROA has been removed. This tests we don't perform heap SROA. 
diff --git a/llvm/test/Transforms/GlobalOpt/heap-sra-2-no-null-opt.ll b/llvm/test/Transforms/GlobalOpt/heap-sra-2-no-null-opt.ll --- a/llvm/test/Transforms/GlobalOpt/heap-sra-2-no-null-opt.ll +++ b/llvm/test/Transforms/GlobalOpt/heap-sra-2-no-null-opt.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" %struct.foo = type { i32, i32 } diff --git a/llvm/test/Transforms/GlobalOpt/heap-sra-2.ll b/llvm/test/Transforms/GlobalOpt/heap-sra-2.ll --- a/llvm/test/Transforms/GlobalOpt/heap-sra-2.ll +++ b/llvm/test/Transforms/GlobalOpt/heap-sra-2.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" ;; Heap SROA has been removed. This tests we don't perform heap SROA. 
diff --git a/llvm/test/Transforms/GlobalOpt/heap-sra-phi-no-null-opt.ll b/llvm/test/Transforms/GlobalOpt/heap-sra-phi-no-null-opt.ll --- a/llvm/test/Transforms/GlobalOpt/heap-sra-phi-no-null-opt.ll +++ b/llvm/test/Transforms/GlobalOpt/heap-sra-phi-no-null-opt.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" %struct.foo = type { i32, i32 } diff --git a/llvm/test/Transforms/GlobalOpt/heap-sra-phi.ll b/llvm/test/Transforms/GlobalOpt/heap-sra-phi.ll --- a/llvm/test/Transforms/GlobalOpt/heap-sra-phi.ll +++ b/llvm/test/Transforms/GlobalOpt/heap-sra-phi.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" diff --git a/llvm/test/Transforms/GlobalOpt/int_sideeffect.ll b/llvm/test/Transforms/GlobalOpt/int_sideeffect.ll --- a/llvm/test/Transforms/GlobalOpt/int_sideeffect.ll +++ b/llvm/test/Transforms/GlobalOpt/int_sideeffect.ll @@ -1,4 +1,4 @@ -; RUN: opt -S < %s -globalopt | FileCheck %s +; RUN: opt -S < %s -passes=globalopt | FileCheck %s ; Static evaluation across a @llvm.sideeffect. 
diff --git a/llvm/test/Transforms/GlobalOpt/integer-bool-dwarf.ll b/llvm/test/Transforms/GlobalOpt/integer-bool-dwarf.ll --- a/llvm/test/Transforms/GlobalOpt/integer-bool-dwarf.ll +++ b/llvm/test/Transforms/GlobalOpt/integer-bool-dwarf.ll @@ -1,4 +1,4 @@ -;RUN: opt -S -globalopt -f %s | FileCheck %s +;RUN: opt -S -passes=globalopt -f %s | FileCheck %s ;CHECK: @foo = internal unnamed_addr global i1 false, align 4, !dbg ![[VAR:.*]] ;CHECK: ![[VAR]] = !DIGlobalVariableExpression(var: !1, expr: diff --git a/llvm/test/Transforms/GlobalOpt/invariant-nodatalayout.ll b/llvm/test/Transforms/GlobalOpt/invariant-nodatalayout.ll --- a/llvm/test/Transforms/GlobalOpt/invariant-nodatalayout.ll +++ b/llvm/test/Transforms/GlobalOpt/invariant-nodatalayout.ll @@ -1,4 +1,4 @@ -; RUN: opt -globalopt -S -o - < %s | FileCheck %s +; RUN: opt -passes=globalopt -S -o - < %s | FileCheck %s ; The check here is that it doesn't crash. declare {}* @llvm.invariant.start.p0i8(i64 %size, i8* nocapture %ptr) diff --git a/llvm/test/Transforms/GlobalOpt/invariant.group.ll b/llvm/test/Transforms/GlobalOpt/invariant.group.ll --- a/llvm/test/Transforms/GlobalOpt/invariant.group.ll +++ b/llvm/test/Transforms/GlobalOpt/invariant.group.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -globalopt < %s | FileCheck %s +; RUN: opt -S -passes=globalopt < %s | FileCheck %s ; CHECK: @llvm.global_ctors = appending global [1 x {{.*}}@_GLOBAL__I_c @llvm.global_ctors = appending global [3 x { i32, void ()*, i8* }] [{ i32, void ()*, i8* } { i32 65535, void ()* @_GLOBAL__I_a, i8* null }, { i32, void ()*, i8* } { i32 65535, void ()* @_GLOBAL__I_b, i8* null }, { i32, void ()*, i8* } { i32 65535, void ()* @_GLOBAL__I_c, i8* null }] diff --git a/llvm/test/Transforms/GlobalOpt/invariant.ll b/llvm/test/Transforms/GlobalOpt/invariant.ll --- a/llvm/test/Transforms/GlobalOpt/invariant.ll +++ b/llvm/test/Transforms/GlobalOpt/invariant.ll @@ -1,4 +1,4 @@ -; RUN: opt -globalopt -S -o - < %s | FileCheck %s +; RUN: opt -passes=globalopt -S -o - < %s 
| FileCheck %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/GlobalOpt/invoke.ll b/llvm/test/Transforms/GlobalOpt/invoke.ll --- a/llvm/test/Transforms/GlobalOpt/invoke.ll +++ b/llvm/test/Transforms/GlobalOpt/invoke.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -globalopt < %s | FileCheck %s +; RUN: opt -S -passes=globalopt < %s | FileCheck %s ; rdar://11022897 ; Globalopt should be able to evaluate an invoke. diff --git a/llvm/test/Transforms/GlobalOpt/iterate.ll b/llvm/test/Transforms/GlobalOpt/iterate.ll --- a/llvm/test/Transforms/GlobalOpt/iterate.ll +++ b/llvm/test/Transforms/GlobalOpt/iterate.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; CHECK-NOT: %G @G = internal global i32 0 ; [#uses=1] diff --git a/llvm/test/Transforms/GlobalOpt/large-int-crash.ll b/llvm/test/Transforms/GlobalOpt/large-int-crash.ll --- a/llvm/test/Transforms/GlobalOpt/large-int-crash.ll +++ b/llvm/test/Transforms/GlobalOpt/large-int-crash.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s @X = internal global i128 0 diff --git a/llvm/test/Transforms/GlobalOpt/load-store-global-no-null-opt.ll b/llvm/test/Transforms/GlobalOpt/load-store-global-no-null-opt.ll --- a/llvm/test/Transforms/GlobalOpt/load-store-global-no-null-opt.ll +++ b/llvm/test/Transforms/GlobalOpt/load-store-global-no-null-opt.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s @a = internal global i64* null, align 8 ; CHECK: @a diff --git a/llvm/test/Transforms/GlobalOpt/load-store-global.ll b/llvm/test/Transforms/GlobalOpt/load-store-global.ll --- 
a/llvm/test/Transforms/GlobalOpt/load-store-global.ll +++ b/llvm/test/Transforms/GlobalOpt/load-store-global.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s @G = internal global i32 17 ; [#uses=3] ; CHECK-NOT: @G diff --git a/llvm/test/Transforms/GlobalOpt/localize-constexpr-debuginfo.ll b/llvm/test/Transforms/GlobalOpt/localize-constexpr-debuginfo.ll --- a/llvm/test/Transforms/GlobalOpt/localize-constexpr-debuginfo.ll +++ b/llvm/test/Transforms/GlobalOpt/localize-constexpr-debuginfo.ll @@ -1,4 +1,4 @@ -; RUN: opt -S < %s -globalopt | FileCheck %s +; RUN: opt -S < %s -passes=globalopt | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/GlobalOpt/localize-constexpr.ll b/llvm/test/Transforms/GlobalOpt/localize-constexpr.ll --- a/llvm/test/Transforms/GlobalOpt/localize-constexpr.ll +++ b/llvm/test/Transforms/GlobalOpt/localize-constexpr.ll @@ -1,4 +1,4 @@ -; RUN: opt -S < %s -globalopt | FileCheck %s +; RUN: opt -S < %s -passes=globalopt | FileCheck %s @G = internal global i32 42 diff --git a/llvm/test/Transforms/GlobalOpt/malloc-promote-1-no-null-opt.ll b/llvm/test/Transforms/GlobalOpt/malloc-promote-1-no-null-opt.ll --- a/llvm/test/Transforms/GlobalOpt/malloc-promote-1-no-null-opt.ll +++ b/llvm/test/Transforms/GlobalOpt/malloc-promote-1-no-null-opt.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" @G = internal global i32* null ; [#uses=3] diff --git a/llvm/test/Transforms/GlobalOpt/malloc-promote-1.ll b/llvm/test/Transforms/GlobalOpt/malloc-promote-1.ll --- a/llvm/test/Transforms/GlobalOpt/malloc-promote-1.ll +++ b/llvm/test/Transforms/GlobalOpt/malloc-promote-1.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s 
-globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" @G = internal global i32* null ; [#uses=4] diff --git a/llvm/test/Transforms/GlobalOpt/malloc-promote-2-no-null-opt.ll b/llvm/test/Transforms/GlobalOpt/malloc-promote-2-no-null-opt.ll --- a/llvm/test/Transforms/GlobalOpt/malloc-promote-2-no-null-opt.ll +++ b/llvm/test/Transforms/GlobalOpt/malloc-promote-2-no-null-opt.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" @G = internal global i32* null diff --git a/llvm/test/Transforms/GlobalOpt/malloc-promote-2.ll b/llvm/test/Transforms/GlobalOpt/malloc-promote-2.ll --- a/llvm/test/Transforms/GlobalOpt/malloc-promote-2.ll +++ b/llvm/test/Transforms/GlobalOpt/malloc-promote-2.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" @G = internal global i32* null diff --git a/llvm/test/Transforms/GlobalOpt/malloc-promote-3.ll b/llvm/test/Transforms/GlobalOpt/malloc-promote-3.ll --- a/llvm/test/Transforms/GlobalOpt/malloc-promote-3.ll +++ b/llvm/test/Transforms/GlobalOpt/malloc-promote-3.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128" @G = internal global i32* null diff --git a/llvm/test/Transforms/GlobalOpt/malloc-promote-4.ll b/llvm/test/Transforms/GlobalOpt/malloc-promote-4.ll --- a/llvm/test/Transforms/GlobalOpt/malloc-promote-4.ll +++ 
b/llvm/test/Transforms/GlobalOpt/malloc-promote-4.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -globalopt -o - < %s | FileCheck %s +; RUN: opt -S -passes=globalopt -o - < %s | FileCheck %s ; CHECK: [[G_INIT:@.*]] = internal unnamed_addr global i1 false @g = internal global i32* null, align 8 diff --git a/llvm/test/Transforms/GlobalOpt/memcpy.ll b/llvm/test/Transforms/GlobalOpt/memcpy.ll --- a/llvm/test/Transforms/GlobalOpt/memcpy.ll +++ b/llvm/test/Transforms/GlobalOpt/memcpy.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; CHECK: G1 = internal unnamed_addr constant @G1 = internal global [58 x i8] c"asdlfkajsdlfkajsd;lfkajds;lfkjasd;flkajsd;lkfja;sdlkfjasd\00" ; <[58 x i8]*> [#uses=1] diff --git a/llvm/test/Transforms/GlobalOpt/memset-null.ll b/llvm/test/Transforms/GlobalOpt/memset-null.ll --- a/llvm/test/Transforms/GlobalOpt/memset-null.ll +++ b/llvm/test/Transforms/GlobalOpt/memset-null.ll @@ -1,4 +1,4 @@ -; RUN: opt -globalopt -S < %s | FileCheck %s +; RUN: opt -passes=globalopt -S < %s | FileCheck %s ; PR10047 %0 = type { i32, void ()*, i8* } diff --git a/llvm/test/Transforms/GlobalOpt/memset.ll b/llvm/test/Transforms/GlobalOpt/memset.ll --- a/llvm/test/Transforms/GlobalOpt/memset.ll +++ b/llvm/test/Transforms/GlobalOpt/memset.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -globalopt < %s | FileCheck %s +; RUN: opt -S -passes=globalopt < %s | FileCheck %s ; CHECK-NOT: internal diff --git a/llvm/test/Transforms/GlobalOpt/metadata.ll b/llvm/test/Transforms/GlobalOpt/metadata.ll --- a/llvm/test/Transforms/GlobalOpt/metadata.ll +++ b/llvm/test/Transforms/GlobalOpt/metadata.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -globalopt < %s | FileCheck %s +; RUN: opt -S -passes=globalopt < %s | FileCheck %s ; PR6112 - When globalopt does RAUW(@G, %G), the metadata reference should drop ; to null. 
Function local metadata that references @G from a different function diff --git a/llvm/test/Transforms/GlobalOpt/musttail_cc.ll b/llvm/test/Transforms/GlobalOpt/musttail_cc.ll --- a/llvm/test/Transforms/GlobalOpt/musttail_cc.ll +++ b/llvm/test/Transforms/GlobalOpt/musttail_cc.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; PR36546 ; Check that musttail callee preserves its calling convention diff --git a/llvm/test/Transforms/GlobalOpt/naked_functions.ll b/llvm/test/Transforms/GlobalOpt/naked_functions.ll --- a/llvm/test/Transforms/GlobalOpt/naked_functions.ll +++ b/llvm/test/Transforms/GlobalOpt/naked_functions.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; Check that naked functions don't get marked with fast calling conventions diff --git a/llvm/test/Transforms/GlobalOpt/new-promote.ll b/llvm/test/Transforms/GlobalOpt/new-promote.ll --- a/llvm/test/Transforms/GlobalOpt/new-promote.ll +++ b/llvm/test/Transforms/GlobalOpt/new-promote.ll @@ -1,4 +1,4 @@ -; RUN: opt -globalopt -S < %s | FileCheck %s +; RUN: opt -passes=globalopt -S < %s | FileCheck %s ; RUN: opt -passes=globalopt -S < %s | FileCheck %s %s = type { i32 } diff --git a/llvm/test/Transforms/GlobalOpt/null-check-global-value.ll b/llvm/test/Transforms/GlobalOpt/null-check-global-value.ll --- a/llvm/test/Transforms/GlobalOpt/null-check-global-value.ll +++ b/llvm/test/Transforms/GlobalOpt/null-check-global-value.ll @@ -1,4 +1,4 @@ -; RUN: opt -globalopt -S < %s | FileCheck %s +; RUN: opt -passes=globalopt -S < %s | FileCheck %s %sometype = type { i8* } diff --git a/llvm/test/Transforms/GlobalOpt/null-check-is-use-pr35760.ll b/llvm/test/Transforms/GlobalOpt/null-check-is-use-pr35760.ll --- a/llvm/test/Transforms/GlobalOpt/null-check-is-use-pr35760.ll +++ b/llvm/test/Transforms/GlobalOpt/null-check-is-use-pr35760.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -globalopt -o - < %s 
| FileCheck %s +; RUN: opt -S -passes=globalopt -o - < %s | FileCheck %s target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/GlobalOpt/null-check-not-use-pr35760.ll b/llvm/test/Transforms/GlobalOpt/null-check-not-use-pr35760.ll --- a/llvm/test/Transforms/GlobalOpt/null-check-not-use-pr35760.ll +++ b/llvm/test/Transforms/GlobalOpt/null-check-not-use-pr35760.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -globalopt -o - < %s | FileCheck %s +; RUN: opt -S -passes=globalopt -o - < %s | FileCheck %s ; No malloc promotion with non-null check. diff --git a/llvm/test/Transforms/GlobalOpt/phi-select.ll b/llvm/test/Transforms/GlobalOpt/phi-select.ll --- a/llvm/test/Transforms/GlobalOpt/phi-select.ll +++ b/llvm/test/Transforms/GlobalOpt/phi-select.ll @@ -1,7 +1,7 @@ ; Test that PHI nodes and select instructions do not necessarily make stuff ; non-constant. -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; CHECK-NOT: global @X = internal global i32 4 ; [#uses=2] diff --git a/llvm/test/Transforms/GlobalOpt/pr21191.ll b/llvm/test/Transforms/GlobalOpt/pr21191.ll --- a/llvm/test/Transforms/GlobalOpt/pr21191.ll +++ b/llvm/test/Transforms/GlobalOpt/pr21191.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s $c = comdat any ; CHECK: $c = comdat any diff --git a/llvm/test/Transforms/GlobalOpt/pr33686.ll b/llvm/test/Transforms/GlobalOpt/pr33686.ll --- a/llvm/test/Transforms/GlobalOpt/pr33686.ll +++ b/llvm/test/Transforms/GlobalOpt/pr33686.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -globalopt %s | FileCheck %s +; RUN: opt -S -passes=globalopt %s | FileCheck %s @glob = external global i16, align 1 diff --git a/llvm/test/Transforms/GlobalOpt/preallocated.ll b/llvm/test/Transforms/GlobalOpt/preallocated.ll 
--- a/llvm/test/Transforms/GlobalOpt/preallocated.ll +++ b/llvm/test/Transforms/GlobalOpt/preallocated.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s declare token @llvm.call.preallocated.setup(i32) declare i8* @llvm.call.preallocated.arg(token, i32) diff --git a/llvm/test/Transforms/GlobalOpt/preserve-comdats.ll b/llvm/test/Transforms/GlobalOpt/preserve-comdats.ll --- a/llvm/test/Transforms/GlobalOpt/preserve-comdats.ll +++ b/llvm/test/Transforms/GlobalOpt/preserve-comdats.ll @@ -1,4 +1,4 @@ -; RUN: opt -globalopt -S < %s | FileCheck %s +; RUN: opt -passes=globalopt -S < %s | FileCheck %s $comdat_global = comdat any diff --git a/llvm/test/Transforms/GlobalOpt/shrink-address-to-bool.ll b/llvm/test/Transforms/GlobalOpt/shrink-address-to-bool.ll --- a/llvm/test/Transforms/GlobalOpt/shrink-address-to-bool.ll +++ b/llvm/test/Transforms/GlobalOpt/shrink-address-to-bool.ll @@ -1,4 +1,4 @@ -;RUN: opt -S -globalopt -f %s | FileCheck %s +;RUN: opt -S -passes=globalopt -f %s | FileCheck %s ;CHECK: @foo = {{.*}}, !dbg !0 @foo = global i64 ptrtoint ([1 x i64]* @baa to i64), align 8, !dbg !0 diff --git a/llvm/test/Transforms/GlobalOpt/shrink-global-to-bool-check-debug.ll b/llvm/test/Transforms/GlobalOpt/shrink-global-to-bool-check-debug.ll --- a/llvm/test/Transforms/GlobalOpt/shrink-global-to-bool-check-debug.ll +++ b/llvm/test/Transforms/GlobalOpt/shrink-global-to-bool-check-debug.ll @@ -1,4 +1,4 @@ -;RUN: opt -S -debugify -globalopt -f %s | FileCheck %s +; RUN: opt -S -passes=debugify,globalopt -f %s | FileCheck %s @foo = internal global i32 0, align 4 diff --git a/llvm/test/Transforms/GlobalOpt/static-const-bitcast.ll b/llvm/test/Transforms/GlobalOpt/static-const-bitcast.ll --- a/llvm/test/Transforms/GlobalOpt/static-const-bitcast.ll +++ b/llvm/test/Transforms/GlobalOpt/static-const-bitcast.ll @@ -1,4 +1,4 @@ -; RUN: opt -globalopt %s -S -o - | FileCheck %s +; RUN: opt -passes=globalopt %s -S -o - | 
FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/GlobalOpt/store-struct-element.ll b/llvm/test/Transforms/GlobalOpt/store-struct-element.ll --- a/llvm/test/Transforms/GlobalOpt/store-struct-element.ll +++ b/llvm/test/Transforms/GlobalOpt/store-struct-element.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S -o - | FileCheck %s +; RUN: opt < %s -passes=globalopt -S -o - | FileCheck %s %class.Class = type { i8, i8, i8, i8 } @A = local_unnamed_addr global %class.Class undef, align 4 diff --git a/llvm/test/Transforms/GlobalOpt/storepointer-compare-no-null-opt.ll b/llvm/test/Transforms/GlobalOpt/storepointer-compare-no-null-opt.ll --- a/llvm/test/Transforms/GlobalOpt/storepointer-compare-no-null-opt.ll +++ b/llvm/test/Transforms/GlobalOpt/storepointer-compare-no-null-opt.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; CHECK: global @G = internal global void ()* null ; [#uses=2] diff --git a/llvm/test/Transforms/GlobalOpt/storepointer-compare.ll b/llvm/test/Transforms/GlobalOpt/storepointer-compare.ll --- a/llvm/test/Transforms/GlobalOpt/storepointer-compare.ll +++ b/llvm/test/Transforms/GlobalOpt/storepointer-compare.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; CHECK: call void @Actual ; Check that a comparison does not prevent an indirect call from being made diff --git a/llvm/test/Transforms/GlobalOpt/storepointer-no-null-opt.ll b/llvm/test/Transforms/GlobalOpt/storepointer-no-null-opt.ll --- a/llvm/test/Transforms/GlobalOpt/storepointer-no-null-opt.ll +++ b/llvm/test/Transforms/GlobalOpt/storepointer-no-null-opt.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s @G = internal global void ()* null ; [#uses=2] ; CHECK: global diff --git 
a/llvm/test/Transforms/GlobalOpt/storepointer.ll b/llvm/test/Transforms/GlobalOpt/storepointer.ll --- a/llvm/test/Transforms/GlobalOpt/storepointer.ll +++ b/llvm/test/Transforms/GlobalOpt/storepointer.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; CHECK-NOT: global @G = internal global void ()* null ; [#uses=2] diff --git a/llvm/test/Transforms/GlobalOpt/tls.ll b/llvm/test/Transforms/GlobalOpt/tls.ll --- a/llvm/test/Transforms/GlobalOpt/tls.ll +++ b/llvm/test/Transforms/GlobalOpt/tls.ll @@ -1,5 +1,5 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s -; RUN: opt -emulated-tls < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s +; RUN: opt -emulated-tls < %s -passes=globalopt -S | FileCheck %s declare void @wait() declare void @signal() diff --git a/llvm/test/Transforms/GlobalOpt/trivialstore.ll b/llvm/test/Transforms/GlobalOpt/trivialstore.ll --- a/llvm/test/Transforms/GlobalOpt/trivialstore.ll +++ b/llvm/test/Transforms/GlobalOpt/trivialstore.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; CHECK-NOT: G @G = internal global i32 17 ; [#uses=3] diff --git a/llvm/test/Transforms/GlobalOpt/undef-init.ll b/llvm/test/Transforms/GlobalOpt/undef-init.ll --- a/llvm/test/Transforms/GlobalOpt/undef-init.ll +++ b/llvm/test/Transforms/GlobalOpt/undef-init.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -globalopt -S | FileCheck %s +; RUN: opt < %s -passes=globalopt -S | FileCheck %s ; CHECK-NOT: store @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }] [ { i32, void ()*, i8* } { i32 65535, void ()* @_GLOBAL__I__Z3foov, i8* null } ] ; <[1 x { i32, void ()*, i8* }]*> [#uses=0] diff --git a/llvm/test/Transforms/GlobalOpt/unnamed-addr.ll b/llvm/test/Transforms/GlobalOpt/unnamed-addr.ll --- a/llvm/test/Transforms/GlobalOpt/unnamed-addr.ll +++ b/llvm/test/Transforms/GlobalOpt/unnamed-addr.ll @@ -1,4 +1,4 @@ 
-; RUN: opt -globalopt -S < %s | FileCheck %s +; RUN: opt -passes=globalopt -S < %s | FileCheck %s @a = internal global i32 0, align 4 @b = internal global i32 0, align 4 diff --git a/llvm/test/Transforms/GlobalOpt/zeroinitializer-gep-load.ll b/llvm/test/Transforms/GlobalOpt/zeroinitializer-gep-load.ll --- a/llvm/test/Transforms/GlobalOpt/zeroinitializer-gep-load.ll +++ b/llvm/test/Transforms/GlobalOpt/zeroinitializer-gep-load.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -S -globalopt | FileCheck %s +; RUN: opt < %s -S -passes=globalopt | FileCheck %s @zero = internal global [10 x i32] zeroinitializer diff --git a/llvm/test/Transforms/InstCombine/and-compare.ll b/llvm/test/Transforms/InstCombine/and-compare.ll --- a/llvm/test/Transforms/InstCombine/and-compare.ll +++ b/llvm/test/Transforms/InstCombine/and-compare.ll @@ -33,8 +33,8 @@ define i1 @test2(i64 %A) { ; CHECK-LABEL: @test2( -; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A:%.*]] to i8 -; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[TMP1]], -1 +; CHECK-NEXT: [[AND:%.*]] = and i64 [[A:%.*]], 128 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[AND]], 0 ; CHECK-NEXT: ret i1 [[CMP]] ; %and = and i64 %A, 128 @@ -44,8 +44,8 @@ define <2 x i1> @test2vec(<2 x i64> %A) { ; CHECK-LABEL: @test2vec( -; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i8> -; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <2 x i8> [[TMP1]], +; CHECK-NEXT: [[AND:%.*]] = and <2 x i64> [[A:%.*]], +; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i64> [[AND]], zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[CMP]] ; %and = and <2 x i64> %A, @@ -55,8 +55,8 @@ define i1 @test3(i64 %A) { ; CHECK-LABEL: @test3( -; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A:%.*]] to i8 -; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[TMP1]], 0 +; CHECK-NEXT: [[AND:%.*]] = and i64 [[A:%.*]], 128 +; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[AND]], 0 ; CHECK-NEXT: ret i1 [[CMP]] ; %and = and i64 %A, 128 @@ -66,8 +66,8 @@ define <2 x i1> @test3vec(<2 x i64> %A) { ; CHECK-LABEL: @test3vec( -; CHECK-NEXT: 
[[TMP1:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i8> -; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i8> [[TMP1]], zeroinitializer +; CHECK-NEXT: [[AND:%.*]] = and <2 x i64> [[A:%.*]], +; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i64> [[AND]], zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[CMP]] ; %and = and <2 x i64> %A, diff --git a/llvm/test/Transforms/InstCombine/compare-signs.ll b/llvm/test/Transforms/InstCombine/compare-signs.ll --- a/llvm/test/Transforms/InstCombine/compare-signs.ll +++ b/llvm/test/Transforms/InstCombine/compare-signs.ll @@ -178,13 +178,12 @@ ret <2 x i1> %r } -; negative test +; negative test - but this reduces with a mask op define i1 @shift_trunc_wrong_shift(i32 %x) { ; CHECK-LABEL: @shift_trunc_wrong_shift( -; CHECK-NEXT: [[SH:%.*]] = lshr i32 [[X:%.*]], 23 -; CHECK-NEXT: [[TR:%.*]] = trunc i32 [[SH]] to i8 -; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[TR]], 0 +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], 1073741824 +; CHECK-NEXT: [[R:%.*]] = icmp ne i32 [[TMP1]], 0 ; CHECK-NEXT: ret i1 [[R]] ; %sh = lshr i32 %x, 23 diff --git a/llvm/test/Transforms/InstCombine/getelementptr.ll b/llvm/test/Transforms/InstCombine/getelementptr.ll --- a/llvm/test/Transforms/InstCombine/getelementptr.ll +++ b/llvm/test/Transforms/InstCombine/getelementptr.ll @@ -359,8 +359,8 @@ ; Larger than the pointer size for a non-zero address space define i1 @test18_as1(i16 addrspace(1)* %P, i32 %I) { ; CHECK-LABEL: @test18_as1( -; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[I:%.*]] to i16 -; CHECK-NEXT: [[C:%.*]] = icmp slt i16 [[TMP1]], 0 +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[I:%.*]], 32768 +; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[TMP1]], 0 ; CHECK-NEXT: ret i1 [[C]] ; %X = getelementptr inbounds i16, i16 addrspace(1)* %P, i32 %I @@ -371,8 +371,8 @@ ; Smaller than the pointer size for a non-zero address space define i1 @test18_as1_i32(i16 addrspace(1)* %P, i32 %I) { ; CHECK-LABEL: @test18_as1_i32( -; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[I:%.*]] to i16 -; CHECK-NEXT: [[C:%.*]] = 
icmp slt i16 [[TMP1]], 0 +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[I:%.*]], 32768 +; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[TMP1]], 0 ; CHECK-NEXT: ret i1 [[C]] ; %X = getelementptr inbounds i16, i16 addrspace(1)* %P, i32 %I @@ -405,8 +405,8 @@ ; Larger than the pointer size define i1 @test18_i128(i16* %P, i128 %I) { ; CHECK-LABEL: @test18_i128( -; CHECK-NEXT: [[TMP1:%.*]] = trunc i128 [[I:%.*]] to i64 -; CHECK-NEXT: [[C:%.*]] = icmp slt i64 [[TMP1]], 0 +; CHECK-NEXT: [[TMP1:%.*]] = and i128 [[I:%.*]], 9223372036854775808 +; CHECK-NEXT: [[C:%.*]] = icmp ne i128 [[TMP1]], 0 ; CHECK-NEXT: ret i1 [[C]] ; %X = getelementptr inbounds i16, i16* %P, i128 %I diff --git a/llvm/test/Transforms/InstCombine/icmp-trunc.ll b/llvm/test/Transforms/InstCombine/icmp-trunc.ll --- a/llvm/test/Transforms/InstCombine/icmp-trunc.ll +++ b/llvm/test/Transforms/InstCombine/icmp-trunc.ll @@ -221,8 +221,8 @@ define i1 @slt_0(i32 %x) { ; CHECK-LABEL: @slt_0( -; CHECK-NEXT: [[T:%.*]] = trunc i32 [[X:%.*]] to i8 -; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[T]], 0 +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], 128 +; CHECK-NEXT: [[R:%.*]] = icmp ne i32 [[TMP1]], 0 ; CHECK-NEXT: ret i1 [[R]] ; %t = trunc i32 %x to i8 @@ -232,8 +232,8 @@ define <2 x i1> @slt_0_splat(<2 x i16> %x) { ; CHECK-LABEL: @slt_0_splat( -; CHECK-NEXT: [[T:%.*]] = trunc <2 x i16> [[X:%.*]] to <2 x i11> -; CHECK-NEXT: [[R:%.*]] = icmp slt <2 x i11> [[T]], zeroinitializer +; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i16> [[X:%.*]], +; CHECK-NEXT: [[R:%.*]] = icmp ne <2 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[R]] ; %t = trunc <2 x i16> %x to <2 x i11> @@ -267,8 +267,8 @@ define i1 @sgt_n1(i32 %x) { ; CHECK-LABEL: @sgt_n1( -; CHECK-NEXT: [[T:%.*]] = trunc i32 [[X:%.*]] to i8 -; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[T]], -1 +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], 128 +; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[TMP1]], 0 ; CHECK-NEXT: ret i1 [[R]] ; %t = trunc i32 %x to i8 @@ -278,8 +278,8 @@ define <2 x i1> 
@sgt_n1_splat(<2 x i16> %x) { ; CHECK-LABEL: @sgt_n1_splat( -; CHECK-NEXT: [[T:%.*]] = trunc <2 x i16> [[X:%.*]] to <2 x i11> -; CHECK-NEXT: [[R:%.*]] = icmp sgt <2 x i11> [[T]], +; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i16> [[X:%.*]], +; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i16> [[TMP1]], zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[R]] ; %t = trunc <2 x i16> %x to <2 x i11> diff --git a/llvm/test/Transforms/InstCombine/select-icmp-and.ll b/llvm/test/Transforms/InstCombine/select-icmp-and.ll --- a/llvm/test/Transforms/InstCombine/select-icmp-and.ll +++ b/llvm/test/Transforms/InstCombine/select-icmp-and.ll @@ -52,8 +52,8 @@ ; Make sure we can still perform this optimization with a truncate present define i32 @test35_with_trunc(i64 %x) { ; CHECK-LABEL: @test35_with_trunc( -; CHECK-NEXT: [[X1:%.*]] = trunc i64 [[X:%.*]] to i32 -; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X1]], -1 +; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[X:%.*]], 2147483648 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[TMP1]], 0 ; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 60, i32 100 ; CHECK-NEXT: ret i32 [[COND]] ; @@ -253,8 +253,8 @@ define i32 @test73(i32 %x) { ; CHECK-LABEL: @test73( -; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[X:%.*]] to i8 -; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i8 [[TMP1]], -1 +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], 128 +; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0 ; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 40, i32 42 ; CHECK-NEXT: ret i32 [[TMP3]] ; @@ -266,8 +266,8 @@ define <2 x i32> @test73vec(<2 x i32> %x) { ; CHECK-LABEL: @test73vec( -; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[X:%.*]] to <2 x i8> -; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <2 x i8> [[TMP1]], +; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[X:%.*]], +; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <2 x i32> [[TMP1]], zeroinitializer ; CHECK-NEXT: [[TMP3:%.*]] = select <2 x i1> [[TMP2]], <2 x i32> , <2 x i32> ; CHECK-NEXT: ret <2 x i32> [[TMP3]] ; diff --git 
a/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll b/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll --- a/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll +++ b/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll @@ -642,10 +642,10 @@ define i32 @test68(i32 %x, i32 %y) { ; CHECK-LABEL: @test68( -; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 6 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], 2 -; CHECK-NEXT: [[TMP3:%.*]] = or i32 [[TMP2]], [[Y:%.*]] -; CHECK-NEXT: ret i32 [[TMP3]] +; CHECK-NEXT: [[AND:%.*]] = lshr i32 [[X:%.*]], 6 +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 2 +; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: ret i32 [[TMP2]] ; %and = and i32 %x, 128 %cmp = icmp eq i32 %and, 0 @@ -656,10 +656,10 @@ define <2 x i32> @test68vec(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @test68vec( -; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], -; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i32> [[TMP2]], [[Y:%.*]] -; CHECK-NEXT: ret <2 x i32> [[TMP3]] +; CHECK-NEXT: [[AND:%.*]] = lshr <2 x i32> [[X:%.*]], +; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[AND]], +; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i32> [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: ret <2 x i32> [[TMP2]] ; %and = and <2 x i32> %x, %cmp = icmp eq <2 x i32> %and, zeroinitializer @@ -670,8 +670,8 @@ define i32 @test68_xor(i32 %x, i32 %y) { ; CHECK-LABEL: @test68_xor( -; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[X:%.*]] to i8 -; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[TMP1]], -1 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 128 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0 ; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[Y:%.*]], 2 ; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[Y]], i32 [[XOR]] ; CHECK-NEXT: ret i32 [[SELECT]] @@ -685,8 +685,8 @@ define i32 @test68_and(i32 %x, i32 %y) { ; CHECK-LABEL: @test68_and( -; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[X:%.*]] to i8 -; CHECK-NEXT: 
[[CMP:%.*]] = icmp sgt i8 [[TMP1]], -1 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 128 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0 ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[Y:%.*]], -3 ; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[Y]], i32 [[AND2]] ; CHECK-NEXT: ret i32 [[SELECT]] @@ -700,11 +700,11 @@ define i32 @test69(i32 %x, i32 %y) { ; CHECK-LABEL: @test69( -; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 6 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], 2 -; CHECK-NEXT: [[TMP3:%.*]] = xor i32 [[TMP2]], 2 -; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP3]], [[Y:%.*]] -; CHECK-NEXT: ret i32 [[TMP4]] +; CHECK-NEXT: [[AND:%.*]] = lshr i32 [[X:%.*]], 6 +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 2 +; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], 2 +; CHECK-NEXT: [[TMP3:%.*]] = or i32 [[TMP2]], [[Y:%.*]] +; CHECK-NEXT: ret i32 [[TMP3]] ; %and = and i32 %x, 128 %cmp = icmp ne i32 %and, 0 @@ -715,11 +715,11 @@ define <2 x i32> @test69vec(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @test69vec( -; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], -; CHECK-NEXT: [[TMP3:%.*]] = xor <2 x i32> [[TMP2]], -; CHECK-NEXT: [[TMP4:%.*]] = or <2 x i32> [[TMP3]], [[Y:%.*]] -; CHECK-NEXT: ret <2 x i32> [[TMP4]] +; CHECK-NEXT: [[AND:%.*]] = lshr <2 x i32> [[X:%.*]], +; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[AND]], +; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i32> [[TMP1]], +; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i32> [[TMP2]], [[Y:%.*]] +; CHECK-NEXT: ret <2 x i32> [[TMP3]] ; %and = and <2 x i32> %x, %cmp = icmp ne <2 x i32> %and, zeroinitializer @@ -730,8 +730,8 @@ define i32 @test69_xor(i32 %x, i32 %y) { ; CHECK-LABEL: @test69_xor( -; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[X:%.*]] to i8 -; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp sgt i8 [[TMP1]], -1 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 128 +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[AND]], 0 ; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[Y:%.*]], 2 ; CHECK-NEXT: 
[[SELECT:%.*]] = select i1 [[CMP_NOT]], i32 [[XOR]], i32 [[Y]] ; CHECK-NEXT: ret i32 [[SELECT]] @@ -745,8 +745,8 @@ define i32 @test69_and(i32 %x, i32 %y) { ; CHECK-LABEL: @test69_and( -; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[X:%.*]] to i8 -; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp sgt i8 [[TMP1]], -1 +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 128 +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[AND]], 0 ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[Y:%.*]], 2 ; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP_NOT]], i32 [[AND2]], i32 [[Y]] ; CHECK-NEXT: ret i32 [[SELECT]] diff --git a/llvm/test/Transforms/InstCombine/signed-truncation-check.ll b/llvm/test/Transforms/InstCombine/signed-truncation-check.ll --- a/llvm/test/Transforms/InstCombine/signed-truncation-check.ll +++ b/llvm/test/Transforms/InstCombine/signed-truncation-check.ll @@ -425,8 +425,8 @@ define i1 @positive_different_trunc_both(i32 %arg) { ; CHECK-LABEL: @positive_different_trunc_both( -; CHECK-NEXT: [[T1:%.*]] = trunc i32 [[ARG:%.*]] to i15 -; CHECK-NEXT: [[T2:%.*]] = icmp sgt i15 [[T1]], -1 +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[ARG:%.*]], 16384 +; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[TMP1]], 0 ; CHECK-NEXT: [[T3:%.*]] = trunc i32 [[ARG]] to i16 ; CHECK-NEXT: [[T4:%.*]] = add i16 [[T3]], 128 ; CHECK-NEXT: [[T5:%.*]] = icmp ult i16 [[T4]], 256 @@ -444,8 +444,8 @@ define i1 @positive_different_trunc_both_logical(i32 %arg) { ; CHECK-LABEL: @positive_different_trunc_both_logical( -; CHECK-NEXT: [[T1:%.*]] = trunc i32 [[ARG:%.*]] to i15 -; CHECK-NEXT: [[T2:%.*]] = icmp sgt i15 [[T1]], -1 +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[ARG:%.*]], 16384 +; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[TMP1]], 0 ; CHECK-NEXT: [[T3:%.*]] = trunc i32 [[ARG]] to i16 ; CHECK-NEXT: [[T4:%.*]] = add i16 [[T3]], 128 ; CHECK-NEXT: [[T5:%.*]] = icmp ult i16 [[T4]], 256 @@ -717,8 +717,8 @@ define i1 @negative_trunc_not_arg(i32 %arg, i32 %arg2) { ; CHECK-LABEL: @negative_trunc_not_arg( -; CHECK-NEXT: [[T1:%.*]] = trunc i32 [[ARG:%.*]] to 
i8 -; CHECK-NEXT: [[T2:%.*]] = icmp sgt i8 [[T1]], -1 +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[ARG:%.*]], 128 +; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[TMP1]], 0 ; CHECK-NEXT: [[T3:%.*]] = add i32 [[ARG2:%.*]], 128 ; CHECK-NEXT: [[T4:%.*]] = icmp ult i32 [[T3]], 256 ; CHECK-NEXT: [[T5:%.*]] = and i1 [[T2]], [[T4]] @@ -734,8 +734,8 @@ define i1 @negative_trunc_not_arg_logical(i32 %arg, i32 %arg2) { ; CHECK-LABEL: @negative_trunc_not_arg_logical( -; CHECK-NEXT: [[T1:%.*]] = trunc i32 [[ARG:%.*]] to i8 -; CHECK-NEXT: [[T2:%.*]] = icmp sgt i8 [[T1]], -1 +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[ARG:%.*]], 128 +; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[TMP1]], 0 ; CHECK-NEXT: [[T3:%.*]] = add i32 [[ARG2:%.*]], 128 ; CHECK-NEXT: [[T4:%.*]] = icmp ult i32 [[T3]], 256 ; CHECK-NEXT: [[T5:%.*]] = select i1 [[T2]], i1 [[T4]], i1 false diff --git a/llvm/test/Transforms/LoopStrengthReduce/X86/nested-ptr-addrec.ll b/llvm/test/Transforms/LoopStrengthReduce/X86/nested-ptr-addrec.ll new file mode 100644 --- /dev/null +++ b/llvm/test/Transforms/LoopStrengthReduce/X86/nested-ptr-addrec.ll @@ -0,0 +1,61 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -S -loop-reduce < %s | FileCheck %s + +; Test an assertion failure from D113349, where the SCEV for the outer phi +; gets computed and registered in the value map while attempting to compute it. 
+ +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +define void @test() { +; CHECK-LABEL: @test( +; CHECK-NEXT: entry: +; CHECK-NEXT: br label [[LOOP_HEADER:%.*]] +; CHECK: loop.header: +; CHECK-NEXT: [[LSR_IV:%.*]] = phi i64* [ [[SCEVGEP:%.*]], [[LOOP_LATCH:%.*]] ], [ inttoptr (i64 -8 to i64*), [[ENTRY:%.*]] ] +; CHECK-NEXT: br i1 true, label [[LOOP_EXIT:%.*]], label [[LOOP2_PREHEADER:%.*]] +; CHECK: loop.exit: +; CHECK-NEXT: ret void +; CHECK: loop2.preheader: +; CHECK-NEXT: br label [[LOOP2_HEADER:%.*]] +; CHECK: loop2.header: +; CHECK-NEXT: [[LSR_IV1:%.*]] = phi i64* [ [[SCEVGEP2:%.*]], [[LOOP2_HEADER]] ], [ [[LSR_IV]], [[LOOP2_PREHEADER]] ] +; CHECK-NEXT: [[SCEVGEP2]] = getelementptr i64, i64* [[LSR_IV1]], i64 1 +; CHECK-NEXT: [[SCEVGEP23:%.*]] = bitcast i64* [[SCEVGEP2]] to i8* +; CHECK-NEXT: br i1 false, label [[LOOP2_HEADER]], label [[LOOP2_CONT:%.*]] +; CHECK: loop2.cont: +; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[SCEVGEP23]], align 1 +; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[V]], 0 +; CHECK-NEXT: br i1 [[C]], label [[LOOP_EXIT]], label [[LOOP_LATCH]] +; CHECK: loop.latch: +; CHECK-NEXT: [[SCEVGEP]] = getelementptr i64, i64* [[LSR_IV]], i64 1 +; CHECK-NEXT: br label [[LOOP_HEADER]] +; +entry: + br label %loop.header + +loop.header: + %ptr = phi i64* [ %ptr.next, %loop.latch ], [ null, %entry ] + br i1 true, label %loop.exit, label %loop2.preheader + +loop.exit: + ret void + +loop2.preheader: + br label %loop2.header + +loop2.header: + %ptr2 = phi i64* [ %ptr, %loop2.preheader ], [ %ptr2.next, %loop2.header ] + %ptr2.next = getelementptr inbounds i64, i64* %ptr2, i64 1 + br i1 false, label %loop2.header, label %loop2.cont + +loop2.cont: + %ptr2.i8 = bitcast i64* %ptr2 to i8* + %v = load i8, i8* %ptr2.i8 + %c = icmp ne i8 %v, 0 + br i1 %c, label %loop.exit, label %loop.latch + +loop.latch: + %ptr.next = getelementptr inbounds i64, i64* %ptr, i64 1 + br label %loop.header 
+} diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/predication_costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/predication_costs.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/predication_costs.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/predication_costs.ll @@ -90,7 +90,7 @@ ; ; Same as predicate_store except we use a pointer PHI to maintain the address ; -; CHECK: Found new scalar instruction: %addr = phi i32* [ %a, %entry ], [ %addr.next, %for.inc ] +; CHECK: Found scalar instruction: %addr = phi i32* [ %a, %entry ], [ %addr.next, %for.inc ] ; CHECK: Found scalar instruction: %addr.next = getelementptr inbounds i32, i32* %addr, i64 1 ; CHECK: Scalarizing and predicating: store i32 %tmp2, i32* %addr, align 4 ; CHECK: Found an estimated cost of 0 for VF 2 For instruction: %addr = phi i32* [ %a, %entry ], [ %addr.next, %for.inc ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll @@ -41,35 +41,39 @@ ; CHECK-NEXT: [[IND_END3:%.*]] = getelementptr i8, i8* [[START_2:%.*]], i64 [[N_VEC]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: +; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi i8* [ [[START_2]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 0 ; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 0 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8*, i8** [[START_1]], i64 [[TMP5]] -; CHECK-NEXT: [[TMP6:%.*]] = call @llvm.experimental.stepvector.nxv2i64() -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[INDEX]], i32 0 +; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 2 +; CHECK-NEXT: [[TMP8:%.*]] = 
mul i64 [[TMP7]], 1 +; CHECK-NEXT: [[TMP9:%.*]] = mul i64 1, [[TMP8]] +; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP7]], 0 +; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[TMP10]], i32 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: [[TMP7:%.*]] = add zeroinitializer, [[TMP6]] -; CHECK-NEXT: [[TMP8:%.*]] = add [[DOTSPLAT]], [[TMP7]] -; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, i8* [[START_2]], [[TMP8]] -; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 0 -; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, i8* [[START_2]], i64 [[TMP9]] -; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, i8* [[START_2]], i64 [[TMP10]] -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, [[NEXT_GEP4]], i64 1 -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8*, i8** [[NEXT_GEP]], i32 0 -; CHECK-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to * -; CHECK-NEXT: store [[TMP11]], * [[TMP13]], align 8 -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, i8* [[NEXT_GEP5]], i32 0 -; CHECK-NEXT: [[TMP15:%.*]] = bitcast i8* [[TMP14]] to * -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , * [[TMP15]], align 1 -; CHECK-NEXT: [[TMP16:%.*]] = add [[WIDE_LOAD]], shufflevector ( insertelement ( poison, i8 1, i32 0), poison, zeroinitializer) -; CHECK-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP14]] to * -; CHECK-NEXT: store [[TMP16]], * [[TMP17]], align 1 -; CHECK-NEXT: [[TMP18:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP19:%.*]] = mul i64 [[TMP18]], 2 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP19]] -; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: [[TMP11:%.*]] = call @llvm.experimental.stepvector.nxv2i64() +; CHECK-NEXT: [[TMP12:%.*]] = add [[DOTSPLAT]], [[TMP11]] +; CHECK-NEXT: [[VECTOR_GEP:%.*]] = mul 
[[TMP12]], shufflevector ( insertelement ( poison, i64 1, i32 0), poison, zeroinitializer) +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, i8* [[POINTER_PHI]], [[VECTOR_GEP]] +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, [[TMP13]], i64 1 +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8*, i8** [[NEXT_GEP]], i32 0 +; CHECK-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to * +; CHECK-NEXT: store [[TMP14]], * [[TMP16]], align 8 +; CHECK-NEXT: [[TMP17:%.*]] = extractelement [[TMP13]], i32 0 +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i8, i8* [[TMP17]], i32 0 +; CHECK-NEXT: [[TMP19:%.*]] = bitcast i8* [[TMP18]] to * +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , * [[TMP19]], align 1 +; CHECK-NEXT: [[TMP20:%.*]] = add [[WIDE_LOAD]], shufflevector ( insertelement ( poison, i8 1, i32 0), poison, zeroinitializer) +; CHECK-NEXT: [[TMP21:%.*]] = bitcast i8* [[TMP18]] to * +; CHECK-NEXT: store [[TMP20]], * [[TMP21]], align 1 +; CHECK-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP23:%.*]] = mul i64 [[TMP22]], 2 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP23]] +; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, i8* [[POINTER_PHI]], i64 [[TMP9]] +; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] @@ -119,32 +123,66 @@ define void @pointer_induction(i8* noalias %start, i64 %N) { ; CHECK-LABEL: @pointer_induction( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], 1 +; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP2:%.*]] = mul i64 [[TMP1]], 2 +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], [[TMP2]] +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: 
vector.ph: -; CHECK: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i8* [[START:%.*]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 2 +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], [[TMP4]] +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]] +; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, i8* [[START:%.*]], i64 [[N_VEC]] +; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, i8* [[START]], i32 0 ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer +; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: +; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi i8* [ [[START]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP5:%.*]] = call @llvm.experimental.stepvector.nxv2i64() -; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[INDEX1]], i32 0 +; CHECK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 +; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 1 +; CHECK-NEXT: [[TMP8:%.*]] = mul i64 1, [[TMP7]] +; CHECK-NEXT: [[TMP9:%.*]] = mul i64 [[TMP6]], 0 +; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement poison, i64 [[TMP9]], i32 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector [[DOTSPLATINSERT]], poison, zeroinitializer -; CHECK-NEXT: [[TMP6:%.*]] = add zeroinitializer, [[TMP5]] -; CHECK-NEXT: [[TMP7:%.*]] = add [[DOTSPLAT]], [[TMP6]] -; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, i8* [[START]], [[TMP7]] -; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX1]], 0 -; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, i8* [[START]], i64 [[TMP8]] -; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX1]], 1 -; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, i8* [[START]], i64 [[TMP9]] -; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX1]], 0 -; 
CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, i8* [[NEXT_GEP3]], i32 0 -; CHECK-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP11]] to * -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , * [[TMP12]], align 1 -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, [[NEXT_GEP]], i64 1 -; CHECK-NEXT: [[TMP14:%.*]] = icmp eq [[TMP13]], [[BROADCAST_SPLAT]] -; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 2 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP16]] -; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: [[TMP10:%.*]] = call @llvm.experimental.stepvector.nxv2i64() +; CHECK-NEXT: [[TMP11:%.*]] = add [[DOTSPLAT]], [[TMP10]] +; CHECK-NEXT: [[VECTOR_GEP:%.*]] = mul [[TMP11]], shufflevector ( insertelement ( poison, i64 1, i32 0), poison, zeroinitializer) +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, i8* [[POINTER_PHI]], [[VECTOR_GEP]] +; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX1]], 0 +; CHECK-NEXT: [[TMP14:%.*]] = extractelement [[TMP12]], i32 0 +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, i8* [[TMP14]], i32 0 +; CHECK-NEXT: [[TMP16:%.*]] = bitcast i8* [[TMP15]] to * +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , * [[TMP16]], align 1 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, [[TMP12]], i64 1 +; CHECK-NEXT: [[TMP18:%.*]] = icmp eq [[TMP17]], [[BROADCAST_SPLAT]] +; CHECK-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 2 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX1]], [[TMP20]] +; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, i8* [[POINTER_PHI]], i64 [[TMP8]] +; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK: middle.block: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp 
eq i64 [[TMP0]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label [[END:%.*]], label [[SCALAR_PH]] +; CHECK: scalar.ph: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i8* [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[START]], [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ] +; CHECK-NEXT: br label [[FOR_BODY:%.*]] +; CHECK: for.body: +; CHECK-NEXT: [[PTR_PHI:%.*]] = phi i8* [ [[PTR_PHI_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ [[INDEX_NXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL2]], [[SCALAR_PH]] ] +; CHECK-NEXT: [[INDEX_NXT]] = add i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP22:%.*]] = load i8, i8* [[PTR_PHI]], align 1 +; CHECK-NEXT: [[PTR_PHI_NEXT]] = getelementptr inbounds i8, i8* [[PTR_PHI]], i64 1 +; CHECK-NEXT: [[CMP_I_NOT:%.*]] = icmp eq i8* [[PTR_PHI_NEXT]], [[START]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDEX]], [[N]] +; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[END]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK: end: +; CHECK-NEXT: ret void ; entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll @@ -124,35 +124,27 @@ define i32 @pointer_iv_mixed(i32* noalias %a, i32** noalias %b, i64 %n) #0 { ; CHECK-LABEL: @pointer_iv_mixed( -; CHECK: vector.body -; CHECK: %[[IDX:.*]] = phi i64 [ 0, %vector.ph ], [ %{{.*}}, %vector.body ] -; CHECK: %[[STEPVEC:.*]] = call @llvm.experimental.stepvector.nxv2i64() -; CHECK-NEXT: %[[TMP1:.*]] = insertelement poison, i64 %[[IDX]], i32 0 -; CHECK-NEXT: %[[TMP2:.*]] = shufflevector %[[TMP1]], poison, zeroinitializer -; CHECK-NEXT: %[[VECIND1:.*]] = add %[[TMP2]], %[[STEPVEC]] -; CHECK-NEXT: %[[APTRS1:.*]] = getelementptr i32, i32* %a, %[[VECIND1]] -; CHECK-NEXT: 
%[[GEPA1:.*]] = getelementptr i32, i32* %a, i64 %[[IDX]] -; CHECK-NEXT: %[[VSCALE64:.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: %[[VSCALE64X2:.*]] = shl nuw nsw i64 %[[VSCALE64]], 1 -; CHECK-NEXT: %[[TMP3:.*]] = insertelement poison, i64 %[[VSCALE64X2]], i32 0 -; CHECK-NEXT: %[[TMP4:.*]] = shufflevector %[[TMP3]], poison, zeroinitializer -; CHECK-NEXT: %[[TMP5:.*]] = add %[[TMP4]], %[[STEPVEC]] -; CHECK-NEXT: %[[VECIND2:.*]] = add %[[TMP2]], %[[TMP5]] -; CHECK-NEXT: %[[APTRS2:.*]] = getelementptr i32, i32* %a, %[[VECIND2]] -; CHECK-NEXT: %[[GEPB1:.*]] = getelementptr i32*, i32** %b, i64 %[[IDX]] -; The following checks that there is no extractelement after -; vectorization when the stepvector has multiple uses, which demonstrates -; the removal of a redundant fmov instruction in the generated asm code. -; CHECK-NOT: %[[EXTRACT:.*]] = extractelement [[APTRS1]], i32 0 -; CHECK: %[[BPTR1:.*]] = bitcast i32** %[[GEPB1]] to * -; CHECK-NEXT: store %[[APTRS1]], * %[[BPTR1]], align 8 -; CHECK: %[[VSCALE32:.*]] = call i32 @llvm.vscale.i32() -; CHECK-NEXT: %[[VSCALE32X2:.*]] = shl nuw nsw i32 %[[VSCALE32]], 1 -; CHECK-NEXT: %[[TMP6:.*]] = zext i32 %[[VSCALE32X2]] to i64 -; CHECK-NEXT: %[[GEPB2:.*]] = getelementptr i32*, i32** %[[GEPB1]], i64 %[[TMP6]] -; CHECK-NEXT: %[[BPTR2:.*]] = bitcast i32** %[[GEPB2]] to * -; CHECK-NEXT store %[[APTRS2]], * %[[BPTR2]], align 8 - +; CHECK: vector.body: +; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi i32* [ %a, %vector.ph ], [ [[PTR_IND:%.*]], %vector.body ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi [ insertelement ( zeroinitializer, i32 0, i32 0), %vector.ph ], [ [[TMP9:%.*]], %vector.body ] +; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 1 +; CHECK-NEXT: [[TMP6:%.*]] = call @llvm.experimental.stepvector.nxv2i64() +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, i32* [[POINTER_PHI]], 
[[TMP6]] +; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i32*, i32** %b, i64 [[INDEX]] +; CHECK-NEXT: [[BC:%.*]] = bitcast [[TMP7]] to *> +; CHECK-NEXT: [[TMP8:%.*]] = extractelement *> [[BC]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load , * [[TMP8]], align 8 +; CHECK-NEXT: [[TMP9]] = add [[WIDE_LOAD]], [[VEC_PHI]] +; CHECK-NEXT: [[TMP10:%.*]] = bitcast i32** [[NEXT_GEP]] to * +; CHECK-NEXT: store [[TMP7]], * [[TMP10]], align 8 +; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP12:%.*]] = shl nuw nsw i64 [[TMP11]], 1 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP12]] +; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], {{.*}} +; CHECK-NEXT: [[PTR_IND]] = getelementptr i32, i32* [[POINTER_PHI]], i64 [[TMP5]] +; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label %vector.body, !llvm.loop [[LOOP7:![0-9]+]] entry: br label %for.body @@ -175,12 +167,57 @@ ret i32 %tmp5 } +define void @phi_used_in_vector_compare_and_scalar_indvar_update_and_store(i16* %ptr) #0 { +; CHECK-LABEL: @phi_used_in_vector_compare_and_scalar_indvar_update_and_store( +; CHECK: vector.body: +; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi i16* [ %ptr, %vector.ph ], [ [[PTR_IND:%.*]], %vector.body ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ] +; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP2]], 1 +; CHECK-NEXT: [[TMP4:%.*]] = call @llvm.experimental.stepvector.nxv2i64() +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i16, i16* [[POINTER_PHI]], [[TMP4]] +; CHECK-NEXT: [[TMP6:%.*]] = icmp ne [[TMP5]], zeroinitializer +; CHECK-NEXT: [[BC:%.*]] = bitcast [[TMP5]] to *> +; CHECK-NEXT: [[TMP7:%.*]] = extractelement *> [[BC]], i32 0 +; CHECK-NEXT: call void @llvm.masked.store.nxv2i16.p0nxv2i16( zeroinitializer, * [[TMP7]], i32 2, [[TMP6]]) +; CHECK-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP9:%.*]] = shl nuw nsw i64 
[[TMP8]], 1 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP9]] +; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], {{.*}} +; CHECK-NEXT: [[PTR_IND]] = getelementptr i16, i16* [[POINTER_PHI]], i64 [[TMP3]] +; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label %vector.body, !llvm.loop [[LOOP9:![0-9]+]] +entry: + br label %for.body + +for.body: ; preds = %if.end, %entry + %iv = phi i64 [ %inc, %if.end ], [ 0, %entry ] + %iv.ptr = phi i16* [ %incdec.iv.ptr, %if.end ], [ %ptr, %entry ] + %cmp.i = icmp ne i16* %iv.ptr, null + br i1 %cmp.i, label %if.end.sink.split, label %if.end + +if.end.sink.split: ; preds = %for.body + store i16 0, i16* %iv.ptr, align 2 + br label %if.end + +if.end: ; preds = %if.end.sink.split, %for.body + %incdec.iv.ptr = getelementptr inbounds i16, i16* %iv.ptr, i64 1 + %inc = add nuw nsw i64 %iv, 1 + %exitcond.not = icmp ult i64 %inc, 1024 + br i1 %exitcond.not, label %for.body, label %for.end, !llvm.loop !6 + +for.end: ; preds = %if.end, %for.end + %iv.ptr.1.lcssa = phi i16* [ %incdec.iv.ptr, %if.end ] + ret void +} + attributes #0 = { vscale_range(0, 16) } + !0 = distinct !{!0, !1, !2, !3, !4, !5} !1 = !{!"llvm.loop.mustprogress"} !2 = !{!"llvm.loop.vectorize.width", i32 4} !3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true} !4 = !{!"llvm.loop.vectorize.enable", i1 true} !5 = !{!"llvm.loop.interleave.count", i32 2} -!6 = distinct !{!6, !1, !7, !3, !4, !5} +!6 = distinct !{!6, !1, !7, !3, !4, !8} !7 = !{!"llvm.loop.vectorize.width", i32 2} +!8 = !{!"llvm.loop.interleave.count", i32 1} diff --git a/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll b/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll --- a/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll +++ b/llvm/test/Transforms/LoopVectorize/consecutive-ptr-uniforms.ll @@ -399,23 +399,13 @@ ; CHECK-NOT: LV: Found uniform instruction: %p = phi i32* [ %tmp3, %for.body ], [ %a, %entry ] ; CHECK: LV: Found uniform 
instruction: %q = phi i32** [ %tmp4, %for.body ], [ %b, %entry ] ; CHECK: vector.body +; CHECK: %pointer.phi = phi i32* [ %a, %vector.ph ], [ %ptr.ind, %vector.body ] ; CHECK: %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] -; CHECK: %next.gep = getelementptr i32, i32* %a, i64 %index -; CHECK: %[[I1:.+]] = or i64 %index, 1 -; CHECK: %next.gep10 = getelementptr i32, i32* %a, i64 %[[I1]] -; CHECK: %[[I2:.+]] = or i64 %index, 2 -; CHECK: %next.gep11 = getelementptr i32, i32* %a, i64 %[[I2]] -; CHECK: %[[I3:.+]] = or i64 %index, 3 -; CHECK: %next.gep12 = getelementptr i32, i32* %a, i64 %[[I3]] -; CHECK: %[[V0:.+]] = insertelement <4 x i32*> poison, i32* %next.gep, i32 0 -; CHECK: %[[V1:.+]] = insertelement <4 x i32*> %[[V0]], i32* %next.gep10, i32 1 -; CHECK: %[[V2:.+]] = insertelement <4 x i32*> %[[V1]], i32* %next.gep11, i32 2 -; CHECK: %[[V3:.+]] = insertelement <4 x i32*> %[[V2]], i32* %next.gep12, i32 3 -; CHECK-NOT: getelementptr -; CHECK: %next.gep13 = getelementptr i32*, i32** %b, i64 %index -; CHECK-NOT: getelementptr -; CHECK: %[[B0:.+]] = bitcast i32** %next.gep13 to <4 x i32*>* -; CHECK: store <4 x i32*> %[[V3]], <4 x i32*>* %[[B0]], align 8 +; CHECK: %[[PTRVEC:.+]] = getelementptr i32, i32* %pointer.phi, <4 x i64> +; CHECK: %next.gep = getelementptr i32*, i32** %b, i64 %index +; CHECK: %[[NEXTGEPBC:.+]] = bitcast i32** %next.gep to <4 x i32*>* +; CHECK: store <4 x i32*> %[[PTRVEC]], <4 x i32*>* %[[NEXTGEPBC]], align 8 +; CHECK: %ptr.ind = getelementptr i32, i32* %pointer.phi, i64 4 ; CHECK: br i1 {{.*}}, label %middle.block, label %vector.body ; define i32 @pointer_iv_mixed(i32* %a, i32** %b, i64 %n) { diff --git a/llvm/test/Transforms/LoopVectorize/pointer-induction.ll b/llvm/test/Transforms/LoopVectorize/pointer-induction.ll --- a/llvm/test/Transforms/LoopVectorize/pointer-induction.ll +++ b/llvm/test/Transforms/LoopVectorize/pointer-induction.ll @@ -21,13 +21,13 @@ ; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, i8* null, i64 
[[TMP1]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi i8* [ null, [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[PRED_STORE_CONTINUE7:%.*]] ] -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE7]] ] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, i8* [[POINTER_PHI]], <4 x i64> -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, <4 x i8*> [[TMP2]], i64 -1 -; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i8*> [[TMP3]], i32 0 -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, i8* [[TMP4]], i32 0 -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[TMP5]], i32 -3 +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE10:%.*]] ] +; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], -1 +; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, i8* null, i64 [[TMP3]] +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, i8* [[NEXT_GEP]], i64 -1 +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, i8* [[TMP4]], i32 0 +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, i8* [[TMP5]], i32 -3 ; CHECK-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to <4 x i8>* ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, <4 x i8>* [[TMP7]], align 1 ; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x i8> [[WIDE_LOAD]], <4 x i8> poison, <4 x i32> @@ -36,35 +36,43 @@ ; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i1> [[TMP9]], i32 0 ; CHECK-NEXT: br i1 [[TMP10]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]] ; CHECK: pred.store.if: -; CHECK-NEXT: [[TMP11:%.*]] = extractelement <4 x i8*> [[TMP3]], i32 0 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, i8* [[NEXT_GEP]], i64 -1 ; CHECK-NEXT: store i8 95, i8* [[TMP11]], align 1 ; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]] ; CHECK: pred.store.continue: ; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP9]], i32 
1 -; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF2:%.*]], label [[PRED_STORE_CONTINUE3:%.*]] -; CHECK: pred.store.if2: -; CHECK-NEXT: [[TMP13:%.*]] = extractelement <4 x i8*> [[TMP3]], i32 1 -; CHECK-NEXT: store i8 95, i8* [[TMP13]], align 1 -; CHECK-NEXT: br label [[PRED_STORE_CONTINUE3]] -; CHECK: pred.store.continue3: -; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP9]], i32 2 -; CHECK-NEXT: br i1 [[TMP14]], label [[PRED_STORE_IF4:%.*]], label [[PRED_STORE_CONTINUE5:%.*]] -; CHECK: pred.store.if4: -; CHECK-NEXT: [[TMP15:%.*]] = extractelement <4 x i8*> [[TMP3]], i32 2 +; CHECK-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6:%.*]] +; CHECK: pred.store.if5: +; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 1 +; CHECK-NEXT: [[TMP14:%.*]] = mul i64 [[TMP13]], -1 +; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, i8* null, i64 [[TMP14]] +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, i8* [[NEXT_GEP2]], i64 -1 ; CHECK-NEXT: store i8 95, i8* [[TMP15]], align 1 -; CHECK-NEXT: br label [[PRED_STORE_CONTINUE5]] -; CHECK: pred.store.continue5: -; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP9]], i32 3 -; CHECK-NEXT: br i1 [[TMP16]], label [[PRED_STORE_IF6:%.*]], label [[PRED_STORE_CONTINUE7]] -; CHECK: pred.store.if6: -; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i8*> [[TMP3]], i32 3 -; CHECK-NEXT: store i8 95, i8* [[TMP17]], align 1 -; CHECK-NEXT: br label [[PRED_STORE_CONTINUE7]] -; CHECK: pred.store.continue7: +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE6]] +; CHECK: pred.store.continue6: +; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP9]], i32 2 +; CHECK-NEXT: br i1 [[TMP16]], label [[PRED_STORE_IF7:%.*]], label [[PRED_STORE_CONTINUE8:%.*]] +; CHECK: pred.store.if7: +; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 2 +; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[TMP17]], -1 +; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, i8* null, i64 [[TMP18]] +; CHECK-NEXT: [[TMP19:%.*]] = 
getelementptr inbounds i8, i8* [[NEXT_GEP3]], i64 -1 +; CHECK-NEXT: store i8 95, i8* [[TMP19]], align 1 +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE8]] +; CHECK: pred.store.continue8: +; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i1> [[TMP9]], i32 3 +; CHECK-NEXT: br i1 [[TMP20]], label [[PRED_STORE_IF9:%.*]], label [[PRED_STORE_CONTINUE10]] +; CHECK: pred.store.if9: +; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[INDEX]], 3 +; CHECK-NEXT: [[TMP22:%.*]] = mul i64 [[TMP21]], -1 +; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, i8* null, i64 [[TMP22]] +; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, i8* [[NEXT_GEP4]], i64 -1 +; CHECK-NEXT: store i8 95, i8* [[TMP23]], align 1 +; CHECK-NEXT: br label [[PRED_STORE_CONTINUE10]] +; CHECK: pred.store.continue10: ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, i8* [[POINTER_PHI]], i64 -4 -; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]] @@ -78,8 +86,8 @@ ; CHECK: for.body: ; CHECK-NEXT: [[C_05:%.*]] = phi i8* [ [[INCDEC_PTR:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] ; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, i8* [[C_05]], i64 -1 -; CHECK-NEXT: [[TMP19:%.*]] = load i8, i8* [[INCDEC_PTR]], align 1 -; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i8 [[TMP19]], 0 +; CHECK-NEXT: [[TMP25:%.*]] = load i8, i8* [[INCDEC_PTR]], align 1 +; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp eq i8 [[TMP25]], 0 ; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[IF_END]], label 
[[IF_THEN:%.*]] ; CHECK: if.then: ; CHECK-NEXT: store i8 95, i8* [[INCDEC_PTR]], align 1 @@ -134,35 +142,27 @@ ; CHECK-NEXT: [[IND_END3:%.*]] = getelementptr i8, i8* [[START_2:%.*]], i64 [[N_VEC]] ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: +; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi i8* [ [[START_2]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 0 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8*, i8** [[START_1]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 0 -; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, i8* [[START_2]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 1 -; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, i8* [[START_2]], i64 [[TMP3]] -; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 2 -; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, i8* [[START_2]], i64 [[TMP4]] -; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 3 -; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, i8* [[START_2]], i64 [[TMP5]] -; CHECK-NEXT: [[TMP6:%.*]] = insertelement <4 x i8*> poison, i8* [[NEXT_GEP4]], i32 0 -; CHECK-NEXT: [[TMP7:%.*]] = insertelement <4 x i8*> [[TMP6]], i8* [[NEXT_GEP5]], i32 1 -; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x i8*> [[TMP7]], i8* [[NEXT_GEP6]], i32 2 -; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x i8*> [[TMP8]], i8* [[NEXT_GEP7]], i32 3 -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, <4 x i8*> [[TMP9]], i64 1 -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8*, i8** [[NEXT_GEP]], i32 0 -; CHECK-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to <4 x i8*>* -; CHECK-NEXT: store <4 x i8*> [[TMP10]], <4 x i8*>* [[TMP12]], align 8 -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, i8* [[NEXT_GEP4]], i32 0 -; CHECK-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to <4 x i8>* -; 
CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, <4 x i8>* [[TMP14]], align 1 -; CHECK-NEXT: [[TMP15:%.*]] = add <4 x i8> [[WIDE_LOAD]], -; CHECK-NEXT: [[TMP16:%.*]] = bitcast i8* [[TMP13]] to <4 x i8>* -; CHECK-NEXT: store <4 x i8> [[TMP15]], <4 x i8>* [[TMP16]], align 1 +; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, i8* [[POINTER_PHI]], <4 x i64> +; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, <4 x i8*> [[TMP2]], i64 1 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8*, i8** [[NEXT_GEP]], i32 0 +; CHECK-NEXT: [[TMP5:%.*]] = bitcast i8** [[TMP4]] to <4 x i8*>* +; CHECK-NEXT: store <4 x i8*> [[TMP3]], <4 x i8*>* [[TMP5]], align 8 +; CHECK-NEXT: [[TMP6:%.*]] = extractelement <4 x i8*> [[TMP2]], i32 0 +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, i8* [[TMP6]], i32 0 +; CHECK-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to <4 x i8>* +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, <4 x i8>* [[TMP8]], align 1 +; CHECK-NEXT: [[TMP9:%.*]] = add <4 x i8> [[WIDE_LOAD]], +; CHECK-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP7]] to <4 x i8>* +; CHECK-NEXT: store <4 x i8> [[TMP9]], <4 x i8>* [[TMP10]], align 1 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, i8* [[POINTER_PHI]], i64 4 +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] diff --git a/llvm/test/Transforms/SCCP/2002-05-02-MissSecondInst.ll b/llvm/test/Transforms/SCCP/2002-05-02-MissSecondInst.ll --- a/llvm/test/Transforms/SCCP/2002-05-02-MissSecondInst.ll +++ 
b/llvm/test/Transforms/SCCP/2002-05-02-MissSecondInst.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -sccp -S | not grep sub +; RUN: opt < %s -passes=sccp -S | not grep sub define void @test3(i32, i32) { add i32 0, 0 ; :3 [#uses=0] diff --git a/llvm/test/Transforms/SCCP/2002-05-20-MissedIncomingValue.ll b/llvm/test/Transforms/SCCP/2002-05-20-MissedIncomingValue.ll --- a/llvm/test/Transforms/SCCP/2002-05-20-MissedIncomingValue.ll +++ b/llvm/test/Transforms/SCCP/2002-05-20-MissedIncomingValue.ll @@ -1,7 +1,7 @@ ; This test shows a case where SCCP is incorrectly eliminating the PHI node ; because it thinks it has a constant 0 value, when it really doesn't. -; RUN: opt < %s -sccp -S | grep phi +; RUN: opt < %s -passes=sccp -S | grep phi define i32 @test(i32 %A, i1 %c) { bb1: diff --git a/llvm/test/Transforms/SCCP/2002-08-30-GetElementPtrTest.ll b/llvm/test/Transforms/SCCP/2002-08-30-GetElementPtrTest.ll --- a/llvm/test/Transforms/SCCP/2002-08-30-GetElementPtrTest.ll +++ b/llvm/test/Transforms/SCCP/2002-08-30-GetElementPtrTest.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -sccp -S | not grep %X +; RUN: opt < %s -passes=sccp -S | not grep %X @G = external global [40 x i32] ; <[40 x i32]*> [#uses=1] diff --git a/llvm/test/Transforms/SCCP/2003-08-26-InvokeHandling.ll b/llvm/test/Transforms/SCCP/2003-08-26-InvokeHandling.ll --- a/llvm/test/Transforms/SCCP/2003-08-26-InvokeHandling.ll +++ b/llvm/test/Transforms/SCCP/2003-08-26-InvokeHandling.ll @@ -1,5 +1,5 @@ ; The PHI cannot be eliminated from this testcase, SCCP is mishandling invoke's! 
-; RUN: opt < %s -sccp -S | grep phi +; RUN: opt < %s -passes=sccp -S | grep phi declare void @foo() diff --git a/llvm/test/Transforms/SCCP/2004-11-16-DeadInvoke.ll b/llvm/test/Transforms/SCCP/2004-11-16-DeadInvoke.ll --- a/llvm/test/Transforms/SCCP/2004-11-16-DeadInvoke.ll +++ b/llvm/test/Transforms/SCCP/2004-11-16-DeadInvoke.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -sccp -disable-output +; RUN: opt < %s -passes=sccp -disable-output declare i32 @foo() diff --git a/llvm/test/Transforms/SCCP/2004-12-10-UndefBranchBug.ll b/llvm/test/Transforms/SCCP/2004-12-10-UndefBranchBug.ll --- a/llvm/test/Transforms/SCCP/2004-12-10-UndefBranchBug.ll +++ b/llvm/test/Transforms/SCCP/2004-12-10-UndefBranchBug.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -sccp -S | grep "ret i32 %X" +; RUN: opt < %s -passes=sccp -S | grep "ret i32 %X" ; This function definitely returns 1, even if we don't know the direction ; of the branch. diff --git a/llvm/test/Transforms/SCCP/2006-10-23-IPSCCP-Crash.ll b/llvm/test/Transforms/SCCP/2006-10-23-IPSCCP-Crash.ll --- a/llvm/test/Transforms/SCCP/2006-10-23-IPSCCP-Crash.ll +++ b/llvm/test/Transforms/SCCP/2006-10-23-IPSCCP-Crash.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -sccp -disable-output +; RUN: opt < %s -passes=sccp -disable-output ; END. target datalayout = "E-p:32:32" target triple = "powerpc-unknown-linux-gnu" diff --git a/llvm/test/Transforms/SCCP/2006-12-04-PackedType.ll b/llvm/test/Transforms/SCCP/2006-12-04-PackedType.ll --- a/llvm/test/Transforms/SCCP/2006-12-04-PackedType.ll +++ b/llvm/test/Transforms/SCCP/2006-12-04-PackedType.ll @@ -1,7 +1,7 @@ ; Test VectorType handling by SCCP. ; SCCP ignores VectorTypes until PR 1034 is fixed ; -; RUN: opt < %s -sccp +; RUN: opt < %s -passes=sccp ; END. 
target datalayout = "E-p:32:32" diff --git a/llvm/test/Transforms/SCCP/2006-12-19-UndefBug.ll b/llvm/test/Transforms/SCCP/2006-12-19-UndefBug.ll --- a/llvm/test/Transforms/SCCP/2006-12-19-UndefBug.ll +++ b/llvm/test/Transforms/SCCP/2006-12-19-UndefBug.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -sccp -S | FileCheck %s +; RUN: opt < %s -passes=sccp -S | FileCheck %s define i1 @foo() { ; CHECK-LABEL: @foo( diff --git a/llvm/test/Transforms/SCCP/2007-05-16-InvokeCrash.ll b/llvm/test/Transforms/SCCP/2007-05-16-InvokeCrash.ll --- a/llvm/test/Transforms/SCCP/2007-05-16-InvokeCrash.ll +++ b/llvm/test/Transforms/SCCP/2007-05-16-InvokeCrash.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -sccp -disable-output +; RUN: opt < %s -passes=sccp -disable-output ; PR1431 define void @_ada_bench() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { diff --git a/llvm/test/Transforms/SCCP/2008-01-27-UndefCorrelate.ll b/llvm/test/Transforms/SCCP/2008-01-27-UndefCorrelate.ll --- a/llvm/test/Transforms/SCCP/2008-01-27-UndefCorrelate.ll +++ b/llvm/test/Transforms/SCCP/2008-01-27-UndefCorrelate.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -sccp -S | grep undef | count 1 +; RUN: opt < %s -passes=sccp -S | grep undef | count 1 ; PR1938 define i32 @main() { diff --git a/llvm/test/Transforms/SCCP/2008-04-22-multiple-ret-sccp.ll b/llvm/test/Transforms/SCCP/2008-04-22-multiple-ret-sccp.ll --- a/llvm/test/Transforms/SCCP/2008-04-22-multiple-ret-sccp.ll +++ b/llvm/test/Transforms/SCCP/2008-04-22-multiple-ret-sccp.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -sccp -S | grep "ret i32 %Z" +; RUN: opt < %s -passes=sccp -S | grep "ret i32 %Z" ; rdar://5778210 declare {i32, i32} @bar(i32 %A) diff --git a/llvm/test/Transforms/SCCP/2008-05-23-UndefCallFold.ll b/llvm/test/Transforms/SCCP/2008-05-23-UndefCallFold.ll --- a/llvm/test/Transforms/SCCP/2008-05-23-UndefCallFold.ll +++ b/llvm/test/Transforms/SCCP/2008-05-23-UndefCallFold.ll @@ -1,4 +1,4 @@ -; 
RUN: opt < %s -sccp -S | not grep "ret i32 undef" +; RUN: opt < %s -passes=sccp -S | not grep "ret i32 undef" ; PR2358 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128" diff --git a/llvm/test/Transforms/SCCP/2008-06-09-WeakProp.ll b/llvm/test/Transforms/SCCP/2008-06-09-WeakProp.ll --- a/llvm/test/Transforms/SCCP/2008-06-09-WeakProp.ll +++ b/llvm/test/Transforms/SCCP/2008-06-09-WeakProp.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s ; Should not propagate the result of a weak function. ; PR2411 diff --git a/llvm/test/Transforms/SCCP/2009-01-14-IPSCCP-Invoke.ll b/llvm/test/Transforms/SCCP/2009-01-14-IPSCCP-Invoke.ll --- a/llvm/test/Transforms/SCCP/2009-01-14-IPSCCP-Invoke.ll +++ b/llvm/test/Transforms/SCCP/2009-01-14-IPSCCP-Invoke.ll @@ -1,5 +1,5 @@ -; RUN: opt < %s -ipsccp -S | grep "ret i32 42" -; RUN: opt < %s -ipsccp -S | grep "ret i32 undef" +; RUN: opt < %s -passes=ipsccp -S | grep "ret i32 42" +; RUN: opt < %s -passes=ipsccp -S | grep "ret i32 undef" ; PR3325 define i32 @main() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { diff --git a/llvm/test/Transforms/SCCP/2009-05-27-VectorOperandZero.ll b/llvm/test/Transforms/SCCP/2009-05-27-VectorOperandZero.ll --- a/llvm/test/Transforms/SCCP/2009-05-27-VectorOperandZero.ll +++ b/llvm/test/Transforms/SCCP/2009-05-27-VectorOperandZero.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -sccp -disable-output +; RUN: opt < %s -passes=sccp -disable-output ; PR4277 define i32 @main() nounwind { diff --git a/llvm/test/Transforms/SCCP/2009-09-24-byval-ptr.ll b/llvm/test/Transforms/SCCP/2009-09-24-byval-ptr.ll --- a/llvm/test/Transforms/SCCP/2009-09-24-byval-ptr.ll +++ b/llvm/test/Transforms/SCCP/2009-09-24-byval-ptr.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s 
-passes=ipsccp -S | FileCheck %s ; Don't constant-propagate byval pointers, since they are not pointers! ; PR5038 %struct.MYstr = type { i8, i32 } diff --git a/llvm/test/Transforms/SCCP/PR16052.ll b/llvm/test/Transforms/SCCP/PR16052.ll --- a/llvm/test/Transforms/SCCP/PR16052.ll +++ b/llvm/test/Transforms/SCCP/PR16052.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes -; RUN: opt < %s -S -ipsccp | FileCheck %s +; RUN: opt < %s -S -passes=ipsccp | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/SCCP/PR26044.ll b/llvm/test/Transforms/SCCP/PR26044.ll --- a/llvm/test/Transforms/SCCP/PR26044.ll +++ b/llvm/test/Transforms/SCCP/PR26044.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --scrub-attributes -; RUN: opt < %s -S -ipsccp | FileCheck %s +; RUN: opt < %s -S -passes=ipsccp | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/SCCP/PR43857.ll b/llvm/test/Transforms/SCCP/PR43857.ll --- a/llvm/test/Transforms/SCCP/PR43857.ll +++ b/llvm/test/Transforms/SCCP/PR43857.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -S -ipsccp | FileCheck %s +; RUN: opt < %s -S -passes=ipsccp | FileCheck %s %struct.wobble = type { i32 } %struct.zot = type { %struct.wobble, %struct.wobble, %struct.wobble } diff --git a/llvm/test/Transforms/SCCP/apfloat-basictest.ll b/llvm/test/Transforms/SCCP/apfloat-basictest.ll --- a/llvm/test/Transforms/SCCP/apfloat-basictest.ll +++ b/llvm/test/Transforms/SCCP/apfloat-basictest.ll @@ -1,7 +1,7 @@ ; This is a basic correctness check for constant propagation. The fneg ; instruction should be eliminated. 
-; RUN: opt < %s -sccp -S | FileCheck %s +; RUN: opt < %s -passes=sccp -S | FileCheck %s define double @test(i1 %B) { br i1 %B, label %BB1, label %BB2 diff --git a/llvm/test/Transforms/SCCP/apint-array.ll b/llvm/test/Transforms/SCCP/apint-array.ll --- a/llvm/test/Transforms/SCCP/apint-array.ll +++ b/llvm/test/Transforms/SCCP/apint-array.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -sccp -S | grep "ret i101 12" +; RUN: opt < %s -passes=sccp -S | grep "ret i101 12" @Y = constant [6 x i101] [ i101 12, i101 123456789000000, i101 -12,i101 -123456789000000, i101 0,i101 9123456789000000] diff --git a/llvm/test/Transforms/SCCP/apint-basictest.ll b/llvm/test/Transforms/SCCP/apint-basictest.ll --- a/llvm/test/Transforms/SCCP/apint-basictest.ll +++ b/llvm/test/Transforms/SCCP/apint-basictest.ll @@ -1,7 +1,7 @@ ; This is a basic correctness check for constant propagation. The add ; instruction should be eliminated. -; RUN: opt < %s -sccp -S | not grep add +; RUN: opt < %s -passes=sccp -S | not grep add define i128 @test(i1 %B) { br i1 %B, label %BB1, label %BB2 diff --git a/llvm/test/Transforms/SCCP/apint-basictest2.ll b/llvm/test/Transforms/SCCP/apint-basictest2.ll --- a/llvm/test/Transforms/SCCP/apint-basictest2.ll +++ b/llvm/test/Transforms/SCCP/apint-basictest2.ll @@ -1,8 +1,8 @@ ; This is a basic correctness check for constant propagation. The add ; instruction and phi instruction should be eliminated. -; RUN: opt < %s -sccp -S | not grep phi -; RUN: opt < %s -sccp -S | not grep add +; RUN: opt < %s -passes=sccp -S | not grep phi +; RUN: opt < %s -passes=sccp -S | not grep add define i128 @test(i1 %B) { br i1 %B, label %BB1, label %BB2 diff --git a/llvm/test/Transforms/SCCP/apint-basictest3.ll b/llvm/test/Transforms/SCCP/apint-basictest3.ll --- a/llvm/test/Transforms/SCCP/apint-basictest3.ll +++ b/llvm/test/Transforms/SCCP/apint-basictest3.ll @@ -2,8 +2,8 @@ ; basic arithmetic operations. 
-; RUN: opt < %s -sccp -S | not grep mul -; RUN: opt < %s -sccp -S | not grep umod +; RUN: opt < %s -passes=sccp -S | not grep mul +; RUN: opt < %s -passes=sccp -S | not grep umod define i128 @test(i1 %B) { br i1 %B, label %BB1, label %BB2 diff --git a/llvm/test/Transforms/SCCP/apint-basictest4.ll b/llvm/test/Transforms/SCCP/apint-basictest4.ll --- a/llvm/test/Transforms/SCCP/apint-basictest4.ll +++ b/llvm/test/Transforms/SCCP/apint-basictest4.ll @@ -2,9 +2,9 @@ ; basic logic operations. -; RUN: opt < %s -sccp -S | not grep and -; RUN: opt < %s -sccp -S | not grep trunc -; RUN: opt < %s -sccp -S | grep "ret i100 -1" +; RUN: opt < %s -passes=sccp -S | not grep and +; RUN: opt < %s -passes=sccp -S | not grep trunc +; RUN: opt < %s -passes=sccp -S | grep "ret i100 -1" define i100 @test(i133 %A) { %B = and i133 0, %A diff --git a/llvm/test/Transforms/SCCP/apint-bigarray.ll b/llvm/test/Transforms/SCCP/apint-bigarray.ll --- a/llvm/test/Transforms/SCCP/apint-bigarray.ll +++ b/llvm/test/Transforms/SCCP/apint-bigarray.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -sccp -S | not grep %X +; RUN: opt < %s -passes=sccp -S | not grep %X @G = global [1000000 x i10000] zeroinitializer diff --git a/llvm/test/Transforms/SCCP/apint-bigint.ll b/llvm/test/Transforms/SCCP/apint-bigint.ll --- a/llvm/test/Transforms/SCCP/apint-bigint.ll +++ b/llvm/test/Transforms/SCCP/apint-bigint.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -sccp -S | not grep xor +; RUN: opt < %s -passes=sccp -S | not grep xor define i11129 @test1() { %B = shl i11129 1, 11128 diff --git a/llvm/test/Transforms/SCCP/apint-bigint2.ll b/llvm/test/Transforms/SCCP/apint-bigint2.ll --- a/llvm/test/Transforms/SCCP/apint-bigint2.ll +++ b/llvm/test/Transforms/SCCP/apint-bigint2.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -sccp -S | FileCheck %s +; RUN: opt < %s -passes=sccp -S | FileCheck %s @Y = constant [6 x i101] [ i101 12, i101 123456789000000, i101 -12, i101 -123456789000000, 
i101 0,i101 9123456789000000] diff --git a/llvm/test/Transforms/SCCP/apint-ipsccp1.ll b/llvm/test/Transforms/SCCP/apint-ipsccp1.ll --- a/llvm/test/Transforms/SCCP/apint-ipsccp1.ll +++ b/llvm/test/Transforms/SCCP/apint-ipsccp1.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -ipsccp -S | grep -v "ret i512 undef" | \ +; RUN: opt < %s -passes=ipsccp -S | grep -v "ret i512 undef" | \ ; RUN: grep "ret i8 2" define internal i512 @test(i1 %B) { diff --git a/llvm/test/Transforms/SCCP/apint-ipsccp2.ll b/llvm/test/Transforms/SCCP/apint-ipsccp2.ll --- a/llvm/test/Transforms/SCCP/apint-ipsccp2.ll +++ b/llvm/test/Transforms/SCCP/apint-ipsccp2.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -ipsccp -S | grep -v "ret i101 0" | \ +; RUN: opt < %s -passes=ipsccp -S | grep -v "ret i101 0" | \ ; RUN: grep -v "ret i101 undef" | not grep ret diff --git a/llvm/test/Transforms/SCCP/apint-ipsccp3.ll b/llvm/test/Transforms/SCCP/apint-ipsccp3.ll --- a/llvm/test/Transforms/SCCP/apint-ipsccp3.ll +++ b/llvm/test/Transforms/SCCP/apint-ipsccp3.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s @G = internal global i66 undef diff --git a/llvm/test/Transforms/SCCP/apint-ipsccp4.ll b/llvm/test/Transforms/SCCP/apint-ipsccp4.ll --- a/llvm/test/Transforms/SCCP/apint-ipsccp4.ll +++ b/llvm/test/Transforms/SCCP/apint-ipsccp4.ll @@ -1,8 +1,8 @@ ; This test makes sure that these instructions are properly constant propagated. 
-; RUN: opt < %s -ipsccp -S | not grep load -; RUN: opt < %s -ipsccp -S | not grep add -; RUN: opt < %s -ipsccp -S | not grep phi +; RUN: opt < %s -passes=ipsccp -S | not grep load +; RUN: opt < %s -passes=ipsccp -S | not grep add +; RUN: opt < %s -passes=ipsccp -S | not grep phi @Y = constant [2 x { i212, float }] [ { i212, float } { i212 12, float 1.0 }, diff --git a/llvm/test/Transforms/SCCP/apint-load.ll b/llvm/test/Transforms/SCCP/apint-load.ll --- a/llvm/test/Transforms/SCCP/apint-load.ll +++ b/llvm/test/Transforms/SCCP/apint-load.ll @@ -1,7 +1,7 @@ ; This test makes sure that these instructions are properly constant propagated. -; RUN: opt < %s -ipsccp -S | not grep load -; RUN: opt < %s -ipsccp -S | not grep fdiv +; RUN: opt < %s -passes=ipsccp -S | not grep load +; RUN: opt < %s -passes=ipsccp -S | not grep fdiv @X = constant i212 42 @Y = constant [2 x { i212, float }] [ { i212, float } { i212 12, float 1.0 }, diff --git a/llvm/test/Transforms/SCCP/apint-phi.ll b/llvm/test/Transforms/SCCP/apint-phi.ll --- a/llvm/test/Transforms/SCCP/apint-phi.ll +++ b/llvm/test/Transforms/SCCP/apint-phi.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -sccp -S | not grep phi +; RUN: opt < %s -passes=sccp -S | not grep phi define i999 @test(i999%A, i1 %c) { bb1: diff --git a/llvm/test/Transforms/SCCP/apint-select.ll b/llvm/test/Transforms/SCCP/apint-select.ll --- a/llvm/test/Transforms/SCCP/apint-select.ll +++ b/llvm/test/Transforms/SCCP/apint-select.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -sccp -S | FileCheck %s +; RUN: opt < %s -passes=sccp -S | FileCheck %s @A = constant i32 10 diff --git a/llvm/test/Transforms/SCCP/apint-xor.ll b/llvm/test/Transforms/SCCP/apint-xor.ll --- a/llvm/test/Transforms/SCCP/apint-xor.ll +++ b/llvm/test/Transforms/SCCP/apint-xor.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -sccp -S | FileCheck %s +; RUN: opt < %s -passes=sccp 
-S | FileCheck %s ; Test some XOR simplifications / range propagation. define void@xor1(i1 %cmp) { diff --git a/llvm/test/Transforms/SCCP/arg-count-mismatch.ll b/llvm/test/Transforms/SCCP/arg-count-mismatch.ll --- a/llvm/test/Transforms/SCCP/arg-count-mismatch.ll +++ b/llvm/test/Transforms/SCCP/arg-count-mismatch.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -ipsccp -S -o - | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S -o - | FileCheck %s ; The original C source looked like this: ; diff --git a/llvm/test/Transforms/SCCP/arg-type-mismatch.ll b/llvm/test/Transforms/SCCP/arg-type-mismatch.ll --- a/llvm/test/Transforms/SCCP/arg-type-mismatch.ll +++ b/llvm/test/Transforms/SCCP/arg-type-mismatch.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -ipsccp -S -o - | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S -o - | FileCheck %s ; This test is just to verify that we do not crash/assert due to mismatch in ; argument type between the caller and callee. diff --git a/llvm/test/Transforms/SCCP/assume.ll b/llvm/test/Transforms/SCCP/assume.ll --- a/llvm/test/Transforms/SCCP/assume.ll +++ b/llvm/test/Transforms/SCCP/assume.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s declare void @use(i1) declare void @llvm.assume(i1) diff --git a/llvm/test/Transforms/SCCP/atomic-load-store.ll b/llvm/test/Transforms/SCCP/atomic-load-store.ll --- a/llvm/test/Transforms/SCCP/atomic-load-store.ll +++ b/llvm/test/Transforms/SCCP/atomic-load-store.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s ; This transformation is safe for atomic loads and stores; check that it works. 
diff --git a/llvm/test/Transforms/SCCP/atomic.ll b/llvm/test/Transforms/SCCP/atomic.ll --- a/llvm/test/Transforms/SCCP/atomic.ll +++ b/llvm/test/Transforms/SCCP/atomic.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -sccp -S | FileCheck %s +; RUN: opt < %s -passes=sccp -S | FileCheck %s define i1 @test_cmpxchg(i32* %addr, i32 %desired, i32 %new) { ; CHECK-LABEL: @test_cmpxchg diff --git a/llvm/test/Transforms/SCCP/binaryops-constexprs.ll b/llvm/test/Transforms/SCCP/binaryops-constexprs.ll --- a/llvm/test/Transforms/SCCP/binaryops-constexprs.ll +++ b/llvm/test/Transforms/SCCP/binaryops-constexprs.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -sccp -S | FileCheck %s +; RUN: opt < %s -passes=sccp -S | FileCheck %s declare void @use.i32(i32) declare void @use.i1(i1) diff --git a/llvm/test/Transforms/SCCP/binaryops-range-special-cases.ll b/llvm/test/Transforms/SCCP/binaryops-range-special-cases.ll --- a/llvm/test/Transforms/SCCP/binaryops-range-special-cases.ll +++ b/llvm/test/Transforms/SCCP/binaryops-range-special-cases.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s declare void @use(i1) define void @sdiv1_cmp_constants(i32 %x) { diff --git a/llvm/test/Transforms/SCCP/bitcast.ll b/llvm/test/Transforms/SCCP/bitcast.ll --- a/llvm/test/Transforms/SCCP/bitcast.ll +++ b/llvm/test/Transforms/SCCP/bitcast.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s define i128 @vector_to_int_cast() { %A = bitcast <4 x i32> to i128 diff --git a/llvm/test/Transforms/SCCP/clang-arc-rv.ll b/llvm/test/Transforms/SCCP/clang-arc-rv.ll --- a/llvm/test/Transforms/SCCP/clang-arc-rv.ll +++ b/llvm/test/Transforms/SCCP/clang-arc-rv.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s ; Return 
value can't be zapped if there is a call that has operand bundle ; "clang.arc.attachedcall". diff --git a/llvm/test/Transforms/SCCP/comdat-ipo.ll b/llvm/test/Transforms/SCCP/comdat-ipo.ll --- a/llvm/test/Transforms/SCCP/comdat-ipo.ll +++ b/llvm/test/Transforms/SCCP/comdat-ipo.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s ; See PR26774 diff --git a/llvm/test/Transforms/SCCP/conditions-iter-order.ll b/llvm/test/Transforms/SCCP/conditions-iter-order.ll --- a/llvm/test/Transforms/SCCP/conditions-iter-order.ll +++ b/llvm/test/Transforms/SCCP/conditions-iter-order.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -ipsccp -S %s | FileCheck %s +; RUN: opt -passes=ipsccp -S %s | FileCheck %s declare noalias i8* @malloc(i64) diff --git a/llvm/test/Transforms/SCCP/conditions-ranges-with-undef.ll b/llvm/test/Transforms/SCCP/conditions-ranges-with-undef.ll --- a/llvm/test/Transforms/SCCP/conditions-ranges-with-undef.ll +++ b/llvm/test/Transforms/SCCP/conditions-ranges-with-undef.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/SCCP/conditions-ranges.ll b/llvm/test/Transforms/SCCP/conditions-ranges.ll --- a/llvm/test/Transforms/SCCP/conditions-ranges.ll +++ b/llvm/test/Transforms/SCCP/conditions-ranges.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/SCCP/constant-range-struct.ll b/llvm/test/Transforms/SCCP/constant-range-struct.ll --- a/llvm/test/Transforms/SCCP/constant-range-struct.ll +++ 
b/llvm/test/Transforms/SCCP/constant-range-struct.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt %s -ipsccp -S | FileCheck %s +; RUN: opt %s -passes=ipsccp -S | FileCheck %s declare i1 @cond() declare void @use(i1) diff --git a/llvm/test/Transforms/SCCP/constant-struct.ll b/llvm/test/Transforms/SCCP/constant-struct.ll --- a/llvm/test/Transforms/SCCP/constant-struct.ll +++ b/llvm/test/Transforms/SCCP/constant-struct.ll @@ -1,5 +1,5 @@ ; Test that constant structs are folded. -; RUN: opt %s -sccp -S | FileCheck %s +; RUN: opt %s -passes=sccp -S | FileCheck %s define internal {i64} @struct1() { %a = insertvalue {i64} undef, i64 24, 0 diff --git a/llvm/test/Transforms/SCCP/crash.ll b/llvm/test/Transforms/SCCP/crash.ll --- a/llvm/test/Transforms/SCCP/crash.ll +++ b/llvm/test/Transforms/SCCP/crash.ll @@ -1,4 +1,4 @@ -; RUN: opt -sccp -S < %s +; RUN: opt -passes=sccp -S < %s target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128" target triple = "x86_64-apple-darwin10.0" diff --git a/llvm/test/Transforms/SCCP/deadarg.ll b/llvm/test/Transforms/SCCP/deadarg.ll --- a/llvm/test/Transforms/SCCP/deadarg.ll +++ b/llvm/test/Transforms/SCCP/deadarg.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -ipsccp -disable-output +; RUN: opt < %s -passes=ipsccp -disable-output define internal void @foo(i32 %X) { call void @foo( i32 %X ) ret void diff --git a/llvm/test/Transforms/SCCP/definite-initializer.ll b/llvm/test/Transforms/SCCP/definite-initializer.ll --- a/llvm/test/Transforms/SCCP/definite-initializer.ll +++ b/llvm/test/Transforms/SCCP/definite-initializer.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -ipsccp < %s | FileCheck %s +; RUN: opt -S -passes=ipsccp < %s | FileCheck %s @d = internal externally_initialized global i32 0, section ".openbsd.randomdata", align 4 ; CHECK-LABEL: 
@test1( diff --git a/llvm/test/Transforms/SCCP/domtree-update.ll b/llvm/test/Transforms/SCCP/domtree-update.ll --- a/llvm/test/Transforms/SCCP/domtree-update.ll +++ b/llvm/test/Transforms/SCCP/domtree-update.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -ipsccp < %s | FileCheck %s +; RUN: opt -S -passes=ipsccp < %s | FileCheck %s ; RUN: opt -S -passes='ipsccp,function(verify)' < %s | FileCheck %s ; DTU should not crash. diff --git a/llvm/test/Transforms/SCCP/dont-zap-return.ll b/llvm/test/Transforms/SCCP/dont-zap-return.ll --- a/llvm/test/Transforms/SCCP/dont-zap-return.ll +++ b/llvm/test/Transforms/SCCP/dont-zap-return.ll @@ -1,4 +1,4 @@ -; RUN: opt -ipsccp < %s -S | FileCheck %s +; RUN: opt -passes=ipsccp < %s -S | FileCheck %s define internal {i32, i32} @identity(i32 %patatino) { %foo = insertvalue {i32, i32} {i32 1, i32 undef}, i32 %patatino, 1 diff --git a/llvm/test/Transforms/SCCP/float-nan-simplification.ll b/llvm/test/Transforms/SCCP/float-nan-simplification.ll --- a/llvm/test/Transforms/SCCP/float-nan-simplification.ll +++ b/llvm/test/Transforms/SCCP/float-nan-simplification.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -sccp -S %s | FileCheck %s +; RUN: opt -passes=sccp -S %s | FileCheck %s ; When marking the edge from bb2 -> exit as executable first, %p will be NaN ; first and %v.1 will simplify to NaN. 
But when marking bb1 -> exit executable, diff --git a/llvm/test/Transforms/SCCP/float-phis.ll b/llvm/test/Transforms/SCCP/float-phis.ll --- a/llvm/test/Transforms/SCCP/float-phis.ll +++ b/llvm/test/Transforms/SCCP/float-phis.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -sccp -S | FileCheck %s +; RUN: opt < %s -passes=sccp -S | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/SCCP/fp-bc-icmp-const-fold.ll b/llvm/test/Transforms/SCCP/fp-bc-icmp-const-fold.ll --- a/llvm/test/Transforms/SCCP/fp-bc-icmp-const-fold.ll +++ b/llvm/test/Transforms/SCCP/fp-bc-icmp-const-fold.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -ipsccp < %s | FileCheck %s +; RUN: opt -S -passes=ipsccp < %s | FileCheck %s target datalayout = "E-m:e-i64:64-n32:64" target triple = "powerpc64le-unknown-linux" diff --git a/llvm/test/Transforms/SCCP/global-alias-constprop.ll b/llvm/test/Transforms/SCCP/global-alias-constprop.ll --- a/llvm/test/Transforms/SCCP/global-alias-constprop.ll +++ b/llvm/test/Transforms/SCCP/global-alias-constprop.ll @@ -1,4 +1,3 @@ -; RUN: opt < %s -sccp -S | FileCheck %s ; RUN: opt < %s -passes=sccp -S | FileCheck %s @0 = private unnamed_addr constant [2 x i32] [i32 -1, i32 1] diff --git a/llvm/test/Transforms/SCCP/global.ll b/llvm/test/Transforms/SCCP/global.ll --- a/llvm/test/Transforms/SCCP/global.ll +++ b/llvm/test/Transforms/SCCP/global.ll @@ -1,5 +1,4 @@ ; RUN: opt < %s -S -passes=ipsccp | FileCheck %s -; RUN: opt < %s -S -ipsccp | FileCheck %s @_ZL6test1g = internal global i32 42, align 4 diff --git a/llvm/test/Transforms/SCCP/indirectbr.ll b/llvm/test/Transforms/SCCP/indirectbr.ll --- a/llvm/test/Transforms/SCCP/indirectbr.ll +++ b/llvm/test/Transforms/SCCP/indirectbr.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -ipsccp < %s | FileCheck %s +; RUN: opt -S -passes=ipsccp < %s | FileCheck %s declare void @BB0_f() declare void @BB1_f() diff 
--git a/llvm/test/Transforms/SCCP/int-phis.ll b/llvm/test/Transforms/SCCP/int-phis.ll --- a/llvm/test/Transforms/SCCP/int-phis.ll +++ b/llvm/test/Transforms/SCCP/int-phis.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -sccp -S | FileCheck %s +; RUN: opt < %s -passes=sccp -S | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/SCCP/intrinsics.ll b/llvm/test/Transforms/SCCP/intrinsics.ll --- a/llvm/test/Transforms/SCCP/intrinsics.ll +++ b/llvm/test/Transforms/SCCP/intrinsics.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -ipsccp -S %s | FileCheck %s +; RUN: opt -passes=ipsccp -S %s | FileCheck %s declare i8 @llvm.abs.i8(i8, i1) declare <2 x i8> @llvm.abs.v2i8(<2 x i8>, i1) diff --git a/llvm/test/Transforms/SCCP/ip-add-range-to-call.ll b/llvm/test/Transforms/SCCP/ip-add-range-to-call.ll --- a/llvm/test/Transforms/SCCP/ip-add-range-to-call.ll +++ b/llvm/test/Transforms/SCCP/ip-add-range-to-call.ll @@ -1,4 +1,4 @@ -; RUN: opt -ipsccp -S %s | FileCheck %s +; RUN: opt -passes=ipsccp -S %s | FileCheck %s ; Test 1. ; Both arguments and return value of @callee can be tracked. 
The inferred range diff --git a/llvm/test/Transforms/SCCP/ip-constant-ranges.ll b/llvm/test/Transforms/SCCP/ip-constant-ranges.ll --- a/llvm/test/Transforms/SCCP/ip-constant-ranges.ll +++ b/llvm/test/Transforms/SCCP/ip-constant-ranges.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s ; Constant range for %a is [1, 48) and for %b is [301, 1000) ; CHECK-LABEL: f1 diff --git a/llvm/test/Transforms/SCCP/ip-ranges-binaryops.ll b/llvm/test/Transforms/SCCP/ip-ranges-binaryops.ll --- a/llvm/test/Transforms/SCCP/ip-ranges-binaryops.ll +++ b/llvm/test/Transforms/SCCP/ip-ranges-binaryops.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s ; x = [10, 21), y = [100, 201) ; x + y = [110, 221) diff --git a/llvm/test/Transforms/SCCP/ip-ranges-casts.ll b/llvm/test/Transforms/SCCP/ip-ranges-casts.ll --- a/llvm/test/Transforms/SCCP/ip-ranges-casts.ll +++ b/llvm/test/Transforms/SCCP/ip-ranges-casts.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s ; x = [100, 301) define internal i1 @f.trunc(i32 %x) { diff --git a/llvm/test/Transforms/SCCP/ip-ranges-phis.ll b/llvm/test/Transforms/SCCP/ip-ranges-phis.ll --- a/llvm/test/Transforms/SCCP/ip-ranges-phis.ll +++ b/llvm/test/Transforms/SCCP/ip-ranges-phis.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s define internal i32 @f1(i32 %x) { ; CHECK-LABEL: define internal i32 @f1( diff --git a/llvm/test/Transforms/SCCP/ip-ranges-select.ll b/llvm/test/Transforms/SCCP/ip-ranges-select.ll --- a/llvm/test/Transforms/SCCP/ip-ranges-select.ll +++ b/llvm/test/Transforms/SCCP/ip-ranges-select.ll @@ -1,4 +1,4 @@ -; RUN: opt -ipsccp -S %s -o -| FileCheck %s +; RUN: opt -passes=ipsccp -S %s -o -| FileCheck %s define void @caller.1(i8* %arg) { 
; CHECK-LABEL: define void @caller.1(i8* %arg) { diff --git a/llvm/test/Transforms/SCCP/ip-ranges-sext.ll b/llvm/test/Transforms/SCCP/ip-ranges-sext.ll --- a/llvm/test/Transforms/SCCP/ip-ranges-sext.ll +++ b/llvm/test/Transforms/SCCP/ip-ranges-sext.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -ipsccp -S %s -o -| FileCheck %s +; RUN: opt -passes=ipsccp -S %s -o -| FileCheck %s define i64 @test1(i32 %x) { ; CHECK-LABEL: @test1( diff --git a/llvm/test/Transforms/SCCP/ipsccp-addr-taken.ll b/llvm/test/Transforms/SCCP/ipsccp-addr-taken.ll --- a/llvm/test/Transforms/SCCP/ipsccp-addr-taken.ll +++ b/llvm/test/Transforms/SCCP/ipsccp-addr-taken.ll @@ -1,4 +1,4 @@ -; RUN: opt -ipsccp -S < %s | FileCheck %s +; RUN: opt -passes=ipsccp -S < %s | FileCheck %s ; PR7876 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" target triple = "x86_64-apple-darwin10.0.0" diff --git a/llvm/test/Transforms/SCCP/ipsccp-basic.ll b/llvm/test/Transforms/SCCP/ipsccp-basic.ll --- a/llvm/test/Transforms/SCCP/ipsccp-basic.ll +++ b/llvm/test/Transforms/SCCP/ipsccp-basic.ll @@ -1,5 +1,5 @@ -; RUN: opt < %s -ipsccp -S | FileCheck %s -; RUN: opt < %s -enable-debugify -ipsccp -debugify-quiet -disable-output +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s +; RUN: opt < %s -enable-debugify -passes=ipsccp -debugify-quiet -disable-output ;;======================== test1 diff --git a/llvm/test/Transforms/SCCP/ipsccp-branch-unresolved-undef.ll b/llvm/test/Transforms/SCCP/ipsccp-branch-unresolved-undef.ll --- a/llvm/test/Transforms/SCCP/ipsccp-branch-unresolved-undef.ll +++ b/llvm/test/Transforms/SCCP/ipsccp-branch-unresolved-undef.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -S -ipsccp | FileCheck %s +; RUN: opt < %s -S -passes=ipsccp | FileCheck %s define void @main() { ; 
CHECK-LABEL: @main( diff --git a/llvm/test/Transforms/SCCP/ipsccp-clear-returned.ll b/llvm/test/Transforms/SCCP/ipsccp-clear-returned.ll --- a/llvm/test/Transforms/SCCP/ipsccp-clear-returned.ll +++ b/llvm/test/Transforms/SCCP/ipsccp-clear-returned.ll @@ -2,7 +2,7 @@ ; then the "returned" attribute of input arguments ; should be cleared. -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s define i32 @main() { ; CHECK-LABEL: @main entry: diff --git a/llvm/test/Transforms/SCCP/ipsccp-cycles.ll b/llvm/test/Transforms/SCCP/ipsccp-cycles.ll --- a/llvm/test/Transforms/SCCP/ipsccp-cycles.ll +++ b/llvm/test/Transforms/SCCP/ipsccp-cycles.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s define internal i32 @test1a(i32 %A, i32 %b) { ; CHECK-LABEL: @test1a( diff --git a/llvm/test/Transforms/SCCP/ipsccp-phi-one-pred-dead.ll b/llvm/test/Transforms/SCCP/ipsccp-phi-one-pred-dead.ll --- a/llvm/test/Transforms/SCCP/ipsccp-phi-one-pred-dead.ll +++ b/llvm/test/Transforms/SCCP/ipsccp-phi-one-pred-dead.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -S -ipsccp | FileCheck %s +; RUN: opt < %s -S -passes=ipsccp | FileCheck %s target triple = "x86_64-unknown-linux-gnu" define void @test() { diff --git a/llvm/test/Transforms/SCCP/ipsccp-predinfo-order.ll b/llvm/test/Transforms/SCCP/ipsccp-predinfo-order.ll --- a/llvm/test/Transforms/SCCP/ipsccp-predinfo-order.ll +++ b/llvm/test/Transforms/SCCP/ipsccp-predinfo-order.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -ipsccp -S %s | FileCheck %s +; RUN: opt -passes=ipsccp -S %s | FileCheck %s declare i32 @hoge() diff --git a/llvm/test/Transforms/SCCP/ipsccp-range-crashes.ll b/llvm/test/Transforms/SCCP/ipsccp-range-crashes.ll --- 
a/llvm/test/Transforms/SCCP/ipsccp-range-crashes.ll +++ b/llvm/test/Transforms/SCCP/ipsccp-range-crashes.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -ipsccp -S %s | FileCheck %s +; RUN: opt -passes=ipsccp -S %s | FileCheck %s ; A few test cases exposing crashes with the initial range implementation. diff --git a/llvm/test/Transforms/SCCP/ipsccp-ssa-copy-nested-conds.ll b/llvm/test/Transforms/SCCP/ipsccp-ssa-copy-nested-conds.ll --- a/llvm/test/Transforms/SCCP/ipsccp-ssa-copy-nested-conds.ll +++ b/llvm/test/Transforms/SCCP/ipsccp-ssa-copy-nested-conds.ll @@ -1,5 +1,4 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -ipsccp -S | FileCheck %s ; RUN: opt < %s -passes=ipsccp -S | FileCheck %s ; Test for PR39772 diff --git a/llvm/test/Transforms/SCCP/ipscp-drop-argmemonly.ll b/llvm/test/Transforms/SCCP/ipscp-drop-argmemonly.ll --- a/llvm/test/Transforms/SCCP/ipscp-drop-argmemonly.ll +++ b/llvm/test/Transforms/SCCP/ipscp-drop-argmemonly.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes -; RUN: opt -ipsccp -S %s | FileCheck %s +; RUN: opt -passes=ipsccp -S %s | FileCheck %s ; Test cases to ensure argmemonly/inaccessiblemem_or_argmemonly attributes are ; dropped, if a function argument is replaced by a constant. 
diff --git a/llvm/test/Transforms/SCCP/latticeval-invalidate.ll b/llvm/test/Transforms/SCCP/latticeval-invalidate.ll --- a/llvm/test/Transforms/SCCP/latticeval-invalidate.ll +++ b/llvm/test/Transforms/SCCP/latticeval-invalidate.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -sccp %s +; RUN: opt -S -passes=sccp %s @A = external constant i32 diff --git a/llvm/test/Transforms/SCCP/load-store-range.ll b/llvm/test/Transforms/SCCP/load-store-range.ll --- a/llvm/test/Transforms/SCCP/load-store-range.ll +++ b/llvm/test/Transforms/SCCP/load-store-range.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/SCCP/loadtest.ll b/llvm/test/Transforms/SCCP/loadtest.ll --- a/llvm/test/Transforms/SCCP/loadtest.ll +++ b/llvm/test/Transforms/SCCP/loadtest.ll @@ -1,8 +1,8 @@ ; This test makes sure that these instructions are properly constant propagated. 
-; RUN: opt < %s -data-layout="e-p:32:32" -debugify -sccp -S | FileCheck %s -; RUN: opt < %s -data-layout="E-p:32:32" -debugify -sccp -S | FileCheck %s -; RUN: opt < %s -data-layout="E-p:32:32" -debugify -ipsccp -S | FileCheck %s +; RUN: opt < %s -data-layout="e-p:32:32" -passes=debugify,sccp -S | FileCheck %s +; RUN: opt < %s -data-layout="E-p:32:32" -passes=debugify,sccp -S | FileCheck %s +; RUN: opt < %s -data-layout="E-p:32:32" -passes=debugify,ipsccp -S | FileCheck %s @X = constant i32 42 ; [#uses=1] @Y = constant [2 x { i32, float }] [ { i32, float } { i32 12, float 1.000000e+00 }, { i32, float } { i32 37, float 0x3FF3B2FEC0000000 } ] ; <[2 x { i32, float }]*> [#uses=2] diff --git a/llvm/test/Transforms/SCCP/loadtest2.ll b/llvm/test/Transforms/SCCP/loadtest2.ll --- a/llvm/test/Transforms/SCCP/loadtest2.ll +++ b/llvm/test/Transforms/SCCP/loadtest2.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -data-layout="E-p:32:32" -ipsccp -S | FileCheck %s +; RUN: opt < %s -data-layout="E-p:32:32" -passes=ipsccp -S | FileCheck %s @j = internal global i32 undef, align 4 diff --git a/llvm/test/Transforms/SCCP/logical-nuke.ll b/llvm/test/Transforms/SCCP/logical-nuke.ll --- a/llvm/test/Transforms/SCCP/logical-nuke.ll +++ b/llvm/test/Transforms/SCCP/logical-nuke.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -sccp -S | FileCheck %s +; RUN: opt < %s -passes=sccp -S | FileCheck %s ; Test that SCCP has basic knowledge of when and/or/mul nuke overdefined values. 
diff --git a/llvm/test/Transforms/SCCP/metadata.ll b/llvm/test/Transforms/SCCP/metadata.ll --- a/llvm/test/Transforms/SCCP/metadata.ll +++ b/llvm/test/Transforms/SCCP/metadata.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s declare void @use(i1) declare i32 @get_i32() diff --git a/llvm/test/Transforms/SCCP/multiple_callbacks.ll b/llvm/test/Transforms/SCCP/multiple_callbacks.ll --- a/llvm/test/Transforms/SCCP/multiple_callbacks.ll +++ b/llvm/test/Transforms/SCCP/multiple_callbacks.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes -; RUN: opt -ipsccp -S %s | FileCheck %s +; RUN: opt -passes=ipsccp -S %s | FileCheck %s ; ; ; /---------------------------------------| diff --git a/llvm/test/Transforms/SCCP/musttail-call.ll b/llvm/test/Transforms/SCCP/musttail-call.ll --- a/llvm/test/Transforms/SCCP/musttail-call.ll +++ b/llvm/test/Transforms/SCCP/musttail-call.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s ; PR36485 ; musttail call result can\'t be replaced with a constant, unless the call ; can be removed diff --git a/llvm/test/Transforms/SCCP/naked-return.ll b/llvm/test/Transforms/SCCP/naked-return.ll --- a/llvm/test/Transforms/SCCP/naked-return.ll +++ b/llvm/test/Transforms/SCCP/naked-return.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -ipsccp -S %s | FileCheck %s +; RUN: opt -passes=ipsccp -S %s | FileCheck %s target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32" target triple = "i686-pc-windows-msvc19.0.24215" diff --git a/llvm/test/Transforms/SCCP/openmp_parallel_for.ll 
b/llvm/test/Transforms/SCCP/openmp_parallel_for.ll --- a/llvm/test/Transforms/SCCP/openmp_parallel_for.ll +++ b/llvm/test/Transforms/SCCP/openmp_parallel_for.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -ipsccp < %s | FileCheck %s +; RUN: opt -S -passes=ipsccp < %s | FileCheck %s ; ; void bar(int, float, double); ; diff --git a/llvm/test/Transforms/SCCP/overdefined-div.ll b/llvm/test/Transforms/SCCP/overdefined-div.ll --- a/llvm/test/Transforms/SCCP/overdefined-div.ll +++ b/llvm/test/Transforms/SCCP/overdefined-div.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -sccp -S | FileCheck %s +; RUN: opt < %s -passes=sccp -S | FileCheck %s ; Test that SCCP has basic knowledge of when div can nuke overdefined values. diff --git a/llvm/test/Transforms/SCCP/overdefined-ext.ll b/llvm/test/Transforms/SCCP/overdefined-ext.ll --- a/llvm/test/Transforms/SCCP/overdefined-ext.ll +++ b/llvm/test/Transforms/SCCP/overdefined-ext.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -sccp -S | FileCheck %s +; RUN: opt < %s -passes=sccp -S | FileCheck %s define i32 @zext_lshr(i1 %t0) { ; CHECK-LABEL: @zext_lshr( diff --git a/llvm/test/Transforms/SCCP/phi-cycle.ll b/llvm/test/Transforms/SCCP/phi-cycle.ll --- a/llvm/test/Transforms/SCCP/phi-cycle.ll +++ b/llvm/test/Transforms/SCCP/phi-cycle.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -ipsccp -S %s | FileCheck %s +; RUN: opt -passes=ipsccp -S %s | FileCheck %s declare i1 @cond() diff --git a/llvm/test/Transforms/SCCP/phis.ll b/llvm/test/Transforms/SCCP/phis.ll --- a/llvm/test/Transforms/SCCP/phis.ll +++ b/llvm/test/Transforms/SCCP/phis.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -sccp -S | FileCheck %s +; RUN: opt < %s -passes=sccp -S | FileCheck %s define i1 @float.1(i1 %cmp) { ; CHECK-LABEL: define i1 @float.1(i1 %cmp) { diff --git a/llvm/test/Transforms/SCCP/pr27712.ll 
b/llvm/test/Transforms/SCCP/pr27712.ll --- a/llvm/test/Transforms/SCCP/pr27712.ll +++ b/llvm/test/Transforms/SCCP/pr27712.ll @@ -1,4 +1,4 @@ -; RUN: opt -sccp -S < %s | FileCheck %s +; RUN: opt -passes=sccp -S < %s | FileCheck %s target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/SCCP/pr35357.ll b/llvm/test/Transforms/SCCP/pr35357.ll --- a/llvm/test/Transforms/SCCP/pr35357.ll +++ b/llvm/test/Transforms/SCCP/pr35357.ll @@ -1,4 +1,4 @@ -; RUN: opt -S %s -ipsccp | FileCheck %s +; RUN: opt -S %s -passes=ipsccp | FileCheck %s @a = internal global i32 2 diff --git a/llvm/test/Transforms/SCCP/pr45185-range-predinfo.ll b/llvm/test/Transforms/SCCP/pr45185-range-predinfo.ll --- a/llvm/test/Transforms/SCCP/pr45185-range-predinfo.ll +++ b/llvm/test/Transforms/SCCP/pr45185-range-predinfo.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -ipsccp -S %s | FileCheck %s +; RUN: opt -passes=ipsccp -S %s | FileCheck %s ;Test for PR45185. diff --git a/llvm/test/Transforms/SCCP/pr49582-iterator-invalidation.ll b/llvm/test/Transforms/SCCP/pr49582-iterator-invalidation.ll --- a/llvm/test/Transforms/SCCP/pr49582-iterator-invalidation.ll +++ b/llvm/test/Transforms/SCCP/pr49582-iterator-invalidation.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -ipsccp -disable-output +; RUN: opt < %s -passes=ipsccp -disable-output ; PR49582: This test checks for an iterator invalidation issue, which only gets ; exposed on a large-enough test case. We intentionally do not check the output. 
diff --git a/llvm/test/Transforms/SCCP/pr52253.ll b/llvm/test/Transforms/SCCP/pr52253.ll --- a/llvm/test/Transforms/SCCP/pr52253.ll +++ b/llvm/test/Transforms/SCCP/pr52253.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -sccp -S | FileCheck %s +; RUN: opt < %s -passes=sccp -S | FileCheck %s define i1 @foo(i32 %t4, i32 %t10) { ; CHECK-LABEL: @foo( diff --git a/llvm/test/Transforms/SCCP/predicateinfo-cond.ll b/llvm/test/Transforms/SCCP/predicateinfo-cond.ll --- a/llvm/test/Transforms/SCCP/predicateinfo-cond.ll +++ b/llvm/test/Transforms/SCCP/predicateinfo-cond.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -ipsccp < %s | FileCheck %s +; RUN: opt -S -passes=ipsccp < %s | FileCheck %s ; Test that information about the true/false value of conditions themselves ; is also used, not information implied by comparisions. diff --git a/llvm/test/Transforms/SCCP/preserve-analysis.ll b/llvm/test/Transforms/SCCP/preserve-analysis.ll --- a/llvm/test/Transforms/SCCP/preserve-analysis.ll +++ b/llvm/test/Transforms/SCCP/preserve-analysis.ll @@ -1,19 +1,8 @@ -; RUN: opt < %s -debug-pass=Structure -globals-aa -loop-vectorize -sccp -loop-vectorize -globals-aa -enable-new-pm=0 2>&1 -S | FileCheck %s ; RUN: opt < %s -debug-pass-manager -passes='loop-vectorize,sccp,loop-vectorize' 2>&1 -S | FileCheck --check-prefix=NEW-PM %s ; Check CFG-only analysis are preserved by SCCP by running it between 2 ; loop-vectorize runs. 
-; CHECK: Globals Alias Analysis -; CHECK: Dominator Tree Construction -; CHECK: Natural Loop Information -; CHECK: Sparse Conditional Constant Propagation -; CHECK: Post-Dominator Tree Construction -; CHECK-NOT: Dominator Tree Construction -; CHECK-NOT: Natural Loop Information -; CHECK-NOT: Globals Alias Analysis -; CHECK: Loop Vectorization - ; NEW-PM-DAG: Running analysis: LoopAnalysis on test ; NEW-PM-DAG: Running analysis: DominatorTreeAnalysis on test ; NEW-PM-DAG: Running analysis: AssumptionAnalysis on test diff --git a/llvm/test/Transforms/SCCP/pthreads.ll b/llvm/test/Transforms/SCCP/pthreads.ll --- a/llvm/test/Transforms/SCCP/pthreads.ll +++ b/llvm/test/Transforms/SCCP/pthreads.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -ipsccp -S < %s | FileCheck %s +; RUN: opt -passes=ipsccp -S < %s | FileCheck %s ; ; #include ; diff --git a/llvm/test/Transforms/SCCP/range-and-ip.ll b/llvm/test/Transforms/SCCP/range-and-ip.ll --- a/llvm/test/Transforms/SCCP/range-and-ip.ll +++ b/llvm/test/Transforms/SCCP/range-and-ip.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -ipsccp %s | FileCheck %s +; RUN: opt -S -passes=ipsccp %s | FileCheck %s ; Make sure IPSCCP does not assume %r < 256 for @f1. Undef is passed at a call ; site, which won't be eliminated. 
diff --git a/llvm/test/Transforms/SCCP/range-and.ll b/llvm/test/Transforms/SCCP/range-and.ll --- a/llvm/test/Transforms/SCCP/range-and.ll +++ b/llvm/test/Transforms/SCCP/range-and.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --verbose -; RUN: opt -S -sccp %s | FileCheck %s +; RUN: opt -S -passes=sccp %s | FileCheck %s declare void @use(i1) diff --git a/llvm/test/Transforms/SCCP/ranges-sext.ll b/llvm/test/Transforms/SCCP/ranges-sext.ll --- a/llvm/test/Transforms/SCCP/ranges-sext.ll +++ b/llvm/test/Transforms/SCCP/ranges-sext.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -sccp -S %s -o -| FileCheck %s +; RUN: opt -passes=sccp -S %s -o -| FileCheck %s define i64 @test1_sext_op_can_be_undef(i1 %c.1, i1 %c.2) { ; CHECK-LABEL: @test1_sext_op_can_be_undef( diff --git a/llvm/test/Transforms/SCCP/remove-call-inst.ll b/llvm/test/Transforms/SCCP/remove-call-inst.ll --- a/llvm/test/Transforms/SCCP/remove-call-inst.ll +++ b/llvm/test/Transforms/SCCP/remove-call-inst.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -S -ipsccp | FileCheck %s +; RUN: opt < %s -S -passes=ipsccp | FileCheck %s ; PR5596 ; IPSCCP should propagate the 0 argument, eliminate the switch, and propagate diff --git a/llvm/test/Transforms/SCCP/replace-dereferenceable-ptr-with-undereferenceable.ll b/llvm/test/Transforms/SCCP/replace-dereferenceable-ptr-with-undereferenceable.ll --- a/llvm/test/Transforms/SCCP/replace-dereferenceable-ptr-with-undereferenceable.ll +++ b/llvm/test/Transforms/SCCP/replace-dereferenceable-ptr-with-undereferenceable.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -ipsccp -S %s | FileCheck %s +; RUN: opt -passes=ipsccp -S %s | FileCheck %s @y = common global [1 x i32] zeroinitializer, align 4 @x = common global [1 x i32] zeroinitializer, align 4 diff 
--git a/llvm/test/Transforms/SCCP/resolvedundefsin-tracked-fn.ll b/llvm/test/Transforms/SCCP/resolvedundefsin-tracked-fn.ll --- a/llvm/test/Transforms/SCCP/resolvedundefsin-tracked-fn.ll +++ b/llvm/test/Transforms/SCCP/resolvedundefsin-tracked-fn.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature -; RUN: opt -ipsccp -S %s | FileCheck %s +; RUN: opt -passes=ipsccp -S %s | FileCheck %s %t1 = type opaque diff --git a/llvm/test/Transforms/SCCP/return-argument.ll b/llvm/test/Transforms/SCCP/return-argument.ll --- a/llvm/test/Transforms/SCCP/return-argument.ll +++ b/llvm/test/Transforms/SCCP/return-argument.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s ;; This function returns its second argument on all return statements define internal i32* @incdec(i1 %C, i32* %V) { diff --git a/llvm/test/Transforms/SCCP/return-constants.ll b/llvm/test/Transforms/SCCP/return-constants.ll --- a/llvm/test/Transforms/SCCP/return-constants.ll +++ b/llvm/test/Transforms/SCCP/return-constants.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s %0 = type { i32, i32 } diff --git a/llvm/test/Transforms/SCCP/return-zapped.ll b/llvm/test/Transforms/SCCP/return-zapped.ll --- a/llvm/test/Transforms/SCCP/return-zapped.ll +++ b/llvm/test/Transforms/SCCP/return-zapped.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -S -ipsccp | FileCheck %s +; RUN: opt < %s -S -passes=ipsccp | FileCheck %s ; After the first round of Solver.Solve(), the return value of @testf still ; undefined as we hit a branch on undef. 
Therefore the conditional branch on diff --git a/llvm/test/Transforms/SCCP/retvalue-undef.ll b/llvm/test/Transforms/SCCP/retvalue-undef.ll --- a/llvm/test/Transforms/SCCP/retvalue-undef.ll +++ b/llvm/test/Transforms/SCCP/retvalue-undef.ll @@ -1,4 +1,4 @@ -; RUN: opt -ipsccp -S < %s | FileCheck %s +; RUN: opt -passes=ipsccp -S < %s | FileCheck %s ; PR6414 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/SCCP/sccptest.ll b/llvm/test/Transforms/SCCP/sccptest.ll --- a/llvm/test/Transforms/SCCP/sccptest.ll +++ b/llvm/test/Transforms/SCCP/sccptest.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -sccp -S | FileCheck %s +; RUN: opt < %s -passes=sccp -S | FileCheck %s ; This is a basic correctness check for constant propagation. The add ; instruction should be eliminated. diff --git a/llvm/test/Transforms/SCCP/select.ll b/llvm/test/Transforms/SCCP/select.ll --- a/llvm/test/Transforms/SCCP/select.ll +++ b/llvm/test/Transforms/SCCP/select.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -sccp -S | FileCheck %s +; RUN: opt < %s -passes=sccp -S | FileCheck %s define i32 @test1(i1 %C) { ; CHECK-LABEL: define i32 @test1( diff --git a/llvm/test/Transforms/SCCP/solve-after-each-resolving-undefs-for-function.ll b/llvm/test/Transforms/SCCP/solve-after-each-resolving-undefs-for-function.ll --- a/llvm/test/Transforms/SCCP/solve-after-each-resolving-undefs-for-function.ll +++ b/llvm/test/Transforms/SCCP/solve-after-each-resolving-undefs-for-function.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s ; CHECK-LABEL: @testf( ; CHECK: ret i32 undef diff --git a/llvm/test/Transforms/SCCP/struct-arg-resolve-undefs.ll b/llvm/test/Transforms/SCCP/struct-arg-resolve-undefs.ll --- a/llvm/test/Transforms/SCCP/struct-arg-resolve-undefs.ll +++ 
b/llvm/test/Transforms/SCCP/struct-arg-resolve-undefs.ll @@ -1,4 +1,4 @@ -; RUN: opt -ipsccp -S %s | FileCheck %s +; RUN: opt -passes=ipsccp -S %s | FileCheck %s %struct.S = type { i32 } diff --git a/llvm/test/Transforms/SCCP/switch-constantfold-crash.ll b/llvm/test/Transforms/SCCP/switch-constantfold-crash.ll --- a/llvm/test/Transforms/SCCP/switch-constantfold-crash.ll +++ b/llvm/test/Transforms/SCCP/switch-constantfold-crash.ll @@ -1,5 +1,4 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -ipsccp < %s -S | FileCheck %s ; RUN: opt -passes=ipsccp < %s -S | FileCheck %s define void @barney() { diff --git a/llvm/test/Transforms/SCCP/switch-multiple-undef.ll b/llvm/test/Transforms/SCCP/switch-multiple-undef.ll --- a/llvm/test/Transforms/SCCP/switch-multiple-undef.ll +++ b/llvm/test/Transforms/SCCP/switch-multiple-undef.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -ipsccp < %s | FileCheck %s +; RUN: opt -S -passes=ipsccp < %s | FileCheck %s declare void @foo() declare void @goo() diff --git a/llvm/test/Transforms/SCCP/switch-undef-constantfoldterminator.ll b/llvm/test/Transforms/SCCP/switch-undef-constantfoldterminator.ll --- a/llvm/test/Transforms/SCCP/switch-undef-constantfoldterminator.ll +++ b/llvm/test/Transforms/SCCP/switch-undef-constantfoldterminator.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -ipsccp -S | FileCheck %s +; RUN: opt < %s -passes=ipsccp -S | FileCheck %s ; This test case used to end up like this: ; diff --git a/llvm/test/Transforms/SCCP/switch.ll b/llvm/test/Transforms/SCCP/switch.ll --- a/llvm/test/Transforms/SCCP/switch.ll +++ b/llvm/test/Transforms/SCCP/switch.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -ipsccp < %s | FileCheck %s +; RUN: opt -S -passes=ipsccp < %s | FileCheck %s ; Make sure we always consider the 
default edge executable for a switch ; with no cases. diff --git a/llvm/test/Transforms/SCCP/thread_local_acs.ll b/llvm/test/Transforms/SCCP/thread_local_acs.ll --- a/llvm/test/Transforms/SCCP/thread_local_acs.ll +++ b/llvm/test/Transforms/SCCP/thread_local_acs.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -ipsccp -S < %s | FileCheck %s +; RUN: opt -passes=ipsccp -S < %s | FileCheck %s ; ; #include ; thread_local int gtl = 0; diff --git a/llvm/test/Transforms/SCCP/ub-shift.ll b/llvm/test/Transforms/SCCP/ub-shift.ll --- a/llvm/test/Transforms/SCCP/ub-shift.ll +++ b/llvm/test/Transforms/SCCP/ub-shift.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -sccp -S | FileCheck %s +; RUN: opt < %s -passes=sccp -S | FileCheck %s define void @shift_undef_64(i64* %p) { ; CHECK-LABEL: @shift_undef_64( diff --git a/llvm/test/Transforms/SCCP/ubsan_overflow.ll b/llvm/test/Transforms/SCCP/ubsan_overflow.ll --- a/llvm/test/Transforms/SCCP/ubsan_overflow.ll +++ b/llvm/test/Transforms/SCCP/ubsan_overflow.ll @@ -1,4 +1,4 @@ -; RUN: opt -sccp -S %s | FileCheck %s +; RUN: opt -passes=sccp -S %s | FileCheck %s @0 = private unnamed_addr constant [16 x i8] c"\01\00\00\00\01\01\00\00\01\01\01\00\01\01\01\01" diff --git a/llvm/test/Transforms/SCCP/undef-resolve.ll b/llvm/test/Transforms/SCCP/undef-resolve.ll --- a/llvm/test/Transforms/SCCP/undef-resolve.ll +++ b/llvm/test/Transforms/SCCP/undef-resolve.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -sccp -S < %s | FileCheck %s +; RUN: opt -passes=sccp -S < %s | FileCheck %s ; PR6940 diff --git a/llvm/test/Transforms/SCCP/user-with-multiple-uses.ll b/llvm/test/Transforms/SCCP/user-with-multiple-uses.ll --- a/llvm/test/Transforms/SCCP/user-with-multiple-uses.ll +++ b/llvm/test/Transforms/SCCP/user-with-multiple-uses.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -S -ipsccp | 
FileCheck %s +; RUN: opt < %s -S -passes=ipsccp | FileCheck %s ; PR5596 ; IPSCCP should propagate the 0 argument, eliminate the switch, and propagate diff --git a/llvm/test/Transforms/SCCP/vector-bitcast.ll b/llvm/test/Transforms/SCCP/vector-bitcast.ll --- a/llvm/test/Transforms/SCCP/vector-bitcast.ll +++ b/llvm/test/Transforms/SCCP/vector-bitcast.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -sccp -S < %s | FileCheck %s +; RUN: opt -passes=sccp -S < %s | FileCheck %s target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128" diff --git a/llvm/test/Transforms/SCCP/widening.ll b/llvm/test/Transforms/SCCP/widening.ll --- a/llvm/test/Transforms/SCCP/widening.ll +++ b/llvm/test/Transforms/SCCP/widening.ll @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt %s -sccp -S | FileCheck --check-prefix=SCCP %s -; RUN: opt %s -ipsccp -S | FileCheck --check-prefix=IPSCCP %s +; RUN: opt %s -passes=sccp -S | FileCheck --check-prefix=SCCP %s +; RUN: opt %s -passes=ipsccp -S | FileCheck --check-prefix=IPSCCP %s ; Test different widening scenarios. 
diff --git a/llvm/test/Transforms/TailCallElim/2010-06-26-MultipleReturnValues.ll b/llvm/test/Transforms/TailCallElim/2010-06-26-MultipleReturnValues.ll --- a/llvm/test/Transforms/TailCallElim/2010-06-26-MultipleReturnValues.ll +++ b/llvm/test/Transforms/TailCallElim/2010-06-26-MultipleReturnValues.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -tailcallelim -verify-dom-info -S | FileCheck %s +; RUN: opt < %s -passes=tailcallelim -verify-dom-info -S | FileCheck %s ; PR7328 ; PR7506 define i32 @test1_constants(i32 %x) { diff --git a/llvm/test/Transforms/TailCallElim/EraseBB.ll b/llvm/test/Transforms/TailCallElim/EraseBB.ll --- a/llvm/test/Transforms/TailCallElim/EraseBB.ll +++ b/llvm/test/Transforms/TailCallElim/EraseBB.ll @@ -1,4 +1,4 @@ -; RUN: opt -tailcallelim -verify-dom-info -S < %s 2>&1 | FileCheck %s +; RUN: opt -passes=tailcallelim -verify-dom-info -S < %s 2>&1 | FileCheck %s ; CHECK: add nsw i32 ; CHECK-NEXT: br label diff --git a/llvm/test/Transforms/TailCallElim/accum_recursion.ll b/llvm/test/Transforms/TailCallElim/accum_recursion.ll --- a/llvm/test/Transforms/TailCallElim/accum_recursion.ll +++ b/llvm/test/Transforms/TailCallElim/accum_recursion.ll @@ -1,4 +1,3 @@ -; RUN: opt < %s -tailcallelim -verify-dom-info -S | FileCheck %s ; RUN: opt < %s -passes=tailcallelim -verify-dom-info -S | FileCheck %s define i32 @test1_factorial(i32 %x) { diff --git a/llvm/test/Transforms/TailCallElim/ackermann.ll b/llvm/test/Transforms/TailCallElim/ackermann.ll --- a/llvm/test/Transforms/TailCallElim/ackermann.ll +++ b/llvm/test/Transforms/TailCallElim/ackermann.ll @@ -1,6 +1,6 @@ ; REQUIRES: asserts ; This function contains two tail calls, which should be eliminated -; RUN: opt < %s -tailcallelim -verify-dom-info -stats -disable-output 2>&1 | grep "2 tailcallelim" +; RUN: opt < %s -passes=tailcallelim -verify-dom-info -stats -disable-output 2>&1 | grep "2 tailcallelim" define i32 @Ack(i32 %M.1, i32 %N.1) { entry: diff --git a/llvm/test/Transforms/TailCallElim/basic.ll 
b/llvm/test/Transforms/TailCallElim/basic.ll --- a/llvm/test/Transforms/TailCallElim/basic.ll +++ b/llvm/test/Transforms/TailCallElim/basic.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -tailcallelim -verify-dom-info -S | FileCheck %s +; RUN: opt < %s -passes=tailcallelim -verify-dom-info -S | FileCheck %s declare void @noarg() declare void @use(i32*) diff --git a/llvm/test/Transforms/TailCallElim/debugloc.ll b/llvm/test/Transforms/TailCallElim/debugloc.ll --- a/llvm/test/Transforms/TailCallElim/debugloc.ll +++ b/llvm/test/Transforms/TailCallElim/debugloc.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -debugify -tailcallelim -S | FileCheck %s +; RUN: opt < %s -passes=debugify,tailcallelim -S | FileCheck %s define void @foo() { entry: diff --git a/llvm/test/Transforms/TailCallElim/deopt-bundle.ll b/llvm/test/Transforms/TailCallElim/deopt-bundle.ll --- a/llvm/test/Transforms/TailCallElim/deopt-bundle.ll +++ b/llvm/test/Transforms/TailCallElim/deopt-bundle.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -tailcallelim -verify-dom-info -S | FileCheck %s +; RUN: opt < %s -passes=tailcallelim -verify-dom-info -S | FileCheck %s define i32 @f_1(i32 %x) { ; CHECK-LABEL: @f_1( diff --git a/llvm/test/Transforms/TailCallElim/dont_reorder_load.ll b/llvm/test/Transforms/TailCallElim/dont_reorder_load.ll --- a/llvm/test/Transforms/TailCallElim/dont_reorder_load.ll +++ b/llvm/test/Transforms/TailCallElim/dont_reorder_load.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -tailcallelim -verify-dom-info -S | grep call | count 4 +; RUN: opt < %s -passes=tailcallelim -verify-dom-info -S | grep call | count 4 ; PR4323 ; Several cases where tail call elimination should not move the load above the diff --git a/llvm/test/Transforms/TailCallElim/dup_tail.ll b/llvm/test/Transforms/TailCallElim/dup_tail.ll --- a/llvm/test/Transforms/TailCallElim/dup_tail.ll +++ b/llvm/test/Transforms/TailCallElim/dup_tail.ll @@ -1,6 +1,6 @@ ; REQUIRES: asserts ; Duplicate the return into if.end to enable TCE. 
-; RUN: opt -tailcallelim -verify-dom-info -stats -disable-output < %s 2>&1 | FileCheck %s +; RUN: opt -passes=tailcallelim -verify-dom-info -stats -disable-output < %s 2>&1 | FileCheck %s ; CHECK: Number of return duplicated diff --git a/llvm/test/Transforms/TailCallElim/inf-recursion.ll b/llvm/test/Transforms/TailCallElim/inf-recursion.ll --- a/llvm/test/Transforms/TailCallElim/inf-recursion.ll +++ b/llvm/test/Transforms/TailCallElim/inf-recursion.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -tailcallelim -verify-dom-info -S | FileCheck %s +; RUN: opt < %s -passes=tailcallelim -verify-dom-info -S | FileCheck %s ; Don't turn this into an infinite loop, this is probably the implementation ; of fabs and we expect the codegen to lower fabs. diff --git a/llvm/test/Transforms/TailCallElim/notail.ll b/llvm/test/Transforms/TailCallElim/notail.ll --- a/llvm/test/Transforms/TailCallElim/notail.ll +++ b/llvm/test/Transforms/TailCallElim/notail.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -tailcallelim -verify-dom-info -S | FileCheck %s +; RUN: opt < %s -passes=tailcallelim -verify-dom-info -S | FileCheck %s ; CHECK: tail call void @callee0() ; CHECK: notail call void @callee1() diff --git a/llvm/test/Transforms/TailCallElim/reorder_load.ll b/llvm/test/Transforms/TailCallElim/reorder_load.ll --- a/llvm/test/Transforms/TailCallElim/reorder_load.ll +++ b/llvm/test/Transforms/TailCallElim/reorder_load.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -tailcallelim -verify-dom-info -S | FileCheck %s +; RUN: opt < %s -passes=tailcallelim -verify-dom-info -S | FileCheck %s ; PR4323 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" diff --git a/llvm/test/Transforms/TailCallElim/setjmp.ll b/llvm/test/Transforms/TailCallElim/setjmp.ll --- a/llvm/test/Transforms/TailCallElim/setjmp.ll +++ b/llvm/test/Transforms/TailCallElim/setjmp.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -tailcallelim -verify-dom-info -S | FileCheck %s +; RUN: opt < %s 
-passes=tailcallelim -verify-dom-info -S | FileCheck %s ; Test that we don't tail call in a functions that calls returns_twice ; functions. diff --git a/llvm/test/Transforms/TailCallElim/tre-byval-parameter-2.ll b/llvm/test/Transforms/TailCallElim/tre-byval-parameter-2.ll --- a/llvm/test/Transforms/TailCallElim/tre-byval-parameter-2.ll +++ b/llvm/test/Transforms/TailCallElim/tre-byval-parameter-2.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -tailcallelim -verify-dom-info -S | FileCheck %s +; RUN: opt < %s -passes=tailcallelim -verify-dom-info -S | FileCheck %s ; the test was generated from the following C++ source: ; diff --git a/llvm/test/Transforms/TailCallElim/tre-byval-parameter.ll b/llvm/test/Transforms/TailCallElim/tre-byval-parameter.ll --- a/llvm/test/Transforms/TailCallElim/tre-byval-parameter.ll +++ b/llvm/test/Transforms/TailCallElim/tre-byval-parameter.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -tailcallelim -verify-dom-info -S | FileCheck %s +; RUN: opt < %s -passes=tailcallelim -verify-dom-info -S | FileCheck %s ; the test was generated from the following C++ source: ; diff --git a/llvm/test/Transforms/TailCallElim/tre-multiple-exits.ll b/llvm/test/Transforms/TailCallElim/tre-multiple-exits.ll --- a/llvm/test/Transforms/TailCallElim/tre-multiple-exits.ll +++ b/llvm/test/Transforms/TailCallElim/tre-multiple-exits.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -tailcallelim -verify-dom-info -S | FileCheck %s +; RUN: opt < %s -passes=tailcallelim -verify-dom-info -S | FileCheck %s ; This test checks that TRE would be done for only one recursive call. ; The test_multiple_exits function has three recursive calls. 
diff --git a/llvm/test/Transforms/TailCallElim/tre-noncapturing-alloca-calls.ll b/llvm/test/Transforms/TailCallElim/tre-noncapturing-alloca-calls.ll --- a/llvm/test/Transforms/TailCallElim/tre-noncapturing-alloca-calls.ll +++ b/llvm/test/Transforms/TailCallElim/tre-noncapturing-alloca-calls.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt < %s -tailcallelim -verify-dom-info -S | FileCheck %s +; RUN: opt < %s -passes=tailcallelim -verify-dom-info -S | FileCheck %s ; IR for that test was generated from the following C++ source: ; diff --git a/llvm/test/Transforms/Util/PredicateInfo/branch-on-same-cond.ll b/llvm/test/Transforms/Util/PredicateInfo/branch-on-same-cond.ll --- a/llvm/test/Transforms/Util/PredicateInfo/branch-on-same-cond.ll +++ b/llvm/test/Transforms/Util/PredicateInfo/branch-on-same-cond.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -S -print-predicateinfo < %s 2>&1 >/dev/null | FileCheck %s +; RUN: opt -S -passes=print-predicateinfo < %s 2>&1 >/dev/null | FileCheck %s ; FIXME: RenamedOp should be %cmp or %x in all cases here, ; which is the value used in the condition. 
diff --git a/llvm/test/Transforms/Util/PredicateInfo/condprop.ll b/llvm/test/Transforms/Util/PredicateInfo/condprop.ll --- a/llvm/test/Transforms/Util/PredicateInfo/condprop.ll +++ b/llvm/test/Transforms/Util/PredicateInfo/condprop.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -print-predicateinfo -disable-output < %s 2>&1 | FileCheck %s +; RUN: opt -passes=print-predicateinfo -disable-output < %s 2>&1 | FileCheck %s @a = external global i32 ; [#uses=7] diff --git a/llvm/test/Transforms/Util/PredicateInfo/diamond.ll b/llvm/test/Transforms/Util/PredicateInfo/diamond.ll --- a/llvm/test/Transforms/Util/PredicateInfo/diamond.ll +++ b/llvm/test/Transforms/Util/PredicateInfo/diamond.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -print-predicateinfo < %s 2>&1 | FileCheck %s +; RUN: opt -passes=print-predicateinfo < %s 2>&1 | FileCheck %s define i1 @f(i32 %x, i1 %y) { ; CHECK-LABEL: @f( ; CHECK-NEXT: br i1 [[Y:%.*]], label [[BB0:%.*]], label [[BB1:%.*]] diff --git a/llvm/test/Transforms/Util/PredicateInfo/edge.ll b/llvm/test/Transforms/Util/PredicateInfo/edge.ll --- a/llvm/test/Transforms/Util/PredicateInfo/edge.ll +++ b/llvm/test/Transforms/Util/PredicateInfo/edge.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -print-predicateinfo < %s 2>&1 | FileCheck %s +; RUN: opt -passes=print-predicateinfo < %s 2>&1 | FileCheck %s define i32 @f1(i32 %x) { ; CHECK-LABEL: @f1( diff --git a/llvm/test/Transforms/Util/PredicateInfo/ordering.ll b/llvm/test/Transforms/Util/PredicateInfo/ordering.ll --- a/llvm/test/Transforms/Util/PredicateInfo/ordering.ll +++ b/llvm/test/Transforms/Util/PredicateInfo/ordering.ll @@ -1,4 +1,4 @@ -; REQUIRES: assert +; REQUIRES: asserts ; RUN: opt -passes=print-predicateinfo -debug < %s 2>&1 | FileCheck %s 
declare void @use(i32) diff --git a/llvm/test/Transforms/Util/PredicateInfo/pr33456.ll b/llvm/test/Transforms/Util/PredicateInfo/pr33456.ll --- a/llvm/test/Transforms/Util/PredicateInfo/pr33456.ll +++ b/llvm/test/Transforms/Util/PredicateInfo/pr33456.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -print-predicateinfo < %s 2>&1 | FileCheck %s +; RUN: opt -passes=print-predicateinfo < %s 2>&1 | FileCheck %s ; Don't insert predicate info for conditions with a single target. @a = global i32 1, align 4 @d = common global i32 0, align 4 diff --git a/llvm/test/Transforms/Util/PredicateInfo/pr33457.ll b/llvm/test/Transforms/Util/PredicateInfo/pr33457.ll --- a/llvm/test/Transforms/Util/PredicateInfo/pr33457.ll +++ b/llvm/test/Transforms/Util/PredicateInfo/pr33457.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -print-predicateinfo < %s 2>&1 | FileCheck %s +; RUN: opt -passes=print-predicateinfo < %s 2>&1 | FileCheck %s ; Don't insert predicate info for conditions with a single target. 
@a = global i32 6, align 4 @c = global i32 -1, align 4 diff --git a/llvm/test/Transforms/Util/PredicateInfo/testandor.ll b/llvm/test/Transforms/Util/PredicateInfo/testandor.ll --- a/llvm/test/Transforms/Util/PredicateInfo/testandor.ll +++ b/llvm/test/Transforms/Util/PredicateInfo/testandor.ll @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -print-predicateinfo -disable-output < %s 2>&1 | FileCheck %s +; RUN: opt -passes=print-predicateinfo -disable-output < %s 2>&1 | FileCheck %s declare void @foo(i1) declare void @bar(i32) diff --git a/llvm/test/Transforms/Util/PredicateInfo/unnamed-types.ll b/llvm/test/Transforms/Util/PredicateInfo/unnamed-types.ll --- a/llvm/test/Transforms/Util/PredicateInfo/unnamed-types.ll +++ b/llvm/test/Transforms/Util/PredicateInfo/unnamed-types.ll @@ -1,4 +1,4 @@ -; RUN: opt < %s -print-predicateinfo 2>&1 | FileCheck %s +; RUN: opt < %s -passes=print-predicateinfo 2>&1 | FileCheck %s %1 = type opaque %0 = type opaque diff --git a/llvm/test/Transforms/Util/PredicateInfo/unreachable.ll b/llvm/test/Transforms/Util/PredicateInfo/unreachable.ll --- a/llvm/test/Transforms/Util/PredicateInfo/unreachable.ll +++ b/llvm/test/Transforms/Util/PredicateInfo/unreachable.ll @@ -1,4 +1,4 @@ -; RUN: opt -print-predicateinfo < %s 2>&1 | FileCheck %s +; RUN: opt -passes=print-predicateinfo < %s 2>&1 | FileCheck %s declare void @foo() declare void @llvm.assume(i1) diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/basic.ll b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/basic.ll --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/basic.ll +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/basic.ll @@ -1,5 +1,5 @@ ; Example input for update_test_checks (taken from test/Transforms/InstSimplify/add.ll) -; RUN: opt < %s -instsimplify -S | FileCheck %s +; RUN: opt < %s -passes=instsimplify -S | FileCheck %s define i32 @common_sub_operand(i32 %X, i32 
%Y) { ; CHECK-LABEL: @common_sub_operand( diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/basic.ll.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/basic.ll.expected --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/basic.ll.expected +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/basic.ll.expected @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; Example input for update_test_checks (taken from test/Transforms/InstSimplify/add.ll) -; RUN: opt < %s -instsimplify -S | FileCheck %s +; RUN: opt < %s -passes=instsimplify -S | FileCheck %s define i32 @common_sub_operand(i32 %X, i32 %Y) { ; CHECK-LABEL: @common_sub_operand( diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/basic.ll.funcsig.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/basic.ll.funcsig.expected --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/basic.ll.funcsig.expected +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/basic.ll.funcsig.expected @@ -1,6 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature ; Example input for update_test_checks (taken from test/Transforms/InstSimplify/add.ll) -; RUN: opt < %s -instsimplify -S | FileCheck %s +; RUN: opt < %s -passes=instsimplify -S | FileCheck %s define i32 @common_sub_operand(i32 %X, i32 %Y) { ; CHECK-LABEL: define {{[^@]+}}@common_sub_operand diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/check_attrs.ll b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/check_attrs.ll --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/check_attrs.ll +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/check_attrs.ll @@ -1,7 +1,5 @@ -; RUN: opt -attributor -S < %s | FileCheck %s 
--check-prefixes=CHECK,NOT_CGSCC_NPM,NOT_CGSCC_OPM,NOT_TUNIT_NPM,IS__TUNIT____,IS________OPM,IS__TUNIT_OPM -; RUN: opt -aa-pipeline=basic-aa -passes=attributor -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_CGSCC_OPM,NOT_CGSCC_NPM,NOT_TUNIT_OPM,IS__TUNIT____,IS________NPM,IS__TUNIT_NPM -; RUN: opt -attributor-cgscc -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_TUNIT_NPM,NOT_TUNIT_OPM,NOT_CGSCC_NPM,IS__CGSCC____,IS________OPM,IS__CGSCC_OPM -; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_TUNIT_NPM,NOT_TUNIT_OPM,NOT_CGSCC_OPM,IS__CGSCC____,IS________NPM,IS__CGSCC_NPM +; RUN: opt -aa-pipeline=basic-aa -passes=attributor -S < %s | FileCheck %s --check-prefixes=CHECK,IS__TUNIT____ +; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -S < %s | FileCheck %s --check-prefixes=CHECK,IS__CGSCC____ %struct.RT = type { i8, [10 x [20 x i32]], i8 } %struct.ST = type { i32, double, %struct.RT } diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/check_attrs.ll.funcattrs.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/check_attrs.ll.funcattrs.expected --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/check_attrs.ll.funcattrs.expected +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/check_attrs.ll.funcattrs.expected @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-attributes -; RUN: opt -attributor -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_CGSCC_NPM,NOT_CGSCC_OPM,NOT_TUNIT_NPM,IS__TUNIT____,IS________OPM,IS__TUNIT_OPM -; RUN: opt -aa-pipeline=basic-aa -passes=attributor -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_CGSCC_OPM,NOT_CGSCC_NPM,NOT_TUNIT_OPM,IS__TUNIT____,IS________NPM,IS__TUNIT_NPM -; RUN: opt -attributor-cgscc -S < %s | FileCheck %s 
--check-prefixes=CHECK,NOT_TUNIT_NPM,NOT_TUNIT_OPM,NOT_CGSCC_NPM,IS__CGSCC____,IS________OPM,IS__CGSCC_OPM -; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_TUNIT_NPM,NOT_TUNIT_OPM,NOT_CGSCC_OPM,IS__CGSCC____,IS________NPM,IS__CGSCC_NPM +; RUN: opt -aa-pipeline=basic-aa -passes=attributor -S < %s | FileCheck %s --check-prefixes=CHECK,IS__TUNIT____ +; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -S < %s | FileCheck %s --check-prefixes=CHECK,IS__CGSCC____ %struct.RT = type { i8, [10 x [20 x i32]], i8 } %struct.ST = type { i32, double, %struct.RT } diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/check_attrs.ll.plain.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/check_attrs.ll.plain.expected --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/check_attrs.ll.plain.expected +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/check_attrs.ll.plain.expected @@ -1,8 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature -; RUN: opt -attributor -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_CGSCC_NPM,NOT_CGSCC_OPM,NOT_TUNIT_NPM,IS__TUNIT____,IS________OPM,IS__TUNIT_OPM -; RUN: opt -aa-pipeline=basic-aa -passes=attributor -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_CGSCC_OPM,NOT_CGSCC_NPM,NOT_TUNIT_OPM,IS__TUNIT____,IS________NPM,IS__TUNIT_NPM -; RUN: opt -attributor-cgscc -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_TUNIT_NPM,NOT_TUNIT_OPM,NOT_CGSCC_NPM,IS__CGSCC____,IS________OPM,IS__CGSCC_OPM -; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_TUNIT_NPM,NOT_TUNIT_OPM,NOT_CGSCC_OPM,IS__CGSCC____,IS________NPM,IS__CGSCC_NPM +; RUN: opt -aa-pipeline=basic-aa -passes=attributor -S < %s | FileCheck %s --check-prefixes=CHECK,IS__TUNIT____ +; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -S < %s | 
FileCheck %s --check-prefixes=CHECK,IS__CGSCC____ %struct.RT = type { i8, [10 x [20 x i32]], i8 } %struct.ST = type { i32, double, %struct.RT } diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/function_name.ll b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/function_name.ll --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/function_name.ll +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/function_name.ll @@ -1,6 +1,6 @@ ; Check that we accept functions with '$' in the name. ; -; RUN: opt < %s -instsimplify -S | FileCheck %s +; RUN: opt < %s -passes=instsimplify -S | FileCheck %s ; define hidden i32 @"_Z54bar$ompvariant$bar"() { entry: diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/function_name.ll.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/function_name.ll.expected --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/function_name.ll.expected +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/function_name.ll.expected @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; Check that we accept functions with '$' in the name. 
; -; RUN: opt < %s -instsimplify -S | FileCheck %s +; RUN: opt < %s -passes=instsimplify -S | FileCheck %s ; define hidden i32 @"_Z54bar$ompvariant$bar"() { entry: diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll @@ -1,4 +1,4 @@ -; RUN: opt -hotcoldsplit -hotcoldsplit-threshold=0 -S < %s | FileCheck %s +; RUN: opt -passes=hotcoldsplit -hotcoldsplit-threshold=0 -S < %s | FileCheck %s target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-apple-macosx10.14.0" diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.generated.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.generated.expected --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.generated.expected +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.generated.expected @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --include-generated-funcs -; RUN: opt -hotcoldsplit -hotcoldsplit-threshold=0 -S < %s | FileCheck %s +; RUN: opt -passes=hotcoldsplit -hotcoldsplit-threshold=0 -S < %s | FileCheck %s target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-apple-macosx10.14.0" diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.generated.globals.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.generated.globals.expected --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.generated.globals.expected +++ 
b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.generated.globals.expected @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals --include-generated-funcs -; RUN: opt -hotcoldsplit -hotcoldsplit-threshold=0 -S < %s | FileCheck %s +; RUN: opt -passes=hotcoldsplit -hotcoldsplit-threshold=0 -S < %s | FileCheck %s target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-apple-macosx10.14.0" diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.nogenerated.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.nogenerated.expected --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.nogenerated.expected +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.nogenerated.expected @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; RUN: opt -hotcoldsplit -hotcoldsplit-threshold=0 -S < %s | FileCheck %s +; RUN: opt -passes=hotcoldsplit -hotcoldsplit-threshold=0 -S < %s | FileCheck %s target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-apple-macosx10.14.0" diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.nogenerated.globals.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.nogenerated.globals.expected --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.nogenerated.globals.expected +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/generated_funcs.ll.nogenerated.globals.expected @@ -1,5 +1,5 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals -; RUN: opt -hotcoldsplit -hotcoldsplit-threshold=0 -S < %s | FileCheck %s +; RUN: opt -passes=hotcoldsplit -hotcoldsplit-threshold=0 -S < %s 
| FileCheck %s target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-apple-macosx10.14.0" diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/pre-process.ll b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/pre-process.ll --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/pre-process.ll +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/pre-process.ll @@ -1,12 +1,12 @@ ; Test that update_test_checks.py can run pre-processing commands. -; RUN: opt < %s -instsimplify -S | FileCheck %s --check-prefix=CHECK-AS200 +; RUN: opt < %s -passes=instsimplify -S | FileCheck %s --check-prefix=CHECK-AS200 ; RUN: sed -e 's/addrspace(200)/addrspace(0)/g' -e 's/-A200-P200-G200//g' %s \ -; RUN: | opt -instsimplify -S | FileCheck %s --check-prefix=CHECK-AS0 +; RUN: | opt -passes=instsimplify -S | FileCheck %s --check-prefix=CHECK-AS0 ; Check that multiple pre-processing commands are handled ; RUN: sed 's/addrspace(200)/addrspace(1)/g' %s | sed 's/-A1-P1-G1//g' \ -; RUN: | opt -instsimplify -S | FileCheck %s --check-prefix=CHECK-AS1 +; RUN: | opt -passes=instsimplify -S | FileCheck %s --check-prefix=CHECK-AS1 ; More than two commands should also be fine -; RUN: cat %s | cat | cat | cat | opt < %s -instsimplify -S \ +; RUN: cat %s | cat | cat | cat | opt < %s -passes=instsimplify -S \ ; RUN: | FileCheck %s --check-prefix=CHECK-AS200-NOOP-PRE-PROCESS target datalayout = "e-m:e-p200:128:128:128:64-p:64:64-A200-P200-G200" diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/pre-process.ll.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/pre-process.ll.expected --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/pre-process.ll.expected +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/pre-process.ll.expected @@ -1,13 +1,13 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature ; Test 
that update_test_checks.py can run pre-processing commands. -; RUN: opt < %s -instsimplify -S | FileCheck %s --check-prefix=CHECK-AS200 +; RUN: opt < %s -passes=instsimplify -S | FileCheck %s --check-prefix=CHECK-AS200 ; RUN: sed -e 's/addrspace(200)/addrspace(0)/g' -e 's/-A200-P200-G200//g' %s \ -; RUN: | opt -instsimplify -S | FileCheck %s --check-prefix=CHECK-AS0 +; RUN: | opt -passes=instsimplify -S | FileCheck %s --check-prefix=CHECK-AS0 ; Check that multiple pre-processing commands are handled ; RUN: sed 's/addrspace(200)/addrspace(1)/g' %s | sed 's/-A1-P1-G1//g' \ -; RUN: | opt -instsimplify -S | FileCheck %s --check-prefix=CHECK-AS1 +; RUN: | opt -passes=instsimplify -S | FileCheck %s --check-prefix=CHECK-AS1 ; More than two commands should also be fine -; RUN: cat %s | cat | cat | cat | opt < %s -instsimplify -S \ +; RUN: cat %s | cat | cat | cat | opt < %s -passes=instsimplify -S \ ; RUN: | FileCheck %s --check-prefix=CHECK-AS200-NOOP-PRE-PROCESS target datalayout = "e-m:e-p200:128:128:128:64-p:64:64-A200-P200-G200" diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/sometimes_deleted_function.ll b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/sometimes_deleted_function.ll --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/sometimes_deleted_function.ll +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/sometimes_deleted_function.ll @@ -1,6 +1,6 @@ ; RUN: opt -S < %s | FileCheck %s --check-prefixes=ALL,ALL_BUT_TWO,ALL_BUT_THREE,ALL_BUT_FOUR,ONE_AND_TWO,ONE_AND_THREE,ONE_AND_FOUR,ONE -; RUN: opt -S -globalopt < %s | FileCheck %s --check-prefixes=ALL,ALL_BUT_ONE,ALL_BUT_THREE,ALL_BUT_FOUR,ONE_AND_TWO,TWO_AND_THREE,TWO_AND_FOUR,TWO -; RUN: opt -S -instsimplify < %s | FileCheck %s --check-prefixes=ALL,ALL_BUT_ONE,ALL_BUT_TWO,ALL_BUT_FOUR,ONE_AND_THREE,TWO_AND_THREE,THREE_AND_FOUR,THREE +; RUN: opt -S -passes=globalopt < %s | FileCheck %s 
--check-prefixes=ALL,ALL_BUT_ONE,ALL_BUT_THREE,ALL_BUT_FOUR,ONE_AND_TWO,TWO_AND_THREE,TWO_AND_FOUR,TWO +; RUN: opt -S -passes=instsimplify < %s | FileCheck %s --check-prefixes=ALL,ALL_BUT_ONE,ALL_BUT_TWO,ALL_BUT_FOUR,ONE_AND_THREE,TWO_AND_THREE,THREE_AND_FOUR,THREE ; RUN: opt -S < %s | FileCheck %s --check-prefixes=ALL,ALL_BUT_ONE,ALL_BUT_TWO,ALL_BUT_THREE,ONE_AND_FOUR,TWO_AND_FOUR,THREE_AND_FOUR,FOUR ; ; Make sure we don't use anything to check for @sometimes_here that contains "ALL" or "TWO". diff --git a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/sometimes_deleted_function.ll.expected b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/sometimes_deleted_function.ll.expected --- a/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/sometimes_deleted_function.ll.expected +++ b/llvm/test/tools/UpdateTestChecks/update_test_checks/Inputs/sometimes_deleted_function.ll.expected @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -S < %s | FileCheck %s --check-prefixes=ALL,ALL_BUT_TWO,ALL_BUT_THREE,ALL_BUT_FOUR,ONE_AND_TWO,ONE_AND_THREE,ONE_AND_FOUR,ONE -; RUN: opt -S -globalopt < %s | FileCheck %s --check-prefixes=ALL,ALL_BUT_ONE,ALL_BUT_THREE,ALL_BUT_FOUR,ONE_AND_TWO,TWO_AND_THREE,TWO_AND_FOUR,TWO -; RUN: opt -S -instsimplify < %s | FileCheck %s --check-prefixes=ALL,ALL_BUT_ONE,ALL_BUT_TWO,ALL_BUT_FOUR,ONE_AND_THREE,TWO_AND_THREE,THREE_AND_FOUR,THREE +; RUN: opt -S -passes=globalopt < %s | FileCheck %s --check-prefixes=ALL,ALL_BUT_ONE,ALL_BUT_THREE,ALL_BUT_FOUR,ONE_AND_TWO,TWO_AND_THREE,TWO_AND_FOUR,TWO +; RUN: opt -S -passes=instsimplify < %s | FileCheck %s --check-prefixes=ALL,ALL_BUT_ONE,ALL_BUT_TWO,ALL_BUT_FOUR,ONE_AND_THREE,TWO_AND_THREE,THREE_AND_FOUR,THREE ; RUN: opt -S < %s | FileCheck %s --check-prefixes=ALL,ALL_BUT_ONE,ALL_BUT_TWO,ALL_BUT_THREE,ONE_AND_FOUR,TWO_AND_FOUR,THREE_AND_FOUR,FOUR ; ; Make sure we don't use anything to check for @sometimes_here that contains "ALL" or 
"TWO". diff --git a/mlir/lib/Bindings/Python/IRCore.cpp b/mlir/lib/Bindings/Python/IRCore.cpp --- a/mlir/lib/Bindings/Python/IRCore.cpp +++ b/mlir/lib/Bindings/Python/IRCore.cpp @@ -93,6 +93,13 @@ use_local_Scope: Whether to print in a way that is more optimized for multi-threaded access but may not be consistent with how the overall module prints. + assume_verified: By default, if not printing generic form, the verifier + will be run and if it fails, generic form will be printed with a comment + about failed verification. While a reasonable default for interactive use, + for systematic use, it is often better for the caller to verify explicitly + and report failures in a more robust fashion. Set this to True if doing this + in order to avoid running a redundant verification. If the IR is actually + invalid, behavior is undefined. )"; static const char kOperationGetAsmDocstring[] = @@ -828,14 +835,21 @@ void PyOperationBase::print(py::object fileObject, bool binary, llvm::Optional largeElementsLimit, bool enableDebugInfo, bool prettyDebugInfo, - bool printGenericOpForm, bool useLocalScope) { + bool printGenericOpForm, bool useLocalScope, + bool assumeVerified) { PyOperation &operation = getOperation(); operation.checkValid(); if (fileObject.is_none()) fileObject = py::module::import("sys").attr("stdout"); - if (!printGenericOpForm && !mlirOperationVerify(operation)) { - fileObject.attr("write")("// Verification failed, printing generic form\n"); + if (!assumeVerified && !printGenericOpForm && + !mlirOperationVerify(operation)) { + std::string message("// Verification failed, printing generic form\n"); + if (binary) { + fileObject.attr("write")(py::bytes(message)); + } else { + fileObject.attr("write")(py::str(message)); + } printGenericOpForm = true; } @@ -857,8 +871,8 @@ py::object PyOperationBase::getAsm(bool binary, llvm::Optional largeElementsLimit, bool enableDebugInfo, bool prettyDebugInfo, - bool printGenericOpForm, - bool useLocalScope) { + bool 
printGenericOpForm, bool useLocalScope, + bool assumeVerified) { py::object fileObject; if (binary) { fileObject = py::module::import("io").attr("BytesIO")(); @@ -870,7 +884,8 @@ /*enableDebugInfo=*/enableDebugInfo, /*prettyDebugInfo=*/prettyDebugInfo, /*printGenericOpForm=*/printGenericOpForm, - /*useLocalScope=*/useLocalScope); + /*useLocalScope=*/useLocalScope, + /*assumeVerified=*/assumeVerified); return fileObject.attr("getvalue")(); } @@ -2149,12 +2164,9 @@ kDumpDocstring) .def( "__str__", - [](PyModule &self) { - MlirOperation operation = mlirModuleGetOperation(self.get()); - PyPrintAccumulator printAccum; - mlirOperationPrint(operation, printAccum.getCallback(), - printAccum.getUserData()); - return printAccum.join(); + [](py::object self) { + // Defer to the operation's __str__. + return self.attr("operation").attr("__str__")(); }, kOperationStrDunderDocstring); @@ -2234,7 +2246,8 @@ /*enableDebugInfo=*/false, /*prettyDebugInfo=*/false, /*printGenericOpForm=*/false, - /*useLocalScope=*/false); + /*useLocalScope=*/false, + /*assumeVerified=*/false); }, "Returns the assembly form of the operation.") .def("print", &PyOperationBase::print, @@ -2244,7 +2257,8 @@ py::arg("enable_debug_info") = false, py::arg("pretty_debug_info") = false, py::arg("print_generic_op_form") = false, - py::arg("use_local_scope") = false, kOperationPrintDocstring) + py::arg("use_local_scope") = false, + py::arg("assume_verified") = false, kOperationPrintDocstring) .def("get_asm", &PyOperationBase::getAsm, // Careful: Lots of arguments must match up with get_asm method. 
py::arg("binary") = false, @@ -2252,7 +2266,8 @@ py::arg("enable_debug_info") = false, py::arg("pretty_debug_info") = false, py::arg("print_generic_op_form") = false, - py::arg("use_local_scope") = false, kOperationGetAsmDocstring) + py::arg("use_local_scope") = false, + py::arg("assume_verified") = false, kOperationGetAsmDocstring) .def( "verify", [](PyOperationBase &self) { diff --git a/mlir/lib/Bindings/Python/IRModule.h b/mlir/lib/Bindings/Python/IRModule.h --- a/mlir/lib/Bindings/Python/IRModule.h +++ b/mlir/lib/Bindings/Python/IRModule.h @@ -394,11 +394,13 @@ /// Implements the bound 'print' method and helps with others. void print(pybind11::object fileObject, bool binary, llvm::Optional largeElementsLimit, bool enableDebugInfo, - bool prettyDebugInfo, bool printGenericOpForm, bool useLocalScope); + bool prettyDebugInfo, bool printGenericOpForm, bool useLocalScope, + bool assumeVerified); pybind11::object getAsm(bool binary, llvm::Optional largeElementsLimit, bool enableDebugInfo, bool prettyDebugInfo, - bool printGenericOpForm, bool useLocalScope); + bool printGenericOpForm, bool useLocalScope, + bool assumeVerified); /// Moves the operation before or after the other operation. void moveAfter(PyOperationBase &other); diff --git a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp --- a/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp +++ b/mlir/lib/Conversion/PDLToPDLInterp/PDLToPDLInterp.cpp @@ -208,8 +208,9 @@ // Dispatch to the correct method based on derived node type. 
TypeSwitch(&node) - .Case( - [&](auto *derivedNode) { generate(derivedNode, currentBlock, val); }) + .Case([&](auto *derivedNode) { + this->generate(derivedNode, currentBlock, val); + }) .Case([&](SuccessNode *successNode) { generate(successNode, currentBlock); }); diff --git a/mlir/test/Dialect/MemRef/invalid.mlir b/mlir/test/Dialect/MemRef/invalid.mlir --- a/mlir/test/Dialect/MemRef/invalid.mlir +++ b/mlir/test/Dialect/MemRef/invalid.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -split-input-file %s -verify-diagnostics +// RUN: mlir-opt -allow-unregistered-dialect -split-input-file %s -verify-diagnostics func @dma_start_not_enough_operands() { // expected-error@+1 {{expected at least 4 operands}} @@ -488,9 +488,323 @@ // ----- +func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) { + %0 = memref.alloc() : memref<2048xi8> + // expected-error@+1 {{expects 1 offset operand}} + %1 = memref.view %0[][%arg0, %arg1] + : memref<2048xi8> to memref + return +} + +// ----- + +func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) { + %0 = memref.alloc() : memref<2048xi8, affine_map<(d0) -> (d0 floordiv 8, d0 mod 8)>> + // expected-error@+1 {{unsupported map for base memref type}} + %1 = memref.view %0[%arg2][%arg0, %arg1] + : memref<2048xi8, affine_map<(d0) -> (d0 floordiv 8, d0 mod 8)>> to + memref (d0 * 4 + d1 + s0)>> + return +} + +// ----- + +func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) { + %0 = memref.alloc() : memref<2048xi8> + // expected-error@+1 {{unsupported map for result memref type}} + %1 = memref.view %0[%arg2][%arg0, %arg1] + : memref<2048xi8> to memref (d0, d1, s0)>> + return +} + +// ----- + +func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) { + %0 = memref.alloc() : memref<2048xi8, 2> + // expected-error@+1 {{different memory spaces}} + %1 = memref.view %0[%arg2][%arg0, %arg1] : memref<2048xi8, 2> to memref + return +} + +// ----- + +func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) { + %0 = 
memref.alloc() : memref<2048xi8> + // expected-error@+1 {{incorrect number of size operands for type}} + %1 = memref.view %0[%arg2][%arg0] + : memref<2048xi8> to memref + return +} + +// ----- + +func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) { + %0 = memref.alloc() : memref<8x16x4xf32> + // expected-error@+1 {{expected mixed offsets rank to match mixed sizes rank (2 vs 3) so the rank of the result type is well-formed}} + %1 = memref.subview %0[0, 0][2, 2, 2][1, 1, 1] + : memref<8x16x4xf32> to memref<8x16x4xf32> + return +} + +// ----- + +func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) { + %0 = memref.alloc() : memref<8x16x4xf32> + // expected-error@+1 {{expected mixed sizes rank to match mixed strides rank (3 vs 2) so the rank of the result type is well-formed}} + %1 = memref.subview %0[0, 0, 0][2, 2, 2][1, 1] + : memref<8x16x4xf32> to memref<8x16x4xf32> + return +} + +// ----- + +func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) { + %0 = memref.alloc() : memref<8x16x4xf32> + // expected-error@+1 {{expected mixed sizes rank to match mixed strides rank (3 vs 2) so the rank of the result type is well-formed}} + %1 = memref.reinterpret_cast %0 to offset: [0], sizes: [2, 2, 2], strides:[1, 1] + : memref<8x16x4xf32> to memref<8x16x4xf32> + return +} + +// ----- + +func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) { + %0 = memref.alloc() : memref<8x16x4xf32, offset: 0, strides: [64, 4, 1], 2> + // expected-error@+1 {{different memory spaces}} + %1 = memref.subview %0[0, 0, 0][%arg2, %arg2, %arg2][1, 1, 1] + : memref<8x16x4xf32, offset: 0, strides: [64, 4, 1], 2> to + memref<8x?x4xf32, affine_map<(d0, d1, d2)[s0] -> (d0 * s0 + d1 * 4 + d2)>> + return +} + +// ----- + +func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) { + %0 = memref.alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 + d1, d1 + d2, d2)>> + // expected-error@+1 {{is not strided}} + %1 = memref.subview %0[0, 
0, 0][%arg2, %arg2, %arg2][1, 1, 1] + : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 + d1, d1 + d2, d2)>> to + memref<8x?x4xf32, offset: 0, strides: [?, 4, 1]> + return +} + +// ----- + +func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) { + %0 = memref.alloc() : memref<8x16x4xf32> + // expected-error@+1 {{expected <= 3 offset values}} + %1 = memref.subview %0[%arg0, %arg1, 0, 0][%arg2, 0, 0, 0][1, 1, 1, 1] + : memref<8x16x4xf32> to + memref<8x?x4xf32, offset: 0, strides:[?, ?, 4]> + return +} + +// ----- + +func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) { + %0 = memref.alloc() : memref<8x16x4xf32> + // expected-error@+1 {{expected result element type to be 'f32'}} + %1 = memref.subview %0[0, 0, 0][8, 16, 4][1, 1, 1] + : memref<8x16x4xf32> to + memref<8x16x4xi32> + return +} + +// ----- + +func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) { + %0 = memref.alloc() : memref<8x16x4xf32> + // expected-error@+1 {{expected result rank to be smaller or equal to the source rank.}} + %1 = memref.subview %0[0, 0, 0][8, 16, 4][1, 1, 1] + : memref<8x16x4xf32> to + memref<8x16x4x3xi32> + return +} + +// ----- + +func @invalid_rank_reducing_subview(%arg0 : index, %arg1 : index, %arg2 : index) { + %0 = memref.alloc() : memref<8x16x4xf32> + // expected-error@+1 {{expected result type to be 'memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>' or a rank-reduced version. (mismatch of result sizes)}} + %1 = memref.subview %0[0, 0, 0][8, 16, 4][1, 1, 1] + : memref<8x16x4xf32> to memref<16x4xf32> + return +} + +// ----- + +func @invalid_rank_reducing_subview(%arg0 : index, %arg1 : index, %arg2 : index) { + %0 = memref.alloc() : memref<8x16x4xf32> + // expected-error@+1 {{expected result type to be 'memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 8)>>' or a rank-reduced version. 
(mismatch of result sizes)}} + %1 = memref.subview %0[0, 2, 0][8, 16, 4][1, 1, 1] + : memref<8x16x4xf32> to memref<16x4xf32> + return +} + +// ----- + +func @invalid_rank_reducing_subview(%arg0 : memref, %arg1 : index, %arg2 : index) { + // expected-error@+1 {{expected result type to be 'memref (d0 * s1 + s0 + d1)>>' or a rank-reduced version. (mismatch of result sizes)}} + %0 = memref.subview %arg0[0, %arg1][%arg2, 1][1, 1] : memref to memref + return +} + +// ----- + func @static_stride_to_dynamic_stride(%arg0 : memref, %arg1 : index, %arg2 : index) -> memref { // expected-error @+1 {{expected result type to be 'memref<1x?x?xf32, affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>>' or a rank-reduced version. (mismatch of result sizes)}} %0 = memref.subview %arg0[0, 0, 0] [1, %arg1, %arg2] [1, 1, 1] : memref to memref return %0 : memref } + +// ----- + +func @invalid_memref_cast(%arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]>) { + // expected-error@+1{{operand type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2)>>' and result type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 128 + d1 * 32 + d2 * 2)>>' are cast incompatible}} + %0 = memref.cast %arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]> to memref<12x4x16xf32, offset:0, strides:[128, 32, 2]> + return +} + +// ----- + +func @invalid_memref_cast(%arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]>) { + // expected-error@+1{{operand type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2)>>' and result type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2 + 16)>>' are cast incompatible}} + %0 = memref.cast %arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]> to memref<12x4x16xf32, offset:16, strides:[64, 16, 1]> + return +} + +// ----- + +// incompatible element types +func @invalid_memref_cast() { + %0 = memref.alloc() : memref<2x5xf32, 0> + // expected-error@+1 {{operand type 
'memref<2x5xf32>' and result type 'memref<*xi32>' are cast incompatible}} + %1 = memref.cast %0 : memref<2x5xf32, 0> to memref<*xi32> + return +} + +// ----- + +func @invalid_prefetch_rw(%i : index) { + %0 = memref.alloc() : memref<10xf32> + // expected-error@+1 {{rw specifier has to be 'read' or 'write'}} + memref.prefetch %0[%i], rw, locality<0>, data : memref<10xf32> + return +} + +// ----- + +func @invalid_prefetch_cache_type(%i : index) { + %0 = memref.alloc() : memref<10xf32> + // expected-error@+1 {{cache type has to be 'data' or 'instr'}} + memref.prefetch %0[%i], read, locality<0>, false : memref<10xf32> + return +} + +// ----- + +func @invalid_prefetch_locality_hint(%i : index) { + %0 = memref.alloc() : memref<10xf32> + // expected-error@+1 {{32-bit signless integer attribute whose minimum value is 0 whose maximum value is 3}} + memref.prefetch %0[%i], read, locality<5>, data : memref<10xf32> + return +} + +// ----- + +// incompatible memory space +func @invalid_memref_cast() { + %0 = memref.alloc() : memref<2x5xf32, 0> + // expected-error@+1 {{operand type 'memref<2x5xf32>' and result type 'memref<*xf32, 1>' are cast incompatible}} + %1 = memref.cast %0 : memref<2x5xf32, 0> to memref<*xf32, 1> + return +} + +// ----- + +// unranked to unranked +func @invalid_memref_cast() { + %0 = memref.alloc() : memref<2x5xf32, 0> + %1 = memref.cast %0 : memref<2x5xf32, 0> to memref<*xf32, 0> + // expected-error@+1 {{operand type 'memref<*xf32>' and result type 'memref<*xf32>' are cast incompatible}} + %2 = memref.cast %1 : memref<*xf32, 0> to memref<*xf32, 0> + return +} + +// ----- + +// alignment is not power of 2. +func @assume_alignment(%0: memref<4x4xf16>) { + // expected-error@+1 {{alignment must be power of 2}} + memref.assume_alignment %0, 12 : memref<4x4xf16> + return +} + +// ----- + +// 0 alignment value. 
+func @assume_alignment(%0: memref<4x4xf16>) { + // expected-error@+1 {{attribute 'alignment' failed to satisfy constraint: 32-bit signless integer attribute whose value is positive}} + memref.assume_alignment %0, 0 : memref<4x4xf16> + return +} + +// ----- + +"alloca_without_scoped_alloc_parent"() ( { + memref.alloca() : memref<1xf32> + // expected-error@-1 {{requires an ancestor op with AutomaticAllocationScope trait}} + return +}) : () -> () + +// ----- + +func @bad_alloc_wrong_dynamic_dim_count() { +^bb0: + %0 = arith.constant 7 : index + // Test alloc with wrong number of dynamic dimensions. + // expected-error@+1 {{dimension operand count does not equal memref dynamic dimension count}} + %1 = memref.alloc(%0)[%0] : memref<2x4xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1> + return +} + +// ----- + +func @bad_alloc_wrong_symbol_count() { +^bb0: + %0 = arith.constant 7 : index + // Test alloc with wrong number of symbols + // expected-error@+1 {{symbol operand count does not equal memref symbol count}} + %1 = memref.alloc(%0) : memref<2x?xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1> + return +} + +// ----- + +func @test_store_zero_results() { +^bb0: + %0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1> + %1 = arith.constant 0 : index + %2 = arith.constant 1 : index + %3 = memref.load %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1> + // Test that store returns zero results. 
+ %4 = memref.store %3, %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1> // expected-error {{cannot name an operation with no results}} + return +} + +// ----- + +func @test_store_zero_results2(%x: i32, %p: memref) { + "memref.store"(%x,%p) : (i32, memref) -> i32 // expected-error {{'memref.store' op requires zero results}} + return +} + +// ----- + +func @test_alloc_memref_map_rank_mismatch() { +^bb0: + // expected-error@+1 {{memref layout mismatch between rank and affine map: 2 != 1}} + %0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0) -> (d0)>, 1> + return +} diff --git a/mlir/test/Dialect/Tensor/invalid.mlir b/mlir/test/Dialect/Tensor/invalid.mlir --- a/mlir/test/Dialect/Tensor/invalid.mlir +++ b/mlir/test/Dialect/Tensor/invalid.mlir @@ -1,5 +1,13 @@ // RUN: mlir-opt <%s -split-input-file -verify-diagnostics +func @dim(%arg : tensor<1x?xf32>) { + %c2 = arith.constant 2 : index + tensor.dim %arg, %c2 : tensor<1x?xf32> // expected-error {{'tensor.dim' op index is out of range}} + return +} + +// ----- + func @tensor.cast_mismatching_constants(%arg0: tensor<1xf32>) { // expected-error@+1 {{operand type 'tensor<1xf32>' and result type 'tensor<2xf32>' are cast incompatible}} %0 = tensor.cast %arg0 : tensor<1xf32> to tensor<2xf32> @@ -138,3 +146,23 @@ tensor.reshape %buf(%shape) : (tensor<1xf32>, tensor<1xi32>) -> tensor<10xf32> } + +// ----- + +func @slice_wrong_dynamic_type(%t: tensor<8x16x4xf32>, %idx : index) { + // expected-error @+1 {{expected result type to be 'tensor<4x4x4xf32>' or a rank-reduced version. (mismatch of result sizes)}} + %0 = tensor.extract_slice %t[0, 2, 0][4, 4, 4][1, 1, 1] + : tensor<8x16x4xf32> to tensor + + return +} + +// ----- + +func @slice_wrong_static_type(%t: tensor<8x16x4xf32>, %idx : index) { + // expected-error @+1 {{expected result type to be 'tensor' or a rank-reduced version. 
(mismatch of result sizes)}} + %0 = tensor.extract_slice %t[0, 0, 0][%idx, 3, %idx][1, 1, 1] + : tensor<8x16x4xf32> to tensor<4x4x4xf32> + + return +} diff --git a/mlir/test/IR/invalid-ops.mlir b/mlir/test/IR/invalid-ops.mlir --- a/mlir/test/IR/invalid-ops.mlir +++ b/mlir/test/IR/invalid-ops.mlir @@ -1,13 +1,5 @@ // RUN: mlir-opt -allow-unregistered-dialect %s -split-input-file -verify-diagnostics -func @dim(%arg : tensor<1x?xf32>) { - %c2 = arith.constant 2 : index - tensor.dim %arg, %c2 : tensor<1x?xf32> // expected-error {{'tensor.dim' op index is out of range}} - return -} - -// ----- - func @rank(f32) { ^bb(%0: f32): "std.rank"(%0): (f32)->index // expected-error {{'std.rank' op operand #0 must be any memref or tensor type}} @@ -60,57 +52,6 @@ // ----- -func @bad_alloc_wrong_dynamic_dim_count() { -^bb0: - %0 = arith.constant 7 : index - // Test alloc with wrong number of dynamic dimensions. - // expected-error@+1 {{dimension operand count does not equal memref dynamic dimension count}} - %1 = memref.alloc(%0)[%0] : memref<2x4xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1> - return -} - -// ----- - -func @bad_alloc_wrong_symbol_count() { -^bb0: - %0 = arith.constant 7 : index - // Test alloc with wrong number of symbols - // expected-error@+1 {{symbol operand count does not equal memref symbol count}} - %1 = memref.alloc(%0) : memref<2x?xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1> - return -} - -// ----- - -func @test_store_zero_results() { -^bb0: - %0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1> - %1 = arith.constant 0 : index - %2 = arith.constant 1 : index - %3 = memref.load %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1> - // Test that store returns zero results. 
- %4 = memref.store %3, %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1> // expected-error {{cannot name an operation with no results}} - return -} - -// ----- - -func @test_store_zero_results2(%x: i32, %p: memref) { - "memref.store"(%x,%p) : (i32, memref) -> i32 // expected-error {{'memref.store' op requires zero results}} - return -} - -// ----- - -func @test_alloc_memref_map_rank_mismatch() { -^bb0: - // expected-error@+1 {{memref layout mismatch between rank and affine map: 2 != 1}} - %0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0) -> (d0)>, 1> - return -} - -// ----- - func @calls(%arg0: i32) { %x = call @calls() : () -> i32 // expected-error {{incorrect number of operands for callee}} return @@ -197,243 +138,6 @@ // ----- -func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) { - %0 = memref.alloc() : memref<2048xi8> - // expected-error@+1 {{expects 1 offset operand}} - %1 = memref.view %0[][%arg0, %arg1] - : memref<2048xi8> to memref - return -} - -// ----- - -func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) { - %0 = memref.alloc() : memref<2048xi8, affine_map<(d0) -> (d0 floordiv 8, d0 mod 8)>> - // expected-error@+1 {{unsupported map for base memref type}} - %1 = memref.view %0[%arg2][%arg0, %arg1] - : memref<2048xi8, affine_map<(d0) -> (d0 floordiv 8, d0 mod 8)>> to - memref (d0 * 4 + d1 + s0)>> - return -} - -// ----- - -func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) { - %0 = memref.alloc() : memref<2048xi8> - // expected-error@+1 {{unsupported map for result memref type}} - %1 = memref.view %0[%arg2][%arg0, %arg1] - : memref<2048xi8> to memref (d0, d1, s0)>> - return -} - -// ----- - -func @invalid_view(%arg0 : index, %arg1 : index, %arg2 : index) { - %0 = memref.alloc() : memref<2048xi8, 2> - // expected-error@+1 {{different memory spaces}} - %1 = memref.view %0[%arg2][%arg0, %arg1] : memref<2048xi8, 2> to memref - return -} - -// ----- - -func @invalid_view(%arg0 : index, %arg1 : 
index, %arg2 : index) { - %0 = memref.alloc() : memref<2048xi8> - // expected-error@+1 {{incorrect number of size operands for type}} - %1 = memref.view %0[%arg2][%arg0] - : memref<2048xi8> to memref - return -} - -// ----- - -func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) { - %0 = memref.alloc() : memref<8x16x4xf32> - // expected-error@+1 {{expected mixed offsets rank to match mixed sizes rank (2 vs 3) so the rank of the result type is well-formed}} - %1 = memref.subview %0[0, 0][2, 2, 2][1, 1, 1] - : memref<8x16x4xf32> to memref<8x16x4xf32> - return -} - -// ----- - -func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) { - %0 = memref.alloc() : memref<8x16x4xf32> - // expected-error@+1 {{expected mixed sizes rank to match mixed strides rank (3 vs 2) so the rank of the result type is well-formed}} - %1 = memref.subview %0[0, 0, 0][2, 2, 2][1, 1] - : memref<8x16x4xf32> to memref<8x16x4xf32> - return -} - -// ----- - -func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) { - %0 = memref.alloc() : memref<8x16x4xf32> - // expected-error@+1 {{expected mixed sizes rank to match mixed strides rank (3 vs 2) so the rank of the result type is well-formed}} - %1 = memref.reinterpret_cast %0 to offset: [0], sizes: [2, 2, 2], strides:[1, 1] - : memref<8x16x4xf32> to memref<8x16x4xf32> - return -} - -// ----- - -func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) { - %0 = memref.alloc() : memref<8x16x4xf32, offset: 0, strides: [64, 4, 1], 2> - // expected-error@+1 {{different memory spaces}} - %1 = memref.subview %0[0, 0, 0][%arg2, %arg2, %arg2][1, 1, 1] - : memref<8x16x4xf32, offset: 0, strides: [64, 4, 1], 2> to - memref<8x?x4xf32, affine_map<(d0, d1, d2)[s0] -> (d0 * s0 + d1 * 4 + d2)>> - return -} - -// ----- - -func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) { - %0 = memref.alloc() : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 + d1, d1 + d2, d2)>> - // expected-error@+1 {{is not 
strided}} - %1 = memref.subview %0[0, 0, 0][%arg2, %arg2, %arg2][1, 1, 1] - : memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 + d1, d1 + d2, d2)>> to - memref<8x?x4xf32, offset: 0, strides: [?, 4, 1]> - return -} - -// ----- - -func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) { - %0 = memref.alloc() : memref<8x16x4xf32> - // expected-error@+1 {{expected <= 3 offset values}} - %1 = memref.subview %0[%arg0, %arg1, 0, 0][%arg2, 0, 0, 0][1, 1, 1, 1] - : memref<8x16x4xf32> to - memref<8x?x4xf32, offset: 0, strides:[?, ?, 4]> - return -} - -// ----- - -func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) { - %0 = memref.alloc() : memref<8x16x4xf32> - // expected-error@+1 {{expected result element type to be 'f32'}} - %1 = memref.subview %0[0, 0, 0][8, 16, 4][1, 1, 1] - : memref<8x16x4xf32> to - memref<8x16x4xi32> - return -} - -// ----- - -func @invalid_subview(%arg0 : index, %arg1 : index, %arg2 : index) { - %0 = memref.alloc() : memref<8x16x4xf32> - // expected-error@+1 {{expected result rank to be smaller or equal to the source rank.}} - %1 = memref.subview %0[0, 0, 0][8, 16, 4][1, 1, 1] - : memref<8x16x4xf32> to - memref<8x16x4x3xi32> - return -} - -// ----- - -func @invalid_rank_reducing_subview(%arg0 : index, %arg1 : index, %arg2 : index) { - %0 = memref.alloc() : memref<8x16x4xf32> - // expected-error@+1 {{expected result type to be 'memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2)>>' or a rank-reduced version. (mismatch of result sizes)}} - %1 = memref.subview %0[0, 0, 0][8, 16, 4][1, 1, 1] - : memref<8x16x4xf32> to memref<16x4xf32> - return -} - -// ----- - -func @invalid_rank_reducing_subview(%arg0 : index, %arg1 : index, %arg2 : index) { - %0 = memref.alloc() : memref<8x16x4xf32> - // expected-error@+1 {{expected result type to be 'memref<8x16x4xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 4 + d2 + 8)>>' or a rank-reduced version. 
(mismatch of result sizes)}} - %1 = memref.subview %0[0, 2, 0][8, 16, 4][1, 1, 1] - : memref<8x16x4xf32> to memref<16x4xf32> - return -} - -// ----- - -func @invalid_rank_reducing_subview(%arg0 : memref, %arg1 : index, %arg2 : index) { - // expected-error@+1 {{expected result type to be 'memref (d0 * s1 + s0 + d1)>>' or a rank-reduced version. (mismatch of result sizes)}} - %0 = memref.subview %arg0[0, %arg1][%arg2, 1][1, 1] : memref to memref - return -} - -// ----- - -func @invalid_memref_cast(%arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]>) { - // expected-error@+1{{operand type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2)>>' and result type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 128 + d1 * 32 + d2 * 2)>>' are cast incompatible}} - %0 = memref.cast %arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]> to memref<12x4x16xf32, offset:0, strides:[128, 32, 2]> - return -} - -// ----- - -func @invalid_memref_cast(%arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]>) { - // expected-error@+1{{operand type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2)>>' and result type 'memref<12x4x16xf32, affine_map<(d0, d1, d2) -> (d0 * 64 + d1 * 16 + d2 + 16)>>' are cast incompatible}} - %0 = memref.cast %arg0 : memref<12x4x16xf32, offset:0, strides:[64, 16, 1]> to memref<12x4x16xf32, offset:16, strides:[64, 16, 1]> - return -} - -// ----- - -// incompatible element types -func @invalid_memref_cast() { - %0 = memref.alloc() : memref<2x5xf32, 0> - // expected-error@+1 {{operand type 'memref<2x5xf32>' and result type 'memref<*xi32>' are cast incompatible}} - %1 = memref.cast %0 : memref<2x5xf32, 0> to memref<*xi32> - return -} - -// ----- - -func @invalid_prefetch_rw(%i : index) { - %0 = memref.alloc() : memref<10xf32> - // expected-error@+1 {{rw specifier has to be 'read' or 'write'}} - memref.prefetch %0[%i], rw, locality<0>, data : memref<10xf32> - return -} - -// ----- - -func 
@invalid_prefetch_cache_type(%i : index) { - %0 = memref.alloc() : memref<10xf32> - // expected-error@+1 {{cache type has to be 'data' or 'instr'}} - memref.prefetch %0[%i], read, locality<0>, false : memref<10xf32> - return -} - -// ----- - -func @invalid_prefetch_locality_hint(%i : index) { - %0 = memref.alloc() : memref<10xf32> - // expected-error@+1 {{32-bit signless integer attribute whose minimum value is 0 whose maximum value is 3}} - memref.prefetch %0[%i], read, locality<5>, data : memref<10xf32> - return -} - -// ----- - -// incompatible memory space -func @invalid_memref_cast() { - %0 = memref.alloc() : memref<2x5xf32, 0> - // expected-error@+1 {{operand type 'memref<2x5xf32>' and result type 'memref<*xf32, 1>' are cast incompatible}} - %1 = memref.cast %0 : memref<2x5xf32, 0> to memref<*xf32, 1> - return -} - -// ----- - -// unranked to unranked -func @invalid_memref_cast() { - %0 = memref.alloc() : memref<2x5xf32, 0> - %1 = memref.cast %0 : memref<2x5xf32, 0> to memref<*xf32, 0> - // expected-error@+1 {{operand type 'memref<*xf32>' and result type 'memref<*xf32>' are cast incompatible}} - %2 = memref.cast %1 : memref<*xf32, 0> to memref<*xf32, 0> - return -} - -// ----- - func @atomic_rmw_idxs_rank_mismatch(%I: memref<16x10xf32>, %i : index, %val : f32) { // expected-error@+1 {{expects the number of subscripts to be equal to memref rank}} %x = atomic_rmw addf %val, %I[%i] : (f32, memref<16x10xf32>) -> f32 @@ -518,52 +222,6 @@ // ----- -// alignment is not power of 2. -func @assume_alignment(%0: memref<4x4xf16>) { - // expected-error@+1 {{alignment must be power of 2}} - memref.assume_alignment %0, 12 : memref<4x4xf16> - return -} - -// ----- - -// 0 alignment value. 
-func @assume_alignment(%0: memref<4x4xf16>) { - // expected-error@+1 {{attribute 'alignment' failed to satisfy constraint: 32-bit signless integer attribute whose value is positive}} - memref.assume_alignment %0, 0 : memref<4x4xf16> - return -} - -// ----- - -"alloca_without_scoped_alloc_parent"() ( { - memref.alloca() : memref<1xf32> - // expected-error@-1 {{requires an ancestor op with AutomaticAllocationScope trait}} - return -}) : () -> () - -// ----- - -func @slice_wrong_dynamic_type(%t: tensor<8x16x4xf32>, %idx : index) { - // expected-error @+1 {{expected result type to be 'tensor<4x4x4xf32>' or a rank-reduced version. (mismatch of result sizes)}} - %0 = tensor.extract_slice %t[0, 2, 0][4, 4, 4][1, 1, 1] - : tensor<8x16x4xf32> to tensor - - return -} - -// ----- - -func @slice_wrong_static_type(%t: tensor<8x16x4xf32>, %idx : index) { - // expected-error @+1 {{expected result type to be 'tensor' or a rank-reduced version. (mismatch of result sizes)}} - %0 = tensor.extract_slice %t[0, 0, 0][%idx, 3, %idx][1, 1, 1] - : tensor<8x16x4xf32> to tensor<4x4x4xf32> - - return -} - -// ----- - func @no_zero_bit_integer_attrs() { // expected-error @+1 {{integer constant out of range for attribute}} %x = "some.op"(){value = 0 : i0} : () -> f32 diff --git a/mlir/test/python/dialects/builtin.py b/mlir/test/python/dialects/builtin.py --- a/mlir/test/python/dialects/builtin.py +++ b/mlir/test/python/dialects/builtin.py @@ -175,7 +175,8 @@ # CHECK-LABEL: TEST: testFuncArgumentAccess @run def testFuncArgumentAccess(): - with Context(), Location.unknown(): + with Context() as ctx, Location.unknown(): + ctx.allow_unregistered_dialects = True module = Module.create() f32 = F32Type.get() f64 = F64Type.get() @@ -185,38 +186,38 @@ std.ReturnOp(func.arguments) func.arg_attrs = ArrayAttr.get([ DictAttr.get({ - "foo": StringAttr.get("bar"), - "baz": UnitAttr.get() + "custom_dialect.foo": StringAttr.get("bar"), + "custom_dialect.baz": UnitAttr.get() }), - DictAttr.get({"qux": 
ArrayAttr.get([])}) + DictAttr.get({"custom_dialect.qux": ArrayAttr.get([])}) ]) func.result_attrs = ArrayAttr.get([ - DictAttr.get({"res1": FloatAttr.get(f32, 42.0)}), - DictAttr.get({"res2": FloatAttr.get(f64, 256.0)}) + DictAttr.get({"custom_dialect.res1": FloatAttr.get(f32, 42.0)}), + DictAttr.get({"custom_dialect.res2": FloatAttr.get(f64, 256.0)}) ]) other = builtin.FuncOp("other_func", ([f32, f32], [])) with InsertionPoint(other.add_entry_block()): std.ReturnOp([]) other.arg_attrs = [ - DictAttr.get({"foo": StringAttr.get("qux")}), + DictAttr.get({"custom_dialect.foo": StringAttr.get("qux")}), DictAttr.get() ] - # CHECK: [{baz, foo = "bar"}, {qux = []}] + # CHECK: [{custom_dialect.baz, custom_dialect.foo = "bar"}, {custom_dialect.qux = []}] print(func.arg_attrs) - # CHECK: [{res1 = 4.200000e+01 : f32}, {res2 = 2.560000e+02 : f64}] + # CHECK: [{custom_dialect.res1 = 4.200000e+01 : f32}, {custom_dialect.res2 = 2.560000e+02 : f64}] print(func.result_attrs) # CHECK: func @some_func( - # CHECK: %[[ARG0:.*]]: f32 {baz, foo = "bar"}, - # CHECK: %[[ARG1:.*]]: f32 {qux = []}) -> - # CHECK: f32 {res1 = 4.200000e+01 : f32}, - # CHECK: f32 {res2 = 2.560000e+02 : f64}) + # CHECK: %[[ARG0:.*]]: f32 {custom_dialect.baz, custom_dialect.foo = "bar"}, + # CHECK: %[[ARG1:.*]]: f32 {custom_dialect.qux = []}) -> + # CHECK: f32 {custom_dialect.res1 = 4.200000e+01 : f32}, + # CHECK: f32 {custom_dialect.res2 = 2.560000e+02 : f64}) # CHECK: return %[[ARG0]], %[[ARG1]] : f32, f32 # # CHECK: func @other_func( - # CHECK: %{{.*}}: f32 {foo = "qux"}, + # CHECK: %{{.*}}: f32 {custom_dialect.foo = "qux"}, # CHECK: %{{.*}}: f32) print(module) diff --git a/mlir/test/python/dialects/linalg/opdsl/emit_structured_generic.py b/mlir/test/python/dialects/linalg/opdsl/emit_structured_generic.py --- a/mlir/test/python/dialects/linalg/opdsl/emit_structured_generic.py +++ b/mlir/test/python/dialects/linalg/opdsl/emit_structured_generic.py @@ -405,4 +405,7 @@ return non_default_op_name(input, 
outs=[init_result]) -print(module) +# TODO: Fix me! Conv and pooling ops above do not verify, which was uncovered +# when switching to more robust module verification. For now, reverting to the +# old behavior which does not verify on module print. +print(module.operation.get_asm(assume_verified=True)) diff --git a/mlir/test/python/dialects/linalg/ops.py b/mlir/test/python/dialects/linalg/ops.py --- a/mlir/test/python/dialects/linalg/ops.py +++ b/mlir/test/python/dialects/linalg/ops.py @@ -83,49 +83,6 @@ print(module) -# CHECK-LABEL: TEST: testStructuredOpOnTensors -@run -def testStructuredOpOnTensors(): - with Context() as ctx, Location.unknown(): - module = Module.create() - f32 = F32Type.get() - tensor_type = RankedTensorType.get((2, 3, 4), f32) - with InsertionPoint(module.body): - func = builtin.FuncOp( - name="matmul_test", - type=FunctionType.get( - inputs=[tensor_type, tensor_type], results=[tensor_type])) - with InsertionPoint(func.add_entry_block()): - lhs, rhs = func.entry_block.arguments - result = linalg.MatmulOp([lhs, rhs], results=[tensor_type]).result - std.ReturnOp([result]) - - # CHECK: %[[R:.*]] = linalg.matmul ins(%arg0, %arg1 : tensor<2x3x4xf32>, tensor<2x3x4xf32>) -> tensor<2x3x4xf32> - print(module) - - -# CHECK-LABEL: TEST: testStructuredOpOnBuffers -@run -def testStructuredOpOnBuffers(): - with Context() as ctx, Location.unknown(): - module = Module.create() - f32 = F32Type.get() - memref_type = MemRefType.get((2, 3, 4), f32) - with InsertionPoint(module.body): - func = builtin.FuncOp( - name="matmul_test", - type=FunctionType.get( - inputs=[memref_type, memref_type, memref_type], results=[])) - with InsertionPoint(func.add_entry_block()): - lhs, rhs, result = func.entry_block.arguments - # TODO: prperly hook up the region. 
- linalg.MatmulOp([lhs, rhs], outputs=[result]) - std.ReturnOp([]) - - # CHECK: linalg.matmul ins(%arg0, %arg1 : memref<2x3x4xf32>, memref<2x3x4xf32>) outs(%arg2 : memref<2x3x4xf32>) - print(module) - - # CHECK-LABEL: TEST: testNamedStructuredOpCustomForm @run def testNamedStructuredOpCustomForm(): diff --git a/mlir/test/python/dialects/shape.py b/mlir/test/python/dialects/shape.py --- a/mlir/test/python/dialects/shape.py +++ b/mlir/test/python/dialects/shape.py @@ -22,7 +22,8 @@ @builtin.FuncOp.from_py_func( RankedTensorType.get((12, -1), f32)) def const_shape_tensor(arg): - return shape.ConstShapeOp(DenseElementsAttr.get(np.array([10, 20]))) + return shape.ConstShapeOp( + DenseElementsAttr.get(np.array([10, 20]), type=IndexType.get())) # CHECK-LABEL: func @const_shape_tensor(%arg0: tensor<12x?xf32>) # CHECK: shape.const_shape [10, 20] : tensor<2xindex> diff --git a/mlir/test/python/dialects/std.py b/mlir/test/python/dialects/std.py --- a/mlir/test/python/dialects/std.py +++ b/mlir/test/python/dialects/std.py @@ -78,8 +78,11 @@ @constructAndPrintInModule def testFunctionCalls(): foo = builtin.FuncOp("foo", ([], [])) + foo.sym_visibility = StringAttr.get("private") bar = builtin.FuncOp("bar", ([], [IndexType.get()])) + bar.sym_visibility = StringAttr.get("private") qux = builtin.FuncOp("qux", ([], [F32Type.get()])) + qux.sym_visibility = StringAttr.get("private") with InsertionPoint(builtin.FuncOp("caller", ([], [])).add_entry_block()): std.CallOp(foo, []) @@ -88,9 +91,9 @@ std.ReturnOp([]) -# CHECK: func @foo() -# CHECK: func @bar() -> index -# CHECK: func @qux() -> f32 +# CHECK: func private @foo() +# CHECK: func private @bar() -> index +# CHECK: func private @qux() -> f32 # CHECK: func @caller() { # CHECK: call @foo() : () -> () # CHECK: %0 = call @bar() : () -> index diff --git a/mlir/test/python/ir/module.py b/mlir/test/python/ir/module.py --- a/mlir/test/python/ir/module.py +++ b/mlir/test/python/ir/module.py @@ -8,11 +8,13 @@ f() gc.collect() assert 
Context._get_live_count() == 0 + return f # Verify successful parse. # CHECK-LABEL: TEST: testParseSuccess # CHECK: module @successfulParse +@run def testParseSuccess(): ctx = Context() module = Module.parse(r"""module @successfulParse {}""", ctx) @@ -23,12 +25,11 @@ module.dump() # Just outputs to stderr. Verifies that it functions. print(str(module)) -run(testParseSuccess) - # Verify parse error. # CHECK-LABEL: TEST: testParseError # CHECK: testParseError: Unable to parse module assembly (see diagnostics) +@run def testParseError(): ctx = Context() try: @@ -38,12 +39,11 @@ else: print("Exception not produced") -run(testParseError) - # Verify successful parse. # CHECK-LABEL: TEST: testCreateEmpty # CHECK: module { +@run def testCreateEmpty(): ctx = Context() loc = Location.unknown(ctx) @@ -53,8 +53,6 @@ gc.collect() print(str(module)) -run(testCreateEmpty) - # Verify round-trip of ASM that contains unicode. # Note that this does not test that the print path converts unicode properly @@ -62,6 +60,7 @@ # CHECK-LABEL: TEST: testRoundtripUnicode # CHECK: func private @roundtripUnicode() # CHECK: foo = "\F0\9F\98\8A" +@run def testRoundtripUnicode(): ctx = Context() module = Module.parse(r""" @@ -69,11 +68,28 @@ """, ctx) print(str(module)) -run(testRoundtripUnicode) + +# Verify round-trip of ASM that contains unicode. +# Note that this does not test that the print path converts unicode properly +# because MLIR asm always normalizes it to the hex encoding. +# CHECK-LABEL: TEST: testRoundtripBinary +# CHECK: func private @roundtripUnicode() +# CHECK: foo = "\F0\9F\98\8A" +@run +def testRoundtripBinary(): + with Context(): + module = Module.parse(r""" + func private @roundtripUnicode() attributes { foo = "😊" } + """) + binary_asm = module.operation.get_asm(binary=True) + assert isinstance(binary_asm, bytes) + module = Module.parse(binary_asm) + print(module) # Tests that module.operation works and correctly interns instances. 
# CHECK-LABEL: TEST: testModuleOperation +@run def testModuleOperation(): ctx = Context() module = Module.parse(r"""module @successfulParse {}""", ctx) @@ -101,10 +117,9 @@ assert ctx._get_live_operation_count() == 0 assert ctx._get_live_module_count() == 0 -run(testModuleOperation) - # CHECK-LABEL: TEST: testModuleCapsule +@run def testModuleCapsule(): ctx = Context() module = Module.parse(r"""module @successfulParse {}""", ctx) @@ -122,5 +137,3 @@ gc.collect() assert ctx._get_live_module_count() == 0 - -run(testModuleCapsule) diff --git a/mlir/test/python/ir/operation.py b/mlir/test/python/ir/operation.py --- a/mlir/test/python/ir/operation.py +++ b/mlir/test/python/ir/operation.py @@ -630,21 +630,50 @@ print(module.body.operations[2]) -# CHECK-LABEL: TEST: testPrintInvalidOperation +def create_invalid_operation(): + # This module has two region and is invalid verify that we fallback + # to the generic printer for safety. + op = Operation.create("builtin.module", regions=2) + op.regions[0].blocks.append() + return op + +# CHECK-LABEL: TEST: testInvalidOperationStrSoftFails @run -def testPrintInvalidOperation(): +def testInvalidOperationStrSoftFails(): ctx = Context() with Location.unknown(ctx): - module = Operation.create("builtin.module", regions=2) - # This module has two region and is invalid verify that we fallback - # to the generic printer for safety. - block = module.regions[0].blocks.append() + invalid_op = create_invalid_operation() + # Verify that we fallback to the generic printer for safety. 
# CHECK: // Verification failed, printing generic form # CHECK: "builtin.module"() ( { # CHECK: }) : () -> () - print(module) + print(invalid_op) # CHECK: .verify = False - print(f".verify = {module.operation.verify()}") + print(f".verify = {invalid_op.operation.verify()}") + + +# CHECK-LABEL: TEST: testInvalidModuleStrSoftFails +@run +def testInvalidModuleStrSoftFails(): + ctx = Context() + with Location.unknown(ctx): + module = Module.create() + with InsertionPoint(module.body): + invalid_op = create_invalid_operation() + # Verify that we fallback to the generic printer for safety. + # CHECK: // Verification failed, printing generic form + print(module) + + +# CHECK-LABEL: TEST: testInvalidOperationGetAsmBinarySoftFails +@run +def testInvalidOperationGetAsmBinarySoftFails(): + ctx = Context() + with Location.unknown(ctx): + invalid_op = create_invalid_operation() + # Verify that we fallback to the generic printer for safety. + # CHECK: b'// Verification failed, printing generic form\n + print(invalid_op.get_asm(binary=True)) # CHECK-LABEL: TEST: testCreateWithInvalidAttributes diff --git a/mlir/unittests/Conversion/PDLToPDLInterp/RootOrderingTest.cpp b/mlir/unittests/Conversion/PDLToPDLInterp/RootOrderingTest.cpp --- a/mlir/unittests/Conversion/PDLToPDLInterp/RootOrderingTest.cpp +++ b/mlir/unittests/Conversion/PDLToPDLInterp/RootOrderingTest.cpp @@ -7,12 +7,13 @@ //===----------------------------------------------------------------------===// #include "../lib/Conversion/PDLToPDLInterp/RootOrdering.h" -#include "mlir/Dialect/StandardOps/IR/Ops.h" +#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h" #include "mlir/IR/Builders.h" #include "mlir/IR/MLIRContext.h" #include "gtest/gtest.h" using namespace mlir; +using namespace mlir::arith; using namespace mlir::pdl_to_pdl_interp; namespace { @@ -29,16 +30,22 @@ class RootOrderingTest : public ::testing::Test { protected: RootOrderingTest() { - context.loadDialect(); + context.loadDialect(); createValues(); } + 
~RootOrderingTest() { + for (unsigned i = 0; i < 4; ++i) { + assert(v[i] && v[i].getDefiningOp() && "empty value / defining op"); + v[i].getDefiningOp()->erase(); + } + } + /// Creates the test values. void createValues() { OpBuilder builder(&context); - for (int i = 0; i < 4; ++i) - v[i] = builder.create(builder.getUnknownLoc(), - builder.getI32IntegerAttr(i)); + for (unsigned i = 0; i < 4; ++i) + v[i] = builder.create(builder.getUnknownLoc(), i, 32); } /// Checks that optimal branching on graph has the given cost and diff --git a/mlir/unittests/Dialect/CMakeLists.txt b/mlir/unittests/Dialect/CMakeLists.txt --- a/mlir/unittests/Dialect/CMakeLists.txt +++ b/mlir/unittests/Dialect/CMakeLists.txt @@ -9,3 +9,4 @@ add_subdirectory(Quant) add_subdirectory(SparseTensor) add_subdirectory(SPIRV) +add_subdirectory(Utils) diff --git a/mlir/unittests/Dialect/Utils/StructuredOpsUtilsTest.cpp b/mlir/unittests/Dialect/Utils/StructuredOpsUtilsTest.cpp --- a/mlir/unittests/Dialect/Utils/StructuredOpsUtilsTest.cpp +++ b/mlir/unittests/Dialect/Utils/StructuredOpsUtilsTest.cpp @@ -110,19 +110,6 @@ EXPECT_THAT(maps, Not(Truly(isRowMajorMatmul))); } -TEST(isRowMajorMatmul, TooFewDims) { - MLIRContext context; - - AffineExpr m, n, k; - bindDims(&context, m, n, k); - auto mapA = AffineMapAttr::get(AffineMap::get(3, 0, {m, k}, &context)); - auto mapB = AffineMapAttr::get(AffineMap::get(2, 0, {k, n}, &context)); - auto mapC = AffineMapAttr::get(AffineMap::get(3, 0, {m, n}, &context)); - auto maps = ArrayAttr::get(&context, {mapA, mapB, mapC}); - - EXPECT_THAT(maps, Not(Truly(isRowMajorMatmul))); -} - TEST(isRowMajorMatmul, TooFewOutputs) { MLIRContext context;