Index: CMakeLists.txt =================================================================== --- CMakeLists.txt +++ CMakeLists.txt @@ -534,7 +534,7 @@ endif() option(LLVM_LINK_LLVM_DYLIB "Link tools against the libllvm dynamic library" OFF) if(MSVC) - option(LLVM_BUILD_LLVM_C_DYLIB "Build LLVM-C.dll (Windows only)" ON) + option(LLVM_BUILD_LLVM_C_DYLIB "Build LLVM-C.dll (Windows only)" OFF) else() option(LLVM_BUILD_LLVM_C_DYLIB "Build libllvm-c re-export library (Darwin only)" OFF) endif() Index: cmake/modules/AddLLVM.cmake =================================================================== --- cmake/modules/AddLLVM.cmake +++ cmake/modules/AddLLVM.cmake @@ -380,7 +380,7 @@ function(llvm_add_library name) cmake_parse_arguments(ARG "MODULE;SHARED;STATIC;OBJECT;DISABLE_LLVM_LINK_LLVM_DYLIB;SONAME;NO_INSTALL_RPATH" - "OUTPUT_NAME;PLUGIN_TOOL" + "OUTPUT_NAME;PLUGIN_TOOL;ENTITLEMENTS" "ADDITIONAL_HEADERS;DEPENDS;LINK_COMPONENTS;LINK_LIBS;OBJLIBS" ${ARGN}) list(APPEND LLVM_COMMON_DEPENDS ${ARG_DEPENDS}) @@ -584,7 +584,7 @@ if(ARG_SHARED OR ARG_MODULE) llvm_externalize_debuginfo(${name}) - llvm_codesign(${name}) + llvm_codesign(${name} ENTITLEMENTS ${ARG_ENTITLEMENTS}) endif() endfunction() Index: docs/CommandGuide/llvm-exegesis.rst =================================================================== --- docs/CommandGuide/llvm-exegesis.rst +++ docs/CommandGuide/llvm-exegesis.rst @@ -10,13 +10,13 @@ ----------- :program:`llvm-exegesis` is a benchmarking tool that uses information available -in LLVM to measure host machine instruction characteristics like latency or port -decomposition. +in LLVM to measure host machine instruction characteristics like latency, +throughput, or port decomposition. Given an LLVM opcode name and a benchmarking mode, :program:`llvm-exegesis` generates a code snippet that makes execution as serial (resp. as parallel) as -possible so that we can measure the latency (resp. uop decomposition) of the -instruction. 
+possible so that we can measure the latency (resp. inverse throughput/uop decomposition) +of the instruction. The code snippet is jitted and executed on the host subtarget. The time taken (resp. resource usage) is measured using hardware performance counters. The result is printed out as YAML to the standard output. @@ -37,11 +37,13 @@ $ llvm-exegesis -mode=latency -opcode-name=ADD64rr -Measuring the uop decomposition of an instruction works similarly: +Measuring the uop decomposition or inverse throughput of an instruction works similarly: .. code-block:: bash $ llvm-exegesis -mode=uops -opcode-name=ADD64rr + $ llvm-exegesis -mode=inverse_throughput -opcode-name=ADD64rr + The output is a YAML document (the default is to write to stdout, but you can redirect the output to a file using `-benchmarks-file`): @@ -186,7 +188,7 @@ Specify the custom code snippet to measure. See example 2 for details. Either `opcode-index`, `opcode-name` or `snippets-file` must be set. -.. option:: -mode=[latency|uops|analysis] +.. option:: -mode=[latency|uops|inverse_throughput|analysis] Specify the run mode. @@ -197,8 +199,8 @@ .. option:: -benchmarks-file= - File to read (`analysis` mode) or write (`latency`/`uops` modes) benchmark - results. "-" uses stdin/stdout. + File to read (`analysis` mode) or write (`latency`/`uops`/`inverse_throughput` + modes) benchmark results. "-" uses stdin/stdout. .. option:: -analysis-clusters-output-file= Index: docs/CommandGuide/llvm-symbolizer.rst =================================================================== --- docs/CommandGuide/llvm-symbolizer.rst +++ docs/CommandGuide/llvm-symbolizer.rst @@ -126,8 +126,7 @@ .. option:: -adjust-vma= Add the specified offset to object file addresses when performing lookups. This - can be used to simplify lookups when the object is not loaded at a dynamically - relocated address. + can be used to perform lookups as if the object were relocated by the offset. 
EXIT STATUS ----------- Index: docs/CompileCudaWithLLVM.rst =================================================================== --- docs/CompileCudaWithLLVM.rst +++ docs/CompileCudaWithLLVM.rst @@ -143,9 +143,9 @@ ---------------------------- In clang, ``math.h`` and ``cmath`` are available and `pass -`_ +`_ `tests -`_ +`_ adapted from libc++'s test suite. In nvcc ``math.h`` and ``cmath`` are mostly available. Versions of ``::foof`` Index: docs/HowToSubmitABug.rst =================================================================== --- docs/HowToSubmitABug.rst +++ docs/HowToSubmitABug.rst @@ -84,8 +84,8 @@ ------------------------------ If you find that a bug crashes in the optimizer, compile your test-case to a -``.bc`` file by passing "``-emit-llvm -O0 -c -o foo.bc``". -Then run: +``.bc`` file by passing "``-emit-llvm -O1 -Xclang -disable-llvm-passes -c -o +foo.bc``". Then run: .. code-block:: bash Index: docs/LibFuzzer.rst =================================================================== --- docs/LibFuzzer.rst +++ docs/LibFuzzer.rst @@ -544,7 +544,7 @@ Currently, there is no simple way to run both fuzzing engines in parallel while sharing the same corpus dir. You may also use AFL on your target function ``LLVMFuzzerTestOneInput``: -see an example `here `__. +see an example `here `__. How good is my fuzzer? ---------------------- @@ -741,7 +741,7 @@ .. _AddressSanitizer: http://clang.llvm.org/docs/AddressSanitizer.html .. _LeakSanitizer: http://clang.llvm.org/docs/LeakSanitizer.html .. _Heartbleed: http://en.wikipedia.org/wiki/Heartbleed -.. _FuzzerInterface.h: https://github.com/llvm-mirror/compiler-rt/blob/master/lib/fuzzer/FuzzerInterface.h +.. _FuzzerInterface.h: https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/fuzzer/FuzzerInterface.h .. _3.7.0: http://llvm.org/releases/3.7.0/docs/LibFuzzer.html .. _building Clang from trunk: http://clang.llvm.org/get_started.html .. 
_MemorySanitizer: http://clang.llvm.org/docs/MemorySanitizer.html Index: docs/TestSuiteGuide.md =================================================================== --- docs/TestSuiteGuide.md +++ docs/TestSuiteGuide.md @@ -27,7 +27,7 @@ 2. Check out the `test-suite` module with: ```bash - % svn co http://llvm.org/svn/llvm-project/test-suite/trunk test-suite + % git clone https://github.com/llvm/llvm-test-suite.git test-suite ``` 3. Create a build directory and use CMake to configure the suite. Use the Index: docs/TestSuiteMakefileGuide.rst =================================================================== --- docs/TestSuiteMakefileGuide.rst +++ docs/TestSuiteMakefileGuide.rst @@ -14,14 +14,14 @@ To run the test suite, you need to use the following steps: -#. ``cd`` into the ``llvm/projects`` directory in your source tree. #. Check out the ``test-suite`` module with: .. code-block:: bash - % svn co http://llvm.org/svn/llvm-project/test-suite/trunk test-suite + % git clone https://github.com/llvm/llvm-test-suite.git test-suite - This will get the test suite into ``llvm/projects/test-suite``. +#. FIXME: these directions are outdated and won't work. Figure out + what the correct thing to do is, and write it down here. #. Configure and build ``llvm``. Index: docs/TestingGuide.rst =================================================================== --- docs/TestingGuide.rst +++ docs/TestingGuide.rst @@ -165,15 +165,9 @@ Debugging Information tests --------------------------- -To run debugging information tests simply checkout the tests inside -clang/test directory. - -.. code-block:: bash - - % cd clang/test - % svn co http://llvm.org/svn/llvm-project/debuginfo-tests/trunk debuginfo-tests - -These tests are already set up to run as part of clang regression tests. +To run debugging information tests simply add the ``debuginfo-tests`` +project to your ``LLVM_ENABLE_PROJECTS`` define on the cmake +command-line. 
Regression test structure ========================= Index: include/llvm/Analysis/ScalarEvolution.h =================================================================== --- include/llvm/Analysis/ScalarEvolution.h +++ include/llvm/Analysis/ScalarEvolution.h @@ -1289,7 +1289,7 @@ using EdgeExitInfo = std::pair; /// Initialize BackedgeTakenInfo from a list of exact exit counts. - BackedgeTakenInfo(SmallVectorImpl &&ExitCounts, bool Complete, + BackedgeTakenInfo(ArrayRef ExitCounts, bool Complete, const SCEV *MaxCount, bool MaxOrZero); /// Test whether this BackedgeTakenInfo contains any computed information, @@ -1842,15 +1842,15 @@ bool NoWrap); /// Get add expr already created or create a new one. - const SCEV *getOrCreateAddExpr(SmallVectorImpl &Ops, + const SCEV *getOrCreateAddExpr(ArrayRef Ops, SCEV::NoWrapFlags Flags); /// Get mul expr already created or create a new one. - const SCEV *getOrCreateMulExpr(SmallVectorImpl &Ops, + const SCEV *getOrCreateMulExpr(ArrayRef Ops, SCEV::NoWrapFlags Flags); // Get addrec expr already created or create a new one. - const SCEV *getOrCreateAddRecExpr(SmallVectorImpl &Ops, + const SCEV *getOrCreateAddRecExpr(ArrayRef Ops, const Loop *L, SCEV::NoWrapFlags Flags); /// Return x if \p Val is f(x) where f is a 1-1 function. 
Index: include/llvm/CodeGen/GlobalISel/LegalizerHelper.h =================================================================== --- include/llvm/CodeGen/GlobalISel/LegalizerHelper.h +++ include/llvm/CodeGen/GlobalISel/LegalizerHelper.h @@ -134,6 +134,9 @@ LegalizeResult fewerElementsVectorCmp(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy); + LegalizeResult + fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy); + LegalizeResult fewerElementsVectorLoadStore(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy); Index: include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h =================================================================== --- include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h +++ include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h @@ -229,6 +229,11 @@ return *State.MF; } + const MachineFunction &getMF() const { + assert(State.MF && "MachineFunction is not set"); + return *State.MF; + } + /// Getter for DebugLoc const DebugLoc &getDL() { return State.DL; } @@ -457,6 +462,15 @@ /// \return The newly created instruction. MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op); + /// \return The opcode of the extension the target wants to use for boolean + /// values. + unsigned getBoolExtOp(bool IsVec, bool IsFP) const; + + // Build and insert \p Res = G_ANYEXT \p Op, \p Res = G_SEXT \p Op, or \p Res + // = G_ZEXT \p Op depending on how the target wants to extend boolean values. 
+ MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op, + bool IsFP); + /// Build and insert \p Res = G_ZEXT \p Op /// /// G_ZEXT produces a register of the specified width, with bits 0 to Index: include/llvm/DebugInfo/PDB/Native/DbiStream.h =================================================================== --- include/llvm/DebugInfo/PDB/Native/DbiStream.h +++ include/llvm/DebugInfo/PDB/Native/DbiStream.h @@ -10,6 +10,7 @@ #define LLVM_DEBUGINFO_PDB_RAW_PDBDBISTREAM_H #include "llvm/DebugInfo/CodeView/DebugSubsection.h" +#include "llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h" #include "llvm/DebugInfo/MSF/MappedBlockStream.h" #include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h" #include "llvm/DebugInfo/PDB/Native/DbiModuleList.h" @@ -79,7 +80,10 @@ FixedStreamArray getSectionHeaders() const; - FixedStreamArray getFpoRecords(); + bool hasOldFpoRecords() const; + FixedStreamArray getOldFpoRecords() const; + bool hasNewFpoRecords() const; + const codeview::DebugFrameDataSubsectionRef &getNewFpoRecords() const; FixedStreamArray getSectionMap() const; void visitSectionContributions(ISectionContribVisitor &Visitor) const; @@ -90,7 +94,11 @@ Error initializeSectionContributionData(); Error initializeSectionHeadersData(PDBFile *Pdb); Error initializeSectionMapData(); - Error initializeFpoRecords(PDBFile *Pdb); + Error initializeOldFpoRecords(PDBFile *Pdb); + Error initializeNewFpoRecords(PDBFile *Pdb); + + Expected> + createIndexedStreamForHeaderType(PDBFile *Pdb, DbgHeaderType Type) const; std::unique_ptr Stream; @@ -116,8 +124,11 @@ std::unique_ptr SectionHeaderStream; FixedStreamArray SectionHeaders; - std::unique_ptr FpoStream; - FixedStreamArray FpoRecords; + std::unique_ptr OldFpoStream; + FixedStreamArray OldFpoRecords; + + std::unique_ptr NewFpoStream; + codeview::DebugFrameDataSubsectionRef NewFpoRecords; const DbiStreamHeader *Header; }; Index: include/llvm/IR/CallSite.h =================================================================== 
--- include/llvm/IR/CallSite.h +++ include/llvm/IR/CallSite.h @@ -243,11 +243,11 @@ IterTy data_operands_begin() const { assert(getInstruction() && "Not a call or invoke instruction!"); - return (*this)->op_begin(); + return cast(getInstruction())->data_operands_begin(); } IterTy data_operands_end() const { assert(getInstruction() && "Not a call or invoke instruction!"); - return (*this)->op_end() - (isCall() ? 1 : 3); + return cast(getInstruction())->data_operands_end(); } iterator_range data_ops() const { return make_range(data_operands_begin(), data_operands_end()); @@ -579,13 +579,9 @@ #undef CALLSITE_DELEGATE_SETTER void getOperandBundlesAsDefs(SmallVectorImpl &Defs) const { - const Instruction *II = getInstruction(); // Since this is actually a getter that "looks like" a setter, don't use the // above macros to avoid confusion. - if (isCall()) - cast(II)->getOperandBundlesAsDefs(Defs); - else - cast(II)->getOperandBundlesAsDefs(Defs); + cast(getInstruction())->getOperandBundlesAsDefs(Defs); } /// Determine whether this data operand is not captured. Index: include/llvm/IR/InstrTypes.h =================================================================== --- include/llvm/IR/InstrTypes.h +++ include/llvm/IR/InstrTypes.h @@ -1232,6 +1232,9 @@ (ID << 2)); } + /// Check if this call is an inline asm statement. + bool isInlineAsm() const { return isa(getCalledOperand()); } + /// \name Attribute API /// /// These methods access and modify attributes on this call (including Index: include/llvm/IR/Instructions.h =================================================================== --- include/llvm/IR/Instructions.h +++ include/llvm/IR/Instructions.h @@ -1703,9 +1703,6 @@ addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice); } - /// Check if this call is an inline asm statement. 
- bool isInlineAsm() const { return isa(getCalledOperand()); } - // Methods for support type inquiry through isa, cast, and dyn_cast: static bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Call; Index: include/llvm/IR/IntrinsicsWebAssembly.td =================================================================== --- include/llvm/IR/IntrinsicsWebAssembly.td +++ include/llvm/IR/IntrinsicsWebAssembly.td @@ -49,11 +49,12 @@ [IntrHasSideEffects]>; def int_wasm_get_ehselector : Intrinsic<[llvm_i32_ty], [llvm_token_ty], [IntrHasSideEffects]>; - -// wasm.catch returns the pointer to the exception object caught by wasm 'catch' -// instruction. -def int_wasm_catch : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], - [IntrHasSideEffects]>; +// This is the same as llvm.wasm.get.exception except that it does not take a +// token operand. This is only for instruction selection purposes. +// TODO Remove this redundant intrinsic and do custom lowering on +// int_wasm_get_exception instead +def int_wasm_extract_exception : Intrinsic<[llvm_ptr_ty], [], + [IntrHasSideEffects]>; // WebAssembly EH must maintain the landingpads in the order assigned to them // by WasmEHPrepare pass to generate landingpad table in EHStreamer. This is Index: include/llvm/MC/MCAsmBackend.h =================================================================== --- include/llvm/MC/MCAsmBackend.h +++ include/llvm/MC/MCAsmBackend.h @@ -87,6 +87,22 @@ return false; } + /// Hook to check if extra nop bytes must be inserted for alignment directive. + /// For some targets this may be necessary in order to support linker + /// relaxation. The number of bytes to insert is returned in Size. 
+ virtual bool shouldInsertExtraNopBytesForCodeAlign(const MCAlignFragment &AF, + unsigned &Size) { + return false; + } + + /// Hook which indicates if the target requires a fixup to be generated when + /// handling an align directive in an executable section + virtual bool shouldInsertFixupForCodeAlign(MCAssembler &Asm, + const MCAsmLayout &Layout, + MCAlignFragment &AF) { + return false; + } + /// Apply the \p Value for given \p Fixup into the provided data fragment, at /// the offset specified by the fixup and following the fixup kind as /// appropriate. Errors (such as an out of range fixup value) should be Index: include/llvm/Support/CodeGen.h =================================================================== --- include/llvm/Support/CodeGen.h +++ include/llvm/Support/CodeGen.h @@ -49,10 +49,10 @@ // Code generation optimization level. namespace CodeGenOpt { enum Level { - None, // -O0 - Less, // -O1 - Default, // -O2, -Os - Aggressive // -O3 + None = 0, // -O0 + Less = 1, // -O1 + Default = 2, // -O2, -Os + Aggressive = 3 // -O3 }; } Index: lib/Analysis/DependenceAnalysis.cpp =================================================================== --- lib/Analysis/DependenceAnalysis.cpp +++ lib/Analysis/DependenceAnalysis.cpp @@ -3509,7 +3509,7 @@ // to either Separable or Coupled). // // Next, we consider 1 and 2. The intersection of the GroupLoops is empty. - // Next, 1 and 3. The intersectionof their GroupLoops = {2}, not empty, + // Next, 1 and 3. The intersection of their GroupLoops = {2}, not empty, // so Pair[3].Group = {0, 1, 3} and Done = false. // // Next, we compare 2 against 3. The intersection of the GroupLoops is empty. Index: lib/Analysis/LoopAccessAnalysis.cpp =================================================================== --- lib/Analysis/LoopAccessAnalysis.cpp +++ lib/Analysis/LoopAccessAnalysis.cpp @@ -1013,7 +1013,7 @@ return 0; } - // The accesss function must stride over the innermost loop. 
+ // The access function must stride over the innermost loop. if (Lp != AR->getLoop()) { LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop " << *Ptr << " SCEV: " << *AR << "\n"); @@ -1085,7 +1085,7 @@ if (Assume) { // We can avoid this case by adding a run-time check. LLVM_DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either " - << "inbouds or in address space 0 may wrap:\n" + << "inbounds or in address space 0 may wrap:\n" << "LAA: Pointer: " << *Ptr << "\n" << "LAA: SCEV: " << *AR << "\n" << "LAA: Added an overflow assumption\n"); @@ -1345,7 +1345,7 @@ // where Step is the absolute stride of the memory accesses in bytes, // then there is no dependence. // - // Ratioanle: + // Rationale: // We basically want to check if the absolute distance (|Dist/Step|) // is >= the loop iteration count (or > BackedgeTakenCount). // This is equivalent to the Strong SIV Test (Practical Dependence Testing, @@ -1368,7 +1368,7 @@ // The dependence distance can be positive/negative, so we sign extend Dist; // The multiplication of the absolute stride in bytes and the - // backdgeTakenCount is non-negative, so we zero extend Product. + // backedgeTakenCount is non-negative, so we zero extend Product. if (DistTypeSize > ProductTypeSize) CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType()); else Index: lib/Analysis/ScalarEvolution.cpp =================================================================== --- lib/Analysis/ScalarEvolution.cpp +++ lib/Analysis/ScalarEvolution.cpp @@ -2294,7 +2294,7 @@ // can't-overflow flags for the operation if possible. 
static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, - const SmallVectorImpl &Ops, + const ArrayRef Ops, SCEV::NoWrapFlags Flags) { using namespace std::placeholders; @@ -2742,7 +2742,7 @@ } const SCEV * -ScalarEvolution::getOrCreateAddExpr(SmallVectorImpl &Ops, +ScalarEvolution::getOrCreateAddExpr(ArrayRef Ops, SCEV::NoWrapFlags Flags) { FoldingSetNodeID ID; ID.AddInteger(scAddExpr); @@ -2764,7 +2764,7 @@ } const SCEV * -ScalarEvolution::getOrCreateAddRecExpr(SmallVectorImpl &Ops, +ScalarEvolution::getOrCreateAddRecExpr(ArrayRef Ops, const Loop *L, SCEV::NoWrapFlags Flags) { FoldingSetNodeID ID; ID.AddInteger(scAddRecExpr); @@ -2787,7 +2787,7 @@ } const SCEV * -ScalarEvolution::getOrCreateMulExpr(SmallVectorImpl &Ops, +ScalarEvolution::getOrCreateMulExpr(ArrayRef Ops, SCEV::NoWrapFlags Flags) { FoldingSetNodeID ID; ID.AddInteger(scMulExpr); @@ -3891,7 +3891,7 @@ } /// Check whether value has nuw/nsw/exact set but SCEV does not. -/// TODO: In reality it is better to check the poison recursevely +/// TODO: In reality it is better to check the poison recursively /// but this is better than nothing. static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) { if (auto *I = dyn_cast(V)) { @@ -6971,8 +6971,8 @@ /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each /// computable exit into a persistent ExitNotTakenInfo array. 
ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo( - SmallVectorImpl - &&ExitCounts, + ArrayRef + ExitCounts, bool Complete, const SCEV *MaxCount, bool MaxOrZero) : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) { using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo; Index: lib/Analysis/ValueTracking.cpp =================================================================== --- lib/Analysis/ValueTracking.cpp +++ lib/Analysis/ValueTracking.cpp @@ -4354,7 +4354,7 @@ } bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) { - // TODO: This is slightly consdervative for invoke instruction since exiting + // TODO: This is slightly conservative for invoke instruction since exiting // via an exception *is* normal control for them. for (auto I = BB->begin(), E = BB->end(); I != E; ++I) if (!isGuaranteedToTransferExecutionToSuccessor(&*I)) Index: lib/CodeGen/AsmPrinter/WasmException.cpp =================================================================== --- lib/CodeGen/AsmPrinter/WasmException.cpp +++ lib/CodeGen/AsmPrinter/WasmException.cpp @@ -18,10 +18,10 @@ using namespace llvm; void WasmException::endModule() { - // This is the symbol used in 'throw' and 'if_except' instruction to denote + // This is the symbol used in 'throw' and 'br_on_exn' instruction to denote // this is a C++ exception. This symbol has to be emitted somewhere once in // the module. Check if the symbol has already been created, i.e., we have at - // least one 'throw' or 'if_except' instruction in the module, and emit the + // least one 'throw' or 'br_on_exn' instruction in the module, and emit the // symbol only if so. 
SmallString<60> NameStr; Mangler::getNameWithPrefix(NameStr, "__cpp_exception", Asm->getDataLayout()); Index: lib/CodeGen/CodeGenPrepare.cpp =================================================================== --- lib/CodeGen/CodeGenPrepare.cpp +++ lib/CodeGen/CodeGenPrepare.cpp @@ -1845,10 +1845,8 @@ // return is the first instruction in the block. if (PN) { BasicBlock::iterator BI = BB->begin(); - do { ++BI; } while (isa(BI)); - if (&*BI == BCI) - // Also skip over the bitcast. - ++BI; + // Skip over debug and the bitcast. + do { ++BI; } while (isa(BI) || &*BI == BCI); if (&*BI != RetI) return false; } else { Index: lib/CodeGen/GlobalISel/LegalizerHelper.cpp =================================================================== --- lib/CodeGen/GlobalISel/LegalizerHelper.cpp +++ lib/CodeGen/GlobalISel/LegalizerHelper.cpp @@ -531,25 +531,37 @@ return Legalized; } case TargetOpcode::G_LOAD: { - // FIXME: add support for when SizeOp0 isn't an exact multiple of - // NarrowSize. - if (SizeOp0 % NarrowSize != 0) + const auto &MMO = **MI.memoperands_begin(); + unsigned DstReg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(DstReg); + int NumParts = SizeOp0 / NarrowSize; + unsigned HandledSize = NumParts * NarrowTy.getSizeInBits(); + unsigned LeftoverBits = DstTy.getSizeInBits() - HandledSize; + + if (DstTy.isVector() && LeftoverBits != 0) return UnableToLegalize; - const auto &MMO = **MI.memoperands_begin(); + if (8 * MMO.getSize() != DstTy.getSizeInBits()) { + unsigned TmpReg = MRI.createGenericVirtualRegister(NarrowTy); + auto &MMO = **MI.memoperands_begin(); + MIRBuilder.buildLoad(TmpReg, MI.getOperand(1).getReg(), MMO); + MIRBuilder.buildAnyExt(DstReg, TmpReg); + MI.eraseFromParent(); + return Legalized; + } + // This implementation doesn't work for atomics. Give up instead of doing // something invalid. 
if (MMO.getOrdering() != AtomicOrdering::NotAtomic || MMO.getFailureOrdering() != AtomicOrdering::NotAtomic) return UnableToLegalize; - int NumParts = SizeOp0 / NarrowSize; LLT OffsetTy = LLT::scalar( MRI.getType(MI.getOperand(1).getReg()).getScalarSizeInBits()); SmallVector DstRegs; for (int i = 0; i < NumParts; ++i) { - unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy); + unsigned PartDstReg = MRI.createGenericVirtualRegister(NarrowTy); unsigned SrcReg = 0; unsigned Adjustment = i * NarrowSize / 8; unsigned Alignment = MinAlign(MMO.getAlignment(), Adjustment); @@ -562,15 +574,44 @@ MIRBuilder.materializeGEP(SrcReg, MI.getOperand(1).getReg(), OffsetTy, Adjustment); - MIRBuilder.buildLoad(DstReg, SrcReg, *SplitMMO); + MIRBuilder.buildLoad(PartDstReg, SrcReg, *SplitMMO); - DstRegs.push_back(DstReg); + DstRegs.push_back(PartDstReg); } - unsigned DstReg = MI.getOperand(0).getReg(); - if(MRI.getType(DstReg).isVector()) - MIRBuilder.buildBuildVector(DstReg, DstRegs); + + unsigned MergeResultReg = LeftoverBits == 0 ? DstReg : + MRI.createGenericVirtualRegister(LLT::scalar(HandledSize)); + + // For the leftover piece, still create the merge and insert it. + // TODO: Would it be better to directly insert the intermediate pieces? 
+ if (DstTy.isVector()) + MIRBuilder.buildBuildVector(MergeResultReg, DstRegs); else - MIRBuilder.buildMerge(DstReg, DstRegs); + MIRBuilder.buildMerge(MergeResultReg, DstRegs); + + if (LeftoverBits == 0) { + MI.eraseFromParent(); + return Legalized; + } + + unsigned ImpDefReg = MRI.createGenericVirtualRegister(DstTy); + unsigned Insert0Reg = MRI.createGenericVirtualRegister(DstTy); + MIRBuilder.buildUndef(ImpDefReg); + MIRBuilder.buildInsert(Insert0Reg, ImpDefReg, MergeResultReg, 0); + + unsigned PartDstReg + = MRI.createGenericVirtualRegister(LLT::scalar(LeftoverBits)); + unsigned Offset = HandledSize / 8; + + MachineMemOperand *SplitMMO = MIRBuilder.getMF().getMachineMemOperand( + &MMO, Offset, LeftoverBits / 8); + + unsigned SrcReg = 0; + MIRBuilder.materializeGEP(SrcReg, MI.getOperand(1).getReg(), OffsetTy, + Offset); + MIRBuilder.buildLoad(PartDstReg, SrcReg, *SplitMMO); + MIRBuilder.buildInsert(DstReg, Insert0Reg, PartDstReg, HandledSize); + MI.eraseFromParent(); return Legalized; } @@ -608,6 +649,19 @@ return UnableToLegalize; const auto &MMO = **MI.memoperands_begin(); + + unsigned SrcReg = MI.getOperand(0).getReg(); + LLT SrcTy = MRI.getType(SrcReg); + + if (8 * MMO.getSize() != SrcTy.getSizeInBits()) { + unsigned TmpReg = MRI.createGenericVirtualRegister(NarrowTy); + auto &MMO = **MI.memoperands_begin(); + MIRBuilder.buildTrunc(TmpReg, SrcReg); + MIRBuilder.buildStore(TmpReg, MI.getOperand(1).getReg(), MMO); + MI.eraseFromParent(); + return Legalized; + } + // This implementation doesn't work for atomics. Give up instead of doing // something invalid. 
if (MMO.getOrdering() != AtomicOrdering::NotAtomic || @@ -759,6 +813,45 @@ switch (MI.getOpcode()) { default: return UnableToLegalize; + case TargetOpcode::G_MERGE_VALUES: { + if (TypeIdx != 1) + return UnableToLegalize; + + unsigned DstReg = MI.getOperand(0).getReg(); + LLT DstTy = MRI.getType(DstReg); + if (!DstTy.isScalar()) + return UnableToLegalize; + + unsigned NumSrc = MI.getNumOperands() - 1; + unsigned EltSize = DstTy.getSizeInBits() / NumSrc; + + unsigned ResultReg = MRI.createGenericVirtualRegister(DstTy); + unsigned Offset = 0; + for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I, + Offset += EltSize) { + assert(MRI.getType(MI.getOperand(I).getReg()) == LLT::scalar(EltSize)); + + unsigned ShiftAmt = MRI.createGenericVirtualRegister(DstTy); + unsigned Shl = MRI.createGenericVirtualRegister(DstTy); + unsigned ZextInput = MRI.createGenericVirtualRegister(DstTy); + MIRBuilder.buildZExt(ZextInput, MI.getOperand(I).getReg()); + + if (Offset != 0) { + unsigned NextResult = I + 1 == E ? DstReg : + MRI.createGenericVirtualRegister(DstTy); + + MIRBuilder.buildConstant(ShiftAmt, Offset); + MIRBuilder.buildShl(Shl, ZextInput, ShiftAmt); + MIRBuilder.buildOr(NextResult, ResultReg, Shl); + ResultReg = NextResult; + } else { + ResultReg = ZextInput; + } + } + + MI.eraseFromParent(); + return Legalized; + } case TargetOpcode::G_UADDO: case TargetOpcode::G_USUBO: { if (TypeIdx == 1) @@ -902,8 +995,9 @@ widenScalarSrc(MI, WideTy, 3, TargetOpcode::G_ANYEXT); widenScalarDst(MI, WideTy); } else { + bool IsVec = MRI.getType(MI.getOperand(1).getReg()).isVector(); // Explicit extension is required here since high bits affect the result. 
- widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT); + widenScalarSrc(MI, WideTy, 1, MIRBuilder.getBoolExtOp(IsVec, false)); } Observer.changedInstr(MI); return Legalized; @@ -958,12 +1052,19 @@ return Legalized; case TargetOpcode::G_STORE: { - if (MRI.getType(MI.getOperand(0).getReg()) != LLT::scalar(1) || - WideTy != LLT::scalar(8)) + if (TypeIdx != 0) + return UnableToLegalize; + + LLT Ty = MRI.getType(MI.getOperand(0).getReg()); + if (!isPowerOf2_32(Ty.getSizeInBits())) return UnableToLegalize; Observer.changingInstr(MI); - widenScalarSrc(MI, WideTy, 0, TargetOpcode::G_ZEXT); + + unsigned ExtType = Ty.getScalarSizeInBits() == 1 ? + TargetOpcode::G_ZEXT : TargetOpcode::G_ANYEXT; + widenScalarSrc(MI, WideTy, 0, ExtType); + Observer.changedInstr(MI); return Legalized; } @@ -1370,18 +1471,63 @@ LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorBasic(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy) { - unsigned Opc = MI.getOpcode(); - unsigned NarrowSize = NarrowTy.getSizeInBits(); - unsigned DstReg = MI.getOperand(0).getReg(); - unsigned Flags = MI.getFlags(); - unsigned Size = MRI.getType(DstReg).getSizeInBits(); - int NumParts = Size / NarrowSize; - // FIXME: Don't know how to handle the situation where the small vectors - // aren't all the same size yet. - if (Size % NarrowSize != 0) + const unsigned Opc = MI.getOpcode(); + const unsigned NumOps = MI.getNumOperands() - 1; + const unsigned NarrowSize = NarrowTy.getSizeInBits(); + const unsigned DstReg = MI.getOperand(0).getReg(); + const unsigned Flags = MI.getFlags(); + const LLT DstTy = MRI.getType(DstReg); + const unsigned Size = DstTy.getSizeInBits(); + const int NumParts = Size / NarrowSize; + const LLT EltTy = DstTy.getElementType(); + const unsigned EltSize = EltTy.getSizeInBits(); + const unsigned BitsForNumParts = NarrowSize * NumParts; + + // Check if we have any leftovers. If we do, then only handle the case where + // the leftover is one element. 
+ if (BitsForNumParts != Size && BitsForNumParts + EltSize != Size) return UnableToLegalize; - unsigned NumOps = MI.getNumOperands() - 1; + if (BitsForNumParts != Size) { + unsigned AccumDstReg = MRI.createGenericVirtualRegister(DstTy); + MIRBuilder.buildUndef(AccumDstReg); + + // Handle the pieces which evenly divide into the requested type with + // extract/op/insert sequence. + for (unsigned Offset = 0; Offset < BitsForNumParts; Offset += NarrowSize) { + SmallVector SrcOps; + for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) { + unsigned PartOpReg = MRI.createGenericVirtualRegister(NarrowTy); + MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(), Offset); + SrcOps.push_back(PartOpReg); + } + + unsigned PartDstReg = MRI.createGenericVirtualRegister(NarrowTy); + MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags); + + unsigned PartInsertReg = MRI.createGenericVirtualRegister(DstTy); + MIRBuilder.buildInsert(PartInsertReg, AccumDstReg, PartDstReg, Offset); + AccumDstReg = PartInsertReg; + Offset += NarrowSize; + } + + // Handle the remaining element sized leftover piece. 
+ SmallVector SrcOps; + for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) { + unsigned PartOpReg = MRI.createGenericVirtualRegister(EltTy); + MIRBuilder.buildExtract(PartOpReg, MI.getOperand(I).getReg(), + BitsForNumParts); + SrcOps.push_back(PartOpReg); + } + + unsigned PartDstReg = MRI.createGenericVirtualRegister(EltTy); + MIRBuilder.buildInstr(Opc, {PartDstReg}, SrcOps, Flags); + MIRBuilder.buildInsert(DstReg, AccumDstReg, PartDstReg, BitsForNumParts); + MI.eraseFromParent(); + + return Legalized; + } + SmallVector DstRegs, Src0Regs, Src1Regs, Src2Regs; extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src0Regs); @@ -1532,6 +1678,78 @@ return Legalized; } +LegalizerHelper::LegalizeResult +LegalizerHelper::fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx, + LLT NarrowTy) { + unsigned DstReg = MI.getOperand(0).getReg(); + unsigned CondReg = MI.getOperand(1).getReg(); + + unsigned NumParts = 0; + LLT NarrowTy0, NarrowTy1; + + LLT DstTy = MRI.getType(DstReg); + LLT CondTy = MRI.getType(CondReg); + unsigned Size = DstTy.getSizeInBits(); + + assert(TypeIdx == 0 || CondTy.isVector()); + + if (TypeIdx == 0) { + NarrowTy0 = NarrowTy; + NarrowTy1 = CondTy; + + unsigned NarrowSize = NarrowTy0.getSizeInBits(); + // FIXME: Don't know how to handle the situation where the small vectors + // aren't all the same size yet. + if (Size % NarrowSize != 0) + return UnableToLegalize; + + NumParts = Size / NarrowSize; + + // Need to break down the condition type + if (CondTy.isVector()) { + if (CondTy.getNumElements() == NumParts) + NarrowTy1 = CondTy.getElementType(); + else + NarrowTy1 = LLT::vector(CondTy.getNumElements() / NumParts, + CondTy.getScalarSizeInBits()); + } + } else { + NumParts = CondTy.getNumElements(); + if (NarrowTy.isVector()) { + // TODO: Handle uneven breakdown. 
+ if (NumParts * NarrowTy.getNumElements() != CondTy.getNumElements()) + return UnableToLegalize; + + return UnableToLegalize; + } else { + NarrowTy0 = DstTy.getElementType(); + NarrowTy1 = NarrowTy; + } + } + + SmallVector DstRegs, Src0Regs, Src1Regs, Src2Regs; + if (CondTy.isVector()) + extractParts(MI.getOperand(1).getReg(), NarrowTy1, NumParts, Src0Regs); + + extractParts(MI.getOperand(2).getReg(), NarrowTy0, NumParts, Src1Regs); + extractParts(MI.getOperand(3).getReg(), NarrowTy0, NumParts, Src2Regs); + + for (unsigned i = 0; i < NumParts; ++i) { + unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy0); + MIRBuilder.buildSelect(DstReg, CondTy.isVector() ? Src0Regs[i] : CondReg, + Src1Regs[i], Src2Regs[i]); + DstRegs.push_back(DstReg); + } + + if (NarrowTy0.isVector()) + MIRBuilder.buildConcatVectors(DstReg, DstRegs); + else + MIRBuilder.buildBuildVector(DstReg, DstRegs); + + MI.eraseFromParent(); + return Legalized; +} + LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorLoadStore(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy) { @@ -1638,6 +1856,8 @@ case G_ICMP: case G_FCMP: return fewerElementsVectorCmp(MI, TypeIdx, NarrowTy); + case G_SELECT: + return fewerElementsVectorSelect(MI, TypeIdx, NarrowTy); case G_LOAD: case G_STORE: return fewerElementsVectorLoadStore(MI, TypeIdx, NarrowTy); Index: lib/CodeGen/GlobalISel/MachineIRBuilder.cpp =================================================================== --- lib/CodeGen/GlobalISel/MachineIRBuilder.cpp +++ lib/CodeGen/GlobalISel/MachineIRBuilder.cpp @@ -16,6 +16,7 @@ #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/TargetInstrInfo.h" +#include "llvm/CodeGen/TargetLowering.h" #include "llvm/CodeGen/TargetOpcodes.h" #include "llvm/CodeGen/TargetSubtargetInfo.h" #include "llvm/IR/DebugInfo.h" @@ -375,6 +376,25 @@ return buildInstr(TargetOpcode::G_ZEXT, Res, Op); } +unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool 
IsFP) const { + const auto *TLI = getMF().getSubtarget().getTargetLowering(); + switch (TLI->getBooleanContents(IsVec, IsFP)) { + case TargetLoweringBase::ZeroOrNegativeOneBooleanContent: + return TargetOpcode::G_SEXT; + case TargetLoweringBase::ZeroOrOneBooleanContent: + return TargetOpcode::G_ZEXT; + default: + return TargetOpcode::G_ANYEXT; + } +} + +MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res, + const SrcOp &Op, + bool IsFP) { + unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP); + return buildInstr(ExtOp, Res, Op); +} + MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op) { Index: lib/CodeGen/MachineVerifier.cpp =================================================================== --- lib/CodeGen/MachineVerifier.cpp +++ lib/CodeGen/MachineVerifier.cpp @@ -1004,6 +1004,7 @@ case TargetOpcode::G_STORE: case TargetOpcode::G_ZEXTLOAD: case TargetOpcode::G_SEXTLOAD: { + LLT ValTy = MRI->getType(MI->getOperand(0).getReg()); LLT PtrTy = MRI->getType(MI->getOperand(1).getReg()); if (!PtrTy.isPointer()) report("Generic memory instruction must access a pointer", MI); @@ -1014,13 +1015,17 @@ report("Generic instruction accessing memory must have one mem operand", MI); } else { + const MachineMemOperand &MMO = **MI->memoperands_begin(); if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD || MI->getOpcode() == TargetOpcode::G_SEXTLOAD) { - const MachineMemOperand &MMO = **MI->memoperands_begin(); - LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); - if (MMO.getSize() * 8 >= DstTy.getSizeInBits()) { + if (MMO.getSize() * 8 >= ValTy.getSizeInBits()) report("Generic extload must have a narrower memory type", MI); - } + } else if (MI->getOpcode() == TargetOpcode::G_LOAD) { + if (MMO.getSize() > (ValTy.getSizeInBits() + 7) / 8) + report("load memory size cannot exceed result size", MI); + } else if (MI->getOpcode() == TargetOpcode::G_STORE) { + if ((ValTy.getSizeInBits() + 7) 
/ 8 < MMO.getSize()) + report("store memory size cannot exceed value size", MI); } } @@ -1056,6 +1061,50 @@ report("bitcast sizes must match", MI); break; } + case TargetOpcode::G_INTTOPTR: + case TargetOpcode::G_PTRTOINT: + case TargetOpcode::G_ADDRSPACE_CAST: { + LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); + LLT SrcTy = MRI->getType(MI->getOperand(1).getReg()); + if (!DstTy.isValid() || !SrcTy.isValid()) + break; + + if (DstTy.isVector() != SrcTy.isVector()) + report("pointer casts must be all-vector or all-scalar", MI); + else { + if (DstTy.isVector() ) { + if (DstTy.getNumElements() != SrcTy.getNumElements()) { + report("pointer casts must preserve number of elements", MI); + break; + } + } + } + + DstTy = DstTy.getScalarType(); + SrcTy = SrcTy.getScalarType(); + + if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) { + if (!DstTy.isPointer()) + report("inttoptr result type must be a pointer", MI); + if (SrcTy.isPointer()) + report("inttoptr source type must not be a pointer", MI); + } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) { + if (!SrcTy.isPointer()) + report("ptrtoint source type must be a pointer", MI); + if (DstTy.isPointer()) + report("ptrtoint result type must not be a pointer", MI); + } else { + assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST); + if (!SrcTy.isPointer() || !DstTy.isPointer()) + report("addrspacecast types must be pointers", MI); + else { + if (SrcTy.getAddressSpace() == DstTy.getAddressSpace()) + report("addrspacecast must convert different address spaces", MI); + } + } + + break; + } case TargetOpcode::G_SEXT: case TargetOpcode::G_ZEXT: case TargetOpcode::G_ANYEXT: Index: lib/CodeGen/SelectionDAG/DAGCombiner.cpp =================================================================== --- lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -384,6 +384,8 @@ SDValue replaceStoreChain(StoreSDNode *ST, SDValue BetterChain); SDValue replaceStoreOfFPConstant(StoreSDNode *ST); + 
SDValue visitLIFETIME_END(SDNode *N); + SDValue visitSTORE(SDNode *N); SDValue visitINSERT_VECTOR_ELT(SDNode *N); SDValue visitEXTRACT_VECTOR_ELT(SDNode *N); @@ -1589,6 +1591,7 @@ case ISD::MLOAD: return visitMLOAD(N); case ISD::MSCATTER: return visitMSCATTER(N); case ISD::MSTORE: return visitMSTORE(N); + case ISD::LIFETIME_END: return visitLIFETIME_END(N); case ISD::FP_TO_FP16: return visitFP_TO_FP16(N); case ISD::FP16_TO_FP: return visitFP16_TO_FP(N); } @@ -15342,6 +15345,35 @@ return ReduceLoadOpStoreWidth(N); } +SDValue DAGCombiner::visitLIFETIME_END(SDNode *N) { + const int FrameIndex = cast(N->getOperand(1))->getIndex(); + + // We walk up the chains to find stores. + SmallVector Chains = {N->getOperand(0)}; + while (!Chains.empty()) { + SDValue Chain = Chains.back(); + Chains.pop_back(); + if (Chain.getOpcode() == ISD::TokenFactor) { + for (unsigned Nops = Chain.getNumOperands(); Nops;) + Chains.push_back(Chain.getOperand(--Nops)); + continue; + } + if (StoreSDNode *ST = dyn_cast(Chain)) { + auto *const FI = dyn_cast(ST->getBasePtr()); + if (!FI) + continue; + // We store to the alloca just before its lifetime ends, we can remove the + // store. + if (!ST->isVolatile() && ST->hasOneUse() && + FI->getIndex() == FrameIndex) { + CombineTo(ST, ST->getChain()); + return SDValue(); + } + } + } + return SDValue(); +} + /// For the instruction sequence of store below, F and I values /// are bundled together as an i64 value before being stored into memory. /// Sometimes it is more efficent to generate separate stores for F and I, @@ -17079,6 +17111,19 @@ if (SDValue NarrowLoad = narrowExtractedVectorLoad(N, DAG)) return NarrowLoad; + // Combine an extract of an extract into a single extract_subvector. 
+ // ext (ext X, C), 0 --> ext X, C
+ if (isNullConstant(N->getOperand(1)) &&
+ V.getOpcode() == ISD::EXTRACT_SUBVECTOR && V.hasOneUse() &&
+ isa(V.getOperand(1))) {
+ if (TLI.isExtractSubvectorCheap(NVT, V.getOperand(0).getValueType(),
+ V.getConstantOperandVal(1)) &&
+ TLI.isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, NVT)) {
+ return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), NVT, V.getOperand(0),
+ V.getOperand(1));
+ }
+ }
+
 // Combine:
 // (extract_subvec (concat V1, V2, ...), i)
 // Into:
Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
===================================================================
--- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -1456,6 +1456,36 @@
 }
 }
 
+// For wasm, there's always a single catch pad attached to a catchswitch, and
+// the control flow always stops at the single catch pad, as it does for a
+// cleanup pad. In case the exception caught is not of the types the catch pad
+// catches, it will be rethrown by a rethrow.
+static void findWasmUnwindDestinations(
+ FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
+ BranchProbability Prob,
+ SmallVectorImpl>
+ &UnwindDests) {
+ while (EHPadBB) {
+ const Instruction *Pad = EHPadBB->getFirstNonPHI();
+ if (isa(Pad)) {
+ // Stop on cleanup pads.
+ UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
+ UnwindDests.back().first->setIsEHScopeEntry();
+ break;
+ } else if (auto *CatchSwitch = dyn_cast(Pad)) {
+ // Add the catchpad handlers to the possible destinations. We don't
+ // continue to the unwind destination of the catchswitch for wasm.
+ for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
+ UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
+ UnwindDests.back().first->setIsEHScopeEntry();
+ }
+ break;
+ } else {
+ continue;
+ }
+ }
+}
+
 /// When an invoke or a cleanupret unwinds to the next EH pad, there are
/// many places it could ultimately go.
In the IR, we have a single unwind /// destination, but in the machine CFG, we enumerate all the possible blocks. @@ -1476,6 +1506,11 @@ bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX; bool IsSEH = isAsynchronousEHPersonality(Personality); + if (IsWasmCXX) { + findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests); + return; + } + while (EHPadBB) { const Instruction *Pad = EHPadBB->getFirstNonPHI(); BasicBlock *NewEHPadBB = nullptr; @@ -1488,8 +1523,7 @@ // personalities. UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob); UnwindDests.back().first->setIsEHScopeEntry(); - if (!IsWasmCXX) - UnwindDests.back().first->setIsEHFuncletEntry(); + UnwindDests.back().first->setIsEHFuncletEntry(); break; } else if (auto *CatchSwitch = dyn_cast(Pad)) { // Add the catchpad handlers to the possible destinations. @@ -5320,6 +5354,21 @@ return nullptr; } + // If the Value is a frame index, we can create a FrameIndex debug value + // without relying on the DAG at all. + if (const AllocaInst *AI = dyn_cast(V)) { + auto SI = FuncInfo.StaticAllocaMap.find(AI); + if (SI != FuncInfo.StaticAllocaMap.end()) { + auto SDV = + DAG.getFrameIndexDbgValue(Variable, Expression, SI->second, + /*IsIndirect*/ false, dl, SDNodeOrder); + // Do not attach the SDNodeDbgValue to an SDNode: this variable location + // is still available even if the SDNode gets optimized out. + DAG.AddDbgValue(SDV, nullptr, false); + return nullptr; + } + } + // Do not use getValue() in here; we don't want to generate code at // this point if it hasn't been done yet. 
SDValue N = NodeMap[V]; @@ -7831,15 +7880,11 @@ SmallVector OutChains; llvm::Type *CSResultType = CS.getType(); - unsigned NumReturns = 0; ArrayRef ResultTypes; - if (StructType *StructResult = dyn_cast(CSResultType)) { - NumReturns = StructResult->getNumElements(); + if (StructType *StructResult = dyn_cast(CSResultType)) ResultTypes = StructResult->elements(); - } else if (!CSResultType->isVoidTy()) { - NumReturns = 1; + else if (!CSResultType->isVoidTy()) ResultTypes = makeArrayRef(CSResultType); - } auto CurResultType = ResultTypes.begin(); auto handleRegAssign = [&](SDValue V) { @@ -7904,7 +7949,7 @@ if (!ResultValues.empty()) { assert(CurResultType == ResultTypes.end() && "Mismatch in number of ResultTypes"); - assert(ResultValues.size() == NumReturns && + assert(ResultValues.size() == ResultTypes.size() && "Mismatch in number of output operands in asm result"); SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(), Index: lib/CodeGen/SelectionDAG/TargetLowering.cpp =================================================================== --- lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -4319,7 +4319,7 @@ // Expand f32 -> i64 conversion // This algorithm comes from compiler-rt's implementation of fixsfdi: - // https://github.com/llvm-mirror/compiler-rt/blob/master/lib/builtins/fixsfdi.c + // https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/builtins/fixsfdi.c unsigned SrcEltBits = SrcVT.getScalarSizeInBits(); EVT IntVT = SrcVT.changeTypeToInteger(); EVT IntShVT = getShiftAmountTy(IntVT, DAG.getDataLayout()); Index: lib/CodeGen/WasmEHPrepare.cpp =================================================================== --- lib/CodeGen/WasmEHPrepare.cpp +++ lib/CodeGen/WasmEHPrepare.cpp @@ -7,7 +7,8 @@ //===----------------------------------------------------------------------===// // // This transformation is designed for use by code generators which use -// WebAssembly exception handling scheme. 
+// WebAssembly exception handling scheme. This currently supports C++ +// exceptions. // // WebAssembly exception handling uses Windows exception IR for the middle level // representation. This pass does the following transformation for every @@ -22,53 +23,20 @@ // // - After: // catchpad ... -// exn = wasm.catch(0); // 0 is a tag for C++ -// wasm.landingpad.index(index); +// exn = wasm.extract.exception(); // // Only add below in case it's not a single catch (...) +// wasm.landingpad.index(index); // __wasm_lpad_context.lpad_index = index; // __wasm_lpad_context.lsda = wasm.lsda(); // _Unwind_CallPersonality(exn); -// int selector = __wasm.landingpad_context.selector; +// selector = __wasm.landingpad_context.selector; // ... // -// Also, does the following for a cleanuppad block with a call to -// __clang_call_terminate(): -// - Before: -// cleanuppad ... -// exn = wasm.get.exception(); -// __clang_call_terminate(exn); -// -// - After: -// cleanuppad ... -// exn = wasm.catch(0); // 0 is a tag for C++ -// __clang_call_terminate(exn); -// -// -// * Background: WebAssembly EH instructions -// WebAssembly's try and catch instructions are structured as follows: -// try -// instruction* -// catch (C++ tag) -// instruction* -// ... -// catch_all -// instruction* -// try_end -// -// A catch instruction in WebAssembly does not correspond to a C++ catch clause. -// In WebAssembly, there is a single catch instruction for all C++ exceptions. -// There can be more catch instructions for exceptions in other languages, but -// they are not generated for now. catch_all catches all exceptions including -// foreign exceptions (e.g. JavaScript). We turn catchpads into catch (C++ tag) -// and cleanuppads into catch_all, with one exception: cleanuppad with a call to -// __clang_call_terminate should be both in catch (C++ tag) and catch_all. 
-// // // * Background: Direct personality function call // In WebAssembly EH, the VM is responsible for unwinding the stack once an // exception is thrown. After the stack is unwound, the control flow is -// transfered to WebAssembly 'catch' instruction, which returns a caught -// exception object. +// transfered to WebAssembly 'catch' instruction. // // Unwinding the stack is not done by libunwind but the VM, so the personality // function in libcxxabi cannot be called from libunwind during the unwinding @@ -137,18 +105,18 @@ Value *SelectorField = nullptr; // selector Function *ThrowF = nullptr; // wasm.throw() intrinsic - Function *CatchF = nullptr; // wasm.catch.extract() intrinsic + Function *RethrowF = nullptr; // wasm.rethrow() intrinsic Function *LPadIndexF = nullptr; // wasm.landingpad.index() intrinsic Function *LSDAF = nullptr; // wasm.lsda() intrinsic Function *GetExnF = nullptr; // wasm.get.exception() intrinsic + Function *ExtractExnF = nullptr; // wasm.extract.exception() intrinsic Function *GetSelectorF = nullptr; // wasm.get.ehselector() intrinsic Function *CallPersonalityF = nullptr; // _Unwind_CallPersonality() wrapper - Function *ClangCallTermF = nullptr; // __clang_call_terminate() function bool prepareEHPads(Function &F); bool prepareThrows(Function &F); - void prepareEHPad(BasicBlock *BB, unsigned Index); + void prepareEHPad(BasicBlock *BB, bool NeedLSDA, unsigned Index = 0); void prepareTerminateCleanupPad(BasicBlock *BB); public: @@ -208,25 +176,29 @@ // wasm.throw() intinsic, which will be lowered to wasm 'throw' instruction. ThrowF = Intrinsic::getDeclaration(&M, Intrinsic::wasm_throw); - - // Insert an unreachable instruction after a call to @llvm.wasm.throw and - // delete all following instructions within the BB, and delete all the dead - // children of the BB as well. 
- for (User *U : ThrowF->users()) { - // A call to @llvm.wasm.throw() is only generated from - // __builtin_wasm_throw() builtin call within libcxxabi, and cannot be an - // InvokeInst. - auto *ThrowI = cast(U); - if (ThrowI->getFunction() != &F) - continue; - Changed = true; - auto *BB = ThrowI->getParent(); - SmallVector Succs(succ_begin(BB), succ_end(BB)); - auto &InstList = BB->getInstList(); - InstList.erase(std::next(BasicBlock::iterator(ThrowI)), InstList.end()); - IRB.SetInsertPoint(BB); - IRB.CreateUnreachable(); - eraseDeadBBsAndChildren(Succs); + // wasm.rethrow() intinsic, which will be lowered to wasm 'rethrow' + // instruction. + RethrowF = Intrinsic::getDeclaration(&M, Intrinsic::wasm_rethrow); + + // Insert an unreachable instruction after a call to @llvm.wasm.throw / + // @llvm.wasm.rethrow and delete all following instructions within the BB, and + // delete all the dead children of the BB as well. + for (auto L : {ThrowF->users(), RethrowF->users()}) { + for (User *U : L) { + // A call to @llvm.wasm.throw() is only generated from __cxa_throw() + // builtin call within libcxxabi, and cannot be an InvokeInst. + auto *ThrowI = cast(U); + if (ThrowI->getFunction() != &F) + continue; + Changed = true; + auto *BB = ThrowI->getParent(); + SmallVector Succs(succ_begin(BB), succ_end(BB)); + auto &InstList = BB->getInstList(); + InstList.erase(std::next(BasicBlock::iterator(ThrowI)), InstList.end()); + IRB.SetInsertPoint(BB); + IRB.CreateUnreachable(); + eraseDeadBBsAndChildren(Succs); + } } return Changed; @@ -262,8 +234,6 @@ SelectorField = IRB.CreateConstGEP2_32(LPadContextTy, LPadContextGV, 0, 2, "selector_gep"); - // wasm.catch() intinsic, which will be lowered to wasm 'catch' instruction. - CatchF = Intrinsic::getDeclaration(&M, Intrinsic::wasm_catch); // wasm.landingpad.index() intrinsic, which is to specify landingpad index LPadIndexF = Intrinsic::getDeclaration(&M, Intrinsic::wasm_landingpad_index); // wasm.lsda() intrinsic. 
Returns the address of LSDA table for the current @@ -274,75 +244,70 @@ GetExnF = Intrinsic::getDeclaration(&M, Intrinsic::wasm_get_exception); GetSelectorF = Intrinsic::getDeclaration(&M, Intrinsic::wasm_get_ehselector); + // wasm.extract.exception() is the same as wasm.get.exception() but it does + // not take a token argument. This will be lowered down to EXTRACT_EXCEPTION + // pseudo instruction in instruction selection, which will be expanded using + // 'br_on_exn' instruction later. + ExtractExnF = + Intrinsic::getDeclaration(&M, Intrinsic::wasm_extract_exception); + // _Unwind_CallPersonality() wrapper function, which calls the personality CallPersonalityF = cast(M.getOrInsertFunction( "_Unwind_CallPersonality", IRB.getInt32Ty(), IRB.getInt8PtrTy())); CallPersonalityF->setDoesNotThrow(); - // __clang_call_terminate() function, which is inserted by clang in case a - // cleanup throws - ClangCallTermF = M.getFunction("__clang_call_terminate"); - unsigned Index = 0; for (auto *BB : CatchPads) { auto *CPI = cast(BB->getFirstNonPHI()); // In case of a single catch (...), we don't need to emit LSDA if (CPI->getNumArgOperands() == 1 && cast(CPI->getArgOperand(0))->isNullValue()) - prepareEHPad(BB, -1); + prepareEHPad(BB, false); else - prepareEHPad(BB, Index++); + prepareEHPad(BB, true, Index++); } - if (!ClangCallTermF) - return !CatchPads.empty(); - - // Cleanuppads will turn into catch_all later, but cleanuppads with a call to - // __clang_call_terminate() is a special case. __clang_call_terminate() takes - // an exception object, so we have to duplicate call in both 'catch ' - // and 'catch_all' clauses. Here we only insert a call to catch; the - // duplication will be done later. In catch_all, the exception object will be - // set to null. + // Cleanup pads don't need LSDA. 
for (auto *BB : CleanupPads) - for (auto &I : *BB) - if (auto *CI = dyn_cast(&I)) - if (CI->getCalledValue() == ClangCallTermF) - prepareEHPad(BB, -1); + prepareEHPad(BB, false); return true; } -void WasmEHPrepare::prepareEHPad(BasicBlock *BB, unsigned Index) { +// Prepare an EH pad for Wasm EH handling. If NeedLSDA is false, Index is +// ignored. +void WasmEHPrepare::prepareEHPad(BasicBlock *BB, bool NeedLSDA, + unsigned Index) { assert(BB->isEHPad() && "BB is not an EHPad!"); IRBuilder<> IRB(BB->getContext()); - IRB.SetInsertPoint(&*BB->getFirstInsertionPt()); - // The argument to wasm.catch() is the tag for C++ exceptions, which we set to - // 0 for this module. - // Pseudocode: void *exn = wasm.catch(0); - Instruction *Exn = IRB.CreateCall(CatchF, IRB.getInt32(0), "exn"); - // Replace the return value of wasm.get.exception() with the return value from - // wasm.catch(). + auto *FPI = cast(BB->getFirstNonPHI()); Instruction *GetExnCI = nullptr, *GetSelectorCI = nullptr; for (auto &U : FPI->uses()) { if (auto *CI = dyn_cast(U.getUser())) { if (CI->getCalledValue() == GetExnF) GetExnCI = CI; - else if (CI->getCalledValue() == GetSelectorF) + if (CI->getCalledValue() == GetSelectorF) GetSelectorCI = CI; } } - assert(GetExnCI && "wasm.get.exception() call does not exist"); - GetExnCI->replaceAllUsesWith(Exn); + // Cleanup pads w/o __clang_call_terminate call do not have any of + // wasm.get.exception() or wasm.get.ehselector() calls. We need to do nothing. + if (!GetExnCI) { + assert(!GetSelectorCI && + "wasm.get.ehselector() cannot exist w/o wasm.get.exception()"); + return; + } + + Instruction *ExtractExnCI = IRB.CreateCall(ExtractExnF, {}, "exn"); + GetExnCI->replaceAllUsesWith(ExtractExnCI); GetExnCI->eraseFromParent(); // In case it is a catchpad with single catch (...) or a cleanuppad, we don't // need to call personality function because we don't need a selector. 
- if (FPI->getNumArgOperands() == 0 || - (FPI->getNumArgOperands() == 1 && - cast(FPI->getArgOperand(0))->isNullValue())) { + if (!NeedLSDA) { if (GetSelectorCI) { assert(GetSelectorCI->use_empty() && "wasm.get.ehselector() still has uses!"); @@ -350,7 +315,7 @@ } return; } - IRB.SetInsertPoint(Exn->getNextNode()); + IRB.SetInsertPoint(ExtractExnCI->getNextNode()); // This is to create a map of in // SelectionDAGISel, which is to be used in EHStreamer to emit LSDA tables. @@ -372,8 +337,8 @@ IRB.CreateStore(IRB.CreateCall(LSDAF), LSDAField); // Pseudocode: _Unwind_CallPersonality(exn); - CallInst *PersCI = - IRB.CreateCall(CallPersonalityF, Exn, OperandBundleDef("funclet", CPI)); + CallInst *PersCI = IRB.CreateCall(CallPersonalityF, ExtractExnCI, + OperandBundleDef("funclet", CPI)); PersCI->setDoesNotThrow(); // Pseudocode: int selector = __wasm.landingpad_context.selector; @@ -387,15 +352,15 @@ } void llvm::calculateWasmEHInfo(const Function *F, WasmEHFuncInfo &EHInfo) { + // If an exception is not caught by a catchpad (i.e., it is a foreign + // exception), it will unwind to its parent catchswitch's unwind destination. + // We don't record an unwind destination for cleanuppads because every + // exception should be caught by it. for (const auto &BB : *F) { if (!BB.isEHPad()) continue; const Instruction *Pad = BB.getFirstNonPHI(); - // If an exception is not caught by a catchpad (i.e., it is a foreign - // exception), it will unwind to its parent catchswitch's unwind - // destination. We don't record an unwind destination for cleanuppads - // because every exception should be caught by it. 
if (const auto *CatchPad = dyn_cast(Pad)) { const auto *UnwindBB = CatchPad->getCatchSwitch()->getUnwindDest(); if (!UnwindBB) Index: lib/DebugInfo/PDB/Native/DbiStream.cpp =================================================================== --- lib/DebugInfo/PDB/Native/DbiStream.cpp +++ lib/DebugInfo/PDB/Native/DbiStream.cpp @@ -126,8 +126,10 @@ return EC; if (auto EC = initializeSectionMapData()) return EC; - if (auto EC = initializeFpoRecords(Pdb)) + if (auto EC = initializeOldFpoRecords(Pdb)) return EC; + if (auto EC = initializeNewFpoRecords(Pdb)) + return EC; if (Reader.bytesRemaining() > 0) return make_error(raw_error_code::corrupt_file, @@ -200,8 +202,16 @@ return SectionHeaders; } -FixedStreamArray DbiStream::getFpoRecords() { - return FpoRecords; +bool DbiStream::hasOldFpoRecords() const { return OldFpoStream != nullptr; } + +FixedStreamArray DbiStream::getOldFpoRecords() const { + return OldFpoRecords; +} + +bool DbiStream::hasNewFpoRecords() const { return NewFpoStream != nullptr; } + +const DebugFrameDataSubsectionRef &DbiStream::getNewFpoRecords() const { + return NewFpoRecords; } const DbiModuleList &DbiStream::modules() const { return Modules; } @@ -246,22 +256,15 @@ // Initializes this->SectionHeaders. 
Error DbiStream::initializeSectionHeadersData(PDBFile *Pdb) { - if (!Pdb) - return Error::success(); - - if (DbgStreams.size() == 0) - return Error::success(); + Expected> ExpectedStream = + createIndexedStreamForHeaderType(Pdb, DbgHeaderType::SectionHdr); + if (auto EC = ExpectedStream.takeError()) + return EC; - uint32_t StreamNum = getDebugStreamIndex(DbgHeaderType::SectionHdr); - if (StreamNum == kInvalidStreamIndex) + auto &SHS = *ExpectedStream; + if (!SHS) return Error::success(); - if (StreamNum >= Pdb->getNumStreams()) - return make_error(raw_error_code::no_stream); - - auto SHS = MappedBlockStream::createIndexedStream( - Pdb->getMsfLayout(), Pdb->getMsfBuffer(), StreamNum, Pdb->getAllocator()); - size_t StreamLen = SHS->getLength(); if (StreamLen % sizeof(object::coff_section)) return make_error(raw_error_code::corrupt_file, @@ -278,39 +281,69 @@ } // Initializes this->Fpos. -Error DbiStream::initializeFpoRecords(PDBFile *Pdb) { - if (!Pdb) - return Error::success(); - - if (DbgStreams.size() == 0) - return Error::success(); - - uint32_t StreamNum = getDebugStreamIndex(DbgHeaderType::NewFPO); +Error DbiStream::initializeOldFpoRecords(PDBFile *Pdb) { + Expected> ExpectedStream = + createIndexedStreamForHeaderType(Pdb, DbgHeaderType::FPO); + if (auto EC = ExpectedStream.takeError()) + return EC; - // This means there is no FPO data. 
- if (StreamNum == kInvalidStreamIndex) + auto &FS = *ExpectedStream; + if (!FS) return Error::success(); - if (StreamNum >= Pdb->getNumStreams()) - return make_error(raw_error_code::no_stream); - - auto FS = MappedBlockStream::createIndexedStream( - Pdb->getMsfLayout(), Pdb->getMsfBuffer(), StreamNum, Pdb->getAllocator()); - size_t StreamLen = FS->getLength(); if (StreamLen % sizeof(object::FpoData)) return make_error(raw_error_code::corrupt_file, - "Corrupted New FPO stream."); + "Corrupted Old FPO stream."); size_t NumRecords = StreamLen / sizeof(object::FpoData); BinaryStreamReader Reader(*FS); - if (auto EC = Reader.readArray(FpoRecords, NumRecords)) + if (auto EC = Reader.readArray(OldFpoRecords, NumRecords)) return make_error(raw_error_code::corrupt_file, - "Corrupted New FPO stream."); - FpoStream = std::move(FS); + "Corrupted Old FPO stream."); + OldFpoStream = std::move(FS); + return Error::success(); +} + +Error DbiStream::initializeNewFpoRecords(PDBFile *Pdb) { + Expected> ExpectedStream = + createIndexedStreamForHeaderType(Pdb, DbgHeaderType::NewFPO); + if (auto EC = ExpectedStream.takeError()) + return EC; + + auto &FS = *ExpectedStream; + if (!FS) + return Error::success(); + + if (auto EC = NewFpoRecords.initialize(*FS)) + return EC; + + NewFpoStream = std::move(FS); return Error::success(); } +Expected> +DbiStream::createIndexedStreamForHeaderType(PDBFile *Pdb, + DbgHeaderType Type) const { + if (!Pdb) + return nullptr; + + if (DbgStreams.empty()) + return nullptr; + + uint32_t StreamNum = getDebugStreamIndex(Type); + + // This means there is no such stream + if (StreamNum == kInvalidStreamIndex) + return nullptr; + + if (StreamNum >= Pdb->getNumStreams()) + return make_error(raw_error_code::no_stream); + + return MappedBlockStream::createIndexedStream( + Pdb->getMsfLayout(), Pdb->getMsfBuffer(), StreamNum, Pdb->getAllocator()); +} + BinarySubstreamRef DbiStream::getSectionContributionData() const { return SecContrSubstream; } Index: 
lib/IR/Instruction.cpp =================================================================== --- lib/IR/Instruction.cpp +++ lib/IR/Instruction.cpp @@ -515,9 +515,8 @@ case Instruction::CatchRet: return true; case Instruction::Call: - return !cast(this)->doesNotAccessMemory(); case Instruction::Invoke: - return !cast(this)->doesNotAccessMemory(); + return !cast(this)->doesNotAccessMemory(); case Instruction::Store: return !cast(this)->isUnordered(); } @@ -535,9 +534,8 @@ case Instruction::CatchRet: return true; case Instruction::Call: - return !cast(this)->onlyReadsMemory(); case Instruction::Invoke: - return !cast(this)->onlyReadsMemory(); + return !cast(this)->onlyReadsMemory(); case Instruction::Load: return !cast(this)->isUnordered(); } Index: lib/IR/Value.cpp =================================================================== --- lib/IR/Value.cpp +++ lib/IR/Value.cpp @@ -930,7 +930,7 @@ << Old->getName() << " to " << *New->getType() << " %" << New->getName() << "\n"; llvm_unreachable( - "A weak tracking value handle still pointed to the old value!\n"); + "A weak tracking value handle still pointed to the old value!\n"); default: break; } Index: lib/MC/MCAssembler.cpp =================================================================== --- lib/MC/MCAssembler.cpp +++ lib/MC/MCAssembler.cpp @@ -322,6 +322,13 @@ const MCAlignFragment &AF = cast(F); unsigned Offset = Layout.getFragmentOffset(&AF); unsigned Size = OffsetToAlignment(Offset, AF.getAlignment()); + + // Insert extra Nops for code alignment if the target define + // shouldInsertExtraNopBytesForCodeAlign target hook. + if (AF.getParent()->UseCodeAlign() && AF.hasEmitNops() && + getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size)) + return Size; + // If we are padding with nops, force the padding to be larger than the // minimum nop size. 
if (Size > 0 && AF.hasEmitNops()) { @@ -804,7 +811,8 @@ if (isa(&Frag) && isa(&Frag)) continue; - if (!isa(&Frag) && !isa(&Frag)) + if (!isa(&Frag) && !isa(&Frag) && + !isa(&Frag)) continue; ArrayRef Fixups; MutableArrayRef Contents; @@ -825,6 +833,13 @@ } else if (auto *FragWithFixups = dyn_cast(&Frag)) { Fixups = FragWithFixups->getFixups(); Contents = FragWithFixups->getContents(); + } else if (auto *AF = dyn_cast(&Frag)) { + // Insert fixup type for code alignment if the target define + // shouldInsertFixupForCodeAlign target hook. + if (Sec.UseCodeAlign() && AF->hasEmitNops()) { + getBackend().shouldInsertFixupForCodeAlign(*this, Layout, *AF); + } + continue; } else llvm_unreachable("Unknown fragment with fixups!"); for (const MCFixup &Fixup : Fixups) { Index: lib/MC/MCParser/AsmParser.cpp =================================================================== --- lib/MC/MCParser/AsmParser.cpp +++ lib/MC/MCParser/AsmParser.cpp @@ -900,6 +900,9 @@ eatToEndOfStatement(); } + // Make sure we get proper DWARF even for empty files. 
+ (void)enabledGenDwarfForAssembly(); + getTargetParser().onEndOfFile(); printPendingErrors(); Index: lib/Object/WasmObjectFile.cpp =================================================================== --- lib/Object/WasmObjectFile.cpp +++ lib/Object/WasmObjectFile.cpp @@ -1181,7 +1181,7 @@ return Header; } -void WasmObjectFile::moveSymbolNext(DataRefImpl &Symb) const { Symb.d.a++; } +void WasmObjectFile::moveSymbolNext(DataRefImpl &Symb) const { Symb.d.b++; } uint32_t WasmObjectFile::getSymbolFlags(DataRefImpl Symb) const { uint32_t Result = SymbolRef::SF_None; @@ -1203,18 +1203,20 @@ basic_symbol_iterator WasmObjectFile::symbol_begin() const { DataRefImpl Ref; - Ref.d.a = 0; + Ref.d.a = 1; // Arbitrary non-zero value so that Ref.p is non-null + Ref.d.b = 0; // Symbol index return BasicSymbolRef(Ref, this); } basic_symbol_iterator WasmObjectFile::symbol_end() const { DataRefImpl Ref; - Ref.d.a = Symbols.size(); + Ref.d.a = 1; // Arbitrary non-zero value so that Ref.p is non-null + Ref.d.b = Symbols.size(); // Symbol index return BasicSymbolRef(Ref, this); } const WasmSymbol &WasmObjectFile::getWasmSymbol(const DataRefImpl &Symb) const { - return Symbols[Symb.d.a]; + return Symbols[Symb.d.b]; } const WasmSymbol &WasmObjectFile::getWasmSymbol(const SymbolRef &Symb) const { @@ -1420,8 +1422,8 @@ if (Rel.Type == wasm::R_WEBASSEMBLY_TYPE_INDEX_LEB) return symbol_end(); DataRefImpl Sym; - Sym.d.a = Rel.Index; - Sym.d.b = 0; + Sym.d.a = 1; + Sym.d.b = Rel.Index; return symbol_iterator(SymbolRef(Sym, this)); } Index: lib/Target/AArch64/AArch64AsmPrinter.cpp =================================================================== --- lib/Target/AArch64/AArch64AsmPrinter.cpp +++ lib/Target/AArch64/AArch64AsmPrinter.cpp @@ -578,9 +578,18 @@ const std::vector &JT = MJTI->getJumpTables(); if (JT.empty()) return; + const Function &F = MF->getFunction(); const TargetLoweringObjectFile &TLOF = getObjFileLowering(); - MCSection *ReadOnlySec = 
TLOF.getSectionForJumpTable(MF->getFunction(), TM); - OutStreamer->SwitchSection(ReadOnlySec); + bool JTInDiffSection = + !STI->isTargetCOFF() || + !TLOF.shouldPutJumpTableInFunctionSection( + MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32, + F); + if (JTInDiffSection) { + // Drop it in the readonly section. + MCSection *ReadOnlySec = TLOF.getSectionForJumpTable(F, TM); + OutStreamer->SwitchSection(ReadOnlySec); + } auto AFI = MF->getInfo(); for (unsigned JTI = 0, e = JT.size(); JTI != e; ++JTI) { Index: lib/Target/AArch64/AArch64RegisterBankInfo.cpp =================================================================== --- lib/Target/AArch64/AArch64RegisterBankInfo.cpp +++ lib/Target/AArch64/AArch64RegisterBankInfo.cpp @@ -669,7 +669,11 @@ &AArch64::FPRRegBank; }; - if (any_of(MRI.use_instructions(MI.getOperand(0).getReg()), + LLT SrcTy = MRI.getType(MI.getOperand(MI.getNumOperands()-1).getReg()); + // UNMERGE into scalars from a vector should always use FPR. + // Likewise if any of the uses are FP instructions. + if (SrcTy.isVector() || + any_of(MRI.use_instructions(MI.getOperand(0).getReg()), [&](MachineInstr &MI) { return HasFPConstraints(MI); })) { // Set the register bank of every operand to FPR. for (unsigned Idx = 0, NumOperands = MI.getNumOperands(); Index: lib/Target/AArch64/AArch64TargetMachine.cpp =================================================================== --- lib/Target/AArch64/AArch64TargetMachine.cpp +++ lib/Target/AArch64/AArch64TargetMachine.cpp @@ -208,8 +208,8 @@ static Reloc::Model getEffectiveRelocModel(const Triple &TT, Optional RM) { - // AArch64 Darwin is always PIC. - if (TT.isOSDarwin()) + // AArch64 Darwin and Windows are always PIC. 
+ if (TT.isOSDarwin() || TT.isOSWindows()) return Reloc::PIC_; // On ELF platforms the default static relocation model has a smart enough // linker to cope with referencing external symbols defined in a shared Index: lib/Target/AMDGPU/AMDGPUISelLowering.h =================================================================== --- lib/Target/AMDGPU/AMDGPUISelLowering.h +++ lib/Target/AMDGPU/AMDGPUISelLowering.h @@ -211,14 +211,14 @@ const char* getTargetNodeName(unsigned Opcode) const override; - // FIXME: Turn off MergeConsecutiveStores() before Instruction Selection - // for AMDGPU. - // A commit ( git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@319036 - // 91177308-0d34-0410-b5e6-96231b3b80d8 ) turned on - // MergeConsecutiveStores() before Instruction Selection for all targets. - // Enough AMDGPU compiles go into an infinite loop ( MergeConsecutiveStores() - // merges two stores; LegalizeStoreOps() un-merges; MergeConsecutiveStores() - // re-merges, etc. ) to warrant turning it off for now. + // FIXME: Turn off MergeConsecutiveStores() before Instruction Selection for + // AMDGPU. Commit r319036, + // (https://github.com/llvm/llvm-project/commit/db77e57ea86d941a4262ef60261692f4cb6893e6) + // turned on MergeConsecutiveStores() before Instruction Selection for all + // targets. Enough AMDGPU compiles go into an infinite loop ( + // MergeConsecutiveStores() merges two stores; LegalizeStoreOps() un-merges; + // MergeConsecutiveStores() re-merges, etc. ) to warrant turning it off for + // now. 
bool mergeStoresAfterLegalization() const override { return false; } bool isFsqrtCheap(SDValue Operand, SelectionDAG &DAG) const override { Index: lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp =================================================================== --- lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp +++ lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp @@ -168,7 +168,7 @@ .legalFor({{S64, S32}, {S32, S16}, {S64, S16}, {S32, S1}, {S64, S1}, {S16, S1}, // FIXME: Hack - {S128, S32}}) + {S128, S32}, {S128, S64}, {S32, LLT::scalar(24)}}) .scalarize(0); getActionDefinitionsBuilder({G_SITOFP, G_UITOFP}) @@ -229,12 +229,39 @@ }); getActionDefinitionsBuilder({G_LOAD, G_STORE}) + .narrowScalarIf([](const LegalityQuery &Query) { + unsigned Size = Query.Types[0].getSizeInBits(); + unsigned MemSize = Query.MMODescrs[0].SizeInBits; + return (Size > 32 && MemSize < Size); + }, + [](const LegalityQuery &Query) { + return std::make_pair(0, LLT::scalar(32)); + }) + .fewerElementsIf([=, &ST](const LegalityQuery &Query) { + unsigned MemSize = Query.MMODescrs[0].SizeInBits; + return Query.Types[0].isVector() && (MemSize == 96) && + ST.getGeneration() < AMDGPUSubtarget::SEA_ISLANDS; + }, + [=](const LegalityQuery &Query) { + return std::make_pair(0, V2S32); + }) .legalIf([=, &ST](const LegalityQuery &Query) { const LLT &Ty0 = Query.Types[0]; + unsigned Size = Ty0.getSizeInBits(); + unsigned MemSize = Query.MMODescrs[0].SizeInBits; + if (Size > 32 && MemSize < Size) + return false; + + if (Ty0.isVector() && Size != MemSize) + return false; + // TODO: Decompose private loads into 4-byte components. 
// TODO: Illegal flat loads on SI - switch (Ty0.getSizeInBits()) { + switch (MemSize) { + case 8: + case 16: + return Size == 32; case 32: case 64: case 128: @@ -250,7 +277,8 @@ default: return false; } - }); + }) + .clampScalar(0, S32, S64); auto &ExtLoads = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD}) @@ -286,15 +314,35 @@ getActionDefinitionsBuilder(G_SELECT) .legalFor({{S32, S1}, {S64, S1}, {V2S32, S1}, {V2S16, S1}}) .clampScalar(0, S32, S64) - .scalarize(0); + .fewerElementsIf( + [=](const LegalityQuery &Query) { + if (Query.Types[1].isVector()) + return true; + + LLT Ty = Query.Types[0]; + + // FIXME: Hack until odd splits handled + return Ty.isVector() && + (Ty.getScalarSizeInBits() > 32 || Ty.getNumElements() % 2 != 0); + }, + scalarize(0)) + // FIXME: Handle 16-bit vectors better + .fewerElementsIf( + [=](const LegalityQuery &Query) { + return Query.Types[0].isVector() && + Query.Types[0].getElementType().getSizeInBits() < 32;}, + scalarize(0)) + .scalarize(1) + .clampMaxNumElements(0, S32, 2); // TODO: Only the low 4/5/6 bits of the shift amount are observed, so we can // be more flexible with the shift amount type. auto &Shifts = getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR}) .legalFor({{S32, S32}, {S64, S32}}); - if (ST.has16BitInsts()) + if (ST.has16BitInsts()) { Shifts.legalFor({{S16, S32}, {S16, S16}}); - else + Shifts.clampScalar(0, S16, S64); + } else Shifts.clampScalar(0, S32, S64); Shifts.clampScalar(1, S32, S32); @@ -370,6 +418,13 @@ }; getActionDefinitionsBuilder(Op) + .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16) + // Clamp the little scalar to s8-s256 and make it a power of 2. It's not + // worth considering the multiples of 64 since 2*192 and 2*384 are not + // valid. 
+ .clampScalar(LitTyIdx, S16, S256) + .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32) + // Break up vectors with weird elements into scalars .fewerElementsIf( [=](const LegalityQuery &Query) { return notValidElt(Query, 0); }, @@ -396,12 +451,6 @@ } return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits)); }) - .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16) - // Clamp the little scalar to s8-s256 and make it a power of 2. It's not - // worth considering the multiples of 64 since 2*192 and 2*384 are not - // valid. - .clampScalar(LitTyIdx, S16, S256) - .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32) .legalIf([=](const LegalityQuery &Query) { const LLT &BigTy = Query.Types[BigTyIdx]; const LLT &LitTy = Query.Types[LitTyIdx]; Index: lib/Target/AMDGPU/SIInstrInfo.h =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.h +++ lib/Target/AMDGPU/SIInstrInfo.h @@ -762,10 +762,6 @@ return RI.getRegSizeInBits(*getOpRegClass(MI, OpNo)) / 8; } - /// \returns true if it is legal for the operand at index \p OpNo - /// to read a VGPR. - bool canReadVGPR(const MachineInstr &MI, unsigned OpNo) const; - /// Legalize the \p OpIndex operand of this instruction by inserting /// a MOV. 
For example: /// ADD_I32_e32 VGPR0, 15 Index: lib/Target/AMDGPU/SIInstrInfo.cpp =================================================================== --- lib/Target/AMDGPU/SIInstrInfo.cpp +++ lib/Target/AMDGPU/SIInstrInfo.cpp @@ -3277,18 +3277,6 @@ return RI.getRegClass(RCID); } -bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const { - switch (MI.getOpcode()) { - case AMDGPU::COPY: - case AMDGPU::REG_SEQUENCE: - case AMDGPU::PHI: - case AMDGPU::INSERT_SUBREG: - return RI.hasVGPRs(getOpRegClass(MI, 0)); - default: - return RI.hasVGPRs(getOpRegClass(MI, OpNo)); - } -} - void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const { MachineBasicBlock::iterator I = MI; MachineBasicBlock *MBB = MI.getParent(); @@ -4957,7 +4945,23 @@ for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg), E = MRI.use_end(); I != E;) { MachineInstr &UseMI = *I->getParent(); - if (!canReadVGPR(UseMI, I.getOperandNo())) { + + unsigned OpNo = 0; + + switch (UseMI.getOpcode()) { + case AMDGPU::COPY: + case AMDGPU::WQM: + case AMDGPU::WWM: + case AMDGPU::REG_SEQUENCE: + case AMDGPU::PHI: + case AMDGPU::INSERT_SUBREG: + break; + default: + OpNo = I.getOperandNo(); + break; + } + + if (!RI.hasVGPRs(getOpRegClass(UseMI, OpNo))) { Worklist.insert(&UseMI); do { Index: lib/Target/ARM/ARMISelDAGToDAG.cpp =================================================================== --- lib/Target/ARM/ARMISelDAGToDAG.cpp +++ lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -130,6 +130,7 @@ // Thumb Addressing Modes: bool SelectThumbAddrModeRR(SDValue N, SDValue &Base, SDValue &Offset); + bool SelectThumbAddrModeRRSext(SDValue N, SDValue &Base, SDValue &Offset); bool SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base, SDValue &OffImm); bool SelectThumbAddrModeImm5S1(SDValue N, SDValue &Base, @@ -1032,8 +1033,22 @@ // Thumb Addressing Modes //===----------------------------------------------------------------------===// -bool 
ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N, - SDValue &Base, SDValue &Offset){ +static bool shouldUseZeroOffsetLdSt(SDValue N) { + // Negative numbers are difficult to materialise in thumb1. If we are + // selecting the add of a negative, instead try to select ri with a zero + // offset, so create the add node directly which will become a sub. + if (N.getOpcode() != ISD::ADD) + return false; + + // Look for an imm which is not legal for ld/st, but is legal for sub. + if (auto C = dyn_cast(N.getOperand(1))) + return C->getSExtValue() < 0 && C->getSExtValue() >= -255; + + return false; +} + +bool ARMDAGToDAGISel::SelectThumbAddrModeRRSext(SDValue N, SDValue &Base, + SDValue &Offset) { if (N.getOpcode() != ISD::ADD && !CurDAG->isBaseWithConstantOffset(N)) { ConstantSDNode *NC = dyn_cast(N); if (!NC || !NC->isNullValue()) @@ -1048,9 +1063,22 @@ return true; } +bool ARMDAGToDAGISel::SelectThumbAddrModeRR(SDValue N, SDValue &Base, + SDValue &Offset) { + if (shouldUseZeroOffsetLdSt(N)) + return false; // Select ri instead + return SelectThumbAddrModeRRSext(N, Base, Offset); +} + bool ARMDAGToDAGISel::SelectThumbAddrModeImm5S(SDValue N, unsigned Scale, SDValue &Base, SDValue &OffImm) { + if (shouldUseZeroOffsetLdSt(N)) { + Base = N; + OffImm = CurDAG->getTargetConstant(0, SDLoc(N), MVT::i32); + return true; + } + if (!CurDAG->isBaseWithConstantOffset(N)) { if (N.getOpcode() == ISD::ADD) { return false; // We want to select register offset instead Index: lib/Target/ARM/ARMInstrThumb.td =================================================================== --- lib/Target/ARM/ARMInstrThumb.td +++ lib/Target/ARM/ARMInstrThumb.td @@ -187,6 +187,19 @@ let MIOperandInfo = (ops tGPR:$base, tGPR:$offsreg); } +// t_addrmode_rr_sext := reg + reg +// +// This is similar to t_addrmode_rr, but uses different heuristics for +// ldrsb/ldrsh. 
+def t_addrmode_rr_sext : MemOperand, + ComplexPattern { + let EncoderMethod = "getThumbAddrModeRegRegOpValue"; + let PrintMethod = "printThumbAddrModeRROperand"; + let DecoderMethod = "DecodeThumbAddrModeRR"; + let ParserMatchClass = t_addrmode_rr_asm_operand; + let MIOperandInfo = (ops tGPR:$base, tGPR:$offsreg); +} + // t_addrmode_rrs := reg + reg // // We use separate scaled versions because the Select* functions need @@ -731,17 +744,17 @@ let AddedComplexity = 10 in def tLDRSB : // A8.6.80 - T1pILdStEncode<0b011, (outs tGPR:$Rt), (ins t_addrmode_rr:$addr), + T1pILdStEncode<0b011, (outs tGPR:$Rt), (ins t_addrmode_rr_sext:$addr), AddrModeT1_1, IIC_iLoad_bh_r, "ldrsb", "\t$Rt, $addr", - [(set tGPR:$Rt, (sextloadi8 t_addrmode_rr:$addr))]>; + [(set tGPR:$Rt, (sextloadi8 t_addrmode_rr_sext:$addr))]>; let AddedComplexity = 10 in def tLDRSH : // A8.6.84 - T1pILdStEncode<0b111, (outs tGPR:$Rt), (ins t_addrmode_rr:$addr), + T1pILdStEncode<0b111, (outs tGPR:$Rt), (ins t_addrmode_rr_sext:$addr), AddrModeT1_2, IIC_iLoad_bh_r, "ldrsh", "\t$Rt, $addr", - [(set tGPR:$Rt, (sextloadi16 t_addrmode_rr:$addr))]>; + [(set tGPR:$Rt, (sextloadi16 t_addrmode_rr_sext:$addr))]>; def tSTRspi : T1pIs<(outs), (ins tGPR:$Rt, t_addrmode_sp:$addr), IIC_iStore_i, Index: lib/Target/PowerPC/PPCFastISel.cpp =================================================================== --- lib/Target/PowerPC/PPCFastISel.cpp +++ lib/Target/PowerPC/PPCFastISel.cpp @@ -151,6 +151,14 @@ bool isVSSRCRegClass(const TargetRegisterClass *RC) const { return RC->getID() == PPC::VSSRCRegClassID; } + unsigned copyRegToRegClass(const TargetRegisterClass *ToRC, + unsigned SrcReg, unsigned Flag = 0, + unsigned SubReg = 0) { + unsigned TmpReg = createResultReg(ToRC); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, + TII.get(TargetOpcode::COPY), TmpReg).addReg(SrcReg, Flag, SubReg); + return TmpReg; + } bool PPCEmitCmp(const Value *Src1Value, const Value *Src2Value, bool isZExt, unsigned DestReg, const PPC::Predicate 
Pred); @@ -877,18 +885,10 @@ } } else { CmpOpc = PPC::FCMPUS; - if (isVSSRCRegClass(RC1)) { - unsigned TmpReg = createResultReg(&PPC::F4RCRegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, - TII.get(TargetOpcode::COPY), TmpReg).addReg(SrcReg1); - SrcReg1 = TmpReg; - } - if (RC2 && isVSSRCRegClass(RC2)) { - unsigned TmpReg = createResultReg(&PPC::F4RCRegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, - TII.get(TargetOpcode::COPY), TmpReg).addReg(SrcReg2); - SrcReg2 = TmpReg; - } + if (isVSSRCRegClass(RC1)) + SrcReg1 = copyRegToRegClass(&PPC::F4RCRegClass, SrcReg1); + if (RC2 && isVSSRCRegClass(RC2)) + SrcReg2 = copyRegToRegClass(&PPC::F4RCRegClass, SrcReg2); } break; case MVT::f64: @@ -1210,13 +1210,8 @@ // Convert f32 to f64 if necessary. This is just a meaningless copy // to get the register class right. const TargetRegisterClass *InRC = MRI.getRegClass(SrcReg); - if (InRC == &PPC::F4RCRegClass) { - unsigned TmpReg = createResultReg(&PPC::F8RCRegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, - TII.get(TargetOpcode::COPY), TmpReg) - .addReg(SrcReg); - SrcReg = TmpReg; - } + if (InRC == &PPC::F4RCRegClass) + SrcReg = copyRegToRegClass(&PPC::F8RCRegClass, SrcReg); // Determine the opcode for the conversion, which takes place // entirely within FPRs. @@ -1510,11 +1505,7 @@ if (RetVT == CopyVT) { const TargetRegisterClass *CpyRC = TLI.getRegClassFor(CopyVT); - ResultReg = createResultReg(CpyRC); - - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, - TII.get(TargetOpcode::COPY), ResultReg) - .addReg(SourcePhysReg); + ResultReg = copyRegToRegClass(CpyRC, SourcePhysReg); // If necessary, round the floating result to single precision. } else if (CopyVT == MVT::f64) { @@ -1527,12 +1518,9 @@ // used along the fast-isel path (not lowered), and downstream logic // also doesn't like a direct subreg copy on a physical reg.) 
} else if (RetVT == MVT::i8 || RetVT == MVT::i16 || RetVT == MVT::i32) { - ResultReg = createResultReg(&PPC::GPRCRegClass); // Convert physical register from G8RC to GPRC. SourcePhysReg -= PPC::X0 - PPC::R0; - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, - TII.get(TargetOpcode::COPY), ResultReg) - .addReg(SourcePhysReg); + ResultReg = copyRegToRegClass(&PPC::GPRCRegClass, SourcePhysReg); } assert(ResultReg && "ResultReg unset!"); @@ -1884,13 +1872,8 @@ return false; // The only interesting case is when we need to switch register classes. - if (SrcVT == MVT::i64) { - unsigned ResultReg = createResultReg(&PPC::GPRCRegClass); - BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, - TII.get(TargetOpcode::COPY), - ResultReg).addReg(SrcReg, 0, PPC::sub_32); - SrcReg = ResultReg; - } + if (SrcVT == MVT::i64) + SrcReg = copyRegToRegClass(&PPC::GPRCRegClass, SrcReg, 0, PPC::sub_32); updateValueMap(I, SrcReg); return true; Index: lib/Target/PowerPC/PPCInstrInfo.cpp =================================================================== --- lib/Target/PowerPC/PPCInstrInfo.cpp +++ lib/Target/PowerPC/PPCInstrInfo.cpp @@ -3205,9 +3205,9 @@ } assert((&*It) == &DefMI && "DefMI is missing"); - // If DefMI also uses the register to be forwarded, we can only forward it + // If DefMI also defines the register to be forwarded, we can only forward it // if DefMI is being erased. - if (DefMI.readsRegister(Reg, &getRegisterInfo())) + if (DefMI.modifiesRegister(Reg, &getRegisterInfo())) return KillDefMI; return true; Index: lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h =================================================================== --- lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h +++ lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h @@ -40,6 +40,16 @@ bool requiresDiffExpressionRelocations() const override { return STI.getFeatureBits()[RISCV::FeatureRelax] || ForceRelocs; } + + // Return Size with extra Nop Bytes for alignment directive in code section. 
+ bool shouldInsertExtraNopBytesForCodeAlign(const MCAlignFragment &AF, + unsigned &Size) override; + + // Insert target specific fixup type for alignment directive in code section. + bool shouldInsertFixupForCodeAlign(MCAssembler &Asm, + const MCAsmLayout &Layout, + MCAlignFragment &AF) override; + void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup, const MCValue &Target, MutableArrayRef Data, uint64_t Value, bool IsResolved, @@ -84,7 +94,8 @@ { "fixup_riscv_rvc_jump", 2, 11, MCFixupKindInfo::FKF_IsPCRel }, { "fixup_riscv_rvc_branch", 0, 16, MCFixupKindInfo::FKF_IsPCRel }, { "fixup_riscv_call", 0, 64, MCFixupKindInfo::FKF_IsPCRel }, - { "fixup_riscv_relax", 0, 0, 0 } + { "fixup_riscv_relax", 0, 0, 0 }, + { "fixup_riscv_align", 0, 0, 0 } }; static_assert((array_lengthof(Infos)) == RISCV::NumTargetFixupKinds, "Not all fixup kinds added to Infos array"); Index: lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp =================================================================== --- lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp +++ lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp @@ -16,6 +16,7 @@ #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCObjectWriter.h" #include "llvm/MC/MCSymbol.h" +#include "llvm/MC/MCValue.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" @@ -286,6 +287,57 @@ } } +// Linker relaxation may change code size. We have to insert Nops +// for .align directive when linker relaxation enabled. So then Linker +// could satisfy alignment by removing Nops. +// The function return the total Nops Size we need to insert. +bool RISCVAsmBackend::shouldInsertExtraNopBytesForCodeAlign( + const MCAlignFragment &AF, unsigned &Size) { + // Calculate Nops Size only when linker relaxation enabled. + if (!STI.getFeatureBits()[RISCV::FeatureRelax]) + return false; + + bool HasStdExtC = STI.getFeatureBits()[RISCV::FeatureStdExtC]; + unsigned MinNopLen = HasStdExtC ? 
2 : 4; + + Size = AF.getAlignment() - MinNopLen; + return true; +} + +// We need to insert R_RISCV_ALIGN relocation type to indicate the +// position of Nops and the total bytes of the Nops have been inserted +// when linker relaxation enabled. +// The function insert fixup_riscv_align fixup which eventually will +// transfer to R_RISCV_ALIGN relocation type. +bool RISCVAsmBackend::shouldInsertFixupForCodeAlign(MCAssembler &Asm, + const MCAsmLayout &Layout, + MCAlignFragment &AF) { + // Insert the fixup only when linker relaxation enabled. + if (!STI.getFeatureBits()[RISCV::FeatureRelax]) + return false; + + // Calculate total Nops we need to insert. + unsigned Count; + shouldInsertExtraNopBytesForCodeAlign(AF, Count); + // No Nop need to insert, simply return. + if (Count == 0) + return false; + + MCContext &Ctx = Asm.getContext(); + const MCExpr *Dummy = MCConstantExpr::create(0, Ctx); + // Create fixup_riscv_align fixup. + MCFixup Fixup = + MCFixup::create(0, Dummy, MCFixupKind(RISCV::fixup_riscv_align), SMLoc()); + + uint64_t FixedValue = 0; + MCValue NopBytes = MCValue::get(Count); + + Asm.getWriter().recordRelocation(Asm, Layout, &AF, Fixup, NopBytes, + FixedValue); + + return true; +} + std::unique_ptr RISCVAsmBackend::createObjectTargetWriter() const { return createRISCVELFObjectWriter(OSABI, Is64Bit); Index: lib/Target/RISCV/MCTargetDesc/RISCVELFObjectWriter.cpp =================================================================== --- lib/Target/RISCV/MCTargetDesc/RISCVELFObjectWriter.cpp +++ lib/Target/RISCV/MCTargetDesc/RISCVELFObjectWriter.cpp @@ -95,6 +95,8 @@ return ELF::R_RISCV_CALL; case RISCV::fixup_riscv_relax: return ELF::R_RISCV_RELAX; + case RISCV::fixup_riscv_align: + return ELF::R_RISCV_ALIGN; } } Index: lib/Target/RISCV/MCTargetDesc/RISCVFixupKinds.h =================================================================== --- lib/Target/RISCV/MCTargetDesc/RISCVFixupKinds.h +++ lib/Target/RISCV/MCTargetDesc/RISCVFixupKinds.h @@ -52,6 +52,10 @@ // 
fixup_riscv_relax - Used to generate an R_RISCV_RELAX relocation type, // which indicates the linker may relax the instruction pair. fixup_riscv_relax, + // fixup_riscv_align - Used to generate an R_RISCV_ALIGN relocation type, + // which indicates the linker should fixup the alignment after linker + // relaxation. + fixup_riscv_align, // fixup_riscv_invalid - used as a sentinel and a marker, must be last fixup fixup_riscv_invalid, Index: lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp =================================================================== --- lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp +++ lib/Target/WebAssembly/InstPrinter/WebAssemblyInstPrinter.cpp @@ -122,61 +122,48 @@ } break; - case WebAssembly::CATCH_I32: - case WebAssembly::CATCH_I32_S: - case WebAssembly::CATCH_I64: - case WebAssembly::CATCH_I64_S: - case WebAssembly::CATCH_ALL: - case WebAssembly::CATCH_ALL_S: - // There can be multiple catch instructions for one try instruction, so we - // print a label only for the first 'catch' label. - if (LastSeenEHInst != CATCH) { - if (EHPadStack.empty()) { - printAnnotation(OS, "try-catch mismatch!"); - } else { - printAnnotation(OS, - "catch" + utostr(EHPadStack.pop_back_val()) + ':'); - } + case WebAssembly::CATCH: + case WebAssembly::CATCH_S: + if (EHPadStack.empty()) { + printAnnotation(OS, "try-catch mismatch!"); + } else { + printAnnotation(OS, "catch" + utostr(EHPadStack.pop_back_val()) + ':'); } - LastSeenEHInst = CATCH; break; } // Annotate any control flow label references. - unsigned NumFixedOperands = Desc.NumOperands; - SmallSet Printed; - for (unsigned i = 0, e = MI->getNumOperands(); i < e; ++i) { - // See if this operand denotes a basic block target. - if (i < NumFixedOperands) { - // A non-variable_ops operand, check its type. 
- if (Desc.OpInfo[i].OperandType != WebAssembly::OPERAND_BASIC_BLOCK) - continue; + + // rethrow instruction does not take any depth argument and rethrows to the + // nearest enclosing catch scope, if any. If there's no enclosing catch + // scope, it throws up to the caller. + if (Opc == WebAssembly::RETHROW || Opc == WebAssembly::RETHROW_S) { + if (EHPadStack.empty()) { + printAnnotation(OS, "to caller"); } else { - // A variable_ops operand, which currently can be immediates (used in - // br_table) which are basic block targets, or for call instructions - // when using -wasm-keep-registers (in which case they are registers, - // and should not be processed). - if (!MI->getOperand(i).isImm()) - continue; + printAnnotation(OS, "down to catch" + utostr(EHPadStack.back())); } - uint64_t Depth = MI->getOperand(i).getImm(); - if (!Printed.insert(Depth).second) - continue; - if (Opc == WebAssembly::RETHROW || Opc == WebAssembly::RETHROW_S) { - if (Depth > EHPadStack.size()) { - printAnnotation(OS, "Invalid depth argument!"); - } else if (Depth == EHPadStack.size()) { - // This can happen when rethrow instruction breaks out of all nests - // and throws up to the current function's caller. - printAnnotation(OS, utostr(Depth) + ": " + "to caller"); + } else { + unsigned NumFixedOperands = Desc.NumOperands; + SmallSet Printed; + for (unsigned I = 0, E = MI->getNumOperands(); I < E; ++I) { + // See if this operand denotes a basic block target. + if (I < NumFixedOperands) { + // A non-variable_ops operand, check its type. 
+ if (Desc.OpInfo[I].OperandType != WebAssembly::OPERAND_BASIC_BLOCK) + continue; } else { - uint64_t CatchNo = EHPadStack.rbegin()[Depth]; - printAnnotation(OS, utostr(Depth) + ": " + "down to catch" + - utostr(CatchNo)); + // A variable_ops operand, which currently can be immediates (used in + // br_table) which are basic block targets, or for call instructions + // when using -wasm-keep-registers (in which case they are registers, + // and should not be processed). + if (!MI->getOperand(I).isImm()) + continue; } - - } else { + uint64_t Depth = MI->getOperand(I).getImm(); + if (!Printed.insert(Depth).second) + continue; if (Depth >= ControlFlowStack.size()) { printAnnotation(OS, "Invalid depth argument!"); } else { Index: lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp =================================================================== --- lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp +++ lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp @@ -43,6 +43,8 @@ #define DEBUG_TYPE "asm-printer" +extern cl::opt WasmKeepRegisters; + //===----------------------------------------------------------------------===// // Helpers. //===----------------------------------------------------------------------===// @@ -304,6 +306,14 @@ OutStreamer->AddBlankLine(); } break; + case WebAssembly::EXTRACT_EXCEPTION_I32: + case WebAssembly::EXTRACT_EXCEPTION_I32_S: + // These are pseudo instructions that simulates popping values from stack. + // We print these only when we have -wasm-keep-registers on for assembly + // readability. 
+ if (!WasmKeepRegisters) + break; + LLVM_FALLTHROUGH; default: { WebAssemblyMCInstLower MCInstLowering(OutContext, *this); MCInst TmpInst; Index: lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp =================================================================== --- lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp +++ lib/Target/WebAssembly/WebAssemblyCFGStackify.cpp @@ -37,6 +37,7 @@ #include "llvm/MC/MCAsmInfo.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" +#include using namespace llvm; #define DEBUG_TYPE "wasm-cfg-stackify" @@ -110,11 +111,9 @@ static bool ExplicitlyBranchesTo(MachineBasicBlock *Pred, MachineBasicBlock *MBB) { for (MachineInstr &MI : Pred->terminators()) - // Even if a rethrow takes a BB argument, it is not a branch - if (!WebAssembly::isRethrow(MI)) - for (MachineOperand &MO : MI.explicit_operands()) - if (MO.isMBB() && MO.getMBB() == MBB) - return true; + for (MachineOperand &MO : MI.explicit_operands()) + if (MO.isMBB() && MO.getMBB() == MBB) + return true; return false; } @@ -217,12 +216,20 @@ // which reduces overall stack height. MachineBasicBlock *Header = nullptr; bool IsBranchedTo = false; + bool IsBrOnExn = false; + MachineInstr *BrOnExn = nullptr; int MBBNumber = MBB.getNumber(); for (MachineBasicBlock *Pred : MBB.predecessors()) { if (Pred->getNumber() < MBBNumber) { Header = Header ? MDT.findNearestCommonDominator(Header, Pred) : Pred; - if (ExplicitlyBranchesTo(Pred, &MBB)) + if (ExplicitlyBranchesTo(Pred, &MBB)) { IsBranchedTo = true; + if (Pred->getFirstTerminator()->getOpcode() == WebAssembly::BR_ON_EXN) { + IsBrOnExn = true; + assert(!BrOnExn && "There should be only one br_on_exn per block"); + BrOnExn = &*Pred->getFirstTerminator(); + } + } } } if (!Header) @@ -299,11 +306,27 @@ } // Add the BLOCK. + + // 'br_on_exn' extracts except_ref object and pushes variable number of values + // depending on its tag. 
For C++ exception, its a single i32 value, and the + // generated code will be in the form of: + // block i32 + // br_on_exn 0, $__cpp_exception + // rethrow + // end_block + WebAssembly::ExprType ReturnType = WebAssembly::ExprType::Void; + if (IsBrOnExn) { + const char *TagName = BrOnExn->getOperand(1).getSymbolName(); + if (std::strcmp(TagName, "__cpp_exception") != 0) + llvm_unreachable("Only C++ exception is supported"); + ReturnType = WebAssembly::ExprType::I32; + } + auto InsertPos = GetLatestInsertPos(Header, BeforeSet, AfterSet); MachineInstr *Begin = BuildMI(*Header, InsertPos, Header->findDebugLoc(InsertPos), TII.get(WebAssembly::BLOCK)) - .addImm(int64_t(WebAssembly::ExprType::Void)); + .addImm(int64_t(ReturnType)); // Decide where in Header to put the END_BLOCK. BeforeSet.clear(); @@ -416,11 +439,6 @@ if (!MBB.isEHPad()) return; - // catch_all terminate pad is grouped together with catch terminate pad and - // does not need a separate TRY and END_TRY marker. - if (WebAssembly::isCatchAllTerminatePad(MBB)) - return; - MachineFunction &MF = *MBB.getParent(); auto &MDT = getAnalysis(); const auto &TII = *MF.getSubtarget().getInstrInfo(); @@ -529,7 +547,8 @@ // throw. if (MBB.isPredecessor(Header)) { auto TermPos = Header->getFirstTerminator(); - if (TermPos == Header->end() || !WebAssembly::isRethrow(*TermPos)) { + if (TermPos == Header->end() || + TermPos->getOpcode() != WebAssembly::RETHROW) { for (const auto &MI : reverse(*Header)) { if (MI.isCall()) { AfterSet.insert(&MI); @@ -674,7 +693,6 @@ /// Insert LOOP/TRY/BLOCK markers at appropriate places. void WebAssemblyCFGStackify::placeMarkers(MachineFunction &MF) { - const MCAsmInfo *MCAI = MF.getTarget().getMCAsmInfo(); // We allocate one more than the number of blocks in the function to // accommodate for the possible fake block we may insert at the end. 
ScopeTops.resize(MF.getNumBlockIDs() + 1); @@ -682,6 +700,7 @@ for (auto &MBB : MF) placeLoopMarker(MBB); // Place the TRY for MBB if MBB is the EH pad of an exception. + const MCAsmInfo *MCAI = MF.getTarget().getMCAsmInfo(); if (MCAI->getExceptionHandlingType() == ExceptionHandling::Wasm && MF.getFunction().hasPersonalityFn()) for (auto &MBB : MF) @@ -692,12 +711,8 @@ } void WebAssemblyCFGStackify::rewriteDepthImmediates(MachineFunction &MF) { - const auto &TII = *MF.getSubtarget().getInstrInfo(); // Now rewrite references to basic blocks to be depth immediates. - // We need two stacks: one for normal scopes and the other for EH pad scopes. - // EH pad stack is used to rewrite depths in rethrow instructions. SmallVector Stack; - SmallVector EHPadStack; for (auto &MBB : reverse(MF)) { for (auto I = MBB.rbegin(), E = MBB.rend(); I != E; ++I) { MachineInstr &MI = *I; @@ -714,26 +729,6 @@ MBB.getNumber() && "Block/try marker should be balanced"); Stack.pop_back(); - EHPadStack.pop_back(); - break; - - case WebAssembly::CATCH_I32: - case WebAssembly::CATCH_I64: - case WebAssembly::CATCH_ALL: - // Currently the only case there are more than one catch for a try is - // for catch terminate pad, in the form of - // try - // catch - // call @__clang_call_terminate - // unreachable - // catch_all - // call @std::terminate - // unreachable - // end - // So we shouldn't push the current BB for the second catch_all block - // here. - if (!WebAssembly::isCatchAllTerminatePad(MBB)) - EHPadStack.push_back(&MBB); break; case WebAssembly::LOOP: @@ -750,23 +745,6 @@ Stack.push_back(EndToBegin[&MI]->getParent()); break; - case WebAssembly::RETHROW: { - // Rewrite MBB operands to be depth immediates. 
- unsigned EHPadDepth = GetDepth(EHPadStack, MI.getOperand(0).getMBB()); - MI.RemoveOperand(0); - MI.addOperand(MF, MachineOperand::CreateImm(EHPadDepth)); - break; - } - - case WebAssembly::RETHROW_TO_CALLER: { - MachineInstr *Rethrow = - BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(WebAssembly::RETHROW)) - .addImm(EHPadStack.size()); - MI.eraseFromParent(); - I = MachineBasicBlock::reverse_iterator(Rethrow); - break; - } - default: if (MI.isTerminator()) { // Rewrite MBB operands to be depth immediates. Index: lib/Target/WebAssembly/WebAssemblyEHRestoreStackPointer.cpp =================================================================== --- lib/Target/WebAssembly/WebAssemblyEHRestoreStackPointer.cpp +++ lib/Target/WebAssembly/WebAssemblyEHRestoreStackPointer.cpp @@ -77,7 +77,7 @@ // function uses the red zone, but that only happens with leaf functions, // and we don't restore __stack_pointer in leaf functions anyway. auto InsertPos = MBB.begin(); - if (WebAssembly::isCatch(*MBB.begin())) + if (MBB.begin()->getOpcode() == WebAssembly::CATCH) InsertPos++; FrameLowering->writeSPToGlobal(WebAssembly::SP32, MF, MBB, InsertPos, MBB.begin()->getDebugLoc()); Index: lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp =================================================================== --- lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp +++ lib/Target/WebAssembly/WebAssemblyExceptionInfo.cpp @@ -50,10 +50,6 @@ MachineBasicBlock *EHPad = DomNode->getBlock(); if (!EHPad->isEHPad()) continue; - // We group catch & catch-all terminate pads together, so skip the second - // one - if (WebAssembly::isCatchAllTerminatePad(*EHPad)) - continue; auto *WE = new WebAssemblyException(EHPad); discoverAndMapException(WE, MDT, MDF); Exceptions.push_back(WE); @@ -104,16 +100,6 @@ // Map blocks that belong to a catchpad / cleanuppad MachineBasicBlock *EHPad = WE->getEHPad(); - - // We group catch & catch-all terminate pads together within an exception - if 
(WebAssembly::isCatchTerminatePad(*EHPad)) { - assert(EHPad->succ_size() == 1 && - "Catch terminate pad has more than one successors"); - changeExceptionFor(EHPad, WE); - changeExceptionFor(*(EHPad->succ_begin()), WE); - return; - } - SmallVector WL; WL.push_back(EHPad); while (!WL.empty()) { Index: lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp =================================================================== --- lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp +++ lib/Target/WebAssembly/WebAssemblyFixFunctionBitcasts.cpp @@ -35,11 +35,6 @@ #define DEBUG_TYPE "wasm-fix-function-bitcasts" -static cl::opt - TemporaryWorkarounds("wasm-temporary-workarounds", - cl::desc("Apply certain temporary workarounds"), - cl::init(true), cl::Hidden); - namespace { class FixFunctionBitcasts final : public ModulePass { StringRef getPassName() const override { @@ -226,6 +221,17 @@ return Wrapper; } +// Test whether a main function with type FuncTy should be rewritten to have +// type MainTy. +bool ShouldFixMainFunction(FunctionType *FuncTy, FunctionType *MainTy) { + // Only fix the main function if it's the standard zero-arg form. That way, + // the standard cases will work as expected, and users will see signature + // mismatches from the linker for non-standard cases. + return FuncTy->getReturnType() == MainTy->getReturnType() && + FuncTy->getNumParams() == 0 && + !FuncTy->isVarArg(); +} + bool FixFunctionBitcasts::runOnModule(Module &M) { LLVM_DEBUG(dbgs() << "********** Fix Function Bitcasts **********\n"); @@ -242,14 +248,14 @@ // "int main(int argc, char *argv[])", create an artificial call with it // bitcasted to that type so that we generate a wrapper for it, so that // the C runtime can call it. 
- if (!TemporaryWorkarounds && !F.isDeclaration() && F.getName() == "main") { + if (F.getName() == "main") { Main = &F; LLVMContext &C = M.getContext(); Type *MainArgTys[] = {Type::getInt32Ty(C), PointerType::get(Type::getInt8PtrTy(C), 0)}; FunctionType *MainTy = FunctionType::get(Type::getInt32Ty(C), MainArgTys, /*isVarArg=*/false); - if (F.getFunctionType() != MainTy) { + if (ShouldFixMainFunction(F.getFunctionType(), MainTy)) { LLVM_DEBUG(dbgs() << "Found `main` function with incorrect type: " << *F.getFunctionType() << "\n"); Value *Args[] = {UndefValue::get(MainArgTys[0]), @@ -297,12 +303,18 @@ Main->setName("__original_main"); Function *MainWrapper = cast(CallMain->getCalledValue()->stripPointerCasts()); - MainWrapper->setName("main"); - MainWrapper->setLinkage(Main->getLinkage()); - MainWrapper->setVisibility(Main->getVisibility()); - Main->setLinkage(Function::PrivateLinkage); - Main->setVisibility(Function::DefaultVisibility); delete CallMain; + if (Main->isDeclaration()) { + // The wrapper is not needed in this case as we don't need to export + // it to anyone else. + MainWrapper->eraseFromParent(); + } else { + // Otherwise give the wrapper the same linkage as the original main + // function, so that it can be called from the same places. 
+ MainWrapper->setName("main"); + MainWrapper->setLinkage(Main->getLinkage()); + MainWrapper->setVisibility(Main->getVisibility()); + } } return true; Index: lib/Target/WebAssembly/WebAssemblyISelLowering.h =================================================================== --- lib/Target/WebAssembly/WebAssemblyISelLowering.h +++ lib/Target/WebAssembly/WebAssemblyISelLowering.h @@ -96,9 +96,9 @@ SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const; SDValue LowerCopyToReg(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerIntrinsic(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const; SDValue LowerAccessVectorElement(SDValue Op, SelectionDAG &DAG) const; SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const; Index: lib/Target/WebAssembly/WebAssemblyISelLowering.cpp =================================================================== --- lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -131,6 +131,13 @@ for (auto T : {MVT::v16i8, MVT::v8i16}) setOperationAction(Op, T, Legal); + // Custom lower BUILD_VECTORs to minimize number of replace_lanes + for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}) + setOperationAction(ISD::BUILD_VECTOR, T, Custom); + if (Subtarget->hasUnimplementedSIMD128()) + for (auto T : {MVT::v2i64, MVT::v2f64}) + setOperationAction(ISD::BUILD_VECTOR, T, Custom); + // We have custom shuffle lowering to expose the shuffle mask for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}) setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom); @@ -877,15 +884,17 @@ return LowerFRAMEADDR(Op, 
DAG); case ISD::CopyToReg: return LowerCopyToReg(Op, DAG); - case ISD::INTRINSIC_WO_CHAIN: - return LowerINTRINSIC_WO_CHAIN(Op, DAG); case ISD::EXTRACT_VECTOR_ELT: case ISD::INSERT_VECTOR_ELT: return LowerAccessVectorElement(Op, DAG); case ISD::INTRINSIC_VOID: - return LowerINTRINSIC_VOID(Op, DAG); + case ISD::INTRINSIC_WO_CHAIN: + case ISD::INTRINSIC_W_CHAIN: + return LowerIntrinsic(Op, DAG); case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG); + case ISD::BUILD_VECTOR: + return LowerBUILD_VECTOR(Op, DAG); case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); case ISD::SHL: @@ -1026,17 +1035,28 @@ MachinePointerInfo(SV), 0); } -SDValue -WebAssemblyTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, - SelectionDAG &DAG) const { - unsigned IntNo = cast(Op.getOperand(0))->getZExtValue(); +SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op, + SelectionDAG &DAG) const { + MachineFunction &MF = DAG.getMachineFunction(); + unsigned IntNo; + switch (Op.getOpcode()) { + case ISD::INTRINSIC_VOID: + case ISD::INTRINSIC_W_CHAIN: + IntNo = cast(Op.getOperand(1))->getZExtValue(); + break; + case ISD::INTRINSIC_WO_CHAIN: + IntNo = cast(Op.getOperand(0))->getZExtValue(); + break; + default: + llvm_unreachable("Invalid intrinsic"); + } SDLoc DL(Op); + switch (IntNo) { default: return {}; // Don't custom lower most intrinsics. 
case Intrinsic::wasm_lsda: { - MachineFunction &MF = DAG.getMachineFunction(); EVT VT = Op.getValueType(); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout()); @@ -1046,43 +1066,26 @@ return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT, DAG.getMCSymbol(S, PtrVT)); } - } -} - -SDValue -WebAssemblyTargetLowering::LowerINTRINSIC_VOID(SDValue Op, - SelectionDAG &DAG) const { - MachineFunction &MF = DAG.getMachineFunction(); - unsigned IntNo = cast(Op.getOperand(1))->getZExtValue(); - SDLoc DL(Op); - - switch (IntNo) { - default: - return {}; // Don't custom lower most intrinsics. case Intrinsic::wasm_throw: { + // We only support C++ exceptions for now int Tag = cast(Op.getOperand(2).getNode())->getZExtValue(); - switch (Tag) { - case CPP_EXCEPTION: { - const TargetLowering &TLI = DAG.getTargetLoweringInfo(); - MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout()); - const char *SymName = MF.createExternalSymbolName("__cpp_exception"); - SDValue SymNode = - DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, - DAG.getTargetExternalSymbol( - SymName, PtrVT, WebAssemblyII::MO_SYMBOL_EVENT)); - return DAG.getNode(WebAssemblyISD::THROW, DL, - MVT::Other, // outchain type - { - Op.getOperand(0), // inchain - SymNode, // exception symbol - Op.getOperand(3) // thrown value - }); - } - default: + if (Tag != CPP_EXCEPTION) llvm_unreachable("Invalid tag!"); - } - break; + const TargetLowering &TLI = DAG.getTargetLoweringInfo(); + MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout()); + const char *SymName = MF.createExternalSymbolName("__cpp_exception"); + SDValue SymNode = + DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT, + DAG.getTargetExternalSymbol( + SymName, PtrVT, WebAssemblyII::MO_SYMBOL_EVENT)); + return DAG.getNode(WebAssemblyISD::THROW, DL, + MVT::Other, // outchain type + { + Op.getOperand(0), // inchain + SymNode, // exception symbol + Op.getOperand(3) // thrown value + }); } } } @@ -1103,6 +1106,107 @@ return SDValue(); } 
+SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op, + SelectionDAG &DAG) const { + SDLoc DL(Op); + const EVT VecT = Op.getValueType(); + const EVT LaneT = Op.getOperand(0).getValueType(); + const size_t Lanes = Op.getNumOperands(); + auto IsConstant = [](const SDValue &V) { + return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP; + }; + + // Find the most common operand, which is approximately the best to splat + using Entry = std::pair; + SmallVector ValueCounts; + size_t NumConst = 0, NumDynamic = 0; + for (const SDValue &Lane : Op->op_values()) { + if (Lane.isUndef()) { + continue; + } else if (IsConstant(Lane)) { + NumConst++; + } else { + NumDynamic++; + } + auto CountIt = std::find_if(ValueCounts.begin(), ValueCounts.end(), + [&Lane](Entry A) { return A.first == Lane; }); + if (CountIt == ValueCounts.end()) { + ValueCounts.emplace_back(Lane, 1); + } else { + CountIt->second++; + } + } + auto CommonIt = + std::max_element(ValueCounts.begin(), ValueCounts.end(), + [](Entry A, Entry B) { return A.second < B.second; }); + assert(CommonIt != ValueCounts.end() && "Unexpected all-undef build_vector"); + SDValue SplatValue = CommonIt->first; + size_t NumCommon = CommonIt->second; + + // If v128.const is available, consider using it instead of a splat + if (Subtarget->hasUnimplementedSIMD128()) { + // {i32,i64,f32,f64}.const opcode, and value + const size_t ConstBytes = 1 + std::max(size_t(4), 16 / Lanes); + // SIMD prefix and opcode + const size_t SplatBytes = 2; + const size_t SplatConstBytes = SplatBytes + ConstBytes; + // SIMD prefix, opcode, and lane index + const size_t ReplaceBytes = 3; + const size_t ReplaceConstBytes = ReplaceBytes + ConstBytes; + // SIMD prefix, v128.const opcode, and 128-bit value + const size_t VecConstBytes = 18; + // Initial v128.const and a replace_lane for each non-const operand + const size_t ConstInitBytes = VecConstBytes + NumDynamic * ReplaceBytes; + // Initial splat and all necessary replace_lanes 
+ const size_t SplatInitBytes = + IsConstant(SplatValue) + // Initial constant splat + ? (SplatConstBytes + + // Constant replace_lanes + (NumConst - NumCommon) * ReplaceConstBytes + + // Dynamic replace_lanes + (NumDynamic * ReplaceBytes)) + // Initial dynamic splat + : (SplatBytes + + // Constant replace_lanes + (NumConst * ReplaceConstBytes) + + // Dynamic replace_lanes + (NumDynamic - NumCommon) * ReplaceBytes); + if (ConstInitBytes < SplatInitBytes) { + // Create build_vector that will lower to initial v128.const + SmallVector ConstLanes; + for (const SDValue &Lane : Op->op_values()) { + if (IsConstant(Lane)) { + ConstLanes.push_back(Lane); + } else if (LaneT.isFloatingPoint()) { + ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT)); + } else { + ConstLanes.push_back(DAG.getConstant(0, DL, LaneT)); + } + } + SDValue Result = DAG.getBuildVector(VecT, DL, ConstLanes); + // Add replace_lane instructions for non-const lanes + for (size_t I = 0; I < Lanes; ++I) { + const SDValue &Lane = Op->getOperand(I); + if (!Lane.isUndef() && !IsConstant(Lane)) + Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane, + DAG.getConstant(I, DL, MVT::i32)); + } + return Result; + } + } + // Use a splat for the initial vector + SDValue Result = DAG.getSplatBuildVector(VecT, DL, SplatValue); + // Add replace_lane instructions for other values + for (size_t I = 0; I < Lanes; ++I) { + const SDValue &Lane = Op->getOperand(I); + if (Lane != SplatValue) + Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane, + DAG.getConstant(I, DL, MVT::i32)); + } + return Result; +} + SDValue WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { Index: lib/Target/WebAssembly/WebAssemblyInstrControl.td =================================================================== --- lib/Target/WebAssembly/WebAssemblyInstrControl.td +++ lib/Target/WebAssembly/WebAssemblyInstrControl.td @@ -141,23 +141,11 @@ // Throwing an exception: throw / rethrow let 
isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in { -defm THROW_I32 : I<(outs), (ins event_op:$tag, I32:$val), - (outs), (ins event_op:$tag), - [(WebAssemblythrow (WebAssemblywrapper texternalsym:$tag), - I32:$val)], - "throw \t$tag, $val", "throw \t$tag", - 0x08>; -defm THROW_I64 : I<(outs), (ins event_op:$tag, I64:$val), - (outs), (ins event_op:$tag), - [(WebAssemblythrow (WebAssemblywrapper texternalsym:$tag), - I64:$val)], - "throw \t$tag, $val", "throw \t$tag", - 0x08>; -defm RETHROW : NRI<(outs), (ins bb_op:$dst), [], "rethrow \t$dst", 0x09>; -let isCodeGenOnly = 1 in -// This is used when the destination for rethrow is the caller function. This -// will be converted to a rethrow in CFGStackify. -defm RETHROW_TO_CALLER : NRI<(outs), (ins), [], "rethrow">; +defm THROW : I<(outs), (ins event_op:$tag, variable_ops), + (outs), (ins event_op:$tag), + [(WebAssemblythrow (WebAssemblywrapper texternalsym:$tag))], + "throw \t$tag", "throw \t$tag", 0x08>; +defm RETHROW : NRI<(outs), (ins), [(int_wasm_rethrow)], "rethrow", 0x09>; } // isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 // Region within which an exception is caught: try / end_try @@ -166,24 +154,33 @@ defm END_TRY : NRI<(outs), (ins), [], "end_try", 0x0b>; } // Uses = [VALUE_STACK], Defs = [VALUE_STACK] -// Catching an exception: catch / catch_all -let hasCtrlDep = 1, hasSideEffects = 1 in { -defm CATCH_I32 : I<(outs I32:$dst), (ins i32imm:$tag), - (outs), (ins i32imm:$tag), - [(set I32:$dst, (int_wasm_catch imm:$tag))], - "i32.catch \t$dst, $tag", "i32.catch \t$tag", 0x07>; -defm CATCH_I64 : I<(outs I64:$dst), (ins i32imm:$tag), - (outs), (ins i32imm:$tag), - [(set I64:$dst, (int_wasm_catch imm:$tag))], - "i64.catch \t$dst, $tag", "i64.catch \t$tag", 0x07>; -defm CATCH_ALL : NRI<(outs), (ins), [], "catch_all", 0x05>; -} +// Catching an exception: catch / extract_exception +let hasCtrlDep = 1, hasSideEffects = 1 in +defm CATCH : I<(outs EXCEPT_REF:$dst), (ins), (outs), (ins), [], + "catch \t$dst", "catch", 
0x07>; + +// Querying / extracting exception: br_on_exn +// br_on_exn queries an except_ref to see if it matches the corresponding +// exception tag index. If true it branches to the given label and pushes the +// corresponding argument values of the exception onto the stack. +let isBranch = 1, isTerminator = 1, hasCtrlDep = 1 in +defm BR_ON_EXN : I<(outs), (ins bb_op:$dst, event_op:$tag, EXCEPT_REF:$exn), + (outs), (ins bb_op:$dst, event_op:$tag), [], + "br_on_exn \t$dst, $tag, $exn", "br_on_exn \t$dst, $tag", + 0x0a>; +// This is a pseudo instruction that simulates popping a value from stack, which +// has been pushed by br_on_exn +let isCodeGenOnly = 1, hasSideEffects = 1 in +defm EXTRACT_EXCEPTION_I32 : NRI<(outs I32:$dst), (ins), + [(set I32:$dst, (int_wasm_extract_exception))], + "extract_exception\t$dst">; // Pseudo instructions: cleanupret / catchret let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1, - isCodeGenOnly = 1, isEHScopeReturn = 1 in { - defm CLEANUPRET : NRI<(outs), (ins), [(cleanupret)], "", 0>; + isPseudo = 1, isEHScopeReturn = 1 in { + defm CLEANUPRET : NRI<(outs), (ins), [(cleanupret)], "cleanupret", 0>; defm CATCHRET : NRI<(outs), (ins bb_op:$dst, bb_op:$from), - [(catchret bb:$dst, bb:$from)], "", 0>; -} + [(catchret bb:$dst, bb:$from)], "catchret", 0>; +} // isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1, + // isPseudo = 1, isEHScopeReturn = 1 } Index: lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp =================================================================== --- lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp +++ lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp @@ -134,6 +134,17 @@ else FBB = MI.getOperand(0).getMBB(); break; + case WebAssembly::BR_ON_EXN: + if (HaveCond) + return true; + // If we're running after CFGStackify, we can't optimize further. 
+ if (!MI.getOperand(0).isMBB()) + return true; + Cond.push_back(MachineOperand::CreateImm(true)); + Cond.push_back(MI.getOperand(2)); + TBB = MI.getOperand(0).getMBB(); + HaveCond = true; + break; } if (MI.isBarrier()) break; @@ -179,9 +190,22 @@ assert(Cond.size() == 2 && "Expected a flag and a successor block"); + MachineFunction &MF = *MBB.getParent(); + auto &MRI = MF.getRegInfo(); + bool IsBrOnExn = Cond[1].isReg() && MRI.getRegClass(Cond[1].getReg()) == + &WebAssembly::EXCEPT_REFRegClass; + if (Cond[0].getImm()) { - BuildMI(&MBB, DL, get(WebAssembly::BR_IF)).addMBB(TBB).add(Cond[1]); + if (IsBrOnExn) { + const char *CPPExnSymbol = MF.createExternalSymbolName("__cpp_exception"); + BuildMI(&MBB, DL, get(WebAssembly::BR_ON_EXN)) + .addMBB(TBB) + .addExternalSymbol(CPPExnSymbol, WebAssemblyII::MO_SYMBOL_EVENT) + .add(Cond[1]); + } else + BuildMI(&MBB, DL, get(WebAssembly::BR_IF)).addMBB(TBB).add(Cond[1]); } else { + assert(!IsBrOnExn && "br_on_exn does not have a reversed condition"); BuildMI(&MBB, DL, get(WebAssembly::BR_UNLESS)).addMBB(TBB).add(Cond[1]); } if (!FBB) @@ -193,7 +217,15 @@ bool WebAssemblyInstrInfo::reverseBranchCondition( SmallVectorImpl &Cond) const { - assert(Cond.size() == 2 && "Expected a flag and a successor block"); + assert(Cond.size() == 2 && "Expected a flag and a condition expression"); + + // br_on_exn's condition cannot be reversed + MachineFunction &MF = *Cond[1].getParent()->getParent()->getParent(); + auto &MRI = MF.getRegInfo(); + if (Cond[1].isReg() && + MRI.getRegClass(Cond[1].getReg()) == &WebAssembly::EXCEPT_REFRegClass) + return true; + Cond.front() = MachineOperand::CreateImm(!Cond.front().getImm()); return false; } Index: lib/Target/WebAssembly/WebAssemblyInstrInfo.td =================================================================== --- lib/Target/WebAssembly/WebAssemblyInstrInfo.td +++ lib/Target/WebAssembly/WebAssemblyInstrInfo.td @@ -66,7 +66,7 @@ def SDT_WebAssemblyReturn : SDTypeProfile<0, -1, []>; def 
SDT_WebAssemblyWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>; -def SDT_WebAssemblyThrow : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>; +def SDT_WebAssemblyThrow : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>; //===----------------------------------------------------------------------===// // WebAssembly-specific DAG Nodes. @@ -94,7 +94,7 @@ def WebAssemblywrapper : SDNode<"WebAssemblyISD::Wrapper", SDT_WebAssemblyWrapper>; def WebAssemblythrow : SDNode<"WebAssemblyISD::THROW", SDT_WebAssemblyThrow, - [SDNPHasChain]>; + [SDNPHasChain, SDNPVariadic]>; //===----------------------------------------------------------------------===// // WebAssembly-specific Operands. Index: lib/Target/WebAssembly/WebAssemblyInstrSIMD.td =================================================================== --- lib/Target/WebAssembly/WebAssemblyInstrSIMD.td +++ lib/Target/WebAssembly/WebAssemblyInstrSIMD.td @@ -230,6 +230,19 @@ defm "" : Splat; defm "" : Splat; +// scalar_to_vector leaves high lanes undefined, so can be a splat +class ScalarSplatPat : + Pat<(vec_t (scalar_to_vector (lane_t reg_t:$x))), + (!cast("SPLAT_"#vec_t) reg_t:$x)>; + +def : ScalarSplatPat; +def : ScalarSplatPat; +def : ScalarSplatPat; +def : ScalarSplatPat; +def : ScalarSplatPat; +def : ScalarSplatPat; + //===----------------------------------------------------------------------===// // Accessing lanes //===----------------------------------------------------------------------===// @@ -346,118 +359,6 @@ def : Pat<(vector_insert (v2f64 V128:$vec), F64:$x, undef), (REPLACE_LANE_v2f64 V128:$vec, 0, F64:$x)>; -// Arbitrary other BUILD_VECTOR patterns -def : Pat<(v16i8 (build_vector - (i32 I32:$x0), (i32 I32:$x1), (i32 I32:$x2), (i32 I32:$x3), - (i32 I32:$x4), (i32 I32:$x5), (i32 I32:$x6), (i32 I32:$x7), - (i32 I32:$x8), (i32 I32:$x9), (i32 I32:$x10), (i32 I32:$x11), - (i32 I32:$x12), (i32 I32:$x13), (i32 I32:$x14), (i32 I32:$x15) - )), - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 
(REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (REPLACE_LANE_v16i8 - (v16i8 (SPLAT_v16i8 (i32 I32:$x0))), - 1, I32:$x1 - )), - 2, I32:$x2 - )), - 3, I32:$x3 - )), - 4, I32:$x4 - )), - 5, I32:$x5 - )), - 6, I32:$x6 - )), - 7, I32:$x7 - )), - 8, I32:$x8 - )), - 9, I32:$x9 - )), - 10, I32:$x10 - )), - 11, I32:$x11 - )), - 12, I32:$x12 - )), - 13, I32:$x13 - )), - 14, I32:$x14 - )), - 15, I32:$x15 - ))>; -def : Pat<(v8i16 (build_vector - (i32 I32:$x0), (i32 I32:$x1), (i32 I32:$x2), (i32 I32:$x3), - (i32 I32:$x4), (i32 I32:$x5), (i32 I32:$x6), (i32 I32:$x7) - )), - (v8i16 (REPLACE_LANE_v8i16 - (v8i16 (REPLACE_LANE_v8i16 - (v8i16 (REPLACE_LANE_v8i16 - (v8i16 (REPLACE_LANE_v8i16 - (v8i16 (REPLACE_LANE_v8i16 - (v8i16 (REPLACE_LANE_v8i16 - (v8i16 (REPLACE_LANE_v8i16 - (v8i16 (SPLAT_v8i16 (i32 I32:$x0))), - 1, I32:$x1 - )), - 2, I32:$x2 - )), - 3, I32:$x3 - )), - 4, I32:$x4 - )), - 5, I32:$x5 - )), - 6, I32:$x6 - )), - 7, I32:$x7 - ))>; -def : Pat<(v4i32 (build_vector - (i32 I32:$x0), (i32 I32:$x1), (i32 I32:$x2), (i32 I32:$x3) - )), - (v4i32 (REPLACE_LANE_v4i32 - (v4i32 (REPLACE_LANE_v4i32 - (v4i32 (REPLACE_LANE_v4i32 - (v4i32 (SPLAT_v4i32 (i32 I32:$x0))), - 1, I32:$x1 - )), - 2, I32:$x2 - )), - 3, I32:$x3 - ))>; -def : Pat<(v2i64 (build_vector (i64 I64:$x0), (i64 I64:$x1))), - (v2i64 (REPLACE_LANE_v2i64 - (v2i64 (SPLAT_v2i64 (i64 I64:$x0))), 1, I64:$x1))>; -def : Pat<(v4f32 (build_vector - (f32 F32:$x0), (f32 F32:$x1), (f32 F32:$x2), (f32 F32:$x3) - )), - (v4f32 (REPLACE_LANE_v4f32 - (v4f32 (REPLACE_LANE_v4f32 - (v4f32 (REPLACE_LANE_v4f32 - (v4f32 (SPLAT_v4f32 (f32 F32:$x0))), - 1, F32:$x1 - )), - 2, F32:$x2 - )), - 3, F32:$x3 - ))>; -def : Pat<(v2f64 (build_vector (f64 
F64:$x0), (f64 F64:$x1))), - (v2f64 (REPLACE_LANE_v2f64 - (v2f64 (SPLAT_v2f64 (f64 F64:$x0))), 1, F64:$x1))>; - //===----------------------------------------------------------------------===// // Comparisons //===----------------------------------------------------------------------===// Index: lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp =================================================================== --- lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp +++ lib/Target/WebAssembly/WebAssemblyLateEHPrepare.cpp @@ -15,6 +15,7 @@ #include "WebAssembly.h" #include "WebAssemblySubtarget.h" #include "WebAssemblyUtilities.h" +#include "llvm/ADT/SmallSet.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/WasmEHFuncInfo.h" #include "llvm/MC/MCAsmInfo.h" @@ -25,19 +26,14 @@ namespace { class WebAssemblyLateEHPrepare final : public MachineFunctionPass { StringRef getPassName() const override { - return "WebAssembly Prepare Exception"; + return "WebAssembly Late Prepare Exception"; } bool runOnMachineFunction(MachineFunction &MF) override; - bool removeUnnecessaryUnreachables(MachineFunction &MF); bool replaceFuncletReturns(MachineFunction &MF); - bool hoistCatches(MachineFunction &MF); - bool addCatchAlls(MachineFunction &MF); - bool addRethrows(MachineFunction &MF); - bool ensureSingleBBTermPads(MachineFunction &MF); - bool mergeTerminatePads(MachineFunction &MF); - bool addCatchAllTerminatePads(MachineFunction &MF); + bool addCatches(MachineFunction &MF); + bool addExceptionExtraction(MachineFunction &MF); public: static char ID; // Pass identification, replacement for typeid @@ -112,15 +108,11 @@ bool Changed = false; Changed |= removeUnnecessaryUnreachables(MF); - Changed |= addRethrows(MF); if (!MF.getFunction().hasPersonalityFn()) return Changed; Changed |= replaceFuncletReturns(MF); - Changed |= hoistCatches(MF); - Changed |= addCatchAlls(MF); - Changed |= ensureSingleBBTermPads(MF); - Changed |= mergeTerminatePads(MF); - Changed |= 
addCatchAllTerminatePads(MF); + Changed |= addCatches(MF); + Changed |= addExceptionExtraction(MF); return Changed; } @@ -129,7 +121,8 @@ bool Changed = false; for (auto &MBB : MF) { for (auto &MI : MBB) { - if (!WebAssembly::isThrow(MI)) + if (MI.getOpcode() != WebAssembly::THROW && + MI.getOpcode() != WebAssembly::RETHROW) continue; Changed = true; @@ -152,7 +145,6 @@ bool WebAssemblyLateEHPrepare::replaceFuncletReturns(MachineFunction &MF) { bool Changed = false; const auto &TII = *MF.getSubtarget().getInstrInfo(); - auto *EHInfo = MF.getWasmEHFuncInfo(); for (auto &MBB : MF) { auto Pos = MBB.getFirstTerminator(); @@ -173,13 +165,7 @@ } case WebAssembly::CLEANUPRET: { // Replace a cleanupret with a rethrow - if (EHInfo->hasThrowUnwindDest(&MBB)) - BuildMI(MBB, TI, TI->getDebugLoc(), TII.get(WebAssembly::RETHROW)) - .addMBB(EHInfo->getThrowUnwindDest(&MBB)); - else - BuildMI(MBB, TI, TI->getDebugLoc(), - TII.get(WebAssembly::RETHROW_TO_CALLER)); - + BuildMI(MBB, TI, TI->getDebugLoc(), TII.get(WebAssembly::RETHROW)); TI->eraseFromParent(); Changed = true; break; @@ -189,233 +175,158 @@ return Changed; } -// Hoist catch instructions to the beginning of their matching EH pad BBs in -// case, -// (1) catch instruction is not the first instruction in EH pad. -// ehpad: -// some_other_instruction -// ... -// %exn = catch 0 -// (2) catch instruction is in a non-EH pad BB. 
For example, -// ehpad: -// br bb0 -// bb0: -// %exn = catch 0 -bool WebAssemblyLateEHPrepare::hoistCatches(MachineFunction &MF) { - bool Changed = false; - SmallVector Catches; - for (auto &MBB : MF) - for (auto &MI : MBB) - if (WebAssembly::isCatch(MI)) - Catches.push_back(&MI); - - for (auto *Catch : Catches) { - MachineBasicBlock *EHPad = getMatchingEHPad(Catch); - assert(EHPad && "No matching EH pad for catch"); - if (EHPad->begin() == Catch) - continue; - Changed = true; - EHPad->insert(EHPad->begin(), Catch->removeFromParent()); - } - return Changed; -} - -// Add catch_all to beginning of cleanup pads. -bool WebAssemblyLateEHPrepare::addCatchAlls(MachineFunction &MF) { +// Add catch instruction to beginning of catchpads and cleanuppads. +bool WebAssemblyLateEHPrepare::addCatches(MachineFunction &MF) { bool Changed = false; const auto &TII = *MF.getSubtarget().getInstrInfo(); - + MachineRegisterInfo &MRI = MF.getRegInfo(); for (auto &MBB : MF) { - if (!MBB.isEHPad()) - continue; - // This runs after hoistCatches(), so we assume that if there is a catch, - // that should be the first instruction in an EH pad. - if (!WebAssembly::isCatch(*MBB.begin())) { + if (MBB.isEHPad()) { Changed = true; + unsigned DstReg = + MRI.createVirtualRegister(&WebAssembly::EXCEPT_REFRegClass); BuildMI(MBB, MBB.begin(), MBB.begin()->getDebugLoc(), - TII.get(WebAssembly::CATCH_ALL)); + TII.get(WebAssembly::CATCH), DstReg); } } return Changed; } -// Add a 'rethrow' instruction after __cxa_rethrow() call -bool WebAssemblyLateEHPrepare::addRethrows(MachineFunction &MF) { - bool Changed = false; +// Wasm uses 'br_on_exn' instruction to check the tag of an exception. It takes +// except_ref type object returned by 'catch', and branches to the destination +// if it matches a given tag. We currently use __cpp_exception symbol to +// represent the tag for all C++ exceptions. +// +// block $l (result i32) +// ... 
+// ;; except_ref $e is on the stack at this point +// br_on_exn $l $e ;; branch to $l with $e's arguments +// ... +// end +// ;; Here we expect the extracted values are on top of the wasm value stack +// ... Handle exception using values ... +// +// br_on_exn takes an except_ref object and branches if it matches the given +// tag. There can be multiple br_on_exn instructions if we want to match for +// another tag, but for now we only test for __cpp_exception tag, and if it does +// not match, i.e., it is a foreign exception, we rethrow it. +// +// In the destination BB that's the target of br_on_exn, extracted exception +// values (in C++'s case a single i32, which represents an exception pointer) +// are placed on top of the wasm stack. Because we can't model wasm stack in +// LLVM instruction, we use 'extract_exception' pseudo instruction to retrieve +// it. The pseudo instruction will be deleted later. +bool WebAssemblyLateEHPrepare::addExceptionExtraction(MachineFunction &MF) { const auto &TII = *MF.getSubtarget().getInstrInfo(); auto *EHInfo = MF.getWasmEHFuncInfo(); - - for (auto &MBB : MF) + SmallVector ExtractInstrs; + for (auto &MBB : MF) { for (auto &MI : MBB) { - // Check if it is a call to __cxa_rethrow() - if (!MI.isCall()) - continue; - MachineOperand &CalleeOp = MI.getOperand(0); - if (!CalleeOp.isGlobal() || - CalleeOp.getGlobal()->getName() != WebAssembly::CxaRethrowFn) - continue; - - // Now we have __cxa_rethrow() call - Changed = true; - auto InsertPt = std::next(MachineBasicBlock::iterator(MI)); - while (InsertPt != MBB.end() && InsertPt->isLabel()) // Skip EH_LABELs - ++InsertPt; - MachineInstr *Rethrow = nullptr; - if (EHInfo->hasThrowUnwindDest(&MBB)) - Rethrow = BuildMI(MBB, InsertPt, MI.getDebugLoc(), - TII.get(WebAssembly::RETHROW)) - .addMBB(EHInfo->getThrowUnwindDest(&MBB)); - else - Rethrow = BuildMI(MBB, InsertPt, MI.getDebugLoc(), - TII.get(WebAssembly::RETHROW_TO_CALLER)); - - // Because __cxa_rethrow does not return, the 
instruction after the - // rethrow should be an unreachable or a branch to another BB that should - // eventually lead to an unreachable. Delete it because rethrow itself is - // a terminator, and also delete non-EH pad successors if any. - MBB.erase(std::next(MachineBasicBlock::iterator(Rethrow)), MBB.end()); - SmallVector NonPadSuccessors; - for (auto *Succ : MBB.successors()) - if (!Succ->isEHPad()) - NonPadSuccessors.push_back(Succ); - for (auto *Succ : NonPadSuccessors) - MBB.removeSuccessor(Succ); - eraseDeadBBsAndChildren(NonPadSuccessors); + if (MI.getOpcode() == WebAssembly::EXTRACT_EXCEPTION_I32) { + if (MI.getOperand(0).isDead()) + MI.eraseFromParent(); + else + ExtractInstrs.push_back(&MI); + } } - return Changed; -} - -// Terminate pads are an single-BB EH pad in the form of -// termpad: -// %exn = catch 0 -// call @__clang_call_terminate(%exn) -// unreachable -// (There can be local.set and local.gets before the call if we didn't run -// RegStackify) -// But code transformations can change or add more control flow, so the call to -// __clang_call_terminate() function may not be in the original EH pad anymore. -// This ensures every terminate pad is a single BB in the form illustrated -// above. -bool WebAssemblyLateEHPrepare::ensureSingleBBTermPads(MachineFunction &MF) { - const auto &TII = *MF.getSubtarget().getInstrInfo(); + } + if (ExtractInstrs.empty()) + return false; - // Find calls to __clang_call_terminate() - SmallVector ClangCallTerminateCalls; - for (auto &MBB : MF) - for (auto &MI : MBB) + // Find terminate pads. 
+ SmallSet TerminatePads; + for (auto &MBB : MF) { + for (auto &MI : MBB) { if (MI.isCall()) { const MachineOperand &CalleeOp = MI.getOperand(0); if (CalleeOp.isGlobal() && CalleeOp.getGlobal()->getName() == WebAssembly::ClangCallTerminateFn) - ClangCallTerminateCalls.push_back(&MI); + TerminatePads.insert(getMatchingEHPad(&MI)); } - - bool Changed = false; - for (auto *Call : ClangCallTerminateCalls) { - MachineBasicBlock *EHPad = getMatchingEHPad(Call); - assert(EHPad && "No matching EH pad for catch"); - - // If it is already the form we want, skip it - if (Call->getParent() == EHPad && - Call->getNextNode()->getOpcode() == WebAssembly::UNREACHABLE) - continue; - - // In case the __clang_call_terminate() call is not in its matching EH pad, - // move the call to the end of EH pad and add an unreachable instruction - // after that. Delete all successors and their children if any, because here - // the program terminates. - Changed = true; - MachineInstr *Catch = &*EHPad->begin(); - // This runs after hoistCatches(), so catch instruction should be at the top - assert(WebAssembly::isCatch(*Catch)); - // Takes the result register of the catch instruction as argument. There may - // have been some other local.set/local.gets in between, but at this point - // we don't care. - Call->getOperand(1).setReg(Catch->getOperand(0).getReg()); - auto InsertPos = std::next(MachineBasicBlock::iterator(Catch)); - EHPad->insert(InsertPos, Call->removeFromParent()); - BuildMI(*EHPad, InsertPos, Call->getDebugLoc(), - TII.get(WebAssembly::UNREACHABLE)); - EHPad->erase(InsertPos, EHPad->end()); - SmallVector Succs(EHPad->succ_begin(), - EHPad->succ_end()); - for (auto *Succ : Succs) - EHPad->removeSuccessor(Succ); - eraseDeadBBsAndChildren(Succs); + } } - return Changed; -} -// In case there are multiple terminate pads, merge them into one for code size. -// This runs after ensureSingleBBTermPads() and assumes every terminate pad is a -// single BB. 
-// In principle this violates EH scope relationship because it can merge -// multiple inner EH scopes, each of which is in different outer EH scope. But -// getEHScopeMembership() function will not be called after this, so it is fine. -bool WebAssemblyLateEHPrepare::mergeTerminatePads(MachineFunction &MF) { - SmallVector TermPads; - for (auto &MBB : MF) - if (WebAssembly::isCatchTerminatePad(MBB)) - TermPads.push_back(&MBB); - if (TermPads.empty()) - return false; - - MachineBasicBlock *UniqueTermPad = TermPads.front(); - for (auto *TermPad : - llvm::make_range(std::next(TermPads.begin()), TermPads.end())) { - SmallVector Preds(TermPad->pred_begin(), - TermPad->pred_end()); - for (auto *Pred : Preds) - Pred->replaceSuccessor(TermPad, UniqueTermPad); - TermPad->eraseFromParent(); + for (auto *Extract : ExtractInstrs) { + MachineBasicBlock *EHPad = getMatchingEHPad(Extract); + assert(EHPad && "No matching EH pad for extract_exception"); + MachineInstr *Catch = &*EHPad->begin(); + if (Catch->getNextNode() != Extract) + EHPad->insert(Catch->getNextNode(), Extract->removeFromParent()); + + // - Before: + // ehpad: + // %exnref:except_ref = catch + // %exn:i32 = extract_exception + // ... use exn ... + // + // - After: + // ehpad: + // %exnref:except_ref = catch + // br_on_exn %thenbb, $__cpp_exception, %exnref + // br %elsebb + // elsebb: + // rethrow + // thenbb: + // %exn:i32 = extract_exception + // ... use exn ... 
+ unsigned ExnRefReg = Catch->getOperand(0).getReg(); + auto *ThenMBB = MF.CreateMachineBasicBlock(); + auto *ElseMBB = MF.CreateMachineBasicBlock(); + MF.insert(std::next(MachineFunction::iterator(EHPad)), ElseMBB); + MF.insert(std::next(MachineFunction::iterator(ElseMBB)), ThenMBB); + ThenMBB->splice(ThenMBB->end(), EHPad, Extract, EHPad->end()); + ThenMBB->transferSuccessors(EHPad); + EHPad->addSuccessor(ThenMBB); + EHPad->addSuccessor(ElseMBB); + + DebugLoc DL = Extract->getDebugLoc(); + const char *CPPExnSymbol = MF.createExternalSymbolName("__cpp_exception"); + BuildMI(EHPad, DL, TII.get(WebAssembly::BR_ON_EXN)) + .addMBB(ThenMBB) + .addExternalSymbol(CPPExnSymbol, WebAssemblyII::MO_SYMBOL_EVENT) + .addReg(ExnRefReg); + BuildMI(EHPad, DL, TII.get(WebAssembly::BR)).addMBB(ElseMBB); + + // When this is a terminate pad with __clang_call_terminate() call, we don't + // rethrow it anymore and call __clang_call_terminate() with a nullptr + // argument, which will call std::terminate(). + // + // - Before: + // ehpad: + // %exnref:except_ref = catch + // %exn:i32 = extract_exception + // call @__clang_call_terminate(%exn) + // unreachable + // + // - After: + // ehpad: + // %exnref:except_ref = catch + // br_on_exn %thenbb, $__cpp_exception, %exnref + // br %elsebb + // elsebb: + // call @__clang_call_terminate(0) + // unreachable + // thenbb: + // %exn:i32 = extract_exception + // call @__clang_call_terminate(%exn) + // unreachable + if (TerminatePads.count(EHPad)) { + Function *ClangCallTerminateFn = + MF.getFunction().getParent()->getFunction( + WebAssembly::ClangCallTerminateFn); + assert(ClangCallTerminateFn && + "There is no __clang_call_terminate() function"); + BuildMI(ElseMBB, DL, TII.get(WebAssembly::CALL_VOID)) + .addGlobalAddress(ClangCallTerminateFn) + .addImm(0); + BuildMI(ElseMBB, DL, TII.get(WebAssembly::UNREACHABLE)); + + } else { + BuildMI(ElseMBB, DL, TII.get(WebAssembly::RETHROW)); + if (EHInfo->hasEHPadUnwindDest(EHPad)) + 
EHInfo->setThrowUnwindDest(ElseMBB, EHInfo->getEHPadUnwindDest(EHPad)); + } } - return true; -} -// Terminate pads are cleanup pads, so they should start with a 'catch_all' -// instruction. But in the Itanium model, when we have a C++ exception object, -// we pass them to __clang_call_terminate function, which calls __cxa_end_catch -// with the passed exception pointer and then std::terminate. This is the reason -// that terminate pads are generated with not a catch_all but a catch -// instruction in clang and earlier llvm passes. Here we append a terminate pad -// with a catch_all after each existing terminate pad so we can also catch -// foreign exceptions. For every terminate pad: -// %exn = catch 0 -// call @__clang_call_terminate(%exn) -// unreachable -// We append this BB right after that: -// catch_all -// call @std::terminate() -// unreachable -bool WebAssemblyLateEHPrepare::addCatchAllTerminatePads(MachineFunction &MF) { - const auto &TII = *MF.getSubtarget().getInstrInfo(); - SmallVector TermPads; - for (auto &MBB : MF) - if (WebAssembly::isCatchTerminatePad(MBB)) - TermPads.push_back(&MBB); - if (TermPads.empty()) - return false; - - Function *StdTerminateFn = - MF.getFunction().getParent()->getFunction(WebAssembly::StdTerminateFn); - assert(StdTerminateFn && "There is no std::terminate() function"); - for (auto *CatchTermPad : TermPads) { - DebugLoc DL = CatchTermPad->findDebugLoc(CatchTermPad->begin()); - auto *CatchAllTermPad = MF.CreateMachineBasicBlock(); - MF.insert(std::next(MachineFunction::iterator(CatchTermPad)), - CatchAllTermPad); - CatchAllTermPad->setIsEHPad(); - BuildMI(CatchAllTermPad, DL, TII.get(WebAssembly::CATCH_ALL)); - BuildMI(CatchAllTermPad, DL, TII.get(WebAssembly::CALL_VOID)) - .addGlobalAddress(StdTerminateFn); - BuildMI(CatchAllTermPad, DL, TII.get(WebAssembly::UNREACHABLE)); - - // Actually this CatchAllTermPad (new terminate pad with a catch_all) is not - // a successor of an existing terminate pad. 
CatchAllTermPad should have all - // predecessors CatchTermPad has instead. This is a hack to force - // CatchAllTermPad be always sorted right after CatchTermPad; the correct - // predecessor-successor relationships will be restored in CFGStackify pass. - CatchTermPad->addSuccessor(CatchAllTermPad); - } return true; } Index: lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp =================================================================== --- lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp +++ lib/Target/WebAssembly/WebAssemblyMCInstLower.cpp @@ -36,7 +36,7 @@ // This disables the removal of registers when lowering into MC, as required // by some current tests. -static cl::opt +cl::opt WasmKeepRegisters("wasm-keep-registers", cl::Hidden, cl::desc("WebAssembly: output stack registers in" " instruction output for test purposes only."), Index: lib/Target/WebAssembly/WebAssemblyRegStackify.cpp =================================================================== --- lib/Target/WebAssembly/WebAssemblyRegStackify.cpp +++ lib/Target/WebAssembly/WebAssemblyRegStackify.cpp @@ -317,6 +317,18 @@ AliasAnalysis &AA, const MachineRegisterInfo &MRI) { assert(Def->getParent() == Insert->getParent()); + // 'catch' and 'extract_exception' should be the first instruction of a BB and + // cannot move. + if (Def->getOpcode() == WebAssembly::CATCH || + Def->getOpcode() == WebAssembly::EXTRACT_EXCEPTION_I32) { + const MachineBasicBlock *MBB = Def->getParent(); + auto NextI = std::next(MachineBasicBlock::const_iterator(Def)); + for (auto E = MBB->end(); NextI != E && NextI->isDebugInstr(); ++NextI) + ; + if (NextI != Insert) + return false; + } + // Check for register dependencies. 
SmallVector MutableRegisters; for (const MachineOperand &MO : Def->operands()) { @@ -819,6 +831,24 @@ if (WebAssembly::isArgument(*Def)) continue; + // Currently catch's return value register cannot be stackified, because + // the wasm LLVM backend currently does not support live-in values + // entering blocks, which is a part of multi-value proposal. + // + // Once we support live-in values of wasm blocks, this can be: + // catch ; push except_ref value onto stack + // block except_ref -> i32 + // br_on_exn $__cpp_exception ; pop the except_ref value + // end_block + // + // But because we don't support it yet, the catch instruction's dst + // register should be assigned to a local to be propagated across + // 'block' boundary now. + // + // TODO Fix this once we support the multi-value proposal. + if (Def->getOpcode() == WebAssembly::CATCH) + continue; + // Decide which strategy to take. Prefer to move a single-use value // over cloning it, and prefer cloning over introducing a tee. // For moving, we require the def to be in the same block as the use; Index: lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp =================================================================== --- lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp +++ lib/Target/WebAssembly/WebAssemblyTargetMachine.cpp @@ -301,8 +301,10 @@ addPass(createWebAssemblyFixIrreducibleControlFlow()); // Do various transformations for exception handling. + // Every CFG-changing optimizations should come before this. addPass(createWebAssemblyLateEHPrepare()); + // Preparations and optimizations related to register stackification. if (getOptLevel() != CodeGenOpt::None) { // LiveIntervals isn't commonly run this late. Re-establish preconditions. 
addPass(createWebAssemblyPrepareForLiveIntervals()); Index: lib/Target/WebAssembly/WebAssemblyUtilities.h =================================================================== --- lib/Target/WebAssembly/WebAssemblyUtilities.h +++ lib/Target/WebAssembly/WebAssemblyUtilities.h @@ -30,22 +30,12 @@ bool isCallDirect(const MachineInstr &MI); bool isCallIndirect(const MachineInstr &MI); bool isMarker(const MachineInstr &MI); -bool isThrow(const MachineInstr &MI); -bool isRethrow(const MachineInstr &MI); -bool isCatch(const MachineInstr &MI); bool mayThrow(const MachineInstr &MI); /// Returns the operand number of a callee, assuming the argument is a call /// instruction. unsigned getCalleeOpNo(const MachineInstr &MI); -/// Returns if the given BB is a single BB terminate pad which starts with a -/// 'catch' instruction. -bool isCatchTerminatePad(const MachineBasicBlock &MBB); -/// Returns if the given BB is a single BB terminate pad which starts with a -/// 'catch_all' insrtruction. -bool isCatchAllTerminatePad(const MachineBasicBlock &MBB); - // Exception-related function names extern const char *const ClangCallTerminateFn; extern const char *const CxaBeginCatchFn; Index: lib/Target/WebAssembly/WebAssemblyUtilities.cpp =================================================================== --- lib/Target/WebAssembly/WebAssemblyUtilities.cpp +++ lib/Target/WebAssembly/WebAssemblyUtilities.cpp @@ -242,50 +242,10 @@ } } -bool WebAssembly::isThrow(const MachineInstr &MI) { - switch (MI.getOpcode()) { - case WebAssembly::THROW_I32: - case WebAssembly::THROW_I32_S: - case WebAssembly::THROW_I64: - case WebAssembly::THROW_I64_S: - return true; - default: - return false; - } -} - -bool WebAssembly::isRethrow(const MachineInstr &MI) { - switch (MI.getOpcode()) { - case WebAssembly::RETHROW: - case WebAssembly::RETHROW_S: - case WebAssembly::RETHROW_TO_CALLER: - case WebAssembly::RETHROW_TO_CALLER_S: - return true; - default: - return false; - } -} - -bool WebAssembly::isCatch(const 
MachineInstr &MI) { - switch (MI.getOpcode()) { - case WebAssembly::CATCH_I32: - case WebAssembly::CATCH_I32_S: - case WebAssembly::CATCH_I64: - case WebAssembly::CATCH_I64_S: - case WebAssembly::CATCH_ALL: - case WebAssembly::CATCH_ALL_S: - return true; - default: - return false; - } -} - bool WebAssembly::mayThrow(const MachineInstr &MI) { switch (MI.getOpcode()) { - case WebAssembly::THROW_I32: - case WebAssembly::THROW_I32_S: - case WebAssembly::THROW_I64: - case WebAssembly::THROW_I64_S: + case WebAssembly::THROW: + case WebAssembly::THROW_S: case WebAssembly::RETHROW: case WebAssembly::RETHROW_S: return true; @@ -308,41 +268,3 @@ return false; return true; } - -bool WebAssembly::isCatchTerminatePad(const MachineBasicBlock &MBB) { - if (!MBB.isEHPad()) - return false; - bool SeenCatch = false; - for (auto &MI : MBB) { - if (MI.getOpcode() == WebAssembly::CATCH_I32 || - MI.getOpcode() == WebAssembly::CATCH_I64 || - MI.getOpcode() == WebAssembly::CATCH_I32_S || - MI.getOpcode() == WebAssembly::CATCH_I64_S) - SeenCatch = true; - if (SeenCatch && MI.isCall()) { - const MachineOperand &CalleeOp = MI.getOperand(getCalleeOpNo(MI)); - if (CalleeOp.isGlobal() && - CalleeOp.getGlobal()->getName() == ClangCallTerminateFn) - return true; - } - } - return false; -} - -bool WebAssembly::isCatchAllTerminatePad(const MachineBasicBlock &MBB) { - if (!MBB.isEHPad()) - return false; - bool SeenCatchAll = false; - for (auto &MI : MBB) { - if (MI.getOpcode() == WebAssembly::CATCH_ALL || - MI.getOpcode() == WebAssembly::CATCH_ALL_S) - SeenCatchAll = true; - if (SeenCatchAll && MI.isCall()) { - const MachineOperand &CalleeOp = MI.getOperand(getCalleeOpNo(MI)); - if (CalleeOp.isGlobal() && - CalleeOp.getGlobal()->getName() == StdTerminateFn) - return true; - } - } - return false; -} Index: lib/Target/X86/X86FloatingPoint.cpp =================================================================== --- lib/Target/X86/X86FloatingPoint.cpp +++ lib/Target/X86/X86FloatingPoint.cpp @@ -1368,8 
+1368,6 @@ /// register arguments and no explicit destinations. /// void FPS::handleCompareFP(MachineBasicBlock::iterator &I) { - ASSERT_SORTED(ForwardST0Table); ASSERT_SORTED(ReverseST0Table); - ASSERT_SORTED(ForwardSTiTable); ASSERT_SORTED(ReverseSTiTable); MachineInstr &MI = *I; unsigned NumOperands = MI.getDesc().getNumOperands(); Index: lib/Target/X86/X86InstrAVX512.td =================================================================== --- lib/Target/X86/X86InstrAVX512.td +++ lib/Target/X86/X86InstrAVX512.td @@ -7508,7 +7508,7 @@ def rr : SI, - EVEX_4V, Sched<[sched]>; + EVEX_4V, Sched<[sched, ReadDefault, ReadInt2Fpu]>; let mayLoad = 1 in def rm : SI, - EVEX_4V, Sched<[sched]>; + EVEX_4V, Sched<[sched, ReadDefault, ReadInt2Fpu]>; def rm_Int : SI, - EVEX_4V, EVEX_B, EVEX_RC, Sched<[sched]>; + EVEX_4V, EVEX_B, EVEX_RC, Sched<[sched, ReadDefault, ReadInt2Fpu]>; } multiclass avx512_vcvtsi_common opc, SDNode OpNode, Index: lib/Target/X86/X86InstrFPStack.td =================================================================== --- lib/Target/X86/X86InstrFPStack.td +++ lib/Target/X86/X86InstrFPStack.td @@ -266,7 +266,7 @@ // NOTE: GAS and apparently all other AT&T style assemblers have a broken notion // of some of the 'reverse' forms of the fsub and fdiv instructions. As such, // we have to put some 'r's in and take them out of weird places. 
-let SchedRW = [WriteFAdd] in { +let SchedRW = [WriteFAdd], Defs = [FPSW] in { def ADD_FST0r : FPST0rInst ; def ADD_FrST0 : FPrST0Inst ; def ADD_FPrST0 : FPrST0PInst; @@ -277,16 +277,16 @@ def SUBR_FrST0 : FPrST0Inst ; def SUBR_FPrST0 : FPrST0PInst; } // SchedRW -let SchedRW = [WriteFCom] in { +let SchedRW = [WriteFCom], Defs = [FPSW] in { def COM_FST0r : FPST0rInst ; def COMP_FST0r : FPST0rInst ; } // SchedRW -let SchedRW = [WriteFMul] in { +let SchedRW = [WriteFMul], Defs = [FPSW] in { def MUL_FST0r : FPST0rInst ; def MUL_FrST0 : FPrST0Inst ; def MUL_FPrST0 : FPrST0PInst; } // SchedRW -let SchedRW = [WriteFDiv] in { +let SchedRW = [WriteFDiv], Defs = [FPSW] in { def DIVR_FST0r : FPST0rInst ; def DIV_FrST0 : FPrST0Inst ; def DIV_FPrST0 : FPrST0PInst; @@ -334,7 +334,7 @@ // Versions of FP instructions that take a single memory operand. Added for the // disassembler; remove as they are included with patterns elsewhere. -let SchedRW = [WriteFComLd] in { +let SchedRW = [WriteFComLd], Defs = [FPSW] in { def FCOM32m : FPI<0xD8, MRM2m, (outs), (ins f32mem:$src), "fcom{s}\t$src">; def FCOMP32m : FPI<0xD8, MRM3m, (outs), (ins f32mem:$src), "fcomp{s}\t$src">; Index: lib/Target/X86/X86InstrInfo.td =================================================================== --- lib/Target/X86/X86InstrInfo.td +++ lib/Target/X86/X86InstrInfo.td @@ -3245,7 +3245,7 @@ } defm : FpUnaryAlias<"fadd", ADD_FST0r>; -defm : FpUnaryAlias<"faddp", ADD_FPrST0, 0>; +defm : FpUnaryAlias<"faddp", ADD_FPrST0>; defm : FpUnaryAlias<"fsub", SUB_FST0r>; defm : FpUnaryAlias<"fsub{|r}p", SUBR_FPrST0>; defm : FpUnaryAlias<"fsubr", SUBR_FST0r>; @@ -3256,8 +3256,8 @@ defm : FpUnaryAlias<"fdiv{|r}p", DIVR_FPrST0>; defm : FpUnaryAlias<"fdivr", DIVR_FST0r>; defm : FpUnaryAlias<"fdiv{r|}p", DIV_FPrST0>; -defm : FpUnaryAlias<"fcomi", COM_FIr, 0>; -defm : FpUnaryAlias<"fucomi", UCOM_FIr, 0>; +defm : FpUnaryAlias<"fcomi", COM_FIr>; +defm : FpUnaryAlias<"fucomi", UCOM_FIr>; defm : FpUnaryAlias<"fcompi", COM_FIPr>; defm 
: FpUnaryAlias<"fucompi", UCOM_FIPr>; @@ -3265,7 +3265,7 @@ // Handle "f{mulp,addp} st(0), $op" the same as "f{mulp,addp} $op", since they // commute. We also allow fdiv[r]p/fsubrp even though they don't commute, // solely because gas supports it. -def : InstAlias<"faddp\t{%st(0), $op|$op, st(0)}", (ADD_FPrST0 RST:$op), 0>; +def : InstAlias<"faddp\t{%st(0), $op|$op, st(0)}", (ADD_FPrST0 RST:$op)>; def : InstAlias<"fmulp\t{%st(0), $op|$op, st(0)}", (MUL_FPrST0 RST:$op)>; def : InstAlias<"fsub{|r}p\t{%st(0), $op|$op, st(0)}", (SUBR_FPrST0 RST:$op)>; def : InstAlias<"fsub{r|}p\t{%st(0), $op|$op, st(0)}", (SUB_FPrST0 RST:$op)>; Index: lib/Target/X86/X86InstrSSE.td =================================================================== --- lib/Target/X86/X86InstrSSE.td +++ lib/Target/X86/X86InstrSSE.td @@ -846,10 +846,11 @@ multiclass sse12_cvt_s opc, RegisterClass SrcRC, RegisterClass DstRC, SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag, - string asm, X86FoldableSchedWrite sched> { + string asm, X86FoldableSchedWrite sched, + SchedRead Int2Fpu = ReadDefault> { def rr : SI, - Sched<[sched]>; + Sched<[sched, Int2Fpu]>; def rm : SI, Sched<[sched.Folded]>; @@ -876,7 +877,7 @@ let hasSideEffects = 0, Predicates = [UseAVX] in { def rr : SI, - Sched<[sched]>; + Sched<[sched, ReadDefault, ReadInt2Fpu]>; let mayLoad = 1 in def rm : SI, XD, REX_W; defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32, "cvtsi2ss{l}\t{$src, $dst|$dst, $src}", - WriteCvtI2SS>, XS; + WriteCvtI2SS, ReadInt2Fpu>, XS; defm CVTSI642SS : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64, "cvtsi2ss{q}\t{$src, $dst|$dst, $src}", - WriteCvtI2SS>, XS, REX_W; + WriteCvtI2SS, ReadInt2Fpu>, XS, REX_W; defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32, "cvtsi2sd{l}\t{$src, $dst|$dst, $src}", - WriteCvtI2SD>, XD; + WriteCvtI2SD, ReadInt2Fpu>, XD; defm CVTSI642SD : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64, "cvtsi2sd{q}\t{$src, $dst|$dst, 
$src}", - WriteCvtI2SD>, XD, REX_W; + WriteCvtI2SD, ReadInt2Fpu>, XD, REX_W; def : InstAlias<"cvttss2si{l}\t{$src, $dst|$dst, $src}", (CVTTSS2SIrr GR32:$dst, FR32:$src), 0, "att">; @@ -1031,7 +1032,7 @@ !if(Is2Addr, !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"), !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), - []>, Sched<[sched]>; + []>, Sched<[sched, ReadDefault, ReadInt2Fpu]>; let mayLoad = 1 in def rm_Int : SI; defm : X86WriteResPairUnsupported; -// FIXME: f+3 ST, LD+STC latency -defm : JWriteResFpuPair; +defm : X86WriteRes; +defm : X86WriteRes; defm : JWriteResFpuPair; defm : JWriteResYMMPair; defm : X86WriteResPairUnsupported; -defm : JWriteResFpuPair; +defm : X86WriteRes; +defm : X86WriteRes; defm : JWriteResFpuPair; defm : JWriteResYMMPair; defm : X86WriteResPairUnsupported; Index: lib/Transforms/IPO/IPConstantPropagation.cpp =================================================================== --- lib/Transforms/IPO/IPConstantPropagation.cpp +++ lib/Transforms/IPO/IPConstantPropagation.cpp @@ -66,6 +66,13 @@ if (!ACS) return false; + // Mismatched argument count is undefined behavior. Simply bail out to avoid + // handling of such situations below (avoiding asserts/crashes). + unsigned NumActualArgs = ACS.getNumArgOperands(); + if (F.isVarArg() ? ArgumentConstants.size() > NumActualArgs + : ArgumentConstants.size() != NumActualArgs) + return false; + // Check out all of the potentially constant arguments. Note that we don't // inspect varargs here. Function::arg_iterator Arg = F.arg_begin(); @@ -78,6 +85,11 @@ Value *V = ACS.getCallArgOperand(i); Constant *C = dyn_cast_or_null(V); + // Mismatched argument type is undefined behavior. Simply bail out to avoid + // handling of such situations below (avoiding asserts/crashes). + if (C && Arg->getType() != C->getType()) + return false; + // We can only propagate thread independent values through callbacks. 
// This is different to direct/indirect call sites because for them we // know the thread executing the caller and callee is the same. For Index: lib/Transforms/InstCombine/InstCombineSelect.cpp =================================================================== --- lib/Transforms/InstCombine/InstCombineSelect.cpp +++ lib/Transforms/InstCombine/InstCombineSelect.cpp @@ -675,6 +675,23 @@ return IsNegative ? Builder.CreateSub(B, Max) : Builder.CreateSub(Max, B); } +static Value *canonicalizeSaturatedAdd(ICmpInst *Cmp, Value *TVal, Value *FVal, + InstCombiner::BuilderTy &Builder) { + // Match an unsigned saturated add with constant. + Value *X = Cmp->getOperand(0); + const APInt *CmpC, *AddC; + if (!Cmp->hasOneUse() || Cmp->getPredicate() != ICmpInst::ICMP_ULT || + !match(Cmp->getOperand(1), m_APInt(CmpC)) || !match(FVal, m_AllOnes()) || + !match(TVal, m_Add(m_Specific(X), m_APInt(AddC))) || ~(*AddC) != *CmpC) + return nullptr; + + // Commute compare and select operands: + // select (icmp ult X, C), (add X, ~C), -1 --> + // select (icmp ugt X, C), -1, (add X, ~C) + Value *NewCmp = Builder.CreateICmp(ICmpInst::ICMP_UGT, X, Cmp->getOperand(1)); + return Builder.CreateSelect(NewCmp, FVal, TVal); +} + /// Attempt to fold a cttz/ctlz followed by a icmp plus select into a single /// call to cttz/ctlz with flag 'is_zero_undef' cleared. /// @@ -1048,6 +1065,9 @@ if (Value *V = canonicalizeSaturatedSubtract(ICI, TrueVal, FalseVal, Builder)) return replaceInstUsesWith(SI, V); + if (Value *V = canonicalizeSaturatedAdd(ICI, TrueVal, FalseVal, Builder)) + return replaceInstUsesWith(SI, V); + return Changed ? 
&SI : nullptr; } Index: lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp =================================================================== --- lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp +++ lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp @@ -116,7 +116,7 @@ cl::Hidden, cl::init(true)); static cl::opt AllowNarrowLatchCondition( - "irce-allow-narrow-latch", cl::Hidden, cl::init(false), + "irce-allow-narrow-latch", cl::Hidden, cl::init(true), cl::desc("If set to true, IRCE may eliminate wide range checks in loops " "with narrow latch condition.")); Index: lib/Transforms/Scalar/LoopSimplifyCFG.cpp =================================================================== --- lib/Transforms/Scalar/LoopSimplifyCFG.cpp +++ lib/Transforms/Scalar/LoopSimplifyCFG.cpp @@ -541,7 +541,7 @@ #ifndef NDEBUG // Make sure that we have preserved all data structures after the transform. - DT.verify(); + assert(DT.verify() && "DT broken after transform!"); assert(DT.isReachableFromEntry(L.getHeader())); LI.verify(DT); #endif Index: lib/Transforms/Utils/LoopUtils.cpp =================================================================== --- lib/Transforms/Utils/LoopUtils.cpp +++ lib/Transforms/Utils/LoopUtils.cpp @@ -216,7 +216,10 @@ // When the value is absent it is interpreted as 'attribute set'. return true; case 2: - return mdconst::extract_or_null(MD->getOperand(1).get()); + if (ConstantInt *IntMD = + mdconst::extract_or_null(MD->getOperand(1).get())) + return IntMD->getZExtValue(); + return true; } llvm_unreachable("unexpected number of options"); } Index: test/CodeGen/AArch64/GlobalISel/legalize-inttoptr-xfail-1.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-inttoptr-xfail-1.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-inttoptr-xfail-1.mir @@ -15,9 +15,9 @@ # and fix the mistake: check that type index 0 is p0 and type index 1 # is s64 (in that order). 
-# CHECK: LLVM ERROR: unable to legalize instruction: -# CHECK-SAME: %{{[0-9]+}}:_(s64) = G_INTTOPTR %{{[0-9]+}}:_(p0) -# CHECK-SAME: (in function: broken) +# CHECK: Bad machine code: inttoptr result type must be a pointer +# CHECK: Bad machine code: inttoptr source type must not be a pointer +# CHECK: LLVM ERROR: Found 2 machine code errors. --- name: broken Index: test/CodeGen/AArch64/GlobalISel/legalize-inttoptr-xfail-2.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-inttoptr-xfail-2.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-inttoptr-xfail-2.mir @@ -19,9 +19,8 @@ # and fix the mistake: check that type index 0 is p0 and type index 1 # is s64. -# CHECK: LLVM ERROR: unable to legalize instruction: -# CHECK-SAME: %{{[0-9]+}}:_(p0) = G_INTTOPTR %{{[0-9]+}}:_(<4 x s16>) -# CHECK-SAME: (in function: broken) +# CHECK: Bad machine code: pointer casts must be all-vector or all-scalar +# CHECK: LLVM ERROR: Found 1 machine code errors. --- name: broken Index: test/CodeGen/AArch64/GlobalISel/legalize-merge-values.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/legalize-merge-values.mir +++ test/CodeGen/AArch64/GlobalISel/legalize-merge-values.mir @@ -1,30 +1,34 @@ -# RUN: llc -O0 -run-pass=legalizer -global-isel-abort=0 -pass-remarks-missed='gisel*' %s -o - 2>&1 | FileCheck %s - ---- | - target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128" - target triple = "aarch64--" - define void @test_merge_s4() { - ret void - } -... 
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -march=aarch64 -O0 -run-pass=legalizer %s -o - | FileCheck %s --- name: test_merge_s4 -registers: - - { id: 0, class: _ } - - { id: 1, class: _ } - - { id: 2, class: _ } - - { id: 3, class: _ } - - { id: 4, class: _ } body: | bb.0: - %0(s64) = G_CONSTANT i64 0 - %1(s4) = G_TRUNC %0(s64) - ; Previously, LegalizerInfo was assuming all G_MERGE_VALUES and G_UNMERGE_VALUES - ; instructions are legal. Make sure that is no longer happening. - ; CHECK: unable to legalize instruction: {{.*}} G_MERGE_VALUES - %2(s8) = G_MERGE_VALUES %1(s4), %1(s4) - %3(s8) = COPY %2(s8) - %4(s64) = G_ANYEXT %3(s8) - $x0 = COPY %4(s64) + ; CHECK-LABEL: name: test_merge_s4 + ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0 + ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 + ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 15 + ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64) + ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C2]] + ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C1]](s32) + ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C3]] + ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND]], [[AND1]](s32) + ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 15 + ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64) + ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[TRUNC1]], [[C4]] + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[SHL]](s32) + ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[COPY1]] + ; CHECK: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[OR]](s32) + ; CHECK: [[COPY2:%[0-9]+]]:_(s8) = COPY [[TRUNC2]](s8) + ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY2]](s8) + ; CHECK: $x0 = COPY [[ANYEXT]](s64) + %0:_(s64) = G_CONSTANT i64 0 + %1:_(s4) = G_TRUNC %0 + + %2:_(s8) = G_MERGE_VALUES %1, %1 + %3:_(s8) = COPY %2 + %4:_(s64) = G_ANYEXT %3 + $x0 = COPY %4 ... 
Index: test/CodeGen/AArch64/GlobalISel/regbankselect-unmerge-vec.mir =================================================================== --- /dev/null +++ test/CodeGen/AArch64/GlobalISel/regbankselect-unmerge-vec.mir @@ -0,0 +1,26 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -O0 -mtriple arm64-- -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s +--- +name: unmerge +alignment: 2 +legalized: true +tracksRegLiveness: true +frameInfo: + maxCallFrameSize: 0 +body: | + bb.0: + liveins: $q0 + + ; Ensure that the dest regs have FPR since we're unmerging from a vector + ; CHECK-LABEL: name: unmerge + ; CHECK: liveins: $q0 + ; CHECK: [[COPY:%[0-9]+]]:fpr(<2 x s64>) = COPY $q0 + ; CHECK: [[UV:%[0-9]+]]:fpr(s64), [[UV1:%[0-9]+]]:fpr(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>) + ; CHECK: $x0 = COPY [[UV]](s64) + ; CHECK: RET_ReallyLR implicit $x0 + %0:_(<2 x s64>) = COPY $q0 + %1:_(s64), %2:_(s64) = G_UNMERGE_VALUES %0(<2 x s64>) + $x0 = COPY %1(s64) + RET_ReallyLR implicit $x0 + +... 
Index: test/CodeGen/AArch64/bool-ext-inc.ll =================================================================== --- /dev/null +++ test/CodeGen/AArch64/bool-ext-inc.ll @@ -0,0 +1,29 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s + +define <4 x i32> @sextbool_add_vector(<4 x i32> %c1, <4 x i32> %c2, <4 x i32> %x) { +; CHECK-LABEL: sextbool_add_vector: +; CHECK: // %bb.0: +; CHECK-NEXT: cmeq v0.4s, v0.4s, v1.4s +; CHECK-NEXT: add v0.4s, v2.4s, v0.4s +; CHECK-NEXT: ret + %c = icmp eq <4 x i32> %c1, %c2 + %b = sext <4 x i1> %c to <4 x i32> + %s = add <4 x i32> %x, %b + ret <4 x i32> %s +} + +define <4 x i32> @zextbool_sub_vector(<4 x i32> %c1, <4 x i32> %c2, <4 x i32> %x) { +; CHECK-LABEL: zextbool_sub_vector: +; CHECK: // %bb.0: +; CHECK-NEXT: cmeq v0.4s, v0.4s, v1.4s +; CHECK-NEXT: movi v1.4s, #1 +; CHECK-NEXT: and v0.16b, v0.16b, v1.16b +; CHECK-NEXT: sub v0.4s, v2.4s, v0.4s +; CHECK-NEXT: ret + %c = icmp eq <4 x i32> %c1, %c2 + %b = zext <4 x i1> %c to <4 x i32> + %s = sub <4 x i32> %x, %b + ret <4 x i32> %s +} + Index: test/CodeGen/AArch64/win64-jumptable.ll =================================================================== --- /dev/null +++ test/CodeGen/AArch64/win64-jumptable.ll @@ -0,0 +1,48 @@ +; RUN: llc -o - %s -mtriple=aarch64-windows -aarch64-enable-compress-jump-tables=0 | FileCheck %s + +define void @f(i32 %x) { +entry: + switch i32 %x, label %sw.epilog [ + i32 0, label %sw.bb + i32 1, label %sw.bb1 + i32 2, label %sw.bb2 + i32 3, label %sw.bb3 + ] + +sw.bb: ; preds = %entry + tail call void @g(i32 0) #2 + br label %sw.epilog + +sw.bb1: ; preds = %entry + tail call void @g(i32 1) #2 + br label %sw.epilog + +sw.bb2: ; preds = %entry + tail call void @g(i32 2) #2 + br label %sw.epilog + +sw.bb3: ; preds = %entry + tail call void @g(i32 3) #2 + br label %sw.epilog + +sw.epilog: ; preds = %entry, %sw.bb3, %sw.bb2, %sw.bb1, %sw.bb + tail call void @g(i32 10) #2 + ret void +} 
+ +declare void @g(i32) + +; CHECK: .text +; CHECK: f: +; CHECK: .seh_proc f +; CHECK: b g +; CHECK-NEXT: .p2align 2 +; CHECK-NEXT: .LJTI0_0: +; CHECK: .word .LBB0_2-.LJTI0_0 +; CHECK: .word .LBB0_3-.LJTI0_0 +; CHECK: .word .LBB0_4-.LJTI0_0 +; CHECK: .word .LBB0_5-.LJTI0_0 +; CHECK: .section .xdata,"dr" +; CHECK: .seh_handlerdata +; CHECK: .text +; CHECK: .seh_endproc Index: test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir @@ -140,9 +140,9 @@ ; CHECK: $vgpr0 = COPY [[COPY4]](s32) %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $vgpr1 - %2:_(s16) = G_TRUNC %0 - %3:_(s16) = G_TRUNC %1 - %4:_(s16) = G_AND %2, %3 + %2:_(s24) = G_TRUNC %0 + %3:_(s24) = G_TRUNC %1 + %4:_(s24) = G_AND %2, %3 %5:_(s32) = G_ANYEXT %4 $vgpr0 = COPY %5 ... @@ -164,6 +164,78 @@ $vgpr0_vgpr1 = COPY %2 ... +--- +name: test_and_v3i32 +body: | + bb.0: + liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5 + + ; CHECK-LABEL: name: test_and_v3i32 + ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2 + ; CHECK: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5 + ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>) + ; CHECK: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>) + ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[UV3]] + ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[UV4]] + ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[UV2]], [[UV5]] + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[AND]](s32), [[AND1]](s32), [[AND2]](s32) + ; CHECK: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>) + %0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2 + %1:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5 + %2:_(<3 x s32>) = G_AND %0, %1 + $vgpr0_vgpr1_vgpr2 = COPY %2 +... 
+ +--- +name: test_and_v4i32 +body: | + bb.0: + liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7 + + ; CHECK-LABEL: name: test_and_v4i32 + ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3 + ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>) + ; CHECK: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>) + ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[UV4]] + ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[UV5]] + ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[UV2]], [[UV6]] + ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[UV3]], [[UV7]] + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[AND]](s32), [[AND1]](s32), [[AND2]](s32), [[AND3]](s32) + ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>) + %0:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3 + %1:_(<4 x s32>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7 + %2:_(<4 x s32>) = G_AND %0, %1 + $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %2 +... 
+ +--- +name: test_and_v5i32 +body: | + bb.0: + + ; CHECK-LABEL: name: test_and_v5i32 + ; CHECK: [[DEF:%[0-9]+]]:_(<5 x s32>) = G_IMPLICIT_DEF + ; CHECK: [[DEF1:%[0-9]+]]:_(<5 x s32>) = G_IMPLICIT_DEF + ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<5 x s32>) + ; CHECK: [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF1]](<5 x s32>) + ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[UV]], [[UV5]] + ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[UV1]], [[UV6]] + ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[UV2]], [[UV7]] + ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[UV3]], [[UV8]] + ; CHECK: [[AND4:%[0-9]+]]:_(s32) = G_AND [[UV4]], [[UV9]] + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[AND]](s32), [[AND1]](s32), [[AND2]](s32), [[AND3]](s32), [[AND4]](s32) + ; CHECK: [[DEF2:%[0-9]+]]:_(<8 x s32>) = G_IMPLICIT_DEF + ; CHECK: [[INSERT:%[0-9]+]]:_(<8 x s32>) = G_INSERT [[DEF2]], [[BUILD_VECTOR]](<5 x s32>), 0 + ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT]](<8 x s32>) + %0:_(<5 x s32>) = G_IMPLICIT_DEF + %1:_(<5 x s32>) = G_IMPLICIT_DEF + %2:_(<5 x s32>) = G_AND %0, %1 + %3:_(<8 x s32>) = G_IMPLICIT_DEF + %4:_(<8 x s32>) = G_INSERT %3, %2, 0 + $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %4 +... + --- name: test_and_v2s64 body: | Index: test/CodeGen/AMDGPU/GlobalISel/legalize-ashr.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-ashr.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-ashr.mir @@ -250,3 +250,59 @@ %5:_(s32) = G_ANYEXT %4 $vgpr0 = COPY %5 ... 
+ +--- +name: test_ashr_i8_i8 +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + + ; SI-LABEL: name: test_ashr_i8_i8 + ; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 + ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32) + ; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32) + ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] + ; SI: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[AND]](s32) + ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32) + ; SI: $vgpr0 = COPY [[COPY4]](s32) + ; VI-LABEL: name: test_ashr_i8_i8 + ; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 + ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32) + ; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[TRUNC]](s16) + ; VI: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[TRUNC]](s16) + ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C1]] + ; VI: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[ASHR]], [[AND]](s32) + ; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR1]](s16) + ; VI: $vgpr0 = COPY [[ANYEXT]](s32) + ; GFX9-LABEL: name: test_ashr_i8_i8 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32) + ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[TRUNC]](s16) + ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[TRUNC]](s16) + ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; GFX9: 
[[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C1]] + ; GFX9: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[ASHR]], [[AND]](s32) + ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR1]](s16) + ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s8) = G_TRUNC %0 + %3:_(s8) = G_TRUNC %1 + %4:_(s8) = G_ASHR %2, %3 + %5:_(s32) = G_ANYEXT %4 + $vgpr0 = COPY %5 +... Index: test/CodeGen/AMDGPU/GlobalISel/legalize-load.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-load.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-load.mir @@ -129,3 +129,190 @@ $vgpr0_vgpr1_vgpr2 = COPY %1 ... + +--- +name: test_ext_load_global_s64_from_1_align1 +body: | + bb.0: + liveins: $vgpr0_vgpr1 + + ; CHECK-LABEL: name: test_ext_load_global_s64_from_1_align1 + ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 1, align 4, addrspace 1) + ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32) + ; CHECK: $vgpr0_vgpr1 = COPY [[ANYEXT]](s64) + %0:_(p1) = COPY $vgpr0_vgpr1 + %1:_(s64) = G_LOAD %0 :: (load 1, addrspace 1, align 4) + + $vgpr0_vgpr1 = COPY %1 +... + +--- +name: test_ext_load_global_s64_from_2_align2 +body: | + bb.0: + liveins: $vgpr0_vgpr1 + + ; CHECK-LABEL: name: test_ext_load_global_s64_from_2_align2 + ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 2, align 4, addrspace 1) + ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32) + ; CHECK: $vgpr0_vgpr1 = COPY [[ANYEXT]](s64) + %0:_(p1) = COPY $vgpr0_vgpr1 + %1:_(s64) = G_LOAD %0 :: (load 2, addrspace 1, align 4) + + $vgpr0_vgpr1 = COPY %1 +... 
+ +--- +name: test_ext_load_global_s64_from_4_align4 +body: | + bb.0: + liveins: $vgpr0_vgpr1 + + ; CHECK-LABEL: name: test_ext_load_global_s64_from_4_align4 + ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 4, addrspace 1) + ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s32) + ; CHECK: $vgpr0_vgpr1 = COPY [[ANYEXT]](s64) + %0:_(p1) = COPY $vgpr0_vgpr1 + %1:_(s64) = G_LOAD %0 :: (load 4, addrspace 1, align 4) + + $vgpr0_vgpr1 = COPY %1 +... + +--- +name: test_ext_load_global_s128_from_4_align4 +body: | + bb.0: + liveins: $vgpr0_vgpr1 + + ; CHECK-LABEL: name: test_ext_load_global_s128_from_4_align4 + ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p1) :: (load 4, addrspace 1) + ; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[LOAD]](s32) + ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[ANYEXT]](s128) + %0:_(p1) = COPY $vgpr0_vgpr1 + %1:_(s128) = G_LOAD %0 :: (load 4, addrspace 1, align 4) + $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %1 + $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %1 +... 
+ +--- +name: test_load_global_s96_align4 +body: | + bb.0: + liveins: $vgpr0_vgpr1 + + ; SI-LABEL: name: test_load_global_s96_align4 + ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; SI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p1) :: (load 8, align 4, addrspace 1) + ; SI: [[COPY1:%[0-9]+]]:_(s64) = COPY [[LOAD]](s64) + ; SI: [[DEF:%[0-9]+]]:_(s96) = G_IMPLICIT_DEF + ; SI: [[INSERT:%[0-9]+]]:_(s96) = G_INSERT [[DEF]], [[COPY1]](s64), 0 + ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 + ; SI: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C]](s64) + ; SI: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p1) :: (load 4, addrspace 1) + ; SI: [[INSERT1:%[0-9]+]]:_(s96) = G_INSERT [[INSERT]], [[LOAD1]](s32), 64 + ; SI: $vgpr0_vgpr1_vgpr2 = COPY [[INSERT1]](s96) + ; VI-LABEL: name: test_load_global_s96_align4 + ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; VI: [[LOAD:%[0-9]+]]:_(s96) = G_LOAD [[COPY]](p1) :: (load 12, align 4, addrspace 1) + ; VI: $vgpr0_vgpr1_vgpr2 = COPY [[LOAD]](s96) + %0:_(p1) = COPY $vgpr0_vgpr1 + %1:_(s96) = G_LOAD %0 :: (load 12, addrspace 1, align 4) + $vgpr0_vgpr1_vgpr2 = COPY %1 + +... 
+ +--- +name: test_load_global_s160_align4 +body: | + bb.0: + liveins: $vgpr0_vgpr1 + + ; SI-LABEL: name: test_load_global_s160_align4 + ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; SI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p1) :: (load 8, align 4, addrspace 1) + ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 + ; SI: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C]](s64) + ; SI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p1) :: (load 8, align 4, addrspace 1) + ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[LOAD]](s64), [[LOAD1]](s64) + ; SI: [[DEF:%[0-9]+]]:_(s160) = G_IMPLICIT_DEF + ; SI: [[INSERT:%[0-9]+]]:_(s160) = G_INSERT [[DEF]], [[MV]](s128), 0 + ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; SI: [[GEP1:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C1]](s64) + ; SI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p1) :: (load 4, addrspace 1) + ; SI: [[INSERT1:%[0-9]+]]:_(s160) = G_INSERT [[INSERT]], [[LOAD2]](s32), 128 + ; SI: S_NOP 0, implicit [[INSERT1]](s160) + ; VI-LABEL: name: test_load_global_s160_align4 + ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; VI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p1) :: (load 8, align 4, addrspace 1) + ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 + ; VI: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C]](s64) + ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p1) :: (load 8, align 4, addrspace 1) + ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[LOAD]](s64), [[LOAD1]](s64) + ; VI: [[DEF:%[0-9]+]]:_(s160) = G_IMPLICIT_DEF + ; VI: [[INSERT:%[0-9]+]]:_(s160) = G_INSERT [[DEF]], [[MV]](s128), 0 + ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; VI: [[GEP1:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C1]](s64) + ; VI: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p1) :: (load 4, addrspace 1) + ; VI: [[INSERT1:%[0-9]+]]:_(s160) = G_INSERT [[INSERT]], [[LOAD2]](s32), 128 + ; VI: S_NOP 0, implicit [[INSERT1]](s160) + %0:_(p1) = COPY $vgpr0_vgpr1 + %1:_(s160) = G_LOAD %0 :: (load 20, addrspace 1, align 4) + S_NOP 0, implicit %1 +... 
+ +--- +name: test_load_global_s224_align4 +body: | + bb.0: + liveins: $vgpr0_vgpr1 + + ; SI-LABEL: name: test_load_global_s224_align4 + ; SI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; SI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p1) :: (load 8, align 4, addrspace 1) + ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 + ; SI: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C]](s64) + ; SI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p1) :: (load 8, align 4, addrspace 1) + ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; SI: [[GEP1:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C1]](s64) + ; SI: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p1) :: (load 8, align 4, addrspace 1) + ; SI: [[MV:%[0-9]+]]:_(s192) = G_MERGE_VALUES [[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64) + ; SI: [[DEF:%[0-9]+]]:_(s224) = G_IMPLICIT_DEF + ; SI: [[INSERT:%[0-9]+]]:_(s224) = G_INSERT [[DEF]], [[MV]](s192), 0 + ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 + ; SI: [[GEP2:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C2]](s64) + ; SI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p1) :: (load 4, addrspace 1) + ; SI: [[INSERT1:%[0-9]+]]:_(s224) = G_INSERT [[INSERT]], [[LOAD3]](s32), 192 + ; SI: [[DEF1:%[0-9]+]]:_(s256) = G_IMPLICIT_DEF + ; SI: [[INSERT2:%[0-9]+]]:_(s256) = G_INSERT [[DEF1]], [[INSERT1]](s224), 0 + ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](s256) + ; VI-LABEL: name: test_load_global_s224_align4 + ; VI: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; VI: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p1) :: (load 8, align 4, addrspace 1) + ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8 + ; VI: [[GEP:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C]](s64) + ; VI: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP]](p1) :: (load 8, align 4, addrspace 1) + ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16 + ; VI: [[GEP1:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C1]](s64) + ; VI: [[LOAD2:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p1) :: (load 8, align 4, addrspace 1) + ; VI: [[MV:%[0-9]+]]:_(s192) = G_MERGE_VALUES 
[[LOAD]](s64), [[LOAD1]](s64), [[LOAD2]](s64) + ; VI: [[DEF:%[0-9]+]]:_(s224) = G_IMPLICIT_DEF + ; VI: [[INSERT:%[0-9]+]]:_(s224) = G_INSERT [[DEF]], [[MV]](s192), 0 + ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24 + ; VI: [[GEP2:%[0-9]+]]:_(p1) = G_GEP [[COPY]], [[C2]](s64) + ; VI: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[GEP2]](p1) :: (load 4, addrspace 1) + ; VI: [[INSERT1:%[0-9]+]]:_(s224) = G_INSERT [[INSERT]], [[LOAD3]](s32), 192 + ; VI: [[DEF1:%[0-9]+]]:_(s256) = G_IMPLICIT_DEF + ; VI: [[INSERT2:%[0-9]+]]:_(s256) = G_INSERT [[DEF1]], [[INSERT1]](s224), 0 + ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[INSERT2]](s256) + %0:_(p1) = COPY $vgpr0_vgpr1 + %1:_(s224) = G_LOAD %0 :: (load 28, addrspace 1, align 4) + + %2:_(s256) = G_IMPLICIT_DEF + %3:_(s256) = G_INSERT %2, %1, 0 + $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %3 + +... Index: test/CodeGen/AMDGPU/GlobalISel/legalize-lshr.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-lshr.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-lshr.mir @@ -247,3 +247,58 @@ %5:_(s32) = G_ANYEXT %4 $vgpr0 = COPY %5 ... 
+ +--- +name: test_lshr_i8_i8 +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + + ; SI-LABEL: name: test_lshr_i8_i8 + ; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]] + ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]] + ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[AND1]](s32) + ; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32) + ; SI: $vgpr0 = COPY [[COPY4]](s32) + ; VI-LABEL: name: test_lshr_i8_i8 + ; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32) + ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[COPY3]] + ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[AND]](s32) + ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; VI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C1]] + ; VI: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[AND1]](s32) + ; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16) + ; VI: $vgpr0 = COPY [[ANYEXT]](s32) + ; GFX9-LABEL: name: test_lshr_i8_i8 + ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32) + ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32) + ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[COPY3]] + ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[AND]](s32) + ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; GFX9: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C1]] + ; 
GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[AND1]](s32) + ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16) + ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s8) = G_TRUNC %0 + %3:_(s8) = G_TRUNC %1 + %4:_(s8) = G_LSHR %2, %3 + %5:_(s32) = G_ANYEXT %4 + $vgpr0 = COPY %5 +... Index: test/CodeGen/AMDGPU/GlobalISel/legalize-merge-values.mir =================================================================== --- /dev/null +++ test/CodeGen/AMDGPU/GlobalISel/legalize-merge-values.mir @@ -0,0 +1,156 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer %s -o - | FileCheck %s + +--- +name: test_merge_s16_s8_s8 +body: | + bb.0: + ; CHECK-LABEL: name: test_merge_s16_s8_s8 + ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 + ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C1]](s32) + ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C3]] + ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C2]](s32) + ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C4]] + ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND]], [[AND1]](s32) + ; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C]](s32) + ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C5]] + ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[SHL]](s32) + ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[COPY3]] + ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[OR]](s32) + ; CHECK: $vgpr0 = COPY [[COPY4]](s32) + %0:_(s8) = G_CONSTANT i8 0 + %1:_(s8) = G_CONSTANT i8 1 + %2:_(s16) = G_MERGE_VALUES %0, %1 + %3:_(s32) = G_ANYEXT %2 + $vgpr0 = COPY %3 +... 
+ +--- +name: test_merge_s24_s8_s8_s8 +body: | + bb.0: + ; CHECK-LABEL: name: test_merge_s24_s8_s8_s8 + ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 + ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 + ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C1]](s32) + ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C4]] + ; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C3]](s32) + ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C5]] + ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND]], [[AND1]](s32) + ; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C]](s32) + ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C6]] + ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[SHL]](s32) + ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[COPY3]] + ; CHECK: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; CHECK: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32) + ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C8]] + ; CHECK: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215 + ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C7]](s32) + ; CHECK: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C9]] + ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[AND4]](s32) + ; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[OR]](s32) + ; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY [[SHL1]](s32) + ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[COPY6]], [[COPY7]] + ; CHECK: [[COPY8:%[0-9]+]]:_(s32) = COPY [[OR1]](s32) + ; CHECK: $vgpr0 = COPY [[COPY8]](s32) + %0:_(s8) = G_CONSTANT i8 0 + %1:_(s8) = G_CONSTANT i8 1 + %2:_(s8) = G_CONSTANT i8 2 + %3:_(s24) = G_MERGE_VALUES %0, %1, %2 + %4:_(s32) = G_ANYEXT %3 + $vgpr0 = COPY %4 +... 
+ +--- +name: test_merge_s32_s8_s8_s8_s8 +body: | + bb.0: + ; CHECK-LABEL: name: test_merge_s32_s8_s8_s8_s8 + ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2 + ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3 + ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C]](s32) + ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C4]] + ; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C1]](s32) + ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C5]] + ; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 8 + ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C6]](s32) + ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL]] + ; CHECK: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C2]](s32) + ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C7]] + ; CHECK: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 16 + ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND2]], [[C8]](s32) + ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]] + ; CHECK: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 255 + ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C3]](s32) + ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C9]] + ; CHECK: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 24 + ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C10]](s32) + ; CHECK: [[OR2:%[0-9]+]]:_(s32) = G_OR [[OR1]], [[SHL2]] + ; CHECK: $vgpr0 = COPY [[OR2]](s32) + %0:_(s8) = G_CONSTANT i8 0 + %1:_(s8) = G_CONSTANT i8 1 + %2:_(s8) = G_CONSTANT i8 2 + %3:_(s8) = G_CONSTANT i8 3 + %4:_(s32) = G_MERGE_VALUES %0, %1, %2, %3 + $vgpr0 = COPY %4 +... 
+ +--- +name: test_merge_s64_s32_s32 +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + ; CHECK-LABEL: name: test_merge_s64_s32_s32 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32) + ; CHECK: $vgpr1_vgpr2 = COPY [[MV]](s64) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s64) = G_MERGE_VALUES %0, %1 + $vgpr1_vgpr2 = COPY %2 +... + +--- +name: test_merge_s64_s16_s16_s16_s16 +body: | + bb.0: + liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3 + ; CHECK-LABEL: name: test_merge_s64_s16_s16_s16_s16 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1 + ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3 + ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32) + ; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32) + ; CHECK: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32) + ; CHECK: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32) + ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[TRUNC]](s16), [[TRUNC1]](s16), [[TRUNC2]](s16), [[TRUNC3]](s16) + ; CHECK: $vgpr1_vgpr2 = COPY [[MV]](s64) + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s32) = COPY $vgpr2 + %3:_(s32) = COPY $vgpr3 + %4:_(s16) = G_TRUNC %0 + %5:_(s16) = G_TRUNC %1 + %6:_(s16) = G_TRUNC %2 + %7:_(s16) = G_TRUNC %3 + %8:_(s64) = G_MERGE_VALUES %4, %5, %6, %7 + $vgpr1_vgpr2 = COPY %8 +... Index: test/CodeGen/AMDGPU/GlobalISel/legalize-select.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-select.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-select.mir @@ -73,6 +73,91 @@ ... 
+--- +name: test_select_v3s32 +body: | + bb.0: + liveins: $vgpr0, $vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6 + ; CHECK-LABEL: name: test_select_v3s32 + ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr1_vgpr2_vgpr3 + ; CHECK: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr4_vgpr5_vgpr6 + ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[C]](s32), [[COPY]] + ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>) + ; CHECK: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>) + ; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[UV]], [[UV3]] + ; CHECK: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[UV1]], [[UV4]] + ; CHECK: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[UV2]], [[UV5]] + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[SELECT]](s32), [[SELECT1]](s32), [[SELECT2]](s32) + ; CHECK: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>) + %0:_(s32) = G_CONSTANT i32 0 + %1:_(s32) = COPY $vgpr0 + %2:_(<3 x s32>) = COPY $vgpr1_vgpr2_vgpr3 + %3:_(<3 x s32>) = COPY $vgpr4_vgpr5_vgpr6 + + %4:_(s1) = G_ICMP intpred(ne), %0, %1 + %5:_(<3 x s32>) = G_SELECT %4, %2, %3 + $vgpr0_vgpr1_vgpr2 = COPY %5 + +... 
+ +--- +name: test_select_v4s32 +body: | + bb.0: + liveins: $vgpr0, $vgpr1_vgpr2_vgpr3_vgpr4, $vgpr5_vgpr6_vgpr7_vgpr8 + ; CHECK-LABEL: name: test_select_v4s32 + ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr1_vgpr2_vgpr3_vgpr4 + ; CHECK: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr5_vgpr6_vgpr7_vgpr8 + ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[C]](s32), [[COPY]] + ; CHECK: [[UV:%[0-9]+]]:_(<2 x s32>), [[UV1:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>) + ; CHECK: [[UV2:%[0-9]+]]:_(<2 x s32>), [[UV3:%[0-9]+]]:_(<2 x s32>) = G_UNMERGE_VALUES [[COPY2]](<4 x s32>) + ; CHECK: [[SELECT:%[0-9]+]]:_(<2 x s32>) = G_SELECT [[ICMP]](s1), [[UV]], [[UV2]] + ; CHECK: [[SELECT1:%[0-9]+]]:_(<2 x s32>) = G_SELECT [[ICMP]](s1), [[UV1]], [[UV3]] + ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[SELECT]](<2 x s32>), [[SELECT1]](<2 x s32>) + ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[CONCAT_VECTORS]](<4 x s32>) + %0:_(s32) = G_CONSTANT i32 0 + %1:_(s32) = COPY $vgpr0 + %2:_(<4 x s32>) = COPY $vgpr1_vgpr2_vgpr3_vgpr4 + %3:_(<4 x s32>) = COPY $vgpr5_vgpr6_vgpr7_vgpr8 + + %4:_(s1) = G_ICMP intpred(ne), %0, %1 + %5:_(<4 x s32>) = G_SELECT %4, %2, %3 + $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %5 + +... 
+ +--- +name: test_select_v2s64 +body: | + bb.0: + liveins: $vgpr0, $vgpr1_vgpr2_vgpr3_vgpr4, $vgpr5_vgpr6_vgpr7_vgpr8 + ; CHECK-LABEL: name: test_select_v2s64 + ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0 + ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0 + ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr1_vgpr2_vgpr3_vgpr4 + ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr5_vgpr6_vgpr7_vgpr8 + ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[C]](s32), [[COPY]] + ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>) + ; CHECK: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY2]](<2 x s64>) + ; CHECK: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[UV]], [[UV2]] + ; CHECK: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[UV1]], [[UV3]] + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64) + ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>) + %0:_(s32) = G_CONSTANT i32 0 + %1:_(s32) = COPY $vgpr0 + %2:_(<2 x s64>) = COPY $vgpr1_vgpr2_vgpr3_vgpr4 + %3:_(<2 x s64>) = COPY $vgpr5_vgpr6_vgpr7_vgpr8 + + %4:_(s1) = G_ICMP intpred(ne), %0, %1 + %5:_(<2 x s64>) = G_SELECT %4, %2, %3 + $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %5 + +... + --- name: test_select_s16 body: | @@ -179,3 +264,127 @@ $vgpr0 = COPY %5 ... 
+--- +name: test_vselect_v3s32 +body: | + bb.0: + liveins: $vgpr0_vgpr1_vgpr2, $vgpr3_vgpr4_vgpr5, $vgpr6_vgpr7_vgpr8 + ; CHECK-LABEL: name: test_vselect_v3s32 + ; CHECK: [[COPY:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2 + ; CHECK: [[COPY1:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5 + ; CHECK: [[COPY2:%[0-9]+]]:_(<3 x s32>) = COPY $vgpr6_vgpr7_vgpr8 + ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<3 x s32>) + ; CHECK: [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>) + ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[UV3]] + ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[UV4]] + ; CHECK: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[UV5]] + ; CHECK: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<3 x s32>) + ; CHECK: [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<3 x s32>) + ; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[UV6]], [[UV9]] + ; CHECK: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[UV7]], [[UV10]] + ; CHECK: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[UV8]], [[UV11]] + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[SELECT]](s32), [[SELECT1]](s32), [[SELECT2]](s32) + ; CHECK: $vgpr0_vgpr1_vgpr2 = COPY [[BUILD_VECTOR]](<3 x s32>) + %0:_(<3 x s32>) = COPY $vgpr0_vgpr1_vgpr2 + %1:_(<3 x s32>) = COPY $vgpr3_vgpr4_vgpr5 + %2:_(<3 x s32>) = COPY $vgpr6_vgpr7_vgpr8 + + %3:_(<3 x s1>) = G_ICMP intpred(ne), %0, %1 + %4:_(<3 x s32>) = G_SELECT %3, %1, %2 + $vgpr0_vgpr1_vgpr2 = COPY %4 + +... 
+ +--- +name: test_vselect_v4s32 +body: | + bb.0: + liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7, $vgpr8_vgpr9_vgpr10_vgpr11 + ; CHECK-LABEL: name: test_vselect_v4s32 + ; CHECK: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3 + ; CHECK: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $vgpr8_vgpr9_vgpr10_vgpr11 + ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<4 x s32>) + ; CHECK: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>) + ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[UV4]] + ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[UV5]] + ; CHECK: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV2]](s32), [[UV6]] + ; CHECK: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV3]](s32), [[UV7]] + ; CHECK: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<4 x s32>) + ; CHECK: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<4 x s32>) + ; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[UV8]], [[UV12]] + ; CHECK: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[UV9]], [[UV13]] + ; CHECK: [[SELECT2:%[0-9]+]]:_(s32) = G_SELECT [[ICMP2]](s1), [[UV10]], [[UV14]] + ; CHECK: [[SELECT3:%[0-9]+]]:_(s32) = G_SELECT [[ICMP3]](s1), [[UV11]], [[UV15]] + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[SELECT]](s32), [[SELECT1]](s32), [[SELECT2]](s32), [[SELECT3]](s32) + ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<4 x s32>) + %0:_(<4 x s32>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3 + %1:_(<4 x s32>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7 + %2:_(<4 x s32>) = COPY $vgpr8_vgpr9_vgpr10_vgpr11 + + 
%3:_(<4 x s1>) = G_ICMP intpred(ne), %0, %1 + %4:_(<4 x s32>) = G_SELECT %3, %1, %2 + $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %4 + +... + +--- +name: test_vselect_v2s64 +body: | + bb.0: + liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4_vgpr5_vgpr6_vgpr7, $vgpr8_vgpr9_vgpr10_vgpr11 + ; CHECK-LABEL: name: test_vselect_v2s64 + ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3 + ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7 + ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr8_vgpr9_vgpr10_vgpr11 + ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>) + ; CHECK: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>) + ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s64), [[UV2]] + ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s64), [[UV3]] + ; CHECK: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>) + ; CHECK: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY2]](<2 x s64>) + ; CHECK: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[UV4]], [[UV6]] + ; CHECK: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV5]], [[UV7]] + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64) + ; CHECK: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>) + %0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3 + %1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7 + %2:_(<2 x s64>) = COPY $vgpr8_vgpr9_vgpr10_vgpr11 + + %3:_(<2 x s1>) = G_ICMP intpred(ne), %0, %1 + %4:_(<2 x s64>) = G_SELECT %3, %1, %2 + $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %4 + +... 
+ +--- +name: test_vselect_v2s32 +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $vgpr4_vgpr5, $vgpr6_vgpr7 + ; CHECK-LABEL: name: test_vselect_v2s32 + ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3 + ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr4_vgpr5 + ; CHECK: [[COPY3:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr6_vgpr7 + ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>) + ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>) + ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[UV2]] + ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[UV3]] + ; CHECK: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](<2 x s32>) + ; CHECK: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY3]](<2 x s32>) + ; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[UV4]], [[UV6]] + ; CHECK: [[SELECT1:%[0-9]+]]:_(s32) = G_SELECT [[ICMP1]](s1), [[UV5]], [[UV7]] + ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SELECT]](s32), [[SELECT1]](s32) + ; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>) + %0:_(<2 x s32>) = COPY $vgpr0_vgpr1 + %1:_(<2 x s32>) = COPY $vgpr2_vgpr3 + %2:_(<2 x s32>) = COPY $vgpr4_vgpr5 + %3:_(<2 x s32>) = COPY $vgpr6_vgpr7 + + %4:_(<2 x s1>) = G_ICMP intpred(ne), %0, %1 + %5:_(<2 x s32>) = G_SELECT %4, %2, %3 + $vgpr0_vgpr1 = COPY %5 + +... + Index: test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir @@ -265,3 +265,18 @@ %5:_(s32) = G_ANYEXT %4 $vgpr0 = COPY %5 ... 
+ +--- +name: test_shl_i8_i8 +body: | + bb.0: + liveins: $vgpr0, $vgpr1 + + %0:_(s32) = COPY $vgpr0 + %1:_(s32) = COPY $vgpr1 + %2:_(s8) = G_TRUNC %0 + %3:_(s8) = G_TRUNC %1 + %4:_(s8) = G_SHL %2, %3 + %5:_(s32) = G_ANYEXT %4 + $vgpr0 = COPY %5 +... Index: test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-store.mir @@ -120,3 +120,166 @@ %1:_(<3 x s32>) = COPY $vgpr2_vgpr3_vgpr4 G_STORE %1, %0 :: (store 12, align 4, addrspace 1) ... + +--- +name: test_truncstore_global_s64_to_s8 +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2_vgpr3 + + ; CHECK-LABEL: name: test_truncstore_global_s64_to_s8 + ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3 + ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) + ; CHECK: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 1, addrspace 1) + %0:_(p1) = COPY $vgpr0_vgpr1 + %1:_(s64) = COPY $vgpr2_vgpr3 + G_STORE %1, %0 :: (store 1, addrspace 1) +... + +--- +name: test_truncstore_global_s64_to_s16 +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2_vgpr3 + + ; CHECK-LABEL: name: test_truncstore_global_s64_to_s16 + ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3 + ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) + ; CHECK: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 1, addrspace 1) + %0:_(p1) = COPY $vgpr0_vgpr1 + %1:_(s64) = COPY $vgpr2_vgpr3 + G_STORE %1, %0 :: (store 1, addrspace 1) +... 
+ +--- +name: test_truncstore_global_s64_to_s32 +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2_vgpr3 + + ; CHECK-LABEL: name: test_truncstore_global_s64_to_s32 + ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3 + ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64) + ; CHECK: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 4, addrspace 1) + %0:_(p1) = COPY $vgpr0_vgpr1 + %1:_(s64) = COPY $vgpr2_vgpr3 + G_STORE %1, %0 :: (store 4, addrspace 1) +... + +--- +name: test_truncstore_global_s128_to_s16 +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5 + + ; CHECK-LABEL: name: test_truncstore_global_s128_to_s16 + ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 + ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s128) + ; CHECK: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 2, addrspace 1) + %0:_(p1) = COPY $vgpr0_vgpr1 + %1:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 + G_STORE %1, %0 :: (store 2, addrspace 1) +... + +--- +name: test_truncstore_global_s128_to_s8 +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5 + + ; CHECK-LABEL: name: test_truncstore_global_s128_to_s8 + ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 + ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s128) + ; CHECK: G_STORE [[TRUNC]](s32), [[COPY]](p1) :: (store 1, addrspace 1) + %0:_(p1) = COPY $vgpr0_vgpr1 + %1:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 + G_STORE %1, %0 :: (store 1, addrspace 1) +... 
+ +name: test_store_global_i1 +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + + ; CHECK-LABEL: name: test_store_global_i1 + ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1 + ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]] + ; CHECK: G_STORE [[AND]](s32), [[COPY]](p1) :: (store 1, addrspace 1) + %0:_(p1) = COPY $vgpr0_vgpr1 + %1:_(s32) = COPY $vgpr2 + %2:_(s1) = G_TRUNC %1 + G_STORE %2, %0 :: (store 1, addrspace 1) +... + +--- +name: test_store_global_i8 +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + + ; CHECK-LABEL: name: test_store_global_i8 + ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; CHECK: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 1, addrspace 1) + %0:_(p1) = COPY $vgpr0_vgpr1 + %1:_(s32) = COPY $vgpr2 + %2:_(s8) = G_TRUNC %1 + G_STORE %2, %0 :: (store 1, addrspace 1) +... + +--- +name: test_store_global_i16 +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2 + + ; CHECK-LABEL: name: test_store_global_i16 + ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2 + ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32) + ; CHECK: G_STORE [[COPY2]](s32), [[COPY]](p1) :: (store 2, addrspace 1) + %0:_(p1) = COPY $vgpr0_vgpr1 + %1:_(s32) = COPY $vgpr2 + %2:_(s16) = G_TRUNC %1 + G_STORE %2, %0 :: (store 2, addrspace 1) +... 
+ +--- +name: test_store_global_i128 +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5 + + ; CHECK-LABEL: name: test_store_global_i128 + ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 + ; CHECK: G_STORE [[COPY1]](s128), [[COPY]](p1) :: (store 16, addrspace 1) + %0:_(p1) = COPY $vgpr0_vgpr1 + %1:_(s128) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 + G_STORE %1, %0 :: (store 16, addrspace 1) +... + +--- +name: test_store_global_v2s64 +body: | + bb.0: + liveins: $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5 + + ; CHECK-LABEL: name: test_store_global_v2s64 + ; CHECK: [[COPY:%[0-9]+]]:_(p1) = COPY $vgpr0_vgpr1 + ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 + ; CHECK: G_STORE [[COPY1]](<2 x s64>), [[COPY]](p1) :: (store 16, addrspace 1) + %0:_(p1) = COPY $vgpr0_vgpr1 + %1:_(<2 x s64>) = COPY $vgpr2_vgpr3_vgpr4_vgpr5 + G_STORE %1, %0 :: (store 16, addrspace 1) + +... Index: test/CodeGen/AMDGPU/GlobalISel/regbankselect-ptrtoint.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/regbankselect-ptrtoint.mir +++ test/CodeGen/AMDGPU/GlobalISel/regbankselect-ptrtoint.mir @@ -10,10 +10,10 @@ bb.0: liveins: $sgpr0_sgpr1 ; CHECK-LABEL: name: ptrtoint_s - ; CHECK: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1 - ; CHECK: [[PTRTOINT:%[0-9]+]]:sgpr(p4) = G_PTRTOINT [[COPY]](s64) - %0:_(s64) = COPY $sgpr0_sgpr1 - %1:_(p4) = G_PTRTOINT %0 + ; CHECK: [[COPY:%[0-9]+]]:sgpr(p1) = COPY $sgpr0_sgpr1 + ; CHECK: [[PTRTOINT:%[0-9]+]]:sgpr(s64) = G_PTRTOINT [[COPY]](p1) + %0:_(p1) = COPY $sgpr0_sgpr1 + %1:_(s64) = G_PTRTOINT %0 ... 
--- @@ -24,8 +24,8 @@ bb.0: liveins: $vgpr0_vgpr1 ; CHECK-LABEL: name: ptrtoint_v - ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1 - ; CHECK: [[PTRTOINT:%[0-9]+]]:vgpr(p0) = G_PTRTOINT [[COPY]](s64) - %0:_(s64) = COPY $vgpr0_vgpr1 - %1:_(p0) = G_PTRTOINT %0 + ; CHECK: [[COPY:%[0-9]+]]:vgpr(p1) = COPY $vgpr0_vgpr1 + ; CHECK: [[PTRTOINT:%[0-9]+]]:vgpr(s64) = G_PTRTOINT [[COPY]](p1) + %0:_(p1) = COPY $vgpr0_vgpr1 + %1:_(s64) = G_PTRTOINT %0 ... Index: test/CodeGen/AMDGPU/fix-wwm-vgpr-copy.ll =================================================================== --- /dev/null +++ test/CodeGen/AMDGPU/fix-wwm-vgpr-copy.ll @@ -0,0 +1,47 @@ +; RUN: llc -mtriple=amdgcn--amdpal -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s + +declare i64 @llvm.amdgcn.icmp.i32(i32, i32, i32) #0 +declare i32 @llvm.amdgcn.set.inactive.i32(i32, i32) #0 +declare i32 @llvm.amdgcn.wwm.i32(i32) #1 +declare void @llvm.amdgcn.tbuffer.store.f32(float, <4 x i32>, i32, i32, i32, i32, i32, i32, i1, i1) #2 +declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #2 + +define amdgpu_hs void @foo(i32 inreg %arg, <4 x i32> inreg %buffer) { +entry: + br label %work + +bb42: + br label %bb602 + +bb602: + %tmp603 = phi i32 [ 0, %bb42 ], [ 1, %work ] + %tmp607 = icmp eq i32 %tmp603, %tmp1196 + br i1 %tmp607, label %bb49, label %bb54 + +bb49: + tail call void @llvm.amdgcn.tbuffer.store.f32(float 1.000000e+00, <4 x i32> %buffer, i32 0, i32 1, i32 1, i32 4, i32 4, i32 7, i1 true, i1 false) #7 + ret void + +bb54: + ret void + +work: +; GCN: s_not_b64 exec, exec +; GCN: v_mov_b32_e32 v[[tmp1189:[0-9]+]], 1 +; GCN: s_not_b64 exec, exec + %tmp1189 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 4, i32 1) + +; GCN: s_or_saveexec_b64 s{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}, -1 +; GCN: v_lshlrev_b32_e32 v[[tmp1191:[0-9]+]], 2, v[[tmp1189]] + %tmp1191 = mul i32 %tmp1189, 4 + +; GCN: s_mov_b64 exec, s{{\[}}[[LO]]:[[HI]]{{\]}} + %tmp1196 = tail call i32 
@llvm.amdgcn.wwm.i32(i32 %tmp1191) + + %tmp34 = icmp eq i32 %arg, 0 + br i1 %tmp34, label %bb602, label %bb42 +} + +attributes #0 = { convergent nounwind readnone } +attributes #1 = { nounwind readnone speculatable } +attributes #2 = { nounwind writeonly } Index: test/CodeGen/ARM/load.ll =================================================================== --- test/CodeGen/ARM/load.ll +++ test/CodeGen/ARM/load.ll @@ -562,3 +562,473 @@ store i32 %x, i32* %0, align 4 ret void } + + +; Negative offset + +define i32 @ldrsb_ri_negative(i8* %p) { +; CHECK-T1-LABEL: ldrsb_ri_negative: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: movs r1, #0 +; CHECK-T1-NEXT: mvns r1, r1 +; CHECK-T1-NEXT: ldrsb r0, [r0, r1] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: ldrsb_ri_negative: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: ldrsb r0, [r0, #-1] +; CHECK-T2-NEXT: bx lr +entry: + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -1 + %0 = load i8, i8* %add.ptr, align 1 + %conv = sext i8 %0 to i32 + ret i32 %conv +} + +define i32 @ldrsh_ri_negative(i8* %p) { +; CHECK-T1-LABEL: ldrsh_ri_negative: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: movs r1, #0 +; CHECK-T1-NEXT: mvns r1, r1 +; CHECK-T1-NEXT: ldrsh r0, [r0, r1] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: ldrsh_ri_negative: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: ldrsh r0, [r0, #-1] +; CHECK-T2-NEXT: bx lr +entry: + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -1 + %0 = bitcast i8* %add.ptr to i16* + %1 = load i16, i16* %0, align 2 + %conv = sext i16 %1 to i32 + ret i32 %conv +} + +define i32 @ldrb_ri_negative(i8* %p) { +; CHECK-T1-LABEL: ldrb_ri_negative: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: subs r0, r0, #1 +; CHECK-T1-NEXT: ldrb r0, [r0] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: ldrb_ri_negative: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: ldrb r0, [r0, #-1] +; CHECK-T2-NEXT: bx lr +entry: + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -1 + %0 = load i8, i8* %add.ptr, align 1 + 
%conv = zext i8 %0 to i32 + ret i32 %conv +} + +define i32 @ldrh_ri_negative(i8* %p) { +; CHECK-T1-LABEL: ldrh_ri_negative: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: subs r0, r0, #1 +; CHECK-T1-NEXT: ldrh r0, [r0] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: ldrh_ri_negative: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: ldrh r0, [r0, #-1] +; CHECK-T2-NEXT: bx lr +entry: + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -1 + %0 = bitcast i8* %add.ptr to i16* + %1 = load i16, i16* %0, align 2 + %conv = zext i16 %1 to i32 + ret i32 %conv +} + +define i32 @ldr_ri_negative(i8* %p) { +; CHECK-T1-LABEL: ldr_ri_negative: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: subs r0, r0, #1 +; CHECK-T1-NEXT: ldr r0, [r0] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: ldr_ri_negative: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: ldr r0, [r0, #-1] +; CHECK-T2-NEXT: bx lr +entry: + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -1 + %0 = bitcast i8* %add.ptr to i32* + %1 = load i32, i32* %0, align 4 + ret i32 %1 +} + +define void @strb_ri_negative(i8* %p, i32 %x) { +; CHECK-T1-LABEL: strb_ri_negative: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: subs r0, r0, #1 +; CHECK-T1-NEXT: strb r1, [r0] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: strb_ri_negative: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: strb r1, [r0, #-1] +; CHECK-T2-NEXT: bx lr +entry: + %conv = trunc i32 %x to i8 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -1 + store i8 %conv, i8* %add.ptr, align 1 + ret void +} + +define void @strh_ri_negative(i8* %p, i32 %x) { +; CHECK-T1-LABEL: strh_ri_negative: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: subs r0, r0, #1 +; CHECK-T1-NEXT: strh r1, [r0] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: strh_ri_negative: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: strh r1, [r0, #-1] +; CHECK-T2-NEXT: bx lr +entry: + %conv = trunc i32 %x to i16 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -1 + %0 = bitcast i8* %add.ptr to i16* + store 
i16 %conv, i16* %0, align 2 + ret void +} + +define void @str_ri_negative(i8* %p, i32 %x) { +; CHECK-T1-LABEL: str_ri_negative: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: subs r0, r0, #1 +; CHECK-T1-NEXT: str r1, [r0] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: str_ri_negative: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: str r1, [r0, #-1] +; CHECK-T2-NEXT: bx lr +entry: + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -1 + %0 = bitcast i8* %add.ptr to i32* + store i32 %x, i32* %0, align 4 + ret void +} + + +; Negative 255 offset + +define i32 @ldrsb_ri_negative255(i8* %p) { +; CHECK-T1-LABEL: ldrsb_ri_negative255: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: movs r1, #254 +; CHECK-T1-NEXT: mvns r1, r1 +; CHECK-T1-NEXT: ldrsb r0, [r0, r1] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: ldrsb_ri_negative255: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: ldrsb r0, [r0, #-255] +; CHECK-T2-NEXT: bx lr +entry: + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -255 + %0 = load i8, i8* %add.ptr, align 1 + %conv = sext i8 %0 to i32 + ret i32 %conv +} + +define i32 @ldrsh_ri_negative255(i8* %p) { +; CHECK-T1-LABEL: ldrsh_ri_negative255: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: movs r1, #254 +; CHECK-T1-NEXT: mvns r1, r1 +; CHECK-T1-NEXT: ldrsh r0, [r0, r1] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: ldrsh_ri_negative255: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: ldrsh r0, [r0, #-255] +; CHECK-T2-NEXT: bx lr +entry: + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -255 + %0 = bitcast i8* %add.ptr to i16* + %1 = load i16, i16* %0, align 2 + %conv = sext i16 %1 to i32 + ret i32 %conv +} + +define i32 @ldrb_ri_negative255(i8* %p) { +; CHECK-T1-LABEL: ldrb_ri_negative255: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: subs r0, #255 +; CHECK-T1-NEXT: ldrb r0, [r0] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: ldrb_ri_negative255: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: ldrb r0, [r0, #-255] +; CHECK-T2-NEXT: bx lr +entry: + 
%add.ptr = getelementptr inbounds i8, i8* %p, i32 -255 + %0 = load i8, i8* %add.ptr, align 1 + %conv = zext i8 %0 to i32 + ret i32 %conv +} + +define i32 @ldrh_ri_negative255(i8* %p) { +; CHECK-T1-LABEL: ldrh_ri_negative255: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: subs r0, #255 +; CHECK-T1-NEXT: ldrh r0, [r0] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: ldrh_ri_negative255: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: ldrh r0, [r0, #-255] +; CHECK-T2-NEXT: bx lr +entry: + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -255 + %0 = bitcast i8* %add.ptr to i16* + %1 = load i16, i16* %0, align 2 + %conv = zext i16 %1 to i32 + ret i32 %conv +} + +define i32 @ldr_ri_negative255(i8* %p) { +; CHECK-T1-LABEL: ldr_ri_negative255: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: subs r0, #255 +; CHECK-T1-NEXT: ldr r0, [r0] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: ldr_ri_negative255: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: ldr r0, [r0, #-255] +; CHECK-T2-NEXT: bx lr +entry: + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -255 + %0 = bitcast i8* %add.ptr to i32* + %1 = load i32, i32* %0, align 4 + ret i32 %1 +} + +define void @strb_ri_negative255(i8* %p, i32 %x) { +; CHECK-T1-LABEL: strb_ri_negative255: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: subs r0, #255 +; CHECK-T1-NEXT: strb r1, [r0] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: strb_ri_negative255: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: strb r1, [r0, #-255] +; CHECK-T2-NEXT: bx lr +entry: + %conv = trunc i32 %x to i8 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -255 + store i8 %conv, i8* %add.ptr, align 1 + ret void +} + +define void @strh_ri_negative255(i8* %p, i32 %x) { +; CHECK-T1-LABEL: strh_ri_negative255: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: subs r0, #255 +; CHECK-T1-NEXT: strh r1, [r0] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: strh_ri_negative255: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: strh r1, [r0, #-255] +; CHECK-T2-NEXT: bx lr 
+entry: + %conv = trunc i32 %x to i16 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -255 + %0 = bitcast i8* %add.ptr to i16* + store i16 %conv, i16* %0, align 2 + ret void +} + +define void @str_ri_negative255(i8* %p, i32 %x) { +; CHECK-T1-LABEL: str_ri_negative255: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: subs r0, #255 +; CHECK-T1-NEXT: str r1, [r0] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: str_ri_negative255: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: str r1, [r0, #-255] +; CHECK-T2-NEXT: bx lr +entry: + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -255 + %0 = bitcast i8* %add.ptr to i32* + store i32 %x, i32* %0, align 4 + ret void +} + + +; Negative 256 offset + +define i32 @ldrsb_ri_negative256(i8* %p) { +; CHECK-T1-LABEL: ldrsb_ri_negative256: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: movs r1, #255 +; CHECK-T1-NEXT: mvns r1, r1 +; CHECK-T1-NEXT: ldrsb r0, [r0, r1] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: ldrsb_ri_negative256: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: mvn r1, #255 +; CHECK-T2-NEXT: ldrsb r0, [r0, r1] +; CHECK-T2-NEXT: bx lr +entry: + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -256 + %0 = load i8, i8* %add.ptr, align 1 + %conv = sext i8 %0 to i32 + ret i32 %conv +} + +define i32 @ldrsh_ri_negative256(i8* %p) { +; CHECK-T1-LABEL: ldrsh_ri_negative256: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: movs r1, #255 +; CHECK-T1-NEXT: mvns r1, r1 +; CHECK-T1-NEXT: ldrsh r0, [r0, r1] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: ldrsh_ri_negative256: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: mvn r1, #255 +; CHECK-T2-NEXT: ldrsh r0, [r0, r1] +; CHECK-T2-NEXT: bx lr +entry: + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -256 + %0 = bitcast i8* %add.ptr to i16* + %1 = load i16, i16* %0, align 2 + %conv = sext i16 %1 to i32 + ret i32 %conv +} + +define i32 @ldrb_ri_negative256(i8* %p) { +; CHECK-T1-LABEL: ldrb_ri_negative256: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: movs r1, #255 
+; CHECK-T1-NEXT: mvns r1, r1 +; CHECK-T1-NEXT: ldrb r0, [r0, r1] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: ldrb_ri_negative256: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: mvn r1, #255 +; CHECK-T2-NEXT: ldrb r0, [r0, r1] +; CHECK-T2-NEXT: bx lr +entry: + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -256 + %0 = load i8, i8* %add.ptr, align 1 + %conv = zext i8 %0 to i32 + ret i32 %conv +} + +define i32 @ldrh_ri_negative256(i8* %p) { +; CHECK-T1-LABEL: ldrh_ri_negative256: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: movs r1, #255 +; CHECK-T1-NEXT: mvns r1, r1 +; CHECK-T1-NEXT: ldrh r0, [r0, r1] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: ldrh_ri_negative256: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: mvn r1, #255 +; CHECK-T2-NEXT: ldrh r0, [r0, r1] +; CHECK-T2-NEXT: bx lr +entry: + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -256 + %0 = bitcast i8* %add.ptr to i16* + %1 = load i16, i16* %0, align 2 + %conv = zext i16 %1 to i32 + ret i32 %conv +} + +define i32 @ldr_ri_negative256(i8* %p) { +; CHECK-T1-LABEL: ldr_ri_negative256: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: movs r1, #255 +; CHECK-T1-NEXT: mvns r1, r1 +; CHECK-T1-NEXT: ldr r0, [r0, r1] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: ldr_ri_negative256: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: mvn r1, #255 +; CHECK-T2-NEXT: ldr r0, [r0, r1] +; CHECK-T2-NEXT: bx lr +entry: + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -256 + %0 = bitcast i8* %add.ptr to i32* + %1 = load i32, i32* %0, align 4 + ret i32 %1 +} + +define void @strb_ri_negative256(i8* %p, i32 %x) { +; CHECK-T1-LABEL: strb_ri_negative256: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: movs r2, #255 +; CHECK-T1-NEXT: mvns r2, r2 +; CHECK-T1-NEXT: strb r1, [r0, r2] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: strb_ri_negative256: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: mvn r2, #255 +; CHECK-T2-NEXT: strb r1, [r0, r2] +; CHECK-T2-NEXT: bx lr +entry: + %conv = trunc i32 %x to i8 + %add.ptr 
= getelementptr inbounds i8, i8* %p, i32 -256 + store i8 %conv, i8* %add.ptr, align 1 + ret void +} + +define void @strh_ri_negative256(i8* %p, i32 %x) { +; CHECK-T1-LABEL: strh_ri_negative256: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: movs r2, #255 +; CHECK-T1-NEXT: mvns r2, r2 +; CHECK-T1-NEXT: strh r1, [r0, r2] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: strh_ri_negative256: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: mvn r2, #255 +; CHECK-T2-NEXT: strh r1, [r0, r2] +; CHECK-T2-NEXT: bx lr +entry: + %conv = trunc i32 %x to i16 + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -256 + %0 = bitcast i8* %add.ptr to i16* + store i16 %conv, i16* %0, align 2 + ret void +} + +define void @str_ri_negative256(i8* %p, i32 %x) { +; CHECK-T1-LABEL: str_ri_negative256: +; CHECK-T1: @ %bb.0: @ %entry +; CHECK-T1-NEXT: movs r2, #255 +; CHECK-T1-NEXT: mvns r2, r2 +; CHECK-T1-NEXT: str r1, [r0, r2] +; CHECK-T1-NEXT: bx lr +; +; CHECK-T2-LABEL: str_ri_negative256: +; CHECK-T2: @ %bb.0: @ %entry +; CHECK-T2-NEXT: mvn r2, #255 +; CHECK-T2-NEXT: str r1, [r0, r2] +; CHECK-T2-NEXT: bx lr +entry: + %add.ptr = getelementptr inbounds i8, i8* %p, i32 -256 + %0 = bitcast i8* %add.ptr to i32* + store i32 %x, i32* %0, align 4 + ret void +} Index: test/CodeGen/PowerPC/convert-rr-to-ri-instr-add.mir =================================================================== --- /dev/null +++ test/CodeGen/PowerPC/convert-rr-to-ri-instr-add.mir @@ -0,0 +1,17 @@ +# RUN: llc -mtriple=powerpc64le--linux-gnu -stop-after ppc-pre-emit-peephole %s -o - -verify-machineinstrs | FileCheck %s + +--- +# ADDI8 + STFSX can be converted to ADDI8 + STFS even ADDI8 can not be erased. 
+name: testFwdOperandKilledAfter +# CHECK: name: testFwdOperandKilledAfter +tracksRegLiveness: true +body: | + bb.0.entry: + liveins: $x3, $f1, $x5 + $x3 = ADDI8 $x5, 100 + STFSX killed $f1, $zero8, $x3 + ; CHECK: STFS killed $f1, 100, $x5 + STD killed $x3, killed $x5, 100 + ; CHECK: STD killed $x3, killed $x5, 100 + BLR8 implicit $lr8, implicit $rm +... Index: test/CodeGen/PowerPC/pr24546.ll =================================================================== --- test/CodeGen/PowerPC/pr24546.ll +++ test/CodeGen/PowerPC/pr24546.ll @@ -56,7 +56,7 @@ !llvm.module.flags = !{!29, !30} !llvm.ident = !{!31} -!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.8.0 (git://github.com/llvm-mirror/clang.git e0848b6353721eb1b278a5bbea257bbf6316251e) (git://github.com/llvm-mirror/llvm.git 8724a428dfd5e78d7865bb01783708e83f9ed128)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !3, globals: !23) +!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang version 3.8.0", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !3, globals: !23) !1 = !DIFile(filename: "testcase.i", directory: "/tmp/glibc.build") !2 = !{} !3 = !{!4} @@ -86,7 +86,7 @@ !28 = !DISubrange(count: 23) !29 = !{i32 2, !"Dwarf Version", i32 4} !30 = !{i32 2, !"Debug Info Version", i32 3} -!31 = !{!"clang version 3.8.0 (git://github.com/llvm-mirror/clang.git e0848b6353721eb1b278a5bbea257bbf6316251e) (git://github.com/llvm-mirror/llvm.git 8724a428dfd5e78d7865bb01783708e83f9ed128)"} +!31 = !{!"clang version 3.8.0"} !32 = !DILocation(line: 21, column: 32, scope: !33) !33 = distinct !DILexicalBlock(scope: !6, file: !1, line: 21, column: 6) !34 = !DILocation(line: 22, column: 15, scope: !35) Index: test/CodeGen/SystemZ/debuginstr-cgp.mir =================================================================== --- /dev/null +++ test/CodeGen/SystemZ/debuginstr-cgp.mir @@ -0,0 +1,171 @@ +# 
Check that the codegenprepare succeeds in dupRetToEnableTailCallOpts() also +# in the presence of a call to @llvm.dbg.value() +# +# RUN: llc %s -mtriple=s390x-linux-gnu -mcpu=z13 -start-before=codegenprepare \ +# RUN: -stop-after codegenprepare -o - | FileCheck %s +# +# CHECK-LABEL: bb2: +# CHECK: ret +# CHECK-LABEL: bb4: +# CHECK: ret + + +# Generated with: +# +# bin/llc -mtriple=s390x-linux-gnu -mcpu=z13 -stop-before codegenprepare -simplify-mir +# +# %0 = type { i32 (...)**, i16, %1* } +# %1 = type { i32 (...)** } +# %2 = type { i32 (...)**, %1*, i8, i32, i32, i32, i16, i32, i16, i32, i16*, %3*, %6*, %9 } +# %3 = type { %4 } +# %4 = type { i32 (...)**, i8, i32, i32, %5**, %1* } +# %5 = type { i32, i32 } +# %6 = type { %7*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %1* } +# %7 = type { %8 } +# %8 = type { i32 (...)**, i8, i32, i32, %0**, %1* } +# %9 = type { i8* } +# %10 = type { %0, i32, i32, %0* } +# +# define %0* @Fun(%2* %arg) !dbg !7 { +# bb: +# switch i32 undef, label %bb3 [ +# i32 58, label %bb1 +# i32 41, label %bb2 +# ], !dbg !14 +# +# bb1: ; preds = %bb +# br label %bb4, !dbg !15 +# +# bb2: ; preds = %bb +# %tmp = tail call %10* @hoge(%6* undef, %0* undef, i32 signext 0, i32 signext 0), !dbg !16 +# call void @llvm.dbg.value(metadata %10* %tmp, metadata !10, metadata !DIExpression()), !dbg !16 +# br label %bb4, !dbg !17 +# +# bb3: ; preds = %bb +# unreachable, !dbg !18 +# +# bb4: ; preds = %bb2, %bb1 +# %tmp5 = phi %10* [ undef, %bb1 ], [ %tmp, %bb2 ], !dbg !19 +# call void @llvm.dbg.value(metadata %10* %tmp5, metadata !12, metadata !DIExpression()), !dbg !19 +# %tmp6 = bitcast %10* %tmp5 to %0*, !dbg !20 +# call void @llvm.dbg.value(metadata %0* %tmp6, metadata !13, metadata !DIExpression()), !dbg !20 +# ret %0* %tmp6, !dbg !21 +# } +# +# declare %10* @hoge(%6*, %0*, i32, i32) +# +# ; Function Attrs: nounwind readnone speculatable +# declare void @llvm.dbg.value(metadata, metadata, metadata) #1 +# +# attributes #0 = { 
"use-soft-float"="false" } +# attributes #1 = { nounwind readnone speculatable } +# +# !llvm.module.flags = !{!0, !1} +# !llvm.dbg.cu = !{!2} +# !llvm.debugify = !{!5, !6} +# +# !0 = !{i32 2, !"Debug Info Version", i32 3} +# !1 = !{i32 1, !"wchar_size", i32 4} +# !2 = distinct !DICompileUnit(language: DW_LANG_C, file: !3, producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4) +# !3 = !DIFile(filename: "tc.ll", directory: "/") +# !4 = !{} +# !5 = !{i32 8} +# !6 = !{i32 3} +# !7 = distinct !DISubprogram(name: "eggs", linkageName: "eggs", scope: null, file: !3, line: 1, type: !8, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !2, retainedNodes: !9) +# !8 = !DISubroutineType(types: !4) +# !9 = !{!10, !12, !13} +# !10 = !DILocalVariable(name: "1", scope: !7, file: !3, line: 3, type: !11) +# !11 = !DIBasicType(name: "ty64", size: 64, encoding: DW_ATE_unsigned) +# !12 = !DILocalVariable(name: "2", scope: !7, file: !3, line: 6, type: !11) +# !13 = !DILocalVariable(name: "3", scope: !7, file: !3, line: 7, type: !11) +# !14 = !DILocation(line: 1, column: 1, scope: !7) +# !15 = !DILocation(line: 2, column: 1, scope: !7) +# !16 = !DILocation(line: 3, column: 1, scope: !7) +# !17 = !DILocation(line: 4, column: 1, scope: !7) +# !18 = !DILocation(line: 5, column: 1, scope: !7) +# !19 = !DILocation(line: 6, column: 1, scope: !7) +# !20 = !DILocation(line: 7, column: 1, scope: !7) +# !21 = !DILocation(line: 8, column: 1, scope: !7) + + +--- | + + %0 = type { i32 (...)**, i16, %1* } + %1 = type { i32 (...)** } + %2 = type { i32 (...)**, %1*, i8, i32, i32, i32, i16, i32, i16, i32, i16*, %3*, %6*, %9 } + %3 = type { %4 } + %4 = type { i32 (...)**, i8, i32, i32, %5**, %1* } + %5 = type { i32, i32 } + %6 = type { %7*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %0*, %1* } + %7 = type { %8 } + %8 = type { i32 (...)**, i8, i32, i32, %0**, %1* } + %9 = type { i8* } + %10 = type { %0, i32, i32, %0* } + + 
define %0* @Fun(%2* %arg) #0 !dbg !7 { + bb: + switch i32 undef, label %bb3 [ + i32 58, label %bb1 + i32 41, label %bb2 + ], !dbg !14 + + bb1: ; preds = %bb + br label %bb4, !dbg !15 + + bb2: ; preds = %bb + %tmp = tail call %10* @hoge(%6* undef, %0* undef, i32 signext 0, i32 signext 0), !dbg !16 + call void @llvm.dbg.value(metadata %10* %tmp, metadata !10, metadata !DIExpression()), !dbg !16 + br label %bb4, !dbg !17 + + bb3: ; preds = %bb + unreachable, !dbg !18 + + bb4: ; preds = %bb2, %bb1 + %tmp5 = phi %10* [ undef, %bb1 ], [ %tmp, %bb2 ], !dbg !19 + call void @llvm.dbg.value(metadata %10* %tmp5, metadata !12, metadata !DIExpression()), !dbg !19 + %tmp6 = bitcast %10* %tmp5 to %0*, !dbg !20 + call void @llvm.dbg.value(metadata %0* %tmp6, metadata !13, metadata !DIExpression()), !dbg !20 + ret %0* %tmp6, !dbg !21 + } + + declare %10* @hoge(%6*, %0*, i32, i32) #0 + + ; Function Attrs: nounwind readnone speculatable + declare void @llvm.dbg.value(metadata, metadata, metadata) #1 + + attributes #0 = { "target-cpu"="z13" } + attributes #1 = { nounwind readnone speculatable "target-cpu"="z13" } + + !llvm.module.flags = !{!0, !1} + !llvm.dbg.cu = !{!2} + !llvm.debugify = !{!5, !6} + + !0 = !{i32 2, !"Debug Info Version", i32 3} + !1 = !{i32 1, !"wchar_size", i32 4} + !2 = distinct !DICompileUnit(language: DW_LANG_C, file: !3, producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !4) + !3 = !DIFile(filename: "tc.ll", directory: "/") + !4 = !{} + !5 = !{i32 8} + !6 = !{i32 3} + !7 = distinct !DISubprogram(name: "eggs", linkageName: "eggs", scope: null, file: !3, line: 1, type: !8, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !2, retainedNodes: !9) + !8 = !DISubroutineType(types: !4) + !9 = !{!10, !12, !13} + !10 = !DILocalVariable(name: "1", scope: !7, file: !3, line: 3, type: !11) + !11 = !DIBasicType(name: "ty64", size: 64, encoding: DW_ATE_unsigned) + !12 = !DILocalVariable(name: "2", scope: !7, file: !3, 
line: 6, type: !11) + !13 = !DILocalVariable(name: "3", scope: !7, file: !3, line: 7, type: !11) + !14 = !DILocation(line: 1, column: 1, scope: !7) + !15 = !DILocation(line: 2, column: 1, scope: !7) + !16 = !DILocation(line: 3, column: 1, scope: !7) + !17 = !DILocation(line: 4, column: 1, scope: !7) + !18 = !DILocation(line: 5, column: 1, scope: !7) + !19 = !DILocation(line: 6, column: 1, scope: !7) + !20 = !DILocation(line: 7, column: 1, scope: !7) + !21 = !DILocation(line: 8, column: 1, scope: !7) + +... +--- +name: Fun +alignment: 4 +tracksRegLiveness: true +... Index: test/CodeGen/WebAssembly/annotations.mir =================================================================== --- test/CodeGen/WebAssembly/annotations.mir +++ /dev/null @@ -1,94 +0,0 @@ -# RUN: llc -mtriple=wasm32-unknown-unknown -start-after xray-instrumentation -wasm-keep-registers %s -o - | FileCheck %s - ---- -# Tests if block/loop/try/catch/end instructions are correctly printed with -# their annotations. - -# CHECK: test0: -# CHECK: block -# CHECK: try -# CHECK: br 0 # 0: down to label1 -# CHECK: catch_all # catch0: -# CHECK: block -# CHECK: br_if 0, 1 # 0: down to label2 -# CHECK: loop # label3: -# CHECK: br_if 0, 1 # 0: up to label3 -# CHECK: end_loop -# CHECK: end_block # label2: -# CHECK: try -# CHECK: rethrow 0 # 0: down to catch1 -# CHECK: catch_all # catch1: -# CHECK: block -# CHECK: try -# CHECK: br 0 # 0: down to label6 -# CHECK: catch_all # catch2: -# CHECK: unreachable -# CHECK: end_try # label6: -# CHECK: end_block # label5: -# CHECK: rethrow 0 # 0: to caller -# CHECK: end_try # label4: -# CHECK: end_try # label1: -# CHECK: end_block # label0: - -name: test0 -liveins: - - { reg: '$arguments', reg: '$value_stack' } -body: | - bb.0: - successors: %bb.7, %bb.1 - BLOCK 64, implicit-def $value_stack, implicit $value_stack - TRY 64, implicit-def $value_stack, implicit $value_stack - BR 0, implicit-def $arguments - - bb.1 (landing-pad): - ; predecessors: %bb.0 - successors: %bb.2, %bb.3 
- - CATCH_ALL implicit-def $arguments - BLOCK 64, implicit-def $value_stack, implicit $value_stack - BR_IF 0, 1, implicit-def $arguments, implicit-def $value_stack, implicit $value_stack - - bb.2: - ; predecessors: %bb.1, %bb.2 - successors: %bb.2, %bb.3 - - LOOP 64, implicit-def $value_stack, implicit $value_stack - BR_IF 0, 1, implicit-def $arguments - - bb.3: - ; predecessors: %bb.1, %bb.2 - successors: %bb.4 - - END_LOOP implicit-def $value_stack, implicit $value_stack - END_BLOCK implicit-def $value_stack, implicit $value_stack - TRY 64, implicit-def $value_stack, implicit $value_stack - RETHROW 0, implicit-def $arguments - - bb.4 (landing-pad): - ; predecessors: %bb.3 - successors: %bb.6, %bb.5 - - CATCH_ALL implicit-def $arguments - BLOCK 64, implicit-def $value_stack, implicit $value_stack - TRY 64, implicit-def $value_stack, implicit $value_stack - BR 0, implicit-def $arguments - - bb.5 (landing-pad): - ; predecessors: %bb.4 - CATCH_ALL implicit-def $arguments - UNREACHABLE implicit-def dead $arguments - - bb.6: - ; predecessors: %bb.4 - END_TRY implicit-def $value_stack, implicit $value_stack - END_BLOCK implicit-def $value_stack, implicit $value_stack - RETHROW 0, implicit-def $arguments - - bb.7: - ; predecessors: %bb.0 - END_TRY implicit-def $value_stack, implicit $value_stack - END_TRY implicit-def $value_stack, implicit $value_stack - END_BLOCK implicit-def $value_stack, implicit $value_stack - FALLTHROUGH_RETURN_VOID implicit-def dead $arguments - END_FUNCTION implicit-def $value_stack, implicit $value_stack -... 
Index: test/CodeGen/WebAssembly/call.ll =================================================================== --- test/CodeGen/WebAssembly/call.ll +++ test/CodeGen/WebAssembly/call.ll @@ -1,5 +1,5 @@ -; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-temporary-workarounds=false -mattr=+sign-ext,+simd128 | FileCheck %s -; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -fast-isel -fast-isel-abort=1 -wasm-temporary-workarounds=false -mattr=+sign-ext,+simd128 | FileCheck %s +; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -mattr=+sign-ext,+simd128 | FileCheck %s +; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -fast-isel -fast-isel-abort=1 -mattr=+sign-ext,+simd128 | FileCheck %s ; Test that basic call operations assemble as expected. Index: test/CodeGen/WebAssembly/cfg-stackify-eh.ll =================================================================== --- test/CodeGen/WebAssembly/cfg-stackify-eh.ll +++ test/CodeGen/WebAssembly/cfg-stackify-eh.ll @@ -7,21 +7,25 @@ @_ZTId = external constant i8* ; Simple test case with two catch clauses +; void test0() { +; try { +; foo(); +; } catch (int n) { +; bar(); +; } catch (double d) { +; } +; } ; CHECK-LABEL: test0 +; CHECK: try ; CHECK: call foo@FUNCTION -; CHECK: .LBB0_1: -; CHECK: i32.catch +; CHECK: catch $[[EXCEPT_REF:[0-9]+]]= +; CHECK: block i32 +; CHECK: br_on_exn 0, __cpp_exception@EVENT, $[[EXCEPT_REF]] +; CHECK: rethrow +; CHECK: end_block ; CHECK: i32.call $drop=, _Unwind_CallPersonality@FUNCTION -; CHECK: i32.call $drop=, __cxa_begin_catch@FUNCTION -; CHECK: call bar@FUNCTION -; CHECK: call __cxa_end_catch@FUNCTION -; CHECK: .LBB0_3: -; CHECK: i32.call $drop=, __cxa_begin_catch@FUNCTION -; CHECK: call __cxa_end_catch@FUNCTION -; CHECK: .LBB0_5: -; CHECK: call __cxa_rethrow@FUNCTION -; CHECK: .LBB0_6: +; CHECK: end_try ; CHECK: return 
define void @test0() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { entry: @@ -68,39 +72,45 @@ } ; Nested try-catches within a catch +; void test1() { +; try { +; foo(); +; } catch (int n) { +; try { +; foo(); +; } catch (int n) { +; foo(); +; } +; } +; } ; CHECK-LABEL: test1 +; CHECK: try ; CHECK: call foo@FUNCTION -; CHECK: .LBB1_1: -; CHECK: i32.catch $0=, 0 -; CHECK: i32.call $drop=, _Unwind_CallPersonality@FUNCTION, $0 -; CHECK: i32.call $drop=, __cxa_begin_catch@FUNCTION, $0 -; CHECK: call foo@FUNCTION -; CHECK: .LBB1_3: -; CHECK: i32.catch $0=, 0 -; CHECK: i32.call $drop=, _Unwind_CallPersonality@FUNCTION, $0 -; CHECK: i32.call $drop=, __cxa_begin_catch@FUNCTION, $0 +; CHECK: catch +; CHECK: br_on_exn 0, __cpp_exception@EVENT +; CHECK: rethrow +; CHECK: i32.call $drop=, _Unwind_CallPersonality@FUNCTION +; CHECK: try ; CHECK: call foo@FUNCTION -; CHECK: .LBB1_5: -; CHECK: catch_all -; CHECK: call __cxa_end_catch@FUNCTION +; CHECK: catch +; CHECK: br_on_exn 0, __cpp_exception@EVENT ; CHECK: rethrow -; CHECK: .LBB1_6: -; CHECK: call __cxa_rethrow@FUNCTION +; CHECK: i32.call $drop=, _Unwind_CallPersonality@FUNCTION +; CHECK: try +; CHECK: i32.call $drop=, __cxa_begin_catch@FUNCTION +; CHECK: try +; CHECK: call foo@FUNCTION +; CHECK: catch $drop= ; CHECK: rethrow -; CHECK: .LBB1_7: -; CHECK: call __cxa_end_catch@FUNCTION -; CHECK: .LBB1_8: -; CHECK: catch_all -; CHECK: call __cxa_end_catch@FUNCTION -; CHECK: .LBB1_9: -; CHECK: call __cxa_rethrow@FUNCTION +; CHECK: end_try +; CHECK: catch $drop= ; CHECK: rethrow -; CHECK: .LBB1_10: -; CHECK: call __cxa_end_catch@FUNCTION -; CHECK: .LBB1_11: +; CHECK: end_try +; CHECK: end_try +; CHECK: end_try ; CHECK: return -define hidden void @test1() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { +define void @test1() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { entry: invoke void @foo() to label %try.cont11 unwind label %catch.dispatch @@ -175,30 
+185,38 @@ } ; Nested loop within a catch clause +; void test2() { +; try { +; foo(); +; } catch (...) { +; for (int i = 0; i < 50; i++) +; foo(); +; } +; } ; CHECK-LABEL: test2 +; CHECK: try ; CHECK: call foo@FUNCTION -; CHECK: .LBB2_1: -; CHECK: i32.catch -; CHECK: i32.call $drop=, __cxa_begin_catch@FUNCTION -; CHECK: .LBB2_2: +; CHECK: catch +; CHECK: br_on_exn 0, __cpp_exception@EVENT +; CHECK: rethrow +; CHECK: loop +; CHECK: try ; CHECK: call foo@FUNCTION -; CHECK: .LBB2_4: -; CHECK: catch_all +; CHECK: catch $drop= +; CHECK: try ; CHECK: call __cxa_end_catch@FUNCTION -; CHECK: .LBB2_5: -; CHECK: i32.catch -; CHECK: call __clang_call_terminate@FUNCTION +; CHECK: catch +; CHECK: br_on_exn 0, __cpp_exception@EVENT +; CHECK: call __clang_call_terminate@FUNCTION, 0 ; CHECK: unreachable -; CHECK: .LBB2_6: -; CHECK: catch_all -; CHECK: call _ZSt9terminatev@FUNCTION +; CHECK: call __clang_call_terminate@FUNCTION ; CHECK: unreachable -; CHECK: .LBB2_7: +; CHECK: end_try ; CHECK: rethrow -; CHECK: .LBB2_8: -; CHECK: call __cxa_end_catch@FUNCTION -; CHECK: .LBB2_10: +; CHECK: end_try +; CHECK: end_loop +; CHECK: end_try ; CHECK: return define void @test2() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { entry: Index: test/CodeGen/WebAssembly/cfg-stackify-eh.mir =================================================================== --- test/CodeGen/WebAssembly/cfg-stackify-eh.mir +++ /dev/null @@ -1,322 +0,0 @@ -# RUN: llc -mtriple=wasm32-unknown-unknown -exception-model=wasm -run-pass wasm-cfg-stackify %s -o - | FileCheck %s - ---- | - target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" - target triple = "wasm32-unknown-unknown" - - @__wasm_lpad_context = external global { i32, i8*, i32 } - - declare void @may_throw() - ; Function Attrs: nounwind - declare void @dont_throw() #0 - declare i8* @__cxa_begin_catch(i8*) - declare void @__cxa_end_catch() - declare void @__cxa_rethrow() - ; Function Attrs: nounwind - declare i32 
@__gxx_wasm_personality_v0(...) - declare i32 @_Unwind_CallPersonality(i8*) #0 - - define void @test0() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { - unreachable - } - define void @test1() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { - unreachable - } - define void @test2() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { - unreachable - } - define void @test3() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { - unreachable - } - - attributes #0 = { nounwind } - ---- -# Simplest try-catch -# try { -# may_throw(); -# } catch (...) { -# } -name: test0 -# CHECK-LABEL: name: test0 -liveins: - - { reg: '$arguments', reg: '$value_stack' } -body: | - bb.0: - successors: %bb.2, %bb.1 - - CALL_VOID @may_throw, implicit-def dead $arguments, implicit $sp32, implicit $sp64 - BR %bb.2, implicit-def $arguments - ; CHECK-LABEL: bb.0: - ; CHECK: TRY - ; CHECK-NEXT: CALL_VOID @may_throw - - bb.1 (landing-pad): - ; predecessors: %bb.0 - successors: %bb.2 - - %2:i32 = CATCH_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - %3:i32 = CALL_I32 @__cxa_begin_catch, %2:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64, implicit-def $value_stack, implicit $value_stack - DROP_I32 killed %3:i32, implicit-def $arguments - CALL_VOID @__cxa_end_catch, implicit-def dead $arguments, implicit $sp32, implicit $sp64 - - bb.2: - ; predecessors: %bb.0, %bb.1 - - RETURN_VOID implicit-def dead $arguments - ; CHECK-LABEL: bb.2: - ; CHECK-NEXT: END_TRY - ; CHECK: RETURN_VOID -... 
---- - -# Nested try-catch inside another catch -# try { -# may_throw(); -# } catch (int n) { -# try { -# may_throw(); -# } catch (int n) { -# } -# } -name: test1 -# CHECK-LABEL: name: test1 -liveins: - - { reg: '$arguments', reg: '$value_stack' } -body: | - bb.0: - successors: %bb.9, %bb.1 - - CALL_VOID @may_throw, implicit-def dead $arguments, implicit $sp32, implicit $sp64 - BR %bb.9, implicit-def $arguments - ; CHECK-LABEL: bb.0: - ; CHECK: TRY - ; CHECK-NEXT: CALL_VOID @may_throw - - bb.1 (landing-pad): - ; predecessors: %bb.0 - successors: %bb.2, %bb.7 - - %30:i32 = CATCH_I32 0, implicit-def dead $arguments - LOCAL_SET_I32 0, %30:i32, implicit-def $arguments - %16:i32 = CONST_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - %27:i32 = CONST_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - STORE_I32 2, @__wasm_lpad_context + 4, %16:i32, %27:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack :: (store 4 into `i8** getelementptr inbounds ({ i32, i8*, i32 }, { i32, i8*, i32 }* @__wasm_lpad_context, i32 0, i32 1)`) - %26:i32 = CONST_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - %25:i32 = CONST_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - STORE_I32 2, @__wasm_lpad_context, %26:i32, %25:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack :: (store 4 into `i32* getelementptr inbounds ({ i32, i8*, i32 }, { i32, i8*, i32 }* @__wasm_lpad_context, i32 0, i32 0)`) - %32:i32 = LOCAL_GET_I32 0, implicit-def $arguments - %31:i32 = CALL_I32 @_Unwind_CallPersonality, %32:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64 - DROP_I32 killed %31:i32, implicit-def $arguments - %24:i32 = CONST_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - %17:i32 = LOAD_I32 2, @__wasm_lpad_context + 8, %24:i32, 
implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack :: (dereferenceable load 4 from `i32* getelementptr inbounds ({ i32, i8*, i32 }, { i32, i8*, i32 }* @__wasm_lpad_context, i32 0, i32 2)`) - %18:i32 = CONST_I32 1, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - %19:i32 = NE_I32 %17:i32, %18:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - BR_IF %bb.7, %19:i32, implicit-def $arguments, implicit-def $value_stack, implicit $value_stack - - bb.2: - ; predecessors: %bb.1 - successors: %bb.8, %bb.3, %bb.6 - - %34:i32 = LOCAL_GET_I32 0, implicit-def $arguments - %33:i32 = CALL_I32 @__cxa_begin_catch, %34:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64 - DROP_I32 killed %33:i32, implicit-def $arguments - CALL_VOID @may_throw, implicit-def dead $arguments, implicit $sp32, implicit $sp64 - BR %bb.8, implicit-def $arguments - ; CHECK-LABEL: bb.2: - ; CHECK: DROP_I32 - ; CHECK-NEXT: TRY - ; CHECK-NEXT: TRY - ; CHECK-NEXT: CALL_VOID @may_throw - - bb.3 (landing-pad): - ; predecessors: %bb.2 - successors: %bb.4, %bb.5 - - %35:i32 = CATCH_I32 0, implicit-def dead $arguments - LOCAL_SET_I32 0, %35:i32, implicit-def $arguments - %21:i32 = CONST_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - %20:i32 = CONST_I32 1, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - STORE_I32 2, @__wasm_lpad_context, %21:i32, %20:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack :: (store 4 into `i32* getelementptr inbounds ({ i32, i8*, i32 }, { i32, i8*, i32 }* @__wasm_lpad_context, i32 0, i32 0)`) - %37:i32 = LOCAL_GET_I32 0, implicit-def $arguments - %36:i32 = CALL_I32 @_Unwind_CallPersonality, %37:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64 - DROP_I32 killed %36:i32, implicit-def $arguments - %29:i32 = CONST_I32 0, implicit-def dead $arguments, implicit-def 
$value_stack, implicit $value_stack - %22:i32 = LOAD_I32 2, @__wasm_lpad_context + 8, %29:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack :: (dereferenceable load 4 from `i32* getelementptr inbounds ({ i32, i8*, i32 }, { i32, i8*, i32 }* @__wasm_lpad_context, i32 0, i32 2)`) - %28:i32 = CONST_I32 1, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - %23:i32 = NE_I32 %22:i32, %28:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - BR_IF %bb.5, %23:i32, implicit-def $arguments, implicit-def $value_stack, implicit $value_stack - - bb.4: - ; predecessors: %bb.3 - successors: %bb.8 - - %39:i32 = LOCAL_GET_I32 0, implicit-def $arguments - %38:i32 = CALL_I32 @__cxa_begin_catch, %39:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64 - DROP_I32 killed %38:i32, implicit-def $arguments - CALL_VOID @__cxa_end_catch, implicit-def dead $arguments, implicit $sp32, implicit $sp64 - BR %bb.8, implicit-def $arguments - - bb.5: - ; predecessors: %bb.3 - successors: %bb.6 - - CALL_VOID @__cxa_rethrow, implicit-def dead $arguments, implicit $sp32, implicit $sp64 - RETHROW %bb.6, implicit-def $arguments - - bb.6 (landing-pad): - ; predecessors: %bb.2, %bb.5 - - CATCH_ALL implicit-def $arguments - CALL_VOID @__cxa_end_catch, implicit-def dead $arguments, implicit $sp32, implicit $sp64 - RETHROW_TO_CALLER implicit-def $arguments - ; CHECK-LABEL: bb.6 (landing-pad): - ; CHECK-NEXT: END_TRY - - bb.7: - ; predecessors: %bb.1 - - CALL_VOID @__cxa_rethrow, implicit-def dead $arguments, implicit $sp32, implicit $sp64 - RETHROW_TO_CALLER implicit-def $arguments - ; CHECK-LABEL: bb.7: - ; CHECK-NEXT: END_TRY - ; CHECK: RETHROW 0 - - bb.8: - ; predecessors: %bb.2, %bb.4 - successors: %bb.9 - - CALL_VOID @__cxa_end_catch, implicit-def dead $arguments, implicit $sp32, implicit $sp64 - - bb.9: - ; predecessors: %bb.0, %bb.8 - - RETURN_VOID implicit-def dead $arguments - ; CHECK-LABEL: 
bb.9: - ; CHECK-NEXT: END_TRY -... ---- - -# A loop within a try. -# try { -# for (int i = 0; i < n; ++i) -# may_throw(); -# } catch (...) { -# } -name: test2 -# CHECK-LABEL: name: test2 -liveins: - - { reg: '$arguments', reg: '$value_stack' } -body: | - bb.0: - successors: %bb.1, %bb.4 - - %18:i32 = CONST_I32 0, implicit-def dead $arguments - LOCAL_SET_I32 1, %18:i32, implicit-def $arguments - %14:i32 = CONST_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - %19:i32 = LOCAL_GET_I32 0, implicit-def $arguments - %9:i32 = GE_S_I32 %14:i32, %19:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - BR_IF %bb.4, %9:i32, implicit-def $arguments - - bb.1: - ; predecessors: %bb.0, %bb.3 - successors: %bb.3, %bb.2 - - CALL_VOID @may_throw, implicit-def dead $arguments, implicit $sp32, implicit $sp64 - BR %bb.3, implicit-def $arguments - ; CHECK-LABEL: bb.1: - ; CHECK: LOOP - ; CHECK: TRY - ; CHECK-NEXT: CALL_VOID @may_throw - - bb.2 (landing-pad): - ; predecessors: %bb.1 - successors: %bb.4 - - %11:i32 = CATCH_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - %22:i32 = CALL_I32 @__cxa_begin_catch, %11:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64, implicit-def $value_stack, implicit $value_stack - DROP_I32 killed %22:i32, implicit-def $arguments - CALL_VOID @__cxa_end_catch, implicit-def dead $arguments, implicit $sp32, implicit $sp64 - BR %bb.4, implicit-def $arguments - - bb.3: - ; predecessors: %bb.1 - successors: %bb.1, %bb.4 - - %20:i32 = LOCAL_GET_I32 1, implicit-def $arguments - %17:i32 = CONST_I32 1, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - %16:i32 = ADD_I32 %20:i32, %17:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - %15:i32 = LOCAL_TEE_I32 1, %16:i32, implicit-def $arguments - %21:i32 = LOCAL_GET_I32 0, implicit-def $arguments - %10:i32 = GE_S_I32 %15:i32, 
%21:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - BR_UNLESS %bb.1, %10:i32, implicit-def $arguments - ; CHECK-LABEL: bb.3: - ; CHECK: END_TRY - - bb.4: - ; predecessors: %bb.2, %bb.0, %bb.3 - - RETURN_VOID implicit-def dead $arguments -... ---- - -# A loop within a catch -# try { -# may_throw(); -# } catch (...) { -# for (int i = 0; i < n; ++i) -# dont_throw(); -# } -name: test3 -# CHECK-LABEL: name: test3 -liveins: - - { reg: '$arguments', reg: '$value_stack' } -body: | - bb.0: - successors: %bb.4, %bb.1 - - CALL_VOID @may_throw, implicit-def dead $arguments, implicit $sp32, implicit $sp64 - BR %bb.4, implicit-def $arguments - ; CHECK-LABEL: bb.0: - ; CHECK: TRY - ; CHECK-NEXT: CALL_VOID @may_throw - - bb.1 (landing-pad): - ; predecessors: %bb.0 - successors: %bb.2, %bb.3 - - %9:i32 = CATCH_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - %18:i32 = CALL_I32 @__cxa_begin_catch, %9:i32, implicit-def dead $arguments, implicit $sp32, implicit $sp64, implicit-def $value_stack, implicit $value_stack - DROP_I32 killed %18:i32, implicit-def $arguments - %19:i32 = CONST_I32 0, implicit-def dead $arguments - LOCAL_SET_I32 1, %19:i32, implicit-def $arguments - %14:i32 = CONST_I32 0, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - %20:i32 = LOCAL_GET_I32 0, implicit-def $arguments - %10:i32 = GE_S_I32 %14:i32, %20:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - BR_IF %bb.3, %10:i32, implicit-def $arguments, implicit-def $value_stack, implicit $value_stack - - bb.2: - ; predecessors: %bb.1, %bb.2 - successors: %bb.2, %bb.3 - - CALL_VOID @dont_throw, implicit-def dead $arguments, implicit $sp32, implicit $sp64 - %21:i32 = LOCAL_GET_I32 1, implicit-def $arguments - %17:i32 = CONST_I32 1, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - %16:i32 = ADD_I32 %21:i32, %17:i32, implicit-def dead $arguments, 
implicit-def $value_stack, implicit $value_stack - %15:i32 = LOCAL_TEE_I32 1, %16:i32, implicit-def $arguments - %22:i32 = LOCAL_GET_I32 0, implicit-def $arguments - %11:i32 = GE_S_I32 %15:i32, %22:i32, implicit-def dead $arguments, implicit-def $value_stack, implicit $value_stack - BR_UNLESS %bb.2, %11:i32, implicit-def $arguments, implicit-def $value_stack, implicit $value_stack - - bb.3: - ; predecessors: %bb.1, %bb.2 - successors: %bb.4 - - CALL_VOID @__cxa_end_catch, implicit-def dead $arguments, implicit $sp32, implicit $sp64 - - bb.4: - ; predecessors: %bb.0, %bb.3 - - RETURN_VOID implicit-def dead $arguments - ; CHECK-LABEL: bb.4: - ; CHECK: END_TRY Index: test/CodeGen/WebAssembly/exception.ll =================================================================== --- test/CodeGen/WebAssembly/exception.ll +++ test/CodeGen/WebAssembly/exception.ll @@ -1,5 +1,5 @@ ; RUN: not llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -exception-model=wasm -; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers -exception-model=wasm -mattr=+exception-handling -verify-machineinstrs | FileCheck -allow-deprecated-dag-overlap %s +; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -exception-model=wasm -mattr=+exception-handling -verify-machineinstrs | FileCheck -allow-deprecated-dag-overlap %s ; RUN: llc < %s -disable-wasm-fallthrough-return-opt -wasm-keep-registers -exception-model=wasm -mattr=+exception-handling target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" @@ -9,30 +9,39 @@ @_ZTIi = external constant i8* -declare void @llvm.wasm.throw(i32, i8*) - ; CHECK-LABEL: test_throw: -; CHECK: local.get $push0=, 0 -; CHECK-NEXT: throw __cpp_exception@EVENT, $pop0 +; CHECK: throw __cpp_exception@EVENT, $0 ; CHECK-NOT: unreachable define void @test_throw(i8* %p) { call void @llvm.wasm.throw(i32 0, i8* %p) ret void } +; CHECK-LABEL: 
test_rethrow: +; CHECK: rethrow +; CHECK-NOT: unreachable +define void @test_rethrow(i8* %p) { + call void @llvm.wasm.rethrow() + ret void +} + ; CHECK-LABEL: test_catch_rethrow: -; CHECK: global.get $push{{.+}}=, __stack_pointer@GLOBAL +; CHECK: global.get ${{.+}}=, __stack_pointer@GLOBAL ; CHECK: try ; CHECK: call foo@FUNCTION -; CHECK: i32.catch $push{{.+}}=, 0 +; CHECK: catch $[[EXCEPT_REF:[0-9]+]]= +; CHECK: block i32 +; CHECK: br_on_exn 0, __cpp_exception@EVENT, $[[EXCEPT_REF]] +; CHECK: rethrow +; CHECK: end_block +; CHECK: extract_exception $[[EXN:[0-9]+]]= ; CHECK: global.set __stack_pointer@GLOBAL ; CHECK-DAG: i32.store __wasm_lpad_context ; CHECK-DAG: i32.store __wasm_lpad_context+4 -; CHECK: i32.call $push{{.+}}=, _Unwind_CallPersonality@FUNCTION -; CHECK: i32.call $push{{.+}}=, __cxa_begin_catch@FUNCTION +; CHECK: i32.call $drop=, _Unwind_CallPersonality@FUNCTION, $[[EXN]] +; CHECK: i32.call $drop=, __cxa_begin_catch@FUNCTION ; CHECK: call __cxa_end_catch@FUNCTION ; CHECK: call __cxa_rethrow@FUNCTION -; CHECK-NEXT: rethrow ; CHECK: end_try define void @test_catch_rethrow() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { entry: @@ -66,9 +75,9 @@ ; CHECK-LABEL: test_cleanup: ; CHECK: try ; CHECK: call foo@FUNCTION -; CHECK: catch_all +; CHECK: catch ; CHECK: global.set __stack_pointer@GLOBAL -; CHECK: i32.call $push{{.+}}=, _ZN7CleanupD1Ev@FUNCTION +; CHECK: i32.call $drop=, _ZN7CleanupD1Ev@FUNCTION ; CHECK: rethrow ; CHECK: end_try define void @test_cleanup() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { @@ -87,71 +96,51 @@ cleanupret from %0 unwind to caller } -; - Tests multple terminate pads are merged into one -; - Tests a catch_all terminate pad is created after a catch terminate pad - ; CHECK-LABEL: test_terminatepad -; CHECK: i32.catch -; CHECK: call __clang_call_terminate@FUNCTION -; CHECK: unreachable -; CHECK: catch_all -; CHECK: call _ZSt9terminatev@FUNCTION -; CHECK-NOT: call 
__clang_call_terminate@FUNCTION -define hidden i32 @test_terminatepad() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { +; CHECK: catch +; CHECK: block i32 +; CHECK: br_on_exn 0, __cpp_exception@EVENT +; CHECK: call __clang_call_terminate@FUNCTION, 0 +; CHECK: unreachable +; CHECK: end_block +; CHECK: extract_exception +; CHECK: call __clang_call_terminate@FUNCTION +; CHECK: unreachable +define void @test_terminatepad() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { entry: - %c = alloca %struct.Cleanup, align 1 - %c1 = alloca %struct.Cleanup, align 1 invoke void @foo() - to label %invoke.cont unwind label %ehcleanup - -invoke.cont: ; preds = %entry - %call = invoke %struct.Cleanup* @_ZN7CleanupD1Ev(%struct.Cleanup* %c1) to label %try.cont unwind label %catch.dispatch -ehcleanup: ; preds = %entry - %0 = cleanuppad within none [] - %call4 = invoke %struct.Cleanup* @_ZN7CleanupD1Ev(%struct.Cleanup* %c1) [ "funclet"(token %0) ] - to label %invoke.cont3 unwind label %terminate - -invoke.cont3: ; preds = %ehcleanup - cleanupret from %0 unwind label %catch.dispatch - -catch.dispatch: ; preds = %invoke.cont3, %invoke.cont - %1 = catchswitch within none [label %catch.start] unwind label %ehcleanup7 +catch.dispatch: ; preds = %entry + %0 = catchswitch within none [label %catch.start] unwind to caller catch.start: ; preds = %catch.dispatch - %2 = catchpad within %1 [i8* null] - %3 = call i8* @llvm.wasm.get.exception(token %2) - %4 = call i32 @llvm.wasm.get.ehselector(token %2) - %5 = call i8* @__cxa_begin_catch(i8* %3) [ "funclet"(token %2) ] - invoke void @__cxa_end_catch() [ "funclet"(token %2) ] - to label %invoke.cont5 unwind label %ehcleanup7 + %1 = catchpad within %0 [i8* null] + %2 = call i8* @llvm.wasm.get.exception(token %1) + %3 = call i32 @llvm.wasm.get.ehselector(token %1) + %4 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ] + invoke void @foo() [ "funclet"(token %1) ] + to label %invoke.cont1 unwind 
label %ehcleanup -invoke.cont5: ; preds = %catch.start - catchret from %2 to label %try.cont +invoke.cont1: ; preds = %catch.start + call void @__cxa_end_catch() [ "funclet"(token %1) ] + catchret from %1 to label %try.cont -try.cont: ; preds = %invoke.cont5, %invoke.cont - %call6 = call %struct.Cleanup* @_ZN7CleanupD1Ev(%struct.Cleanup* %c) - ret i32 0 +try.cont: ; preds = %entry, %invoke.cont1 + ret void -ehcleanup7: ; preds = %catch.start, %catch.dispatch - %6 = cleanuppad within none [] - %call9 = invoke %struct.Cleanup* @_ZN7CleanupD1Ev(%struct.Cleanup* %c) [ "funclet"(token %6) ] - to label %invoke.cont8 unwind label %terminate10 +ehcleanup: ; preds = %catch.start + %5 = cleanuppad within %1 [] + invoke void @__cxa_end_catch() [ "funclet"(token %5) ] + to label %invoke.cont2 unwind label %terminate -invoke.cont8: ; preds = %ehcleanup7 - cleanupret from %6 unwind to caller +invoke.cont2: ; preds = %ehcleanup + cleanupret from %5 unwind to caller terminate: ; preds = %ehcleanup - %7 = cleanuppad within %0 [] - %8 = call i8* @llvm.wasm.get.exception(token %7) - call void @__clang_call_terminate(i8* %8) [ "funclet"(token %7) ] - unreachable - -terminate10: ; preds = %ehcleanup7 - %9 = cleanuppad within %6 [] - %10 = call i8* @llvm.wasm.get.exception(token %9) - call void @__clang_call_terminate(i8* %10) [ "funclet"(token %9) ] + %6 = cleanuppad within %5 [] + %7 = call i8* @llvm.wasm.get.exception(token %6) + call void @__clang_call_terminate(i8* %7) [ "funclet"(token %6) ] unreachable } @@ -164,12 +153,12 @@ ; CHECK-LABEL: test_no_prolog_epilog_in_ehpad ; CHECK: try ; CHECK: call foo@FUNCTION -; CHECK: i32.catch +; CHECK: catch ; CHECK-NOT: global.get $push{{.+}}=, __stack_pointer@GLOBAL ; CHECK: global.set __stack_pointer@GLOBAL ; CHECK: try ; CHECK: call foo@FUNCTION -; CHECK: catch_all +; CHECK: catch ; CHECK-NOT: global.get $push{{.+}}=, __stack_pointer@GLOBAL ; CHECK: global.set __stack_pointer@GLOBAL ; CHECK: call __cxa_end_catch@FUNCTION @@ -251,6 +240,8 
@@ declare void @foo() declare void @bar(i32*) declare i32 @__gxx_wasm_personality_v0(...) +declare void @llvm.wasm.throw(i32, i8*) +declare void @llvm.wasm.rethrow() declare i8* @llvm.wasm.get.exception(token) declare i32 @llvm.wasm.get.ehselector(token) declare i32 @llvm.eh.typeid.for(i8*) @@ -258,7 +249,6 @@ declare void @__cxa_end_catch() declare void @__cxa_rethrow() declare void @__clang_call_terminate(i8*) -declare void @_ZSt9terminatev() declare %struct.Cleanup* @_ZN7CleanupD1Ev(%struct.Cleanup* returned) ; CHECK: __cpp_exception: Index: test/CodeGen/WebAssembly/function-bitcasts-varargs.ll =================================================================== --- test/CodeGen/WebAssembly/function-bitcasts-varargs.ll +++ test/CodeGen/WebAssembly/function-bitcasts-varargs.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -asm-verbose=false -wasm-temporary-workarounds=false -wasm-keep-registers | FileCheck %s +; RUN: llc < %s -asm-verbose=false -wasm-keep-registers | FileCheck %s ; Test that function pointer casts casting away varargs are replaced with ; wrappers. Index: test/CodeGen/WebAssembly/function-bitcasts.ll =================================================================== --- test/CodeGen/WebAssembly/function-bitcasts.ll +++ test/CodeGen/WebAssembly/function-bitcasts.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -asm-verbose=false -wasm-disable-explicit-locals -wasm-keep-registers -enable-emscripten-cxx-exceptions -wasm-temporary-workarounds=false | FileCheck %s +; RUN: llc < %s -asm-verbose=false -wasm-disable-explicit-locals -wasm-keep-registers -enable-emscripten-cxx-exceptions | FileCheck %s ; Test that function pointer casts are replaced with wrappers. 
Index: test/CodeGen/WebAssembly/main-declaration.ll =================================================================== --- test/CodeGen/WebAssembly/main-declaration.ll +++ test/CodeGen/WebAssembly/main-declaration.ll @@ -1,20 +1,18 @@ -; RUN: llc < %s -asm-verbose=false -wasm-temporary-workarounds=false | FileCheck %s +; RUN: llc < %s -asm-verbose=false | FileCheck %s ; Test main functions with alternate signatures. target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" target triple = "wasm32-unknown-unknown" -declare void @main() +declare i32 @main() -define void @foo() { - call void @main() - ret void +define i32 @foo() { + %t = call i32 @main() + ret i32 %t } -; CHECK-NOT: __original_main ; CHECK-LABEL: foo: -; CHECK-NEXT: .functype foo () -> () -; CHECK-NEXT: call main@FUNCTION +; CHECK-NEXT: .functype foo () -> (i32) +; CHECK-NEXT: call __original_main@FUNCTION ; CHECK-NEXT: end_function -; CHECK-NOT: __original_main Index: test/CodeGen/WebAssembly/main-no-args.ll =================================================================== --- test/CodeGen/WebAssembly/main-no-args.ll +++ test/CodeGen/WebAssembly/main-no-args.ll @@ -1,18 +1,19 @@ -; RUN: llc < %s -asm-verbose=false -wasm-temporary-workarounds=false | FileCheck %s +; RUN: llc < %s -asm-verbose=false | FileCheck %s ; Test main functions with alternate signatures. 
target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" target triple = "wasm32-unknown-unknown" -define void @main() { - ret void +define i32 @main() { + ret i32 0 } -; CHECK-LABEL: .L__original_main: -; CHECK-NEXT: .functype .L__original_main () -> () +; CHECK-LABEL: __original_main: +; CHECK-NEXT: .functype __original_main () -> (i32) +; CHECK-NEXT: i32.const 0 ; CHECK-NEXT: end_function ; CHECK-LABEL: main: ; CHECK-NEXT: .functype main (i32, i32) -> (i32) -; CHECK: call .L__original_main@FUNCTION +; CHECK: call __original_main@FUNCTION Index: test/CodeGen/WebAssembly/main-three-args.ll =================================================================== --- /dev/null +++ test/CodeGen/WebAssembly/main-three-args.ll @@ -0,0 +1,16 @@ +; RUN: llc < %s -asm-verbose=false | FileCheck %s + +; Test that main function with a non-standard third argument is +; not wrapped. + +target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" +target triple = "wasm32-unknown-unknown" + +define i32 @main(i32 %a, i8** %b, i8** %c) { + ret i32 0 +} + +; CHECK-LABEL: main: +; CHECK-NEXT: .functype main (i32, i32, i32) -> (i32) + +; CHECK-NOT: __original_main: Index: test/CodeGen/WebAssembly/main-with-args.ll =================================================================== --- test/CodeGen/WebAssembly/main-with-args.ll +++ test/CodeGen/WebAssembly/main-with-args.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -asm-verbose=false -wasm-temporary-workarounds=false | FileCheck %s +; RUN: llc < %s -asm-verbose=false | FileCheck %s ; Test that main function with expected signature is not wrapped Index: test/CodeGen/WebAssembly/simd-build-vector.ll =================================================================== --- /dev/null +++ test/CodeGen/WebAssembly/simd-build-vector.ll @@ -0,0 +1,127 @@ +; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+unimplemented-simd128 | FileCheck %s + +; Test that the logic 
to choose between v128.const vector +; initialization and splat vector initialization and to optimize the +; choice of splat value works correctly. + +target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" +target triple = "wasm32-unknown-unknown" + +; CHECK-LABEL: same_const_one_replaced_i8x16: +; CHECK-NEXT: .functype same_const_one_replaced_i8x16 (i32) -> (v128) +; CHECK-NEXT: i32.const $push[[L0:[0-9]+]]=, 42 +; CHECK-NEXT: i16x8.splat $push[[L1:[0-9]+]]=, $pop[[L0]] +; CHECK-NEXT: i16x8.replace_lane $push[[L2:[0-9]+]]=, $pop[[L1]], 5, $0 +; CHECK-NEXT: return $pop[[L2]] +define <8 x i16> @same_const_one_replaced_i8x16(i16 %x) { + %v = insertelement + <8 x i16> , + i16 %x, + i32 5 + ret <8 x i16> %v +} + +; CHECK-LABEL: different_const_one_replaced_i8x16: +; CHECK-NEXT: .functype different_const_one_replaced_i8x16 (i32) -> (v128) +; CHECK-NEXT: v128.const $push[[L0:[0-9]+]]=, 1, 2, 3, 4, 5, 0, 7, 8 +; CHECK-NEXT: i16x8.replace_lane $push[[L1:[0-9]+]]=, $pop[[L0]], 5, $0 +; CHECK-NEXT: return $pop[[L1]] +define <8 x i16> @different_const_one_replaced_i8x16(i16 %x) { + %v = insertelement + <8 x i16> , + i16 %x, + i32 5 + ret <8 x i16> %v +} + +; CHECK-LABEL: same_const_one_replaced_f32x4: +; CHECK-NEXT: .functype same_const_one_replaced_f32x4 (f32) -> (v128) +; CHECK-NEXT: f32.const $push[[L0:[0-9]+]]=, 0x1.5p5 +; CHECK-NEXT: f32x4.splat $push[[L1:[0-9]+]]=, $pop[[L0]] +; CHECK-NEXT: f32x4.replace_lane $push[[L2:[0-9]+]]=, $pop[[L1]], 2, $0 +; CHECK-NEXT: return $pop[[L2]] +define <4 x float> @same_const_one_replaced_f32x4(float %x) { + %v = insertelement + <4 x float> , + float %x, + i32 2 + ret <4 x float> %v +} + +; CHECK-LABEL: different_const_one_replaced_f32x4: +; CHECK-NEXT: .functype different_const_one_replaced_f32x4 (f32) -> (v128) +; CHECK-NEXT: v128.const $push[[L0:[0-9]+]]=, 0x1p0, 0x1p1, 0x0p0, 0x1p2 +; CHECK-NEXT: f32x4.replace_lane $push[[L1:[0-9]+]]=, $pop[[L0]], 2, $0 +; CHECK-NEXT: return $pop[[L1]] +define <4 x float> 
@different_const_one_replaced_f32x4(float %x) { + %v = insertelement + <4 x float> , + float %x, + i32 2 + ret <4 x float> %v +} + +; CHECK-LABEL: splat_common_const_i32x4: +; CHECK-NEXT: .functype splat_common_const_i32x4 () -> (v128) +; CHECK-NEXT: i32.const $push[[L0:[0-9]+]]=, 3 +; CHECK-NEXT: i32x4.splat $push[[L1:[0-9]+]]=, $pop[[L0]] +; CHECK-NEXT: i32.const $push[[L2:[0-9]+]]=, 1 +; CHECK-NEXT: i32x4.replace_lane $push[[L3:[0-9]+]]=, $pop[[L1]], 3, $pop[[L2]] +; CHECK-NEXT: return $pop[[L3]] +define <4 x i32> @splat_common_const_i32x4() { + ret <4 x i32> +} + +; CHECK-LABEL: splat_common_arg_i16x8: +; CHECK-NEXT: .functype splat_common_arg_i16x8 (i32, i32, i32) -> (v128) +; CHECK-NEXT: i16x8.splat $push[[L0:[0-9]+]]=, $2 +; CHECK-NEXT: i16x8.replace_lane $push[[L1:[0-9]+]]=, $pop[[L0]], 0, $1 +; CHECK-NEXT: i16x8.replace_lane $push[[L2:[0-9]+]]=, $pop[[L1]], 2, $0 +; CHECK-NEXT: i16x8.replace_lane $push[[L3:[0-9]+]]=, $pop[[L2]], 4, $1 +; CHECK-NEXT: i16x8.replace_lane $push[[L4:[0-9]+]]=, $pop[[L3]], 7, $1 +; CHECK-NEXT: return $pop[[L4]] +define <8 x i16> @splat_common_arg_i16x8(i16 %a, i16 %b, i16 %c) { + %v0 = insertelement <8 x i16> undef, i16 %b, i32 0 + %v1 = insertelement <8 x i16> %v0, i16 %c, i32 1 + %v2 = insertelement <8 x i16> %v1, i16 %a, i32 2 + %v3 = insertelement <8 x i16> %v2, i16 %c, i32 3 + %v4 = insertelement <8 x i16> %v3, i16 %b, i32 4 + %v5 = insertelement <8 x i16> %v4, i16 %c, i32 5 + %v6 = insertelement <8 x i16> %v5, i16 %c, i32 6 + %v7 = insertelement <8 x i16> %v6, i16 %b, i32 7 + ret <8 x i16> %v7 +} + +; CHECK-LABEL: undef_const_insert_f32x4: +; CHECK-NEXT: .functype undef_const_insert_f32x4 () -> (v128) +; CHECK-NEXT: f32.const $push[[L0:[0-9]+]]=, 0x1.5p5 +; CHECK-NEXT: f32x4.splat $push[[L1:[0-9]+]]=, $pop[[L0]] +; CHECK-NEXT: return $pop[[L1]] +define <4 x float> @undef_const_insert_f32x4() { + %v = insertelement <4 x float> undef, float 42., i32 1 + ret <4 x float> %v +} + +; CHECK-LABEL: undef_arg_insert_i32x4: +; 
CHECK-NEXT: .functype undef_arg_insert_i32x4 (i32) -> (v128) +; CHECK-NEXT: i32x4.splat $push[[L0:[0-9]+]]=, $0 +; CHECK-NEXT: return $pop[[L0]] +define <4 x i32> @undef_arg_insert_i32x4(i32 %x) { + %v = insertelement <4 x i32> undef, i32 %x, i32 3 + ret <4 x i32> %v +} + +; CHECK-LABEL: all_undef_i8x16: +; CHECK-NEXT: .functype all_undef_i8x16 () -> (v128) +; CHECK-NEXT: return $0 +define <16 x i8> @all_undef_i8x16() { + %v = insertelement <16 x i8> undef, i8 undef, i32 4 + ret <16 x i8> %v +} + +; CHECK-LABEL: all_undef_f64x2: +; CHECK-NEXT: .functype all_undef_f64x2 () -> (v128) +; CHECK-NEXT: return $0 +define <2 x double> @all_undef_f64x2() { + ret <2 x double> undef +} Index: test/CodeGen/WebAssembly/simd-scalar-to-vector.ll =================================================================== --- /dev/null +++ test/CodeGen/WebAssembly/simd-scalar-to-vector.ll @@ -0,0 +1,43 @@ +; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+simd128 | FileCheck %s + +; Test that scalar_to_vector is lowered into a splat correctly. +; This bugpoint-reduced code turns into the selection dag below. +; TODO: find small test cases that produce scalar_to_vector dag nodes +; to make this test more readable and comprehensive. 
+ +; t0: ch = EntryToken +; t32: i32,ch = load<(load 4 from `<2 x i16>* undef`, align 1)> t0, undef:i32, undef:i32 +; t33: v4i32 = scalar_to_vector t32 +; t34: v8i16 = bitcast t33 +; t51: i32 = extract_vector_elt t34, Constant:i32<0> +; t52: ch = store<(store 2 into `<4 x i16>* undef`, align 1), trunc to i16> t32:1, t51, undef:i32, undef:i32 +; t50: i32 = extract_vector_elt t34, Constant:i32<1> +; t53: ch = store<(store 2 into `<4 x i16>* undef` + 2, align 1), trunc to i16> t32:1, t50, undef:i32, undef:i32 +; t49: i32 = extract_vector_elt t34, Constant:i32<2> +; t55: ch = store<(store 2 into `<4 x i16>* undef` + 4, align 1), trunc to i16> t32:1, t49, undef:i32, undef:i32 +; t48: i32 = extract_vector_elt t34, Constant:i32<3> +; t57: ch = store<(store 2 into `<4 x i16>* undef` + 6, align 1), trunc to i16> t32:1, t48, undef:i32, undef:i32 +; t58: ch = TokenFactor t52, t53, t55, t57 +; t24: ch = WebAssemblyISD::RETURN t58 + +target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" +target triple = "wasm32-unknown-unknown" + +; CHECK-LABEL: foo: +; CHECK: i32x4.splat +define void @foo() { +entry: + %a = load <2 x i16>, <2 x i16>* undef, align 1 + %b = shufflevector <2 x i16> %a, <2 x i16> undef, <8 x i32> + %0 = bitcast <8 x i16> %b to <16 x i8> + %shuffle.i214 = shufflevector <16 x i8> %0, <16 x i8> , <16 x i32> + %1 = bitcast <16 x i8> %shuffle.i214 to <8 x i16> + %add82 = add <8 x i16> %1, zeroinitializer + %2 = select <8 x i1> undef, <8 x i16> undef, <8 x i16> %add82 + %3 = bitcast <8 x i16> %2 to <16 x i8> + %shuffle.i204 = shufflevector <16 x i8> %3, <16 x i8> undef, <16 x i32> + %4 = bitcast <16 x i8> %shuffle.i204 to <8 x i16> + %dst2.0.vec.extract = shufflevector <8 x i16> %4, <8 x i16> undef, <4 x i32> + store <4 x i16> %dst2.0.vec.extract, <4 x i16>* undef, align 1 + ret void +} Index: test/CodeGen/WebAssembly/wasmehprepare.ll =================================================================== --- test/CodeGen/WebAssembly/wasmehprepare.ll +++ 
test/CodeGen/WebAssembly/wasmehprepare.ll @@ -29,7 +29,7 @@ br i1 %matches, label %catch, label %rethrow ; CHECK: catch.start: ; CHECK-NEXT: %[[CATCHPAD:.*]] = catchpad -; CHECK-NEXT: %[[EXN:.*]] = call i8* @llvm.wasm.catch(i32 0) +; CHECK-NEXT: %[[EXN:.*]] = call i8* @llvm.wasm.extract.exception() ; CHECK-NEXT: call void @llvm.wasm.landingpad.index(token %[[CATCHPAD]], i32 0) ; CHECK-NEXT: store i32 0, i32* getelementptr inbounds ({ i32, i8*, i32 }, { i32, i8*, i32 }* @__wasm_lpad_context, i32 0, i32 0) ; CHECK-NEXT: %[[LSDA:.*]] = call i8* @llvm.wasm.lsda() @@ -76,7 +76,6 @@ catchret from %1 to label %try.cont ; CHECK: catch.start: ; CHECK-NEXT: catchpad within %0 [i8* null] -; CHECK-NEXT: call i8* @llvm.wasm.catch(i32 0) ; CHECK-NOT: call void @llvm.wasm.landingpad.index ; CHECK-NOT: store {{.*}} @__wasm_lpad_context ; CHECK-NOT: call i8* @llvm.wasm.lsda() @@ -178,7 +177,6 @@ cleanupret from %12 unwind to caller ; CHECK: ehcleanup: ; CHECK-NEXT: cleanuppad -; CHECK-NOT: call i8* @llvm.wasm.catch(i32 0) ; CHECK-NOT: call void @llvm.wasm.landingpad.index ; CHECK-NOT: store {{.*}} @__wasm_lpad_context ; CHECK-NOT: call i8* @llvm.wasm.lsda() @@ -191,7 +189,7 @@ ; A cleanuppad with a call to __clang_call_terminate(). ; A call to wasm.catch() should be generated after the cleanuppad. -define hidden void @test3() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { +define void @test3() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { ; CHECK-LABEL: @test3 entry: invoke void @foo() @@ -230,14 +228,14 @@ unreachable ; CHECK: terminate: ; CHECK-NEXT: cleanuppad -; CHECK-NEXT: %[[EXN:.*]] = call i8* @llvm.wasm.catch(i32 0) +; CHECK-NEXT: %[[EXN:.*]] = call i8* @llvm.wasm.extract.exception ; CHECK-NEXT: call void @__clang_call_terminate(i8* %[[EXN]]) } ; PHI demotion test. Only the phi before catchswitch should be demoted; the phi ; before cleanuppad should NOT. 
-define void @test5() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { -; CHECK-LABEL: @test5 +define void @test4() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { +; CHECK-LABEL: @test4 entry: %c = alloca %struct.Cleanup, align 1 invoke void @foo() @@ -301,8 +299,8 @@ ; Tests if instructions after a call to @llvm.wasm.throw are deleted and the ; BB's dead children are deleted. -; CHECK-LABEL: @test6 -define i32 @test6(i1 %b, i8* %p) { +; CHECK-LABEL: @test5 +define i32 @test5(i1 %b, i8* %p) { entry: br i1 %b, label %bb.true, label %bb.false @@ -326,6 +324,34 @@ ret i32 0 } +; Tests if instructions after a call to @llvm.wasm.rethrow are deleted and the +; BB's dead children are deleted. + +; CHECK-LABEL: @test6 +define i32 @test6(i1 %b, i8* %p) { +entry: + br i1 %b, label %bb.true, label %bb.false + +; CHECK: bb.true: +; CHECK-NEXT: call void @llvm.wasm.rethrow() +; CHECK-NEXT: unreachable +bb.true: ; preds = %entry + call void @llvm.wasm.rethrow() + br label %bb.true.0 + +; CHECK-NOT: bb.true.0 +bb.true.0: ; preds = %bb.true + br label %merge + +; CHECK: bb.false +bb.false: ; preds = %entry + br label %merge + +; CHECK: merge +merge: ; preds = %bb.true.0, %bb.false + ret i32 0 +} + declare void @foo() declare void @func(i32) declare %struct.Cleanup* @_ZN7CleanupD1Ev(%struct.Cleanup* returned) @@ -334,12 +360,12 @@ declare i32 @llvm.wasm.get.ehselector(token) declare i32 @llvm.eh.typeid.for(i8*) declare void @llvm.wasm.throw(i32, i8*) +declare void @llvm.wasm.rethrow() declare i8* @__cxa_begin_catch(i8*) declare void @__cxa_end_catch() declare void @__cxa_rethrow() declare void @__clang_call_terminate(i8*) -; CHECK-DAG: declare i8* @llvm.wasm.catch(i32) ; CHECK-DAG: declare void @llvm.wasm.landingpad.index(token, i32) ; CHECK-DAG: declare i8* @llvm.wasm.lsda() ; CHECK-DAG: declare i32 @_Unwind_CallPersonality(i8*) Index: test/CodeGen/X86/GlobalISel/legalize-memop-scalar-32.mir 
=================================================================== --- /dev/null +++ test/CodeGen/X86/GlobalISel/legalize-memop-scalar-32.mir @@ -0,0 +1,63 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -O0 -mtriple=i386-linux-gnu -run-pass=legalizer %s -o - | FileCheck --check-prefix=X32 %s + +--- +name: test_memop_s8tos32 +alignment: 4 +legalized: false +regBankSelected: false +body: | + bb.0: + ; X32-LABEL: name: test_memop_s8tos32 + ; X32: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF + ; X32: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p0) :: (load 1) + ; X32: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p0) :: (load 1) + ; X32: [[LOAD2:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p0) :: (load 2) + ; X32: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load 4) + ; X32: [[LOAD4:%[0-9]+]]:_(p0) = G_LOAD [[DEF]](p0) :: (load 4) + ; X32: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1 + ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY [[LOAD]](s8) + ; X32: [[AND:%[0-9]+]]:_(s8) = G_AND [[COPY]], [[C]] + ; X32: G_STORE [[AND]](s8), [[DEF]](p0) :: (store 1) + ; X32: G_STORE [[LOAD1]](s8), [[DEF]](p0) :: (store 1) + ; X32: G_STORE [[LOAD2]](s16), [[DEF]](p0) :: (store 2) + ; X32: G_STORE [[LOAD3]](s32), [[DEF]](p0) :: (store 4) + ; X32: G_STORE [[LOAD4]](p0), [[DEF]](p0) :: (store 4) + %0:_(p0) = IMPLICIT_DEF + %9:_(s1) = G_LOAD %0 :: (load 1) + %1:_(s8) = G_LOAD %0 :: (load 1) + %2:_(s16) = G_LOAD %0 :: (load 2) + %3:_(s32) = G_LOAD %0 :: (load 4) + %4:_(p0) = G_LOAD %0 :: (load 4) + + G_STORE %9, %0 :: (store 1) + G_STORE %1, %0 :: (store 1) + G_STORE %2, %0 :: (store 2) + G_STORE %3, %0 :: (store 4) + G_STORE %4, %0 :: (store 4) +... 
+--- +name: test_memop_s64 +alignment: 4 +legalized: false +regBankSelected: false +liveins: +body: | + bb.0: + + ; X32-LABEL: name: test_memop_s64 + ; X32: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF + ; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load 4, align 8) + ; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 + ; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[DEF]], [[C]](s32) + ; X32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p0) :: (load 4) + ; X32: G_STORE [[LOAD]](s32), [[DEF]](p0) :: (store 4, align 8) + ; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 + ; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[DEF]], [[C1]](s32) + ; X32: G_STORE [[LOAD1]](s32), [[GEP1]](p0) :: (store 4) + %0:_(p0) = IMPLICIT_DEF + %1:_(s64) = G_LOAD %0 :: (load 8) + + G_STORE %1, %0 :: (store 8) + +... Index: test/CodeGen/X86/GlobalISel/legalize-memop-scalar-64.mir =================================================================== --- /dev/null +++ test/CodeGen/X86/GlobalISel/legalize-memop-scalar-64.mir @@ -0,0 +1,57 @@ +# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py +# RUN: llc -O0 -mtriple=x86_64-linux-gnu -run-pass=legalizer -o - %s | FileCheck -check-prefix=X64 %s + +--- +name: test_memop_s8tos32 +alignment: 4 +legalized: false +regBankSelected: false +body: | + bb.0: + ; X64-LABEL: name: test_memop_s8tos32 + ; X64: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF + ; X64: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p0) :: (load 1) + ; X64: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p0) :: (load 1) + ; X64: [[LOAD2:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p0) :: (load 2) + ; X64: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load 4) + ; X64: [[LOAD4:%[0-9]+]]:_(p0) = G_LOAD [[DEF]](p0) :: (load 4) + ; X64: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1 + ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY [[LOAD]](s8) + ; X64: [[AND:%[0-9]+]]:_(s8) = G_AND [[COPY]], [[C]] + ; X64: G_STORE [[AND]](s8), [[DEF]](p0) :: (store 1) + ; X64: G_STORE [[LOAD1]](s8), [[DEF]](p0) :: (store 1) + ; X64: G_STORE 
[[LOAD2]](s16), [[DEF]](p0) :: (store 2) + ; X64: G_STORE [[LOAD3]](s32), [[DEF]](p0) :: (store 4) + ; X64: G_STORE [[LOAD4]](p0), [[DEF]](p0) :: (store 4) + %0:_(p0) = IMPLICIT_DEF + %9:_(s1) = G_LOAD %0(p0) :: (load 1) + %1:_(s8) = G_LOAD %0(p0) :: (load 1) + %2:_(s16) = G_LOAD %0(p0) :: (load 2) + %3:_(s32) = G_LOAD %0(p0) :: (load 4) + %4:_(p0) = G_LOAD %0(p0) :: (load 4) + + G_STORE %9, %0 :: (store 1) + G_STORE %1, %0 :: (store 1) + G_STORE %2, %0 :: (store 2) + G_STORE %3, %0 :: (store 4) + G_STORE %4, %0 :: (store 4) +... +--- +name: test_memop_s64 +alignment: 4 +legalized: false +regBankSelected: false +liveins: +body: | + bb.0: + + ; X64-LABEL: name: test_memop_s64 + ; X64: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF + ; X64: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[DEF]](p0) :: (load 8) + ; X64: G_STORE [[LOAD]](s64), [[DEF]](p0) :: (store 8) + %0:_(p0) = IMPLICIT_DEF + %1:_(s64) = G_LOAD %0 :: (load 8) + + G_STORE %1, %0 :: (store 8) + +... Index: test/CodeGen/X86/GlobalISel/legalize-memop-scalar.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-memop-scalar.mir +++ /dev/null @@ -1,112 +0,0 @@ -# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py -# RUN: llc -O0 -mtriple=x86_64-linux-gnu -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X64 -# RUN: llc -O0 -mtriple=i386-linux-gnu -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=X32 - ---- | - define void @test_memop_s8tos32() { - ret void - } - - define void @test_memop_s64() { - ret void - } -... 
---- -name: test_memop_s8tos32 -alignment: 4 -legalized: false -regBankSelected: false -registers: - - { id: 0, class: _, preferred-register: '' } - - { id: 1, class: _, preferred-register: '' } - - { id: 2, class: _, preferred-register: '' } - - { id: 3, class: _, preferred-register: '' } - - { id: 4, class: _, preferred-register: '' } - - { id: 5, class: _, preferred-register: '' } - - { id: 6, class: _, preferred-register: '' } - - { id: 7, class: _, preferred-register: '' } - - { id: 8, class: _, preferred-register: '' } - - { id: 9, class: _, preferred-register: '' } - - { id: 10, class: _, preferred-register: '' } -body: | - bb.1 (%ir-block.0): - liveins: $rdi - - ; X64-LABEL: name: test_memop_s8tos32 - ; X64: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF - ; X64: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p0) :: (load 1) - ; X64: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p0) :: (load 1) - ; X64: [[LOAD2:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p0) :: (load 2) - ; X64: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load 4) - ; X64: [[LOAD4:%[0-9]+]]:_(p0) = G_LOAD [[DEF]](p0) :: (load 8) - ; X64: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1 - ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY [[LOAD]](s8) - ; X64: [[AND:%[0-9]+]]:_(s8) = G_AND [[COPY]], [[C]] - ; X64: G_STORE [[AND]](s8), [[DEF]](p0) :: (store 1) - ; X64: G_STORE [[LOAD1]](s8), [[DEF]](p0) :: (store 1) - ; X64: G_STORE [[LOAD2]](s16), [[DEF]](p0) :: (store 2) - ; X64: G_STORE [[LOAD3]](s32), [[DEF]](p0) :: (store 4) - ; X64: G_STORE [[LOAD4]](p0), [[DEF]](p0) :: (store 8) - ; X32-LABEL: name: test_memop_s8tos32 - ; X32: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF - ; X32: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p0) :: (load 1) - ; X32: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p0) :: (load 1) - ; X32: [[LOAD2:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p0) :: (load 2) - ; X32: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load 4) - ; X32: [[LOAD4:%[0-9]+]]:_(p0) = G_LOAD [[DEF]](p0) :: (load 8) - ; X32: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1 - 
; X32: [[COPY:%[0-9]+]]:_(s8) = COPY [[LOAD]](s8) - ; X32: [[AND:%[0-9]+]]:_(s8) = G_AND [[COPY]], [[C]] - ; X32: G_STORE [[AND]](s8), [[DEF]](p0) :: (store 1) - ; X32: G_STORE [[LOAD1]](s8), [[DEF]](p0) :: (store 1) - ; X32: G_STORE [[LOAD2]](s16), [[DEF]](p0) :: (store 2) - ; X32: G_STORE [[LOAD3]](s32), [[DEF]](p0) :: (store 4) - ; X32: G_STORE [[LOAD4]](p0), [[DEF]](p0) :: (store 8) - %0(p0) = IMPLICIT_DEF - %9(s1) = G_LOAD %0(p0) :: (load 1) - %1(s8) = G_LOAD %0(p0) :: (load 1) - %2(s16) = G_LOAD %0(p0) :: (load 2) - %3(s32) = G_LOAD %0(p0) :: (load 4) - %4(p0) = G_LOAD %0(p0) :: (load 8) - - G_STORE %9, %0 :: (store 1) - G_STORE %1, %0 :: (store 1) - G_STORE %2, %0 :: (store 2) - G_STORE %3, %0 :: (store 4) - G_STORE %4, %0 :: (store 8) -... ---- -name: test_memop_s64 -alignment: 4 -legalized: false -regBankSelected: false -registers: - - { id: 0, class: _, preferred-register: '' } - - { id: 1, class: _, preferred-register: '' } - - { id: 2, class: _, preferred-register: '' } -liveins: -# -body: | - bb.1 (%ir-block.0): - liveins: $rdi - - ; X64-LABEL: name: test_memop_s64 - ; X64: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF - ; X64: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[DEF]](p0) :: (load 8) - ; X64: G_STORE [[LOAD]](s64), [[DEF]](p0) :: (store 8) - ; X32-LABEL: name: test_memop_s64 - ; X32: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF - ; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load 4, align 8) - ; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 - ; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[DEF]], [[C]](s32) - ; X32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP]](p0) :: (load 4) - ; X32: G_STORE [[LOAD]](s32), [[DEF]](p0) :: (store 4, align 8) - ; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4 - ; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[DEF]], [[C1]](s32) - ; X32: G_STORE [[LOAD1]](s32), [[GEP1]](p0) :: (store 4) - %0(p0) = IMPLICIT_DEF - %1(s64) = G_LOAD %0(p0) :: (load 8) - - G_STORE %1, %0 :: (store 8) - -... 
Index: test/CodeGen/X86/GlobalISel/legalize-trunc.mir =================================================================== --- test/CodeGen/X86/GlobalISel/legalize-trunc.mir +++ test/CodeGen/X86/GlobalISel/legalize-trunc.mir @@ -24,9 +24,9 @@ ; X32: [[AND:%[0-9]+]]:_(s8) = G_AND [[TRUNC]], [[C]] ; X32: G_STORE [[AND]](s8), [[DEF1]](p0) :: (store 1) ; X32: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[DEF]](s32) - ; X32: G_STORE [[TRUNC1]](s8), [[DEF1]](p0) :: (store 8) + ; X32: G_STORE [[TRUNC1]](s8), [[DEF1]](p0) :: (store 1) ; X32: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[DEF]](s32) - ; X32: G_STORE [[TRUNC2]](s16), [[DEF1]](p0) :: (store 16) + ; X32: G_STORE [[TRUNC2]](s16), [[DEF1]](p0) :: (store 2) ; X32: RET 0 ; X64-LABEL: name: trunc_check ; X64: [[DEF:%[0-9]+]]:_(s32) = IMPLICIT_DEF @@ -36,9 +36,9 @@ ; X64: [[AND:%[0-9]+]]:_(s8) = G_AND [[TRUNC]], [[C]] ; X64: G_STORE [[AND]](s8), [[DEF1]](p0) :: (store 1) ; X64: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[DEF]](s32) - ; X64: G_STORE [[TRUNC1]](s8), [[DEF1]](p0) :: (store 8) + ; X64: G_STORE [[TRUNC1]](s8), [[DEF1]](p0) :: (store 1) ; X64: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[DEF]](s32) - ; X64: G_STORE [[TRUNC2]](s16), [[DEF1]](p0) :: (store 16) + ; X64: G_STORE [[TRUNC2]](s16), [[DEF1]](p0) :: (store 2) ; X64: RET 0 %0(s32) = IMPLICIT_DEF %1(s1) = G_TRUNC %0(s32) @@ -46,10 +46,10 @@ G_STORE %1, %4 :: (store 1) %2(s8) = G_TRUNC %0(s32) - G_STORE %2, %4 :: (store 8) + G_STORE %2, %4 :: (store 1) %3(s16) = G_TRUNC %0(s32) - G_STORE %3, %4 :: (store 16) + G_STORE %3, %4 :: (store 2) RET 0 ... 
Index: test/CodeGen/X86/GlobalISel/select-GV-32.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-GV-32.mir +++ test/CodeGen/X86/GlobalISel/select-GV-32.mir @@ -1,7 +1,5 @@ -# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64ALL --check-prefix=X64 -# RUN: llc -mtriple=x86_64-apple-darwin -relocation-model=pic -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64ALL --check-prefix=X64_DARWIN_PIC -# RUN: llc -mtriple=i386-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32ALL --check-prefix=X32 -# RUN: llc -mtriple=x86_64-linux-gnux32 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32ALL --check-prefix=X32ABI +# RUN: llc -mtriple=i386-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32ALL --check-prefix=X32 +# RUN: llc -mtriple=x86_64-linux-gnux32 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32ALL --check-prefix=X32ABI --- | @@ -26,10 +24,6 @@ alignment: 4 legalized: true regBankSelected: true -# X64ALL: registers: -# X64ALL-NEXT: - { id: 0, class: gr64, preferred-register: '' } -# X64ALL-NEXT: - { id: 1, class: gr64, preferred-register: '' } -# # X32: registers: # X32-NEXT: - { id: 0, class: gr32, preferred-register: '' } # X32-NEXT: - { id: 1, class: gr32, preferred-register: '' } @@ -40,24 +34,14 @@ registers: - { id: 0, class: gpr, preferred-register: '' } - { id: 1, class: gpr, preferred-register: '' } -# X64: %0:gr64 = IMPLICIT_DEF -# X64-NEXT: %1:gr64 = LEA64r $noreg, 1, $noreg, @g_int, $noreg -# X64-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`) -# 
X64-NEXT: RET 0 -# -# X64_DARWIN_PIC: %0:gr64 = IMPLICIT_DEF -# X64_DARWIN_PIC-NEXT: %1:gr64 = LEA64r $rip, 1, $noreg, @g_int, $noreg -# X64_DARWIN_PIC-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`) -# X64_DARWIN_PIC-NEXT: RET 0 -# # X32: %0:gr32 = IMPLICIT_DEF # X32-NEXT: %1:gr32 = LEA32r $noreg, 1, $noreg, @g_int, $noreg -# X32-NEXT: MOV32mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`) +# X32-NEXT: MOV32mr %0, 1, $noreg, 0, $noreg, %1 :: (store 4 into `i32** undef`) # X32-NEXT: RET 0 # # X32ABI: %0:low32_addr_access = IMPLICIT_DEF # X32ABI-NEXT: %1:gr32 = LEA64_32r $noreg, 1, $noreg, @g_int, $noreg -# X32ABI-NEXT: MOV32mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`) +# X32ABI-NEXT: MOV32mr %0, 1, $noreg, 0, $noreg, %1 :: (store 4 into `i32** undef`) # X32ABI-NEXT: RET 0 body: | bb.1.entry: @@ -65,7 +49,7 @@ %0(p0) = IMPLICIT_DEF %1(p0) = G_GLOBAL_VALUE @g_int - G_STORE %1(p0), %0(p0) :: (store 8 into `i32** undef`) + G_STORE %1(p0), %0(p0) :: (store 4 into `i32** undef`) RET 0 ... 
@@ -75,26 +59,12 @@ alignment: 4 legalized: true regBankSelected: true -# X64ALL: registers: -# X64ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' } -# X64ALL-NEXT: - { id: 1, class: gr64, preferred-register: '' } -# # X32ALL: registers: # X32ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' } # X32ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' } registers: - { id: 0, class: gpr, preferred-register: '' } - { id: 1, class: gpr, preferred-register: '' } -# X64: %1:gr64 = LEA64r $noreg, 1, $noreg, @g_int, $noreg -# X64-NEXT: %0:gr32 = MOV32rm %1, 1, $noreg, 0, $noreg :: (load 4 from @g_int) -# X64-NEXT: $eax = COPY %0 -# X64-NEXT: RET 0, implicit $eax -# -# X64_DARWIN_PIC: %1:gr64 = LEA64r $rip, 1, $noreg, @g_int, $noreg -# X64_DARWIN_PIC-NEXT: %0:gr32 = MOV32rm %1, 1, $noreg, 0, $noreg :: (load 4 from @g_int) -# X64_DARWIN_PIC-NEXT: $eax = COPY %0 -# X64_DARWIN_PIC-NEXT: RET 0, implicit $eax -# # X32: %1:gr32 = LEA32r $noreg, 1, $noreg, @g_int, $noreg # X32-NEXT: %0:gr32 = MOV32rm %1, 1, $noreg, 0, $noreg :: (load 4 from @g_int) # X32-NEXT: $eax = COPY %0 Index: test/CodeGen/X86/GlobalISel/select-GV-64.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-GV-64.mir +++ test/CodeGen/X86/GlobalISel/select-GV-64.mir @@ -1,7 +1,5 @@ # RUN: llc -mtriple=x86_64-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64ALL --check-prefix=X64 # RUN: llc -mtriple=x86_64-apple-darwin -relocation-model=pic -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X64ALL --check-prefix=X64_DARWIN_PIC -# RUN: llc -mtriple=i386-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=X32ALL --check-prefix=X32 -# RUN: llc -mtriple=x86_64-linux-gnux32 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s 
--check-prefix=CHECK --check-prefix=X32ALL --check-prefix=X32ABI --- | @@ -30,13 +28,6 @@ # X64ALL-NEXT: - { id: 0, class: gr64, preferred-register: '' } # X64ALL-NEXT: - { id: 1, class: gr64, preferred-register: '' } # -# X32: registers: -# X32-NEXT: - { id: 0, class: gr32, preferred-register: '' } -# X32-NEXT: - { id: 1, class: gr32, preferred-register: '' } -# -# X32ABI: registers: -# X32ABI-NEXT: - { id: 0, class: low32_addr_access, preferred-register: '' } -# X32ABI-NEXT: - { id: 1, class: gr32, preferred-register: '' } registers: - { id: 0, class: gpr, preferred-register: '' } - { id: 1, class: gpr, preferred-register: '' } @@ -50,15 +41,6 @@ # X64_DARWIN_PIC-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`) # X64_DARWIN_PIC-NEXT: RET 0 # -# X32: %0:gr32 = IMPLICIT_DEF -# X32-NEXT: %1:gr32 = LEA32r $noreg, 1, $noreg, @g_int, $noreg -# X32-NEXT: MOV32mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`) -# X32-NEXT: RET 0 -# -# X32ABI: %0:low32_addr_access = IMPLICIT_DEF -# X32ABI-NEXT: %1:gr32 = LEA64_32r $noreg, 1, $noreg, @g_int, $noreg -# X32ABI-NEXT: MOV32mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`) -# X32ABI-NEXT: RET 0 body: | bb.1.entry: liveins: $rdi @@ -79,9 +61,6 @@ # X64ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' } # X64ALL-NEXT: - { id: 1, class: gr64, preferred-register: '' } # -# X32ALL: registers: -# X32ALL-NEXT: - { id: 0, class: gr32, preferred-register: '' } -# X32ALL-NEXT: - { id: 1, class: gr32, preferred-register: '' } registers: - { id: 0, class: gpr, preferred-register: '' } - { id: 1, class: gpr, preferred-register: '' } @@ -95,15 +74,6 @@ # X64_DARWIN_PIC-NEXT: $eax = COPY %0 # X64_DARWIN_PIC-NEXT: RET 0, implicit $eax # -# X32: %1:gr32 = LEA32r $noreg, 1, $noreg, @g_int, $noreg -# X32-NEXT: %0:gr32 = MOV32rm %1, 1, $noreg, 0, $noreg :: (load 4 from @g_int) -# X32-NEXT: $eax = COPY %0 -# X32-NEXT: RET 0, implicit $eax -# -# X32ABI: %1:gr32 = LEA64_32r $noreg, 1, 
$noreg, @g_int, $noreg -# X32ABI-NEXT: %0:gr32 = MOV32rm %1, 1, $noreg, 0, $noreg :: (load 4 from @g_int) -# X32ABI-NEXT: $eax = COPY %0 -# X32ABI-NEXT: RET 0, implicit $eax body: | bb.1.entry: %1(p0) = G_GLOBAL_VALUE @g_int Index: test/CodeGen/X86/abi-isel.ll =================================================================== --- test/CodeGen/X86/abi-isel.ll +++ test/CodeGen/X86/abi-isel.ll @@ -1,8 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; NOTE: Use the --no_x86_scrub_rip additional argument to keep the rip address math. ; RUN: llc < %s -mcpu=generic -mtriple=x86_64-unknown-linux-gnu -relocation-model=static -code-model=small -pre-RA-sched=list-ilp | FileCheck %s -check-prefix=LINUX-64-STATIC ; RUN: llc < %s -mcpu=generic -mtriple=i686-unknown-linux-gnu -relocation-model=static -code-model=small -pre-RA-sched=list-ilp | FileCheck %s -check-prefix=LINUX-32-STATIC -; RUN: llc < %s -mcpu=generic -mtriple=i686-unknown-linux-gnu -relocation-model=static -code-model=small -pre-RA-sched=list-ilp | FileCheck %s -check-prefix=LINUX-32-PIC +; RUN: llc < %s -mcpu=generic -mtriple=i686-unknown-linux-gnu -relocation-model=pic -code-model=small -pre-RA-sched=list-ilp | FileCheck %s -check-prefix=LINUX-32-PIC ; RUN: llc < %s -mcpu=generic -mtriple=x86_64-unknown-linux-gnu -relocation-model=pic -code-model=small -pre-RA-sched=list-ilp | FileCheck %s -check-prefix=LINUX-64-PIC ; RUN: llc < %s -mcpu=generic -mtriple=i686-apple-darwin -relocation-model=static -code-model=small -pre-RA-sched=list-ilp | FileCheck %s -check-prefix=DARWIN-32-STATIC @@ -47,8 +48,15 @@ ; ; LINUX-32-PIC-LABEL: foo00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl src, %eax -; LINUX-32-PIC-NEXT: movl %eax, dst +; LINUX-32-PIC-NEXT: calll .L0$pb +; LINUX-32-PIC-NEXT: .L0$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp0: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L0$pb), %eax +; LINUX-32-PIC-NEXT: movl src@GOT(%eax), 
%ecx +; LINUX-32-PIC-NEXT: movl (%ecx), %ecx +; LINUX-32-PIC-NEXT: movl dst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: foo00: @@ -130,8 +138,15 @@ ; ; LINUX-32-PIC-LABEL: fxo00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl xsrc, %eax -; LINUX-32-PIC-NEXT: movl %eax, xdst +; LINUX-32-PIC-NEXT: calll .L1$pb +; LINUX-32-PIC-NEXT: .L1$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp1: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.L1$pb), %eax +; LINUX-32-PIC-NEXT: movl xsrc@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl (%ecx), %ecx +; LINUX-32-PIC-NEXT: movl xdst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: fxo00: @@ -211,7 +226,14 @@ ; ; LINUX-32-PIC-LABEL: foo01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $dst, ptr +; LINUX-32-PIC-NEXT: calll .L2$pb +; LINUX-32-PIC-NEXT: .L2$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp2: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp2-.L2$pb), %eax +; LINUX-32-PIC-NEXT: movl dst@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: foo01: @@ -282,7 +304,14 @@ ; ; LINUX-32-PIC-LABEL: fxo01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $xdst, ptr +; LINUX-32-PIC-NEXT: calll .L3$pb +; LINUX-32-PIC-NEXT: .L3$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp3: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp3-.L3$pb), %eax +; LINUX-32-PIC-NEXT: movl xdst@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: fxo01: @@ -357,9 +386,16 @@ ; ; LINUX-32-PIC-LABEL: foo02: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl src, %eax -; LINUX-32-PIC-NEXT: movl ptr, %ecx -; LINUX-32-PIC-NEXT: movl 
%eax, (%ecx) +; LINUX-32-PIC-NEXT: calll .L4$pb +; LINUX-32-PIC-NEXT: .L4$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp4: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp4-.L4$pb), %eax +; LINUX-32-PIC-NEXT: movl src@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl (%ecx), %ecx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: foo02: @@ -450,9 +486,16 @@ ; ; LINUX-32-PIC-LABEL: fxo02: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl xsrc, %eax -; LINUX-32-PIC-NEXT: movl ptr, %ecx -; LINUX-32-PIC-NEXT: movl %eax, (%ecx) +; LINUX-32-PIC-NEXT: calll .L5$pb +; LINUX-32-PIC-NEXT: .L5$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp5: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp5-.L5$pb), %eax +; LINUX-32-PIC-NEXT: movl xsrc@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl (%ecx), %ecx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: fxo02: @@ -541,8 +584,15 @@ ; ; LINUX-32-PIC-LABEL: foo03: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl dsrc, %eax -; LINUX-32-PIC-NEXT: movl %eax, ddst +; LINUX-32-PIC-NEXT: calll .L6$pb +; LINUX-32-PIC-NEXT: .L6$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp6: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp6-.L6$pb), %eax +; LINUX-32-PIC-NEXT: movl dsrc@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl (%ecx), %ecx +; LINUX-32-PIC-NEXT: movl ddst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: foo03: @@ -611,7 +661,14 @@ ; ; LINUX-32-PIC-LABEL: foo04: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $ddst, dptr +; LINUX-32-PIC-NEXT: calll .L7$pb +; LINUX-32-PIC-NEXT: .L7$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: 
.Ltmp7: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp7-.L7$pb), %eax +; LINUX-32-PIC-NEXT: movl ddst@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl dptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: foo04: @@ -680,9 +737,16 @@ ; ; LINUX-32-PIC-LABEL: foo05: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl dsrc, %eax -; LINUX-32-PIC-NEXT: movl dptr, %ecx -; LINUX-32-PIC-NEXT: movl %eax, (%ecx) +; LINUX-32-PIC-NEXT: calll .L8$pb +; LINUX-32-PIC-NEXT: .L8$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp8: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp8-.L8$pb), %eax +; LINUX-32-PIC-NEXT: movl dsrc@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl (%ecx), %ecx +; LINUX-32-PIC-NEXT: movl dptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: foo05: @@ -761,8 +825,13 @@ ; ; LINUX-32-PIC-LABEL: foo06: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl lsrc, %eax -; LINUX-32-PIC-NEXT: movl %eax, ldst +; LINUX-32-PIC-NEXT: calll .L9$pb +; LINUX-32-PIC-NEXT: .L9$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp9: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp9-.L9$pb), %eax +; LINUX-32-PIC-NEXT: movl lsrc@GOTOFF(%eax), %ecx +; LINUX-32-PIC-NEXT: movl %ecx, ldst@GOTOFF(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: foo06: @@ -829,7 +898,13 @@ ; ; LINUX-32-PIC-LABEL: foo07: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $ldst, lptr +; LINUX-32-PIC-NEXT: calll .L10$pb +; LINUX-32-PIC-NEXT: .L10$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp10: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp10-.L10$pb), %eax +; LINUX-32-PIC-NEXT: leal ldst@GOTOFF(%eax), %ecx +; LINUX-32-PIC-NEXT: movl %ecx, lptr@GOTOFF(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: foo07: @@ -897,9 +972,14 @@ ; ; LINUX-32-PIC-LABEL: 
foo08: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl lsrc, %eax -; LINUX-32-PIC-NEXT: movl lptr, %ecx -; LINUX-32-PIC-NEXT: movl %eax, (%ecx) +; LINUX-32-PIC-NEXT: calll .L11$pb +; LINUX-32-PIC-NEXT: .L11$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp11: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp11-.L11$pb), %eax +; LINUX-32-PIC-NEXT: movl lsrc@GOTOFF(%eax), %ecx +; LINUX-32-PIC-NEXT: movl lptr@GOTOFF(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: foo08: @@ -976,8 +1056,15 @@ ; ; LINUX-32-PIC-LABEL: qux00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl src+64, %eax -; LINUX-32-PIC-NEXT: movl %eax, dst+64 +; LINUX-32-PIC-NEXT: calll .L12$pb +; LINUX-32-PIC-NEXT: .L12$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp12: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp12-.L12$pb), %eax +; LINUX-32-PIC-NEXT: movl src@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl 64(%ecx), %ecx +; LINUX-32-PIC-NEXT: movl dst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, 64(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: qux00: @@ -1058,8 +1145,15 @@ ; ; LINUX-32-PIC-LABEL: qxx00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl xsrc+64, %eax -; LINUX-32-PIC-NEXT: movl %eax, xdst+64 +; LINUX-32-PIC-NEXT: calll .L13$pb +; LINUX-32-PIC-NEXT: .L13$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp13: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp13-.L13$pb), %eax +; LINUX-32-PIC-NEXT: movl xsrc@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl 64(%ecx), %ecx +; LINUX-32-PIC-NEXT: movl xdst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, 64(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: qxx00: @@ -1138,7 +1232,15 @@ ; ; LINUX-32-PIC-LABEL: qux01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $dst+64, ptr +; LINUX-32-PIC-NEXT: calll .L14$pb +; LINUX-32-PIC-NEXT: .L14$pb: +; LINUX-32-PIC-NEXT: popl 
%eax +; LINUX-32-PIC-NEXT: .Ltmp14: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp14-.L14$pb), %eax +; LINUX-32-PIC-NEXT: movl dst@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: addl $64, %ecx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: qux01: @@ -1215,7 +1317,15 @@ ; ; LINUX-32-PIC-LABEL: qxx01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $xdst+64, ptr +; LINUX-32-PIC-NEXT: calll .L15$pb +; LINUX-32-PIC-NEXT: .L15$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp15: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp15-.L15$pb), %eax +; LINUX-32-PIC-NEXT: movl xdst@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: addl $64, %ecx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: qxx01: @@ -1296,9 +1406,16 @@ ; ; LINUX-32-PIC-LABEL: qux02: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl src+64, %eax -; LINUX-32-PIC-NEXT: movl ptr, %ecx -; LINUX-32-PIC-NEXT: movl %eax, 64(%ecx) +; LINUX-32-PIC-NEXT: calll .L16$pb +; LINUX-32-PIC-NEXT: .L16$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp16: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp16-.L16$pb), %eax +; LINUX-32-PIC-NEXT: movl src@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl 64(%ecx), %ecx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, 64(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: qux02: @@ -1390,9 +1507,16 @@ ; ; LINUX-32-PIC-LABEL: qxx02: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl xsrc+64, %eax -; LINUX-32-PIC-NEXT: movl ptr, %ecx -; LINUX-32-PIC-NEXT: movl %eax, 64(%ecx) +; LINUX-32-PIC-NEXT: calll .L17$pb +; LINUX-32-PIC-NEXT: .L17$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp17: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp17-.L17$pb), %eax 
+; LINUX-32-PIC-NEXT: movl xsrc@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl 64(%ecx), %ecx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, 64(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: qxx02: @@ -1482,8 +1606,15 @@ ; ; LINUX-32-PIC-LABEL: qux03: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl dsrc+64, %eax -; LINUX-32-PIC-NEXT: movl %eax, ddst+64 +; LINUX-32-PIC-NEXT: calll .L18$pb +; LINUX-32-PIC-NEXT: .L18$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp18: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp18-.L18$pb), %eax +; LINUX-32-PIC-NEXT: movl dsrc@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl 64(%ecx), %ecx +; LINUX-32-PIC-NEXT: movl ddst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, 64(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: qux03: @@ -1552,7 +1683,15 @@ ; ; LINUX-32-PIC-LABEL: qux04: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $ddst+64, dptr +; LINUX-32-PIC-NEXT: calll .L19$pb +; LINUX-32-PIC-NEXT: .L19$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp19: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp19-.L19$pb), %eax +; LINUX-32-PIC-NEXT: movl ddst@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: addl $64, %ecx +; LINUX-32-PIC-NEXT: movl dptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: qux04: @@ -1622,9 +1761,16 @@ ; ; LINUX-32-PIC-LABEL: qux05: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl dsrc+64, %eax -; LINUX-32-PIC-NEXT: movl dptr, %ecx -; LINUX-32-PIC-NEXT: movl %eax, 64(%ecx) +; LINUX-32-PIC-NEXT: calll .L20$pb +; LINUX-32-PIC-NEXT: .L20$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp20: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp20-.L20$pb), %eax +; LINUX-32-PIC-NEXT: movl dsrc@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl 64(%ecx), %ecx +; LINUX-32-PIC-NEXT: movl dptr@GOT(%eax), 
%eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, 64(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: qux05: @@ -1704,8 +1850,13 @@ ; ; LINUX-32-PIC-LABEL: qux06: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl lsrc+64, %eax -; LINUX-32-PIC-NEXT: movl %eax, ldst+64 +; LINUX-32-PIC-NEXT: calll .L21$pb +; LINUX-32-PIC-NEXT: .L21$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp21: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp21-.L21$pb), %eax +; LINUX-32-PIC-NEXT: movl lsrc@GOTOFF+64(%eax), %ecx +; LINUX-32-PIC-NEXT: movl %ecx, ldst@GOTOFF+64(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: qux06: @@ -1772,7 +1923,13 @@ ; ; LINUX-32-PIC-LABEL: qux07: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $ldst+64, lptr +; LINUX-32-PIC-NEXT: calll .L22$pb +; LINUX-32-PIC-NEXT: .L22$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp22: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp22-.L22$pb), %eax +; LINUX-32-PIC-NEXT: leal ldst@GOTOFF+64(%eax), %ecx +; LINUX-32-PIC-NEXT: movl %ecx, lptr@GOTOFF(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: qux07: @@ -1840,9 +1997,14 @@ ; ; LINUX-32-PIC-LABEL: qux08: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl lsrc+64, %eax -; LINUX-32-PIC-NEXT: movl lptr, %ecx -; LINUX-32-PIC-NEXT: movl %eax, 64(%ecx) +; LINUX-32-PIC-NEXT: calll .L23$pb +; LINUX-32-PIC-NEXT: .L23$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp23: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp23-.L23$pb), %eax +; LINUX-32-PIC-NEXT: movl lsrc@GOTOFF+64(%eax), %ecx +; LINUX-32-PIC-NEXT: movl lptr@GOTOFF(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, 64(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: qux08: @@ -1921,9 +2083,16 @@ ; ; LINUX-32-PIC-LABEL: ind00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl src(,%eax,4), %ecx -; 
LINUX-32-PIC-NEXT: movl %ecx, dst(,%eax,4) +; LINUX-32-PIC-NEXT: calll .L24$pb +; LINUX-32-PIC-NEXT: .L24$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp24: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp24-.L24$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl src@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: movl (%edx,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl dst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %edx, (%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: ind00: @@ -2010,9 +2179,16 @@ ; ; LINUX-32-PIC-LABEL: ixd00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl xsrc(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl %ecx, xdst(,%eax,4) +; LINUX-32-PIC-NEXT: calll .L25$pb +; LINUX-32-PIC-NEXT: .L25$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp25: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp25-.L25$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl xsrc@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: movl (%edx,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl xdst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %edx, (%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: ixd00: @@ -2099,9 +2275,16 @@ ; ; LINUX-32-PIC-LABEL: ind01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal dst(,%eax,4), %eax -; LINUX-32-PIC-NEXT: movl %eax, ptr +; LINUX-32-PIC-NEXT: calll .L26$pb +; LINUX-32-PIC-NEXT: .L26$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp26: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp26-.L26$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: shll $2, %ecx +; LINUX-32-PIC-NEXT: addl dst@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: ind01: @@ -2186,9 +2369,16 @@ 
; ; LINUX-32-PIC-LABEL: ixd01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal xdst(,%eax,4), %eax -; LINUX-32-PIC-NEXT: movl %eax, ptr +; LINUX-32-PIC-NEXT: calll .L27$pb +; LINUX-32-PIC-NEXT: .L27$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp27: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp27-.L27$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: shll $2, %ecx +; LINUX-32-PIC-NEXT: addl xdst@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: ixd01: @@ -2275,10 +2465,17 @@ ; ; LINUX-32-PIC-LABEL: ind02: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl src(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl ptr, %edx -; LINUX-32-PIC-NEXT: movl %ecx, (%edx,%eax,4) +; LINUX-32-PIC-NEXT: calll .L28$pb +; LINUX-32-PIC-NEXT: .L28$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp28: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp28-.L28$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl src@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: movl (%edx,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl %edx, (%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: ind02: @@ -2375,10 +2572,17 @@ ; ; LINUX-32-PIC-LABEL: ixd02: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl xsrc(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl ptr, %edx -; LINUX-32-PIC-NEXT: movl %ecx, (%edx,%eax,4) +; LINUX-32-PIC-NEXT: calll .L29$pb +; LINUX-32-PIC-NEXT: .L29$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp29: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp29-.L29$pb), %eax +; LINUX-32-PIC-NEXT: movl 
{{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl xsrc@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: movl (%edx,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl %edx, (%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: ixd02: @@ -2473,9 +2677,16 @@ ; ; LINUX-32-PIC-LABEL: ind03: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl dsrc(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl %ecx, ddst(,%eax,4) +; LINUX-32-PIC-NEXT: calll .L30$pb +; LINUX-32-PIC-NEXT: .L30$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp30: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp30-.L30$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl dsrc@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: movl (%edx,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl ddst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %edx, (%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: ind03: @@ -2558,9 +2769,16 @@ ; ; LINUX-32-PIC-LABEL: ind04: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal ddst(,%eax,4), %eax -; LINUX-32-PIC-NEXT: movl %eax, dptr +; LINUX-32-PIC-NEXT: calll .L31$pb +; LINUX-32-PIC-NEXT: .L31$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp31: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp31-.L31$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: shll $2, %ecx +; LINUX-32-PIC-NEXT: addl ddst@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl dptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: ind04: @@ -2640,10 +2858,17 @@ ; ; LINUX-32-PIC-LABEL: ind05: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl dsrc(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl dptr, %edx -; LINUX-32-PIC-NEXT: movl 
%ecx, (%edx,%eax,4) +; LINUX-32-PIC-NEXT: calll .L32$pb +; LINUX-32-PIC-NEXT: .L32$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp32: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp32-.L32$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl dsrc@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: movl (%edx,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl dptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl %edx, (%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: ind05: @@ -2731,9 +2956,14 @@ ; ; LINUX-32-PIC-LABEL: ind06: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl lsrc(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl %ecx, ldst(,%eax,4) +; LINUX-32-PIC-NEXT: calll .L33$pb +; LINUX-32-PIC-NEXT: .L33$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp33: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp33-.L33$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl lsrc@GOTOFF(%eax,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl %edx, ldst@GOTOFF(%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: ind06: @@ -2816,9 +3046,14 @@ ; ; LINUX-32-PIC-LABEL: ind07: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal ldst(,%eax,4), %eax -; LINUX-32-PIC-NEXT: movl %eax, lptr +; LINUX-32-PIC-NEXT: calll .L34$pb +; LINUX-32-PIC-NEXT: .L34$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp34: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp34-.L34$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: leal ldst@GOTOFF(%eax,%ecx,4), %ecx +; LINUX-32-PIC-NEXT: movl %ecx, lptr@GOTOFF(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: ind07: @@ -2897,10 +3132,15 @@ ; ; LINUX-32-PIC-LABEL: ind08: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), 
%eax -; LINUX-32-PIC-NEXT: movl lsrc(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl lptr, %edx -; LINUX-32-PIC-NEXT: movl %ecx, (%edx,%eax,4) +; LINUX-32-PIC-NEXT: calll .L35$pb +; LINUX-32-PIC-NEXT: .L35$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp35: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp35-.L35$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl lsrc@GOTOFF(%eax,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl lptr@GOTOFF(%eax), %eax +; LINUX-32-PIC-NEXT: movl %edx, (%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: ind08: @@ -2987,9 +3227,16 @@ ; ; LINUX-32-PIC-LABEL: off00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl src+64(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl %ecx, dst+64(,%eax,4) +; LINUX-32-PIC-NEXT: calll .L36$pb +; LINUX-32-PIC-NEXT: .L36$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp36: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp36-.L36$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl src@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: movl 64(%edx,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl dst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %edx, 64(%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: off00: @@ -3077,9 +3324,16 @@ ; ; LINUX-32-PIC-LABEL: oxf00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl xsrc+64(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl %ecx, xdst+64(,%eax,4) +; LINUX-32-PIC-NEXT: calll .L37$pb +; LINUX-32-PIC-NEXT: .L37$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp37: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp37-.L37$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl xsrc@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: movl 64(%edx,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl xdst@GOT(%eax), %eax +; 
LINUX-32-PIC-NEXT: movl %edx, 64(%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: oxf00: @@ -3167,9 +3421,16 @@ ; ; LINUX-32-PIC-LABEL: off01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal dst+64(,%eax,4), %eax -; LINUX-32-PIC-NEXT: movl %eax, ptr +; LINUX-32-PIC-NEXT: calll .L38$pb +; LINUX-32-PIC-NEXT: .L38$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp38: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp38-.L38$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl dst@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: leal 64(%edx,%ecx,4), %ecx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: off01: @@ -3255,9 +3516,16 @@ ; ; LINUX-32-PIC-LABEL: oxf01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal xdst+64(,%eax,4), %eax -; LINUX-32-PIC-NEXT: movl %eax, ptr +; LINUX-32-PIC-NEXT: calll .L39$pb +; LINUX-32-PIC-NEXT: .L39$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp39: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp39-.L39$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl xdst@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: leal 64(%edx,%ecx,4), %ecx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: oxf01: @@ -3345,10 +3613,17 @@ ; ; LINUX-32-PIC-LABEL: off02: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl src+64(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl ptr, %edx -; LINUX-32-PIC-NEXT: movl %ecx, 64(%edx,%eax,4) +; LINUX-32-PIC-NEXT: calll .L40$pb +; LINUX-32-PIC-NEXT: .L40$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp40: +; LINUX-32-PIC-NEXT: addl 
$_GLOBAL_OFFSET_TABLE_+(.Ltmp40-.L40$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl src@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: movl 64(%edx,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl %edx, 64(%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: off02: @@ -3446,10 +3721,17 @@ ; ; LINUX-32-PIC-LABEL: oxf02: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl xsrc+64(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl ptr, %edx -; LINUX-32-PIC-NEXT: movl %ecx, 64(%edx,%eax,4) +; LINUX-32-PIC-NEXT: calll .L41$pb +; LINUX-32-PIC-NEXT: .L41$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp41: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp41-.L41$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl xsrc@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: movl 64(%edx,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl %edx, 64(%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: oxf02: @@ -3545,9 +3827,16 @@ ; ; LINUX-32-PIC-LABEL: off03: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl dsrc+64(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl %ecx, ddst+64(,%eax,4) +; LINUX-32-PIC-NEXT: calll .L42$pb +; LINUX-32-PIC-NEXT: .L42$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp42: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp42-.L42$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl dsrc@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: movl 64(%edx,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl ddst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %edx, 64(%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: off03: @@ -3631,9 +3920,16 @@ ; ; LINUX-32-PIC-LABEL: 
off04: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal ddst+64(,%eax,4), %eax -; LINUX-32-PIC-NEXT: movl %eax, dptr +; LINUX-32-PIC-NEXT: calll .L43$pb +; LINUX-32-PIC-NEXT: .L43$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp43: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp43-.L43$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl ddst@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: leal 64(%edx,%ecx,4), %ecx +; LINUX-32-PIC-NEXT: movl dptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: off04: @@ -3714,10 +4010,17 @@ ; ; LINUX-32-PIC-LABEL: off05: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl dsrc+64(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl dptr, %edx -; LINUX-32-PIC-NEXT: movl %ecx, 64(%edx,%eax,4) +; LINUX-32-PIC-NEXT: calll .L44$pb +; LINUX-32-PIC-NEXT: .L44$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp44: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp44-.L44$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl dsrc@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: movl 64(%edx,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl dptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl %edx, 64(%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: off05: @@ -3806,9 +4109,14 @@ ; ; LINUX-32-PIC-LABEL: off06: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl lsrc+64(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl %ecx, ldst+64(,%eax,4) +; LINUX-32-PIC-NEXT: calll .L45$pb +; LINUX-32-PIC-NEXT: .L45$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp45: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp45-.L45$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; 
LINUX-32-PIC-NEXT: movl lsrc@GOTOFF+64(%eax,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl %edx, ldst@GOTOFF+64(%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: off06: @@ -3892,9 +4200,14 @@ ; ; LINUX-32-PIC-LABEL: off07: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal ldst+64(,%eax,4), %eax -; LINUX-32-PIC-NEXT: movl %eax, lptr +; LINUX-32-PIC-NEXT: calll .L46$pb +; LINUX-32-PIC-NEXT: .L46$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp46: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp46-.L46$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: leal ldst@GOTOFF+64(%eax,%ecx,4), %ecx +; LINUX-32-PIC-NEXT: movl %ecx, lptr@GOTOFF(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: off07: @@ -3974,10 +4287,15 @@ ; ; LINUX-32-PIC-LABEL: off08: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl lsrc+64(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl lptr, %edx -; LINUX-32-PIC-NEXT: movl %ecx, 64(%edx,%eax,4) +; LINUX-32-PIC-NEXT: calll .L47$pb +; LINUX-32-PIC-NEXT: .L47$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp47: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp47-.L47$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl lsrc@GOTOFF+64(%eax,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl lptr@GOTOFF(%eax), %eax +; LINUX-32-PIC-NEXT: movl %edx, 64(%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: off08: @@ -4064,8 +4382,15 @@ ; ; LINUX-32-PIC-LABEL: moo00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl src+262144, %eax -; LINUX-32-PIC-NEXT: movl %eax, dst+262144 +; LINUX-32-PIC-NEXT: calll .L48$pb +; LINUX-32-PIC-NEXT: .L48$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp48: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp48-.L48$pb), %eax +; LINUX-32-PIC-NEXT: movl src@GOT(%eax), %ecx 
+; LINUX-32-PIC-NEXT: movl 262144(%ecx), %ecx +; LINUX-32-PIC-NEXT: movl dst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, 262144(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: moo00: @@ -4144,7 +4469,15 @@ ; ; LINUX-32-PIC-LABEL: moo01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $dst+262144, ptr +; LINUX-32-PIC-NEXT: calll .L49$pb +; LINUX-32-PIC-NEXT: .L49$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp49: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp49-.L49$pb), %eax +; LINUX-32-PIC-NEXT: movl $262144, %ecx # imm = 0x40000 +; LINUX-32-PIC-NEXT: addl dst@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: moo01: @@ -4225,9 +4558,16 @@ ; ; LINUX-32-PIC-LABEL: moo02: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl src+262144, %eax -; LINUX-32-PIC-NEXT: movl ptr, %ecx -; LINUX-32-PIC-NEXT: movl %eax, 262144(%ecx) +; LINUX-32-PIC-NEXT: calll .L50$pb +; LINUX-32-PIC-NEXT: .L50$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp50: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp50-.L50$pb), %eax +; LINUX-32-PIC-NEXT: movl src@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl 262144(%ecx), %ecx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, 262144(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: moo02: @@ -4317,8 +4657,15 @@ ; ; LINUX-32-PIC-LABEL: moo03: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl dsrc+262144, %eax -; LINUX-32-PIC-NEXT: movl %eax, ddst+262144 +; LINUX-32-PIC-NEXT: calll .L51$pb +; LINUX-32-PIC-NEXT: .L51$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp51: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp51-.L51$pb), %eax +; LINUX-32-PIC-NEXT: movl dsrc@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl 262144(%ecx), %ecx +; LINUX-32-PIC-NEXT: movl 
ddst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, 262144(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: moo03: @@ -4387,7 +4734,15 @@ ; ; LINUX-32-PIC-LABEL: moo04: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $ddst+262144, dptr +; LINUX-32-PIC-NEXT: calll .L52$pb +; LINUX-32-PIC-NEXT: .L52$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp52: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp52-.L52$pb), %eax +; LINUX-32-PIC-NEXT: movl $262144, %ecx # imm = 0x40000 +; LINUX-32-PIC-NEXT: addl ddst@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl dptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: moo04: @@ -4457,9 +4812,16 @@ ; ; LINUX-32-PIC-LABEL: moo05: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl dsrc+262144, %eax -; LINUX-32-PIC-NEXT: movl dptr, %ecx -; LINUX-32-PIC-NEXT: movl %eax, 262144(%ecx) +; LINUX-32-PIC-NEXT: calll .L53$pb +; LINUX-32-PIC-NEXT: .L53$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp53: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp53-.L53$pb), %eax +; LINUX-32-PIC-NEXT: movl dsrc@GOT(%eax), %ecx +; LINUX-32-PIC-NEXT: movl 262144(%ecx), %ecx +; LINUX-32-PIC-NEXT: movl dptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, 262144(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: moo05: @@ -4539,8 +4901,13 @@ ; ; LINUX-32-PIC-LABEL: moo06: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl lsrc+262144, %eax -; LINUX-32-PIC-NEXT: movl %eax, ldst+262144 +; LINUX-32-PIC-NEXT: calll .L54$pb +; LINUX-32-PIC-NEXT: .L54$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp54: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp54-.L54$pb), %eax +; LINUX-32-PIC-NEXT: movl lsrc@GOTOFF+262144(%eax), %ecx +; LINUX-32-PIC-NEXT: movl %ecx, ldst@GOTOFF+262144(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: moo06: @@ -4607,7 +4974,13 @@ ; ; 
LINUX-32-PIC-LABEL: moo07: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $ldst+262144, lptr +; LINUX-32-PIC-NEXT: calll .L55$pb +; LINUX-32-PIC-NEXT: .L55$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp55: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp55-.L55$pb), %eax +; LINUX-32-PIC-NEXT: leal ldst@GOTOFF+262144(%eax), %ecx +; LINUX-32-PIC-NEXT: movl %ecx, lptr@GOTOFF(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: moo07: @@ -4675,9 +5048,14 @@ ; ; LINUX-32-PIC-LABEL: moo08: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl lsrc+262144, %eax -; LINUX-32-PIC-NEXT: movl lptr, %ecx -; LINUX-32-PIC-NEXT: movl %eax, 262144(%ecx) +; LINUX-32-PIC-NEXT: calll .L56$pb +; LINUX-32-PIC-NEXT: .L56$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp56: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp56-.L56$pb), %eax +; LINUX-32-PIC-NEXT: movl lsrc@GOTOFF+262144(%eax), %ecx +; LINUX-32-PIC-NEXT: movl lptr@GOTOFF(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, 262144(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: moo08: @@ -4756,9 +5134,16 @@ ; ; LINUX-32-PIC-LABEL: big00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl src+262144(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl %ecx, dst+262144(,%eax,4) +; LINUX-32-PIC-NEXT: calll .L57$pb +; LINUX-32-PIC-NEXT: .L57$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp57: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp57-.L57$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl src@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: movl 262144(%edx,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl dst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %edx, 262144(%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: big00: @@ -4846,9 +5231,16 @@ ; ; LINUX-32-PIC-LABEL: big01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), 
%eax -; LINUX-32-PIC-NEXT: leal dst+262144(,%eax,4), %eax -; LINUX-32-PIC-NEXT: movl %eax, ptr +; LINUX-32-PIC-NEXT: calll .L58$pb +; LINUX-32-PIC-NEXT: .L58$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp58: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp58-.L58$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl dst@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: leal 262144(%edx,%ecx,4), %ecx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: big01: @@ -4936,10 +5328,17 @@ ; ; LINUX-32-PIC-LABEL: big02: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl src+262144(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl ptr, %edx -; LINUX-32-PIC-NEXT: movl %ecx, 262144(%edx,%eax,4) +; LINUX-32-PIC-NEXT: calll .L59$pb +; LINUX-32-PIC-NEXT: .L59$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp59: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp59-.L59$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl src@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: movl 262144(%edx,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl %edx, 262144(%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: big02: @@ -5035,9 +5434,16 @@ ; ; LINUX-32-PIC-LABEL: big03: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl dsrc+262144(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl %ecx, ddst+262144(,%eax,4) +; LINUX-32-PIC-NEXT: calll .L60$pb +; LINUX-32-PIC-NEXT: .L60$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp60: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp60-.L60$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl dsrc@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: 
movl 262144(%edx,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl ddst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %edx, 262144(%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: big03: @@ -5121,9 +5527,16 @@ ; ; LINUX-32-PIC-LABEL: big04: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal ddst+262144(,%eax,4), %eax -; LINUX-32-PIC-NEXT: movl %eax, dptr +; LINUX-32-PIC-NEXT: calll .L61$pb +; LINUX-32-PIC-NEXT: .L61$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp61: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp61-.L61$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl ddst@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: leal 262144(%edx,%ecx,4), %ecx +; LINUX-32-PIC-NEXT: movl dptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl %ecx, (%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: big04: @@ -5204,10 +5617,17 @@ ; ; LINUX-32-PIC-LABEL: big05: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl dsrc+262144(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl dptr, %edx -; LINUX-32-PIC-NEXT: movl %ecx, 262144(%edx,%eax,4) +; LINUX-32-PIC-NEXT: calll .L62$pb +; LINUX-32-PIC-NEXT: .L62$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp62: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp62-.L62$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl dsrc@GOT(%eax), %edx +; LINUX-32-PIC-NEXT: movl 262144(%edx,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl dptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl %edx, 262144(%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: big05: @@ -5296,9 +5716,14 @@ ; ; LINUX-32-PIC-LABEL: big06: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl lsrc+262144(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl %ecx, 
ldst+262144(,%eax,4) +; LINUX-32-PIC-NEXT: calll .L63$pb +; LINUX-32-PIC-NEXT: .L63$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp63: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp63-.L63$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl lsrc@GOTOFF+262144(%eax,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl %edx, ldst@GOTOFF+262144(%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: big06: @@ -5382,9 +5807,14 @@ ; ; LINUX-32-PIC-LABEL: big07: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal ldst+262144(,%eax,4), %eax -; LINUX-32-PIC-NEXT: movl %eax, lptr +; LINUX-32-PIC-NEXT: calll .L64$pb +; LINUX-32-PIC-NEXT: .L64$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp64: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp64-.L64$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: leal ldst@GOTOFF+262144(%eax,%ecx,4), %ecx +; LINUX-32-PIC-NEXT: movl %ecx, lptr@GOTOFF(%eax) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: big07: @@ -5464,10 +5894,15 @@ ; ; LINUX-32-PIC-LABEL: big08: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl lsrc+262144(,%eax,4), %ecx -; LINUX-32-PIC-NEXT: movl lptr, %edx -; LINUX-32-PIC-NEXT: movl %ecx, 262144(%edx,%eax,4) +; LINUX-32-PIC-NEXT: calll .L65$pb +; LINUX-32-PIC-NEXT: .L65$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp65: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp65-.L65$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl lsrc@GOTOFF+262144(%eax,%ecx,4), %edx +; LINUX-32-PIC-NEXT: movl lptr@GOTOFF(%eax), %eax +; LINUX-32-PIC-NEXT: movl %edx, 262144(%eax,%ecx,4) ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: big08: @@ -5552,7 +5987,12 @@ ; ; LINUX-32-PIC-LABEL: bar00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl 
$src, %eax +; LINUX-32-PIC-NEXT: calll .L66$pb +; LINUX-32-PIC-NEXT: .L66$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp66: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp66-.L66$pb), %eax +; LINUX-32-PIC-NEXT: movl src@GOT(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bar00: @@ -5610,7 +6050,12 @@ ; ; LINUX-32-PIC-LABEL: bxr00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $xsrc, %eax +; LINUX-32-PIC-NEXT: calll .L67$pb +; LINUX-32-PIC-NEXT: .L67$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp67: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp67-.L67$pb), %eax +; LINUX-32-PIC-NEXT: movl xsrc@GOT(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bxr00: @@ -5668,7 +6113,12 @@ ; ; LINUX-32-PIC-LABEL: bar01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $dst, %eax +; LINUX-32-PIC-NEXT: calll .L68$pb +; LINUX-32-PIC-NEXT: .L68$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp68: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp68-.L68$pb), %eax +; LINUX-32-PIC-NEXT: movl dst@GOT(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bar01: @@ -5726,7 +6176,12 @@ ; ; LINUX-32-PIC-LABEL: bxr01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $xdst, %eax +; LINUX-32-PIC-NEXT: calll .L69$pb +; LINUX-32-PIC-NEXT: .L69$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp69: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp69-.L69$pb), %eax +; LINUX-32-PIC-NEXT: movl xdst@GOT(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bxr01: @@ -5784,7 +6239,12 @@ ; ; LINUX-32-PIC-LABEL: bar02: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $ptr, %eax +; LINUX-32-PIC-NEXT: calll .L70$pb +; LINUX-32-PIC-NEXT: .L70$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp70: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp70-.L70$pb), %eax +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), 
%eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bar02: @@ -5842,7 +6302,12 @@ ; ; LINUX-32-PIC-LABEL: bar03: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $dsrc, %eax +; LINUX-32-PIC-NEXT: calll .L71$pb +; LINUX-32-PIC-NEXT: .L71$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp71: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp71-.L71$pb), %eax +; LINUX-32-PIC-NEXT: movl dsrc@GOT(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bar03: @@ -5900,7 +6365,12 @@ ; ; LINUX-32-PIC-LABEL: bar04: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $ddst, %eax +; LINUX-32-PIC-NEXT: calll .L72$pb +; LINUX-32-PIC-NEXT: .L72$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp72: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp72-.L72$pb), %eax +; LINUX-32-PIC-NEXT: movl ddst@GOT(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bar04: @@ -5958,7 +6428,12 @@ ; ; LINUX-32-PIC-LABEL: bar05: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $dptr, %eax +; LINUX-32-PIC-NEXT: calll .L73$pb +; LINUX-32-PIC-NEXT: .L73$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp73: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp73-.L73$pb), %eax +; LINUX-32-PIC-NEXT: movl dptr@GOT(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bar05: @@ -6016,7 +6491,12 @@ ; ; LINUX-32-PIC-LABEL: bar06: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $lsrc, %eax +; LINUX-32-PIC-NEXT: calll .L74$pb +; LINUX-32-PIC-NEXT: .L74$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp74: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp74-.L74$pb), %eax +; LINUX-32-PIC-NEXT: leal lsrc@GOTOFF(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bar06: @@ -6074,7 +6554,12 @@ ; ; LINUX-32-PIC-LABEL: bar07: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $ldst, %eax +; LINUX-32-PIC-NEXT: calll .L75$pb +; LINUX-32-PIC-NEXT: 
.L75$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp75: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp75-.L75$pb), %eax +; LINUX-32-PIC-NEXT: leal ldst@GOTOFF(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bar07: @@ -6132,7 +6617,12 @@ ; ; LINUX-32-PIC-LABEL: bar08: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $lptr, %eax +; LINUX-32-PIC-NEXT: calll .L76$pb +; LINUX-32-PIC-NEXT: .L76$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp76: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp76-.L76$pb), %eax +; LINUX-32-PIC-NEXT: leal lptr@GOTOFF(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bar08: @@ -6190,7 +6680,12 @@ ; ; LINUX-32-PIC-LABEL: har00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $src, %eax +; LINUX-32-PIC-NEXT: calll .L77$pb +; LINUX-32-PIC-NEXT: .L77$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp77: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp77-.L77$pb), %eax +; LINUX-32-PIC-NEXT: movl src@GOT(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: har00: @@ -6248,7 +6743,12 @@ ; ; LINUX-32-PIC-LABEL: hxr00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $xsrc, %eax +; LINUX-32-PIC-NEXT: calll .L78$pb +; LINUX-32-PIC-NEXT: .L78$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp78: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp78-.L78$pb), %eax +; LINUX-32-PIC-NEXT: movl xsrc@GOT(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: hxr00: @@ -6306,7 +6806,12 @@ ; ; LINUX-32-PIC-LABEL: har01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $dst, %eax +; LINUX-32-PIC-NEXT: calll .L79$pb +; LINUX-32-PIC-NEXT: .L79$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp79: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp79-.L79$pb), %eax +; LINUX-32-PIC-NEXT: movl dst@GOT(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: har01: @@ 
-6364,7 +6869,12 @@ ; ; LINUX-32-PIC-LABEL: hxr01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $xdst, %eax +; LINUX-32-PIC-NEXT: calll .L80$pb +; LINUX-32-PIC-NEXT: .L80$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp80: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp80-.L80$pb), %eax +; LINUX-32-PIC-NEXT: movl xdst@GOT(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: hxr01: @@ -6422,7 +6932,13 @@ ; ; LINUX-32-PIC-LABEL: har02: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl ptr, %eax +; LINUX-32-PIC-NEXT: calll .L81$pb +; LINUX-32-PIC-NEXT: .L81$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp81: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp81-.L81$pb), %eax +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: har02: @@ -6488,7 +7004,12 @@ ; ; LINUX-32-PIC-LABEL: har03: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $dsrc, %eax +; LINUX-32-PIC-NEXT: calll .L82$pb +; LINUX-32-PIC-NEXT: .L82$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp82: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp82-.L82$pb), %eax +; LINUX-32-PIC-NEXT: movl dsrc@GOT(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: har03: @@ -6546,7 +7067,12 @@ ; ; LINUX-32-PIC-LABEL: har04: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $ddst, %eax +; LINUX-32-PIC-NEXT: calll .L83$pb +; LINUX-32-PIC-NEXT: .L83$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp83: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp83-.L83$pb), %eax +; LINUX-32-PIC-NEXT: movl ddst@GOT(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: har04: @@ -6604,7 +7130,13 @@ ; ; LINUX-32-PIC-LABEL: har05: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl dptr, %eax +; LINUX-32-PIC-NEXT: calll .L84$pb +; LINUX-32-PIC-NEXT: .L84$pb: +; LINUX-32-PIC-NEXT: popl 
%eax +; LINUX-32-PIC-NEXT: .Ltmp84: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp84-.L84$pb), %eax +; LINUX-32-PIC-NEXT: movl dptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: har05: @@ -6665,7 +7197,12 @@ ; ; LINUX-32-PIC-LABEL: har06: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $lsrc, %eax +; LINUX-32-PIC-NEXT: calll .L85$pb +; LINUX-32-PIC-NEXT: .L85$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp85: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp85-.L85$pb), %eax +; LINUX-32-PIC-NEXT: leal lsrc@GOTOFF(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: har06: @@ -6723,7 +7260,12 @@ ; ; LINUX-32-PIC-LABEL: har07: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $ldst, %eax +; LINUX-32-PIC-NEXT: calll .L86$pb +; LINUX-32-PIC-NEXT: .L86$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp86: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp86-.L86$pb), %eax +; LINUX-32-PIC-NEXT: leal ldst@GOTOFF(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: har07: @@ -6781,7 +7323,12 @@ ; ; LINUX-32-PIC-LABEL: har08: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl lptr, %eax +; LINUX-32-PIC-NEXT: calll .L87$pb +; LINUX-32-PIC-NEXT: .L87$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp87: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp87-.L87$pb), %eax +; LINUX-32-PIC-NEXT: movl lptr@GOTOFF(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: har08: @@ -6841,7 +7388,13 @@ ; ; LINUX-32-PIC-LABEL: bat00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $src+64, %eax +; LINUX-32-PIC-NEXT: calll .L88$pb +; LINUX-32-PIC-NEXT: .L88$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp88: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp88-.L88$pb), %eax +; LINUX-32-PIC-NEXT: movl src@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: addl $64, %eax ; 
LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bat00: @@ -6905,7 +7458,13 @@ ; ; LINUX-32-PIC-LABEL: bxt00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $xsrc+64, %eax +; LINUX-32-PIC-NEXT: calll .L89$pb +; LINUX-32-PIC-NEXT: .L89$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp89: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp89-.L89$pb), %eax +; LINUX-32-PIC-NEXT: movl xsrc@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: addl $64, %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bxt00: @@ -6969,7 +7528,13 @@ ; ; LINUX-32-PIC-LABEL: bat01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $dst+64, %eax +; LINUX-32-PIC-NEXT: calll .L90$pb +; LINUX-32-PIC-NEXT: .L90$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp90: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp90-.L90$pb), %eax +; LINUX-32-PIC-NEXT: movl dst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: addl $64, %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bat01: @@ -7033,7 +7598,13 @@ ; ; LINUX-32-PIC-LABEL: bxt01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $xdst+64, %eax +; LINUX-32-PIC-NEXT: calll .L91$pb +; LINUX-32-PIC-NEXT: .L91$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp91: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp91-.L91$pb), %eax +; LINUX-32-PIC-NEXT: movl xdst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: addl $64, %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bxt01: @@ -7099,7 +7670,13 @@ ; ; LINUX-32-PIC-LABEL: bat02: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl ptr, %eax +; LINUX-32-PIC-NEXT: calll .L92$pb +; LINUX-32-PIC-NEXT: .L92$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp92: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp92-.L92$pb), %eax +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax ; LINUX-32-PIC-NEXT: addl $64, %eax ; LINUX-32-PIC-NEXT: retl ; @@ -7174,7 +7751,13 @@ ; ; 
LINUX-32-PIC-LABEL: bat03: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $dsrc+64, %eax +; LINUX-32-PIC-NEXT: calll .L93$pb +; LINUX-32-PIC-NEXT: .L93$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp93: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp93-.L93$pb), %eax +; LINUX-32-PIC-NEXT: movl dsrc@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: addl $64, %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bat03: @@ -7233,7 +7816,13 @@ ; ; LINUX-32-PIC-LABEL: bat04: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $ddst+64, %eax +; LINUX-32-PIC-NEXT: calll .L94$pb +; LINUX-32-PIC-NEXT: .L94$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp94: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp94-.L94$pb), %eax +; LINUX-32-PIC-NEXT: movl ddst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: addl $64, %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bat04: @@ -7294,7 +7883,13 @@ ; ; LINUX-32-PIC-LABEL: bat05: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl dptr, %eax +; LINUX-32-PIC-NEXT: calll .L95$pb +; LINUX-32-PIC-NEXT: .L95$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp95: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp95-.L95$pb), %eax +; LINUX-32-PIC-NEXT: movl dptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax ; LINUX-32-PIC-NEXT: addl $64, %eax ; LINUX-32-PIC-NEXT: retl ; @@ -7364,7 +7959,12 @@ ; ; LINUX-32-PIC-LABEL: bat06: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $lsrc+64, %eax +; LINUX-32-PIC-NEXT: calll .L96$pb +; LINUX-32-PIC-NEXT: .L96$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp96: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp96-.L96$pb), %eax +; LINUX-32-PIC-NEXT: leal lsrc@GOTOFF+64(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bat06: @@ -7422,7 +8022,12 @@ ; ; LINUX-32-PIC-LABEL: bat07: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $ldst+64, %eax +; 
LINUX-32-PIC-NEXT: calll .L97$pb +; LINUX-32-PIC-NEXT: .L97$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp97: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp97-.L97$pb), %eax +; LINUX-32-PIC-NEXT: leal ldst@GOTOFF+64(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bat07: @@ -7482,7 +8087,12 @@ ; ; LINUX-32-PIC-LABEL: bat08: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl lptr, %eax +; LINUX-32-PIC-NEXT: calll .L98$pb +; LINUX-32-PIC-NEXT: .L98$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp98: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp98-.L98$pb), %eax +; LINUX-32-PIC-NEXT: movl lptr@GOTOFF(%eax), %eax ; LINUX-32-PIC-NEXT: addl $64, %eax ; LINUX-32-PIC-NEXT: retl ; @@ -7551,7 +8161,13 @@ ; ; LINUX-32-PIC-LABEL: bam00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $src+262144, %eax +; LINUX-32-PIC-NEXT: calll .L99$pb +; LINUX-32-PIC-NEXT: .L99$pb: +; LINUX-32-PIC-NEXT: popl %ecx +; LINUX-32-PIC-NEXT: .Ltmp99: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp99-.L99$pb), %ecx +; LINUX-32-PIC-NEXT: movl $262144, %eax # imm = 0x40000 +; LINUX-32-PIC-NEXT: addl src@GOT(%ecx), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bam00: @@ -7615,7 +8231,13 @@ ; ; LINUX-32-PIC-LABEL: bam01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $dst+262144, %eax +; LINUX-32-PIC-NEXT: calll .L100$pb +; LINUX-32-PIC-NEXT: .L100$pb: +; LINUX-32-PIC-NEXT: popl %ecx +; LINUX-32-PIC-NEXT: .Ltmp100: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp100-.L100$pb), %ecx +; LINUX-32-PIC-NEXT: movl $262144, %eax # imm = 0x40000 +; LINUX-32-PIC-NEXT: addl dst@GOT(%ecx), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bam01: @@ -7679,7 +8301,13 @@ ; ; LINUX-32-PIC-LABEL: bxm01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $xdst+262144, %eax +; LINUX-32-PIC-NEXT: calll .L101$pb +; LINUX-32-PIC-NEXT: .L101$pb: +; LINUX-32-PIC-NEXT: popl %ecx +; 
LINUX-32-PIC-NEXT: .Ltmp101: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp101-.L101$pb), %ecx +; LINUX-32-PIC-NEXT: movl $262144, %eax # imm = 0x40000 +; LINUX-32-PIC-NEXT: addl xdst@GOT(%ecx), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bxm01: @@ -7745,13 +8373,19 @@ ; ; LINUX-32-PIC-LABEL: bam02: ; LINUX-32-PIC: # %bb.0: # %entry +; LINUX-32-PIC-NEXT: calll .L102$pb +; LINUX-32-PIC-NEXT: .L102$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp102: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp102-.L102$pb), %eax +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %ecx ; LINUX-32-PIC-NEXT: movl $262144, %eax # imm = 0x40000 -; LINUX-32-PIC-NEXT: addl ptr, %eax +; LINUX-32-PIC-NEXT: addl (%ecx), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bam02: ; LINUX-64-PIC: # %bb.0: # %entry -; LINUX-64-PIC-NEXT: movq ptr@{{.*}}(%rip), %rcx +; LINUX-64-PIC-NEXT: movq ptr@GOTPCREL(%rip), %rcx ; LINUX-64-PIC-NEXT: movl $262144, %eax # imm = 0x40000 ; LINUX-64-PIC-NEXT: addq (%rcx), %rax ; LINUX-64-PIC-NEXT: retq @@ -7781,21 +8415,21 @@ ; ; DARWIN-64-STATIC-LABEL: bam02: ; DARWIN-64-STATIC: ## %bb.0: ## %entry -; DARWIN-64-STATIC-NEXT: movq _ptr@{{.*}}(%rip), %rcx +; DARWIN-64-STATIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx ; DARWIN-64-STATIC-NEXT: movl $262144, %eax ## imm = 0x40000 ; DARWIN-64-STATIC-NEXT: addq (%rcx), %rax ; DARWIN-64-STATIC-NEXT: retq ; ; DARWIN-64-DYNAMIC-LABEL: bam02: ; DARWIN-64-DYNAMIC: ## %bb.0: ## %entry -; DARWIN-64-DYNAMIC-NEXT: movq _ptr@{{.*}}(%rip), %rcx +; DARWIN-64-DYNAMIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx ; DARWIN-64-DYNAMIC-NEXT: movl $262144, %eax ## imm = 0x40000 ; DARWIN-64-DYNAMIC-NEXT: addq (%rcx), %rax ; DARWIN-64-DYNAMIC-NEXT: retq ; ; DARWIN-64-PIC-LABEL: bam02: ; DARWIN-64-PIC: ## %bb.0: ## %entry -; DARWIN-64-PIC-NEXT: movq _ptr@{{.*}}(%rip), %rcx +; DARWIN-64-PIC-NEXT: movq _ptr@GOTPCREL(%rip), %rcx ; DARWIN-64-PIC-NEXT: movl $262144, %eax ## imm = 0x40000 ; DARWIN-64-PIC-NEXT: addq (%rcx), 
%rax ; DARWIN-64-PIC-NEXT: retq @@ -7820,7 +8454,13 @@ ; ; LINUX-32-PIC-LABEL: bam03: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $dsrc+262144, %eax +; LINUX-32-PIC-NEXT: calll .L103$pb +; LINUX-32-PIC-NEXT: .L103$pb: +; LINUX-32-PIC-NEXT: popl %ecx +; LINUX-32-PIC-NEXT: .Ltmp103: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp103-.L103$pb), %ecx +; LINUX-32-PIC-NEXT: movl $262144, %eax # imm = 0x40000 +; LINUX-32-PIC-NEXT: addl dsrc@GOT(%ecx), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bam03: @@ -7879,7 +8519,13 @@ ; ; LINUX-32-PIC-LABEL: bam04: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $ddst+262144, %eax +; LINUX-32-PIC-NEXT: calll .L104$pb +; LINUX-32-PIC-NEXT: .L104$pb: +; LINUX-32-PIC-NEXT: popl %ecx +; LINUX-32-PIC-NEXT: .Ltmp104: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp104-.L104$pb), %ecx +; LINUX-32-PIC-NEXT: movl $262144, %eax # imm = 0x40000 +; LINUX-32-PIC-NEXT: addl ddst@GOT(%ecx), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bam04: @@ -7940,13 +8586,19 @@ ; ; LINUX-32-PIC-LABEL: bam05: ; LINUX-32-PIC: # %bb.0: # %entry +; LINUX-32-PIC-NEXT: calll .L105$pb +; LINUX-32-PIC-NEXT: .L105$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp105: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp105-.L105$pb), %eax +; LINUX-32-PIC-NEXT: movl dptr@GOT(%eax), %ecx ; LINUX-32-PIC-NEXT: movl $262144, %eax # imm = 0x40000 -; LINUX-32-PIC-NEXT: addl dptr, %eax +; LINUX-32-PIC-NEXT: addl (%ecx), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bam05: ; LINUX-64-PIC: # %bb.0: # %entry -; LINUX-64-PIC-NEXT: movq dptr@{{.*}}(%rip), %rcx +; LINUX-64-PIC-NEXT: movq dptr@GOTPCREL(%rip), %rcx ; LINUX-64-PIC-NEXT: movl $262144, %eax # imm = 0x40000 ; LINUX-64-PIC-NEXT: addq (%rcx), %rax ; LINUX-64-PIC-NEXT: retq @@ -8010,7 +8662,12 @@ ; ; LINUX-32-PIC-LABEL: bam06: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $lsrc+262144, %eax +; LINUX-32-PIC-NEXT: calll 
.L106$pb +; LINUX-32-PIC-NEXT: .L106$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp106: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp106-.L106$pb), %eax +; LINUX-32-PIC-NEXT: leal lsrc@GOTOFF+262144(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bam06: @@ -8068,7 +8725,12 @@ ; ; LINUX-32-PIC-LABEL: bam07: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $ldst+262144, %eax +; LINUX-32-PIC-NEXT: calll .L107$pb +; LINUX-32-PIC-NEXT: .L107$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp107: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp107-.L107$pb), %eax +; LINUX-32-PIC-NEXT: leal ldst@GOTOFF+262144(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bam07: @@ -8128,8 +8790,13 @@ ; ; LINUX-32-PIC-LABEL: bam08: ; LINUX-32-PIC: # %bb.0: # %entry +; LINUX-32-PIC-NEXT: calll .L108$pb +; LINUX-32-PIC-NEXT: .L108$pb: +; LINUX-32-PIC-NEXT: popl %ecx +; LINUX-32-PIC-NEXT: .Ltmp108: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp108-.L108$pb), %ecx ; LINUX-32-PIC-NEXT: movl $262144, %eax # imm = 0x40000 -; LINUX-32-PIC-NEXT: addl lptr, %eax +; LINUX-32-PIC-NEXT: addl lptr@GOTOFF(%ecx), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: bam08: @@ -8198,8 +8865,14 @@ ; ; LINUX-32-PIC-LABEL: cat00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal src+64(,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L109$pb +; LINUX-32-PIC-NEXT: .L109$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp109: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp109-.L109$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl src@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: leal 64(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cat00: @@ -8270,8 +8943,14 @@ ; ; LINUX-32-PIC-LABEL: cxt00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; 
LINUX-32-PIC-NEXT: leal xsrc+64(,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L110$pb +; LINUX-32-PIC-NEXT: .L110$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp110: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp110-.L110$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl xsrc@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: leal 64(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cxt00: @@ -8342,8 +9021,14 @@ ; ; LINUX-32-PIC-LABEL: cat01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal dst+64(,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L111$pb +; LINUX-32-PIC-NEXT: .L111$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp111: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp111-.L111$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl dst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: leal 64(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cat01: @@ -8414,8 +9099,14 @@ ; ; LINUX-32-PIC-LABEL: cxt01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal xdst+64(,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L112$pb +; LINUX-32-PIC-NEXT: .L112$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp112: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp112-.L112$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl xdst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: leal 64(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cxt01: @@ -8488,9 +9179,15 @@ ; ; LINUX-32-PIC-LABEL: cat02: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl ptr, %ecx -; LINUX-32-PIC-NEXT: leal 64(%ecx,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L113$pb +; LINUX-32-PIC-NEXT: .L113$pb: +; LINUX-32-PIC-NEXT: popl %eax +; 
LINUX-32-PIC-NEXT: .Ltmp113: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp113-.L113$pb), %eax +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: leal 64(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cat02: @@ -8569,8 +9266,14 @@ ; ; LINUX-32-PIC-LABEL: cat03: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal dsrc+64(,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L114$pb +; LINUX-32-PIC-NEXT: .L114$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp114: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp114-.L114$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl dsrc@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: leal 64(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cat03: @@ -8639,8 +9342,14 @@ ; ; LINUX-32-PIC-LABEL: cat04: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal ddst+64(,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L115$pb +; LINUX-32-PIC-NEXT: .L115$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp115: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp115-.L115$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl ddst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: leal 64(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cat04: @@ -8711,9 +9420,15 @@ ; ; LINUX-32-PIC-LABEL: cat05: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl dptr, %ecx -; LINUX-32-PIC-NEXT: leal 64(%ecx,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L116$pb +; LINUX-32-PIC-NEXT: .L116$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp116: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp116-.L116$pb), %eax +; 
LINUX-32-PIC-NEXT: movl dptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: leal 64(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cat05: @@ -8787,8 +9502,13 @@ ; ; LINUX-32-PIC-LABEL: cat06: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal lsrc+64(,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L117$pb +; LINUX-32-PIC-NEXT: .L117$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp117: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp117-.L117$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: leal lsrc@GOTOFF+64(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cat06: @@ -8857,8 +9577,13 @@ ; ; LINUX-32-PIC-LABEL: cat07: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal ldst+64(,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L118$pb +; LINUX-32-PIC-NEXT: .L118$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp118: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp118-.L118$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: leal ldst@GOTOFF+64(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cat07: @@ -8929,9 +9654,14 @@ ; ; LINUX-32-PIC-LABEL: cat08: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl lptr, %ecx -; LINUX-32-PIC-NEXT: leal 64(%ecx,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L119$pb +; LINUX-32-PIC-NEXT: .L119$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp119: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp119-.L119$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl lptr@GOTOFF(%eax), %eax +; LINUX-32-PIC-NEXT: leal 64(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; 
LINUX-64-PIC-LABEL: cat08: @@ -9004,8 +9734,14 @@ ; ; LINUX-32-PIC-LABEL: cam00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal src+262144(,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L120$pb +; LINUX-32-PIC-NEXT: .L120$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp120: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp120-.L120$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl src@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: leal 262144(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cam00: @@ -9076,8 +9812,14 @@ ; ; LINUX-32-PIC-LABEL: cxm00: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal xsrc+262144(,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L121$pb +; LINUX-32-PIC-NEXT: .L121$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp121: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp121-.L121$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl xsrc@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: leal 262144(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cxm00: @@ -9148,8 +9890,14 @@ ; ; LINUX-32-PIC-LABEL: cam01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal dst+262144(,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L122$pb +; LINUX-32-PIC-NEXT: .L122$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp122: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp122-.L122$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl dst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: leal 262144(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cam01: @@ -9220,8 +9968,14 @@ ; ; LINUX-32-PIC-LABEL: cxm01: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; 
LINUX-32-PIC-NEXT: leal xdst+262144(,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L123$pb +; LINUX-32-PIC-NEXT: .L123$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp123: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp123-.L123$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl xdst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: leal 262144(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cxm01: @@ -9294,9 +10048,15 @@ ; ; LINUX-32-PIC-LABEL: cam02: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl ptr, %ecx -; LINUX-32-PIC-NEXT: leal 262144(%ecx,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L124$pb +; LINUX-32-PIC-NEXT: .L124$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp124: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp124-.L124$pb), %eax +; LINUX-32-PIC-NEXT: movl ptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: leal 262144(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cam02: @@ -9375,8 +10135,14 @@ ; ; LINUX-32-PIC-LABEL: cam03: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal dsrc+262144(,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L125$pb +; LINUX-32-PIC-NEXT: .L125$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp125: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp125-.L125$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl dsrc@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: leal 262144(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cam03: @@ -9445,8 +10211,14 @@ ; ; LINUX-32-PIC-LABEL: cam04: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal ddst+262144(,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L126$pb +; 
LINUX-32-PIC-NEXT: .L126$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp126: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp126-.L126$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl ddst@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: leal 262144(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cam04: @@ -9517,9 +10289,15 @@ ; ; LINUX-32-PIC-LABEL: cam05: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl dptr, %ecx -; LINUX-32-PIC-NEXT: leal 262144(%ecx,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L127$pb +; LINUX-32-PIC-NEXT: .L127$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp127: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp127-.L127$pb), %eax +; LINUX-32-PIC-NEXT: movl dptr@GOT(%eax), %eax +; LINUX-32-PIC-NEXT: movl (%eax), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: leal 262144(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cam05: @@ -9593,8 +10371,13 @@ ; ; LINUX-32-PIC-LABEL: cam06: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal lsrc+262144(,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L128$pb +; LINUX-32-PIC-NEXT: .L128$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp128: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp128-.L128$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: leal lsrc@GOTOFF+262144(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cam06: @@ -9663,8 +10446,13 @@ ; ; LINUX-32-PIC-LABEL: cam07: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: leal ldst+262144(,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L129$pb +; LINUX-32-PIC-NEXT: .L129$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp129: +; LINUX-32-PIC-NEXT: addl 
$_GLOBAL_OFFSET_TABLE_+(.Ltmp129-.L129$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: leal ldst@GOTOFF+262144(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cam07: @@ -9735,9 +10523,14 @@ ; ; LINUX-32-PIC-LABEL: cam08: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %eax -; LINUX-32-PIC-NEXT: movl lptr, %ecx -; LINUX-32-PIC-NEXT: leal 262144(%ecx,%eax,4), %eax +; LINUX-32-PIC-NEXT: calll .L130$pb +; LINUX-32-PIC-NEXT: .L130$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp130: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp130-.L130$pb), %eax +; LINUX-32-PIC-NEXT: movl {{[0-9]+}}(%esp), %ecx +; LINUX-32-PIC-NEXT: movl lptr@GOTOFF(%eax), %eax +; LINUX-32-PIC-NEXT: leal 262144(%eax,%ecx,4), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: cam08: @@ -9825,15 +10618,22 @@ ; ; LINUX-32-PIC-LABEL: lcallee: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: subl $12, %esp -; LINUX-32-PIC-NEXT: calll x -; LINUX-32-PIC-NEXT: calll x -; LINUX-32-PIC-NEXT: calll x -; LINUX-32-PIC-NEXT: calll x -; LINUX-32-PIC-NEXT: calll x -; LINUX-32-PIC-NEXT: calll x -; LINUX-32-PIC-NEXT: calll x -; LINUX-32-PIC-NEXT: addl $12, %esp +; LINUX-32-PIC-NEXT: pushl %ebx +; LINUX-32-PIC-NEXT: subl $8, %esp +; LINUX-32-PIC-NEXT: calll .L131$pb +; LINUX-32-PIC-NEXT: .L131$pb: +; LINUX-32-PIC-NEXT: popl %ebx +; LINUX-32-PIC-NEXT: .Ltmp131: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp131-.L131$pb), %ebx +; LINUX-32-PIC-NEXT: calll x@PLT +; LINUX-32-PIC-NEXT: calll x@PLT +; LINUX-32-PIC-NEXT: calll x@PLT +; LINUX-32-PIC-NEXT: calll x@PLT +; LINUX-32-PIC-NEXT: calll x@PLT +; LINUX-32-PIC-NEXT: calll x@PLT +; LINUX-32-PIC-NEXT: calll x@PLT +; LINUX-32-PIC-NEXT: addl $8, %esp +; LINUX-32-PIC-NEXT: popl %ebx ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: lcallee: @@ -9969,15 +10769,22 @@ ; ; LINUX-32-PIC-LABEL: dcallee: ; LINUX-32-PIC: # %bb.0: # %entry -; 
LINUX-32-PIC-NEXT: subl $12, %esp -; LINUX-32-PIC-NEXT: calll y -; LINUX-32-PIC-NEXT: calll y -; LINUX-32-PIC-NEXT: calll y -; LINUX-32-PIC-NEXT: calll y -; LINUX-32-PIC-NEXT: calll y -; LINUX-32-PIC-NEXT: calll y -; LINUX-32-PIC-NEXT: calll y -; LINUX-32-PIC-NEXT: addl $12, %esp +; LINUX-32-PIC-NEXT: pushl %ebx +; LINUX-32-PIC-NEXT: subl $8, %esp +; LINUX-32-PIC-NEXT: calll .L132$pb +; LINUX-32-PIC-NEXT: .L132$pb: +; LINUX-32-PIC-NEXT: popl %ebx +; LINUX-32-PIC-NEXT: .Ltmp132: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp132-.L132$pb), %ebx +; LINUX-32-PIC-NEXT: calll y@PLT +; LINUX-32-PIC-NEXT: calll y@PLT +; LINUX-32-PIC-NEXT: calll y@PLT +; LINUX-32-PIC-NEXT: calll y@PLT +; LINUX-32-PIC-NEXT: calll y@PLT +; LINUX-32-PIC-NEXT: calll y@PLT +; LINUX-32-PIC-NEXT: calll y@PLT +; LINUX-32-PIC-NEXT: addl $8, %esp +; LINUX-32-PIC-NEXT: popl %ebx ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: dcallee: @@ -10097,7 +10904,12 @@ ; ; LINUX-32-PIC-LABEL: address: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $callee, %eax +; LINUX-32-PIC-NEXT: calll .L133$pb +; LINUX-32-PIC-NEXT: .L133$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp133: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp133-.L133$pb), %eax +; LINUX-32-PIC-NEXT: movl callee@GOT(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: address: @@ -10157,7 +10969,12 @@ ; ; LINUX-32-PIC-LABEL: laddress: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $lcallee, %eax +; LINUX-32-PIC-NEXT: calll .L134$pb +; LINUX-32-PIC-NEXT: .L134$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp134: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp134-.L134$pb), %eax +; LINUX-32-PIC-NEXT: movl lcallee@GOT(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: laddress: @@ -10215,7 +11032,12 @@ ; ; LINUX-32-PIC-LABEL: daddress: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: movl $dcallee, %eax +; LINUX-32-PIC-NEXT: calll 
.L135$pb +; LINUX-32-PIC-NEXT: .L135$pb: +; LINUX-32-PIC-NEXT: popl %eax +; LINUX-32-PIC-NEXT: .Ltmp135: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp135-.L135$pb), %eax +; LINUX-32-PIC-NEXT: leal dcallee@GOTOFF(%eax), %eax ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: daddress: @@ -10279,10 +11101,17 @@ ; ; LINUX-32-PIC-LABEL: caller: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: subl $12, %esp -; LINUX-32-PIC-NEXT: calll callee -; LINUX-32-PIC-NEXT: calll callee -; LINUX-32-PIC-NEXT: addl $12, %esp +; LINUX-32-PIC-NEXT: pushl %ebx +; LINUX-32-PIC-NEXT: subl $8, %esp +; LINUX-32-PIC-NEXT: calll .L136$pb +; LINUX-32-PIC-NEXT: .L136$pb: +; LINUX-32-PIC-NEXT: popl %ebx +; LINUX-32-PIC-NEXT: .Ltmp136: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp136-.L136$pb), %ebx +; LINUX-32-PIC-NEXT: calll callee@PLT +; LINUX-32-PIC-NEXT: calll callee@PLT +; LINUX-32-PIC-NEXT: addl $8, %esp +; LINUX-32-PIC-NEXT: popl %ebx ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: caller: @@ -10366,10 +11195,17 @@ ; ; LINUX-32-PIC-LABEL: dcaller: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: subl $12, %esp +; LINUX-32-PIC-NEXT: pushl %ebx +; LINUX-32-PIC-NEXT: subl $8, %esp +; LINUX-32-PIC-NEXT: calll .L137$pb +; LINUX-32-PIC-NEXT: .L137$pb: +; LINUX-32-PIC-NEXT: popl %ebx +; LINUX-32-PIC-NEXT: .Ltmp137: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp137-.L137$pb), %ebx ; LINUX-32-PIC-NEXT: calll dcallee ; LINUX-32-PIC-NEXT: calll dcallee -; LINUX-32-PIC-NEXT: addl $12, %esp +; LINUX-32-PIC-NEXT: addl $8, %esp +; LINUX-32-PIC-NEXT: popl %ebx ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: dcaller: @@ -10453,10 +11289,17 @@ ; ; LINUX-32-PIC-LABEL: lcaller: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: subl $12, %esp -; LINUX-32-PIC-NEXT: calll lcallee -; LINUX-32-PIC-NEXT: calll lcallee -; LINUX-32-PIC-NEXT: addl $12, %esp +; LINUX-32-PIC-NEXT: pushl %ebx +; LINUX-32-PIC-NEXT: subl $8, %esp +; LINUX-32-PIC-NEXT: calll .L138$pb 
+; LINUX-32-PIC-NEXT: .L138$pb: +; LINUX-32-PIC-NEXT: popl %ebx +; LINUX-32-PIC-NEXT: .Ltmp138: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp138-.L138$pb), %ebx +; LINUX-32-PIC-NEXT: calll lcallee@PLT +; LINUX-32-PIC-NEXT: calll lcallee@PLT +; LINUX-32-PIC-NEXT: addl $8, %esp +; LINUX-32-PIC-NEXT: popl %ebx ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: lcaller: @@ -10538,9 +11381,16 @@ ; ; LINUX-32-PIC-LABEL: tailcaller: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: subl $12, %esp -; LINUX-32-PIC-NEXT: calll callee -; LINUX-32-PIC-NEXT: addl $12, %esp +; LINUX-32-PIC-NEXT: pushl %ebx +; LINUX-32-PIC-NEXT: subl $8, %esp +; LINUX-32-PIC-NEXT: calll .L139$pb +; LINUX-32-PIC-NEXT: .L139$pb: +; LINUX-32-PIC-NEXT: popl %ebx +; LINUX-32-PIC-NEXT: .Ltmp139: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp139-.L139$pb), %ebx +; LINUX-32-PIC-NEXT: calll callee@PLT +; LINUX-32-PIC-NEXT: addl $8, %esp +; LINUX-32-PIC-NEXT: popl %ebx ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: tailcaller: @@ -10614,9 +11464,16 @@ ; ; LINUX-32-PIC-LABEL: dtailcaller: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: subl $12, %esp +; LINUX-32-PIC-NEXT: pushl %ebx +; LINUX-32-PIC-NEXT: subl $8, %esp +; LINUX-32-PIC-NEXT: calll .L140$pb +; LINUX-32-PIC-NEXT: .L140$pb: +; LINUX-32-PIC-NEXT: popl %ebx +; LINUX-32-PIC-NEXT: .Ltmp140: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp140-.L140$pb), %ebx ; LINUX-32-PIC-NEXT: calll dcallee -; LINUX-32-PIC-NEXT: addl $12, %esp +; LINUX-32-PIC-NEXT: addl $8, %esp +; LINUX-32-PIC-NEXT: popl %ebx ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: dtailcaller: @@ -10690,9 +11547,16 @@ ; ; LINUX-32-PIC-LABEL: ltailcaller: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: subl $12, %esp -; LINUX-32-PIC-NEXT: calll lcallee -; LINUX-32-PIC-NEXT: addl $12, %esp +; LINUX-32-PIC-NEXT: pushl %ebx +; LINUX-32-PIC-NEXT: subl $8, %esp +; LINUX-32-PIC-NEXT: calll .L141$pb +; LINUX-32-PIC-NEXT: .L141$pb: +; 
LINUX-32-PIC-NEXT: popl %ebx +; LINUX-32-PIC-NEXT: .Ltmp141: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp141-.L141$pb), %ebx +; LINUX-32-PIC-NEXT: calll lcallee@PLT +; LINUX-32-PIC-NEXT: addl $8, %esp +; LINUX-32-PIC-NEXT: popl %ebx ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: ltailcaller: @@ -10768,10 +11632,20 @@ ; ; LINUX-32-PIC-LABEL: icaller: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: subl $12, %esp -; LINUX-32-PIC-NEXT: calll *ifunc -; LINUX-32-PIC-NEXT: calll *ifunc -; LINUX-32-PIC-NEXT: addl $12, %esp +; LINUX-32-PIC-NEXT: pushl %ebx +; LINUX-32-PIC-NEXT: pushl %esi +; LINUX-32-PIC-NEXT: pushl %eax +; LINUX-32-PIC-NEXT: calll .L142$pb +; LINUX-32-PIC-NEXT: .L142$pb: +; LINUX-32-PIC-NEXT: popl %ebx +; LINUX-32-PIC-NEXT: .Ltmp142: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp142-.L142$pb), %ebx +; LINUX-32-PIC-NEXT: movl ifunc@GOT(%ebx), %esi +; LINUX-32-PIC-NEXT: calll *(%esi) +; LINUX-32-PIC-NEXT: calll *(%esi) +; LINUX-32-PIC-NEXT: addl $4, %esp +; LINUX-32-PIC-NEXT: popl %esi +; LINUX-32-PIC-NEXT: popl %ebx ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: icaller: @@ -10870,10 +11744,20 @@ ; ; LINUX-32-PIC-LABEL: dicaller: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: subl $12, %esp -; LINUX-32-PIC-NEXT: calll *difunc -; LINUX-32-PIC-NEXT: calll *difunc -; LINUX-32-PIC-NEXT: addl $12, %esp +; LINUX-32-PIC-NEXT: pushl %ebx +; LINUX-32-PIC-NEXT: pushl %esi +; LINUX-32-PIC-NEXT: pushl %eax +; LINUX-32-PIC-NEXT: calll .L143$pb +; LINUX-32-PIC-NEXT: .L143$pb: +; LINUX-32-PIC-NEXT: popl %ebx +; LINUX-32-PIC-NEXT: .Ltmp143: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp143-.L143$pb), %ebx +; LINUX-32-PIC-NEXT: movl difunc@GOT(%ebx), %esi +; LINUX-32-PIC-NEXT: calll *(%esi) +; LINUX-32-PIC-NEXT: calll *(%esi) +; LINUX-32-PIC-NEXT: addl $4, %esp +; LINUX-32-PIC-NEXT: popl %esi +; LINUX-32-PIC-NEXT: popl %ebx ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: dicaller: @@ -10965,10 +11849,17 @@ ; ; 
LINUX-32-PIC-LABEL: licaller: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: subl $12, %esp -; LINUX-32-PIC-NEXT: calll *lifunc -; LINUX-32-PIC-NEXT: calll *lifunc -; LINUX-32-PIC-NEXT: addl $12, %esp +; LINUX-32-PIC-NEXT: pushl %ebx +; LINUX-32-PIC-NEXT: subl $8, %esp +; LINUX-32-PIC-NEXT: calll .L144$pb +; LINUX-32-PIC-NEXT: .L144$pb: +; LINUX-32-PIC-NEXT: popl %ebx +; LINUX-32-PIC-NEXT: .Ltmp144: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp144-.L144$pb), %ebx +; LINUX-32-PIC-NEXT: calll *lifunc@GOTOFF(%ebx) +; LINUX-32-PIC-NEXT: calll *lifunc@GOTOFF(%ebx) +; LINUX-32-PIC-NEXT: addl $8, %esp +; LINUX-32-PIC-NEXT: popl %ebx ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: licaller: @@ -11059,10 +11950,20 @@ ; ; LINUX-32-PIC-LABEL: itailcaller: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: subl $12, %esp -; LINUX-32-PIC-NEXT: calll *ifunc -; LINUX-32-PIC-NEXT: calll *ifunc -; LINUX-32-PIC-NEXT: addl $12, %esp +; LINUX-32-PIC-NEXT: pushl %ebx +; LINUX-32-PIC-NEXT: pushl %esi +; LINUX-32-PIC-NEXT: pushl %eax +; LINUX-32-PIC-NEXT: calll .L145$pb +; LINUX-32-PIC-NEXT: .L145$pb: +; LINUX-32-PIC-NEXT: popl %ebx +; LINUX-32-PIC-NEXT: .Ltmp145: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp145-.L145$pb), %ebx +; LINUX-32-PIC-NEXT: movl ifunc@GOT(%ebx), %esi +; LINUX-32-PIC-NEXT: calll *(%esi) +; LINUX-32-PIC-NEXT: calll *(%esi) +; LINUX-32-PIC-NEXT: addl $4, %esp +; LINUX-32-PIC-NEXT: popl %esi +; LINUX-32-PIC-NEXT: popl %ebx ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: itailcaller: @@ -11159,9 +12060,17 @@ ; ; LINUX-32-PIC-LABEL: ditailcaller: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: subl $12, %esp -; LINUX-32-PIC-NEXT: calll *difunc -; LINUX-32-PIC-NEXT: addl $12, %esp +; LINUX-32-PIC-NEXT: pushl %ebx +; LINUX-32-PIC-NEXT: subl $8, %esp +; LINUX-32-PIC-NEXT: calll .L146$pb +; LINUX-32-PIC-NEXT: .L146$pb: +; LINUX-32-PIC-NEXT: popl %ebx +; LINUX-32-PIC-NEXT: .Ltmp146: +; LINUX-32-PIC-NEXT: addl 
$_GLOBAL_OFFSET_TABLE_+(.Ltmp146-.L146$pb), %ebx +; LINUX-32-PIC-NEXT: movl difunc@GOT(%ebx), %eax +; LINUX-32-PIC-NEXT: calll *(%eax) +; LINUX-32-PIC-NEXT: addl $8, %esp +; LINUX-32-PIC-NEXT: popl %ebx ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: ditailcaller: @@ -11240,9 +12149,16 @@ ; ; LINUX-32-PIC-LABEL: litailcaller: ; LINUX-32-PIC: # %bb.0: # %entry -; LINUX-32-PIC-NEXT: subl $12, %esp -; LINUX-32-PIC-NEXT: calll *lifunc -; LINUX-32-PIC-NEXT: addl $12, %esp +; LINUX-32-PIC-NEXT: pushl %ebx +; LINUX-32-PIC-NEXT: subl $8, %esp +; LINUX-32-PIC-NEXT: calll .L147$pb +; LINUX-32-PIC-NEXT: .L147$pb: +; LINUX-32-PIC-NEXT: popl %ebx +; LINUX-32-PIC-NEXT: .Ltmp147: +; LINUX-32-PIC-NEXT: addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp147-.L147$pb), %ebx +; LINUX-32-PIC-NEXT: calll *lifunc@GOTOFF(%ebx) +; LINUX-32-PIC-NEXT: addl $8, %esp +; LINUX-32-PIC-NEXT: popl %ebx ; LINUX-32-PIC-NEXT: retl ; ; LINUX-64-PIC-LABEL: litailcaller: Index: test/CodeGen/X86/avx512-hadd-hsub.ll =================================================================== --- test/CodeGen/X86/avx512-hadd-hsub.ll +++ test/CodeGen/X86/avx512-hadd-hsub.ll @@ -6,7 +6,7 @@ ; KNL-LABEL: hadd_16: ; KNL: # %bb.0: ; KNL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; KNL-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; KNL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; KNL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; KNL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; KNL-NEXT: vmovd %xmm0, %eax @@ -15,7 +15,7 @@ ; SKX-LABEL: hadd_16: ; SKX: # %bb.0: ; SKX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; SKX-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; SKX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; SKX-NEXT: vmovd %xmm0, %eax @@ -33,7 +33,7 @@ ; KNL-LABEL: hsub_16: ; KNL: # %bb.0: ; KNL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; KNL-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; KNL-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; KNL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; KNL-NEXT: vpsubd %xmm1, 
%xmm0, %xmm0 ; KNL-NEXT: vmovd %xmm0, %eax @@ -42,7 +42,7 @@ ; SKX-LABEL: hsub_16: ; SKX: # %bb.0: ; SKX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; SKX-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; SKX-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; SKX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; SKX-NEXT: vpsubd %xmm1, %xmm0, %xmm0 ; SKX-NEXT: vmovd %xmm0, %eax @@ -60,7 +60,7 @@ ; KNL-LABEL: fhadd_16: ; KNL: # %bb.0: ; KNL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0 +; KNL-NEXT: vaddps %xmm1, %xmm0, %xmm0 ; KNL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; KNL-NEXT: vaddps %xmm1, %xmm0, %xmm0 ; KNL-NEXT: retq @@ -68,7 +68,7 @@ ; SKX-LABEL: fhadd_16: ; SKX: # %bb.0: ; SKX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0 +; SKX-NEXT: vaddps %xmm1, %xmm0, %xmm0 ; SKX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; SKX-NEXT: vaddps %xmm1, %xmm0, %xmm0 ; SKX-NEXT: vzeroupper @@ -85,7 +85,7 @@ ; KNL-LABEL: fhsub_16: ; KNL: # %bb.0: ; KNL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0 +; KNL-NEXT: vaddps %xmm1, %xmm0, %xmm0 ; KNL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; KNL-NEXT: vsubps %xmm1, %xmm0, %xmm0 ; KNL-NEXT: retq @@ -93,7 +93,7 @@ ; SKX-LABEL: fhsub_16: ; SKX: # %bb.0: ; SKX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0 +; SKX-NEXT: vaddps %xmm1, %xmm0, %xmm0 ; SKX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; SKX-NEXT: vsubps %xmm1, %xmm0, %xmm0 ; SKX-NEXT: vzeroupper Index: test/CodeGen/X86/avx512-shuffles/partial_permute.ll =================================================================== --- test/CodeGen/X86/avx512-shuffles/partial_permute.ll +++ test/CodeGen/X86/avx512-shuffles/partial_permute.ll @@ -2173,8 +2173,9 @@ define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask7(<8 x i64> %vec, <4 x i64> %vec2, <4 x i64> %mask) { ; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mask7: ; CHECK: # %bb.0: -; CHECK-NEXT: 
vextracti64x4 $1, %zmm0, %ymm3 -; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = [2,0,3,4] +; CHECK-NEXT: vextracti32x4 $2, %zmm0, %xmm3 +; CHECK-NEXT: vpbroadcastq %xmm3, %ymm3 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm4 = [2,0,3,7] ; CHECK-NEXT: vpermi2q %ymm3, %ymm0, %ymm4 ; CHECK-NEXT: vptestnmq %ymm2, %ymm2, %k1 ; CHECK-NEXT: vpblendmq %ymm4, %ymm1, %ymm0 {%k1} @@ -2188,8 +2189,9 @@ define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask7(<8 x i64> %vec, <4 x i64> %mask) { ; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mask7: ; CHECK: # %bb.0: -; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm3 -; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [2,0,3,4] +; CHECK-NEXT: vextracti32x4 $2, %zmm0, %xmm2 +; CHECK-NEXT: vpbroadcastq %xmm2, %ymm3 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [2,0,3,7] ; CHECK-NEXT: vptestnmq %ymm1, %ymm1, %k1 ; CHECK-NEXT: vpermi2q %ymm3, %ymm0, %ymm2 {%k1} {z} ; CHECK-NEXT: vmovdqa %ymm2, %ymm0 @@ -3121,7 +3123,7 @@ define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mask1(<16 x float> %vec, <4 x float> %vec2, <4 x float> %mask) { ; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mask1: ; CHECK: # %bb.0: -; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3 +; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm3 ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 ; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,2] ; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm3[0],xmm0[1],xmm3[2],xmm0[3] @@ -3139,7 +3141,7 @@ define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mask1(<16 x float> %vec, <4 x float> %mask) { ; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mask1: ; CHECK: # %bb.0: -; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm2 ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 ; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,2] ; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3] @@ -3190,7 +3192,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7] ; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm1 -; 
CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm0 ; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,3,3] ; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3] ; CHECK-NEXT: vzeroupper @@ -3203,7 +3205,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps {{.*#+}} ymm3 = [0,2,4,6,4,6,6,7] ; CHECK-NEXT: vpermps %ymm0, %ymm3, %ymm3 -; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm0 ; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,3,3] ; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2],xmm3[3] ; CHECK-NEXT: vxorps %xmm3, %xmm3, %xmm3 @@ -3222,7 +3224,7 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7] ; CHECK-NEXT: vpermps %ymm0, %ymm2, %ymm2 -; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm0 ; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,3,3] ; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3] ; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2 @@ -3804,8 +3806,9 @@ define <4 x double> @test_8xdouble_to_4xdouble_perm_mask3(<8 x double> %vec) { ; CHECK-LABEL: test_8xdouble_to_4xdouble_perm_mask3: ; CHECK: # %bb.0: -; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 -; CHECK-NEXT: vmovapd {{.*#+}} ymm1 = [0,2,1,4] +; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm1 +; CHECK-NEXT: vbroadcastsd %xmm1, %ymm2 +; CHECK-NEXT: vmovapd {{.*#+}} ymm1 = [0,2,1,7] ; CHECK-NEXT: vpermi2pd %ymm2, %ymm0, %ymm1 ; CHECK-NEXT: vmovapd %ymm1, %ymm0 ; CHECK-NEXT: retq @@ -3815,8 +3818,9 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask3(<8 x double> %vec, <4 x double> %vec2, <4 x double> %mask) { ; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask3: ; CHECK: # %bb.0: -; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3 -; CHECK-NEXT: vmovapd {{.*#+}} ymm4 = [0,2,1,4] +; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm3 +; CHECK-NEXT: vbroadcastsd %xmm3, %ymm3 +; CHECK-NEXT: vmovapd {{.*#+}} ymm4 = [0,2,1,7] ; 
CHECK-NEXT: vpermi2pd %ymm3, %ymm0, %ymm4 ; CHECK-NEXT: vxorpd %xmm0, %xmm0, %xmm0 ; CHECK-NEXT: vcmpeqpd %ymm0, %ymm2, %k1 @@ -3831,8 +3835,9 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask3(<8 x double> %vec, <4 x double> %mask) { ; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask3: ; CHECK: # %bb.0: -; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3 -; CHECK-NEXT: vmovapd {{.*#+}} ymm2 = [0,2,1,4] +; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm2 +; CHECK-NEXT: vbroadcastsd %xmm2, %ymm3 +; CHECK-NEXT: vmovapd {{.*#+}} ymm2 = [0,2,1,7] ; CHECK-NEXT: vxorpd %xmm4, %xmm4, %xmm4 ; CHECK-NEXT: vcmpeqpd %ymm4, %ymm1, %k1 ; CHECK-NEXT: vpermi2pd %ymm3, %ymm0, %ymm2 {%k1} {z} @@ -3846,12 +3851,12 @@ define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask4(<8 x double> %vec, <4 x double> %vec2, <4 x double> %mask) { ; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask4: ; CHECK: # %bb.0: -; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3 -; CHECK-NEXT: vmovapd {{.*#+}} ymm4 = [1,1,5,5] -; CHECK-NEXT: vpermi2pd %ymm3, %ymm0, %ymm4 -; CHECK-NEXT: vxorpd %xmm0, %xmm0, %xmm0 -; CHECK-NEXT: vcmpeqpd %ymm0, %ymm2, %k1 -; CHECK-NEXT: vblendmpd %ymm4, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm3 +; CHECK-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm3[1] +; CHECK-NEXT: vxorpd %xmm3, %xmm3, %xmm3 +; CHECK-NEXT: vcmpeqpd %ymm3, %ymm2, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[0,0,1,1] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 ; CHECK-NEXT: retq %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> %cmp = fcmp oeq <4 x double> %mask, zeroinitializer @@ -3862,12 +3867,11 @@ define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask4(<8 x double> %vec, <4 x double> %mask) { ; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask4: ; CHECK: # %bb.0: -; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm3 -; CHECK-NEXT: vmovapd {{.*#+}} ymm2 = [1,1,5,5] -; CHECK-NEXT: vxorpd %xmm4, %xmm4, %xmm4 -; CHECK-NEXT: 
vcmpeqpd %ymm4, %ymm1, %k1 -; CHECK-NEXT: vpermi2pd %ymm3, %ymm0, %ymm2 {%k1} {z} -; CHECK-NEXT: vmovapd %ymm2, %ymm0 +; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm2 +; CHECK-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1] +; CHECK-NEXT: vxorpd %xmm2, %xmm2, %xmm2 +; CHECK-NEXT: vcmpeqpd %ymm2, %ymm1, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,1,1] ; CHECK-NEXT: retq %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> %cmp = fcmp oeq <4 x double> %mask, zeroinitializer Index: test/CodeGen/X86/bool-ext-inc.ll =================================================================== --- test/CodeGen/X86/bool-ext-inc.ll +++ test/CodeGen/X86/bool-ext-inc.ll @@ -102,3 +102,28 @@ ret <4 x i32> %add } +define <4 x i32> @sextbool_add_vector(<4 x i32> %cmp1, <4 x i32> %cmp2, <4 x i32> %x) { +; CHECK-LABEL: sextbool_add_vector: +; CHECK: # %bb.0: +; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0 +; CHECK-NEXT: retq + %c = icmp eq <4 x i32> %cmp1, %cmp2 + %b = sext <4 x i1> %c to <4 x i32> + %s = add <4 x i32> %x, %b + ret <4 x i32> %s +} + +define <4 x i32> @zextbool_sub_vector(<4 x i32> %cmp1, <4 x i32> %cmp2, <4 x i32> %x) { +; CHECK-LABEL: zextbool_sub_vector: +; CHECK: # %bb.0: +; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 +; CHECK-NEXT: vpsrld $31, %xmm0, %xmm0 +; CHECK-NEXT: vpsubd %xmm0, %xmm2, %xmm0 +; CHECK-NEXT: retq + %c = icmp eq <4 x i32> %cmp1, %cmp2 + %b = zext <4 x i1> %c to <4 x i32> + %s = sub <4 x i32> %x, %b + ret <4 x i32> %s +} + Index: test/CodeGen/X86/combine-sbb.ll =================================================================== --- test/CodeGen/X86/combine-sbb.ll +++ test/CodeGen/X86/combine-sbb.ll @@ -149,4 +149,61 @@ %12 = zext i1 %11 to i8 ret i8 %12 } + +define i32 @PR40483_sub1(i32*, i32) nounwind { +; X86-LABEL: PR40483_sub1: +; X86: # %bb.0: +; X86-NEXT: pushl %esi +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: movl (%ecx), %edx 
+; X86-NEXT: movl %edx, %esi +; X86-NEXT: subl %eax, %esi +; X86-NEXT: movl %esi, (%ecx) +; X86-NEXT: subl %edx, %eax +; X86-NEXT: addl %esi, %eax +; X86-NEXT: popl %esi +; X86-NEXT: retl +; +; X64-LABEL: PR40483_sub1: +; X64: # %bb.0: +; X64-NEXT: movl (%rdi), %ecx +; X64-NEXT: movl %ecx, %eax +; X64-NEXT: subl %esi, %eax +; X64-NEXT: movl %eax, (%rdi) +; X64-NEXT: subl %ecx, %esi +; X64-NEXT: addl %esi, %eax +; X64-NEXT: retq + %3 = load i32, i32* %0, align 4 + %4 = tail call { i8, i32 } @llvm.x86.subborrow.32(i8 0, i32 %3, i32 %1) + %5 = extractvalue { i8, i32 } %4, 1 + store i32 %5, i32* %0, align 4 + %6 = sub i32 %1, %3 + %7 = add i32 %6, %5 + ret i32 %7 +} + +define i32 @PR40483_sub2(i32*, i32) nounwind { +; X86-LABEL: PR40483_sub2: +; X86: # %bb.0: +; X86-NEXT: movl {{[0-9]+}}(%esp), %eax +; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X86-NEXT: subl %eax, (%ecx) +; X86-NEXT: xorl %eax, %eax +; X86-NEXT: retl +; +; X64-LABEL: PR40483_sub2: +; X64: # %bb.0: +; X64-NEXT: subl %esi, (%rdi) +; X64-NEXT: xorl %eax, %eax +; X64-NEXT: retq + %3 = load i32, i32* %0, align 4 + %4 = sub i32 %3, %1 + %5 = tail call { i8, i32 } @llvm.x86.subborrow.32(i8 0, i32 %3, i32 %1) + %6 = extractvalue { i8, i32 } %5, 1 + store i32 %6, i32* %0, align 4 + %7 = sub i32 %4, %6 + ret i32 %7 +} + declare { i8, i32 } @llvm.x86.subborrow.32(i8, i32, i32) Index: test/CodeGen/X86/madd.ll =================================================================== --- test/CodeGen/X86/madd.ll +++ test/CodeGen/X86/madd.ll @@ -304,9 +304,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: 
vmovd %xmm0, %eax @@ -497,9 +497,9 @@ ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512F-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512F-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512F-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512F-NEXT: vmovd %xmm0, %eax @@ -526,9 +526,9 @@ ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512BW-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vmovd %xmm0, %eax @@ -888,9 +888,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -1101,9 +1101,9 @@ ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512F-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512F-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = 
xmm0[1,1,2,3] ; AVX512F-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512F-NEXT: vmovd %xmm0, %eax @@ -1131,9 +1131,9 @@ ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512BW-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vmovd %xmm0, %eax @@ -1518,9 +1518,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -1777,9 +1777,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax Index: test/CodeGen/X86/min-legal-vector-width.ll =================================================================== --- test/CodeGen/X86/min-legal-vector-width.ll +++ test/CodeGen/X86/min-legal-vector-width.ll @@ -253,9 +253,9 @@ ; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 
-; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; CHECK-NEXT: vmovd %xmm0, %eax @@ -379,9 +379,9 @@ ; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 -; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; CHECK-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; CHECK-NEXT: vmovd %xmm0, %eax Index: test/CodeGen/X86/sad.ll =================================================================== --- test/CodeGen/X86/sad.ll +++ test/CodeGen/X86/sad.ll @@ -103,9 +103,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -366,9 +366,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd 
%xmm0, %eax @@ -981,9 +981,9 @@ ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512F-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512F-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512F-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512F-NEXT: vmovd %xmm0, %eax @@ -1010,9 +1010,9 @@ ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512BW-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vmovd %xmm0, %eax @@ -1467,9 +1467,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -1566,9 +1566,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] 
; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax Index: test/CodeGen/X86/sse-schedule.ll =================================================================== --- test/CodeGen/X86/sse-schedule.ll +++ test/CodeGen/X86/sse-schedule.ll @@ -1171,15 +1171,15 @@ ; ; BTVER2-SSE-LABEL: test_cvtsi2ss: ; BTVER2-SSE: # %bb.0: -; BTVER2-SSE-NEXT: cvtsi2ssl (%rsi), %xmm0 # sched: [14:1.00] -; BTVER2-SSE-NEXT: cvtsi2ssl %edi, %xmm1 # sched: [9:1.00] +; BTVER2-SSE-NEXT: cvtsi2ssl (%rsi), %xmm0 # sched: [9:1.00] +; BTVER2-SSE-NEXT: cvtsi2ssl %edi, %xmm1 # sched: [10:1.00] ; BTVER2-SSE-NEXT: addss %xmm1, %xmm0 # sched: [3:1.00] ; BTVER2-SSE-NEXT: retq # sched: [4:1.00] ; ; BTVER2-LABEL: test_cvtsi2ss: ; BTVER2: # %bb.0: -; BTVER2-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [9:1.00] -; BTVER2-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [14:1.00] +; BTVER2-NEXT: vcvtsi2ssl %edi, %xmm0, %xmm0 # sched: [10:1.00] +; BTVER2-NEXT: vcvtsi2ssl (%rsi), %xmm1, %xmm1 # sched: [9:1.00] ; BTVER2-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; @@ -1311,15 +1311,15 @@ ; ; BTVER2-SSE-LABEL: test_cvtsi2ssq: ; BTVER2-SSE: # %bb.0: -; BTVER2-SSE-NEXT: cvtsi2ssq (%rsi), %xmm0 # sched: [14:1.00] -; BTVER2-SSE-NEXT: cvtsi2ssq %rdi, %xmm1 # sched: [9:1.00] +; BTVER2-SSE-NEXT: cvtsi2ssq (%rsi), %xmm0 # sched: [9:1.00] +; BTVER2-SSE-NEXT: cvtsi2ssq %rdi, %xmm1 # sched: [10:1.00] ; BTVER2-SSE-NEXT: addss %xmm1, %xmm0 # sched: [3:1.00] ; BTVER2-SSE-NEXT: retq # sched: [4:1.00] ; ; BTVER2-LABEL: test_cvtsi2ssq: ; BTVER2: # %bb.0: -; BTVER2-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [9:1.00] -; BTVER2-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [14:1.00] +; BTVER2-NEXT: vcvtsi2ssq %rdi, %xmm0, %xmm0 # sched: [10:1.00] +; BTVER2-NEXT: vcvtsi2ssq (%rsi), %xmm1, %xmm1 # sched: [9:1.00] ; BTVER2-NEXT: vaddss %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; Index: test/CodeGen/X86/sse2-schedule.ll 
=================================================================== --- test/CodeGen/X86/sse2-schedule.ll +++ test/CodeGen/X86/sse2-schedule.ll @@ -2608,15 +2608,15 @@ ; ; BTVER2-SSE-LABEL: test_cvtsi2sd: ; BTVER2-SSE: # %bb.0: -; BTVER2-SSE-NEXT: cvtsi2sdl (%rsi), %xmm0 # sched: [14:1.00] -; BTVER2-SSE-NEXT: cvtsi2sdl %edi, %xmm1 # sched: [9:1.00] +; BTVER2-SSE-NEXT: cvtsi2sdl (%rsi), %xmm0 # sched: [9:1.00] +; BTVER2-SSE-NEXT: cvtsi2sdl %edi, %xmm1 # sched: [10:1.00] ; BTVER2-SSE-NEXT: addsd %xmm1, %xmm0 # sched: [3:1.00] ; BTVER2-SSE-NEXT: retq # sched: [4:1.00] ; ; BTVER2-LABEL: test_cvtsi2sd: ; BTVER2: # %bb.0: -; BTVER2-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0 # sched: [9:1.00] -; BTVER2-NEXT: vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [14:1.00] +; BTVER2-NEXT: vcvtsi2sdl %edi, %xmm0, %xmm0 # sched: [10:1.00] +; BTVER2-NEXT: vcvtsi2sdl (%rsi), %xmm1, %xmm1 # sched: [9:1.00] ; BTVER2-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; @@ -2748,15 +2748,15 @@ ; ; BTVER2-SSE-LABEL: test_cvtsi2sdq: ; BTVER2-SSE: # %bb.0: -; BTVER2-SSE-NEXT: cvtsi2sdq (%rsi), %xmm0 # sched: [14:1.00] -; BTVER2-SSE-NEXT: cvtsi2sdq %rdi, %xmm1 # sched: [9:1.00] +; BTVER2-SSE-NEXT: cvtsi2sdq (%rsi), %xmm0 # sched: [9:1.00] +; BTVER2-SSE-NEXT: cvtsi2sdq %rdi, %xmm1 # sched: [10:1.00] ; BTVER2-SSE-NEXT: addsd %xmm1, %xmm0 # sched: [3:1.00] ; BTVER2-SSE-NEXT: retq # sched: [4:1.00] ; ; BTVER2-LABEL: test_cvtsi2sdq: ; BTVER2: # %bb.0: -; BTVER2-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 # sched: [9:1.00] -; BTVER2-NEXT: vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [14:1.00] +; BTVER2-NEXT: vcvtsi2sdq %rdi, %xmm0, %xmm0 # sched: [10:1.00] +; BTVER2-NEXT: vcvtsi2sdq (%rsi), %xmm1, %xmm1 # sched: [9:1.00] ; BTVER2-NEXT: vaddsd %xmm1, %xmm0, %xmm0 # sched: [3:1.00] ; BTVER2-NEXT: retq # sched: [4:1.00] ; Index: test/CodeGen/X86/swap.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/swap.ll @@ -0,0 +1,52 @@ +; NOTE: 
Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=haswell | FileCheck %s -check-prefix=NOAA +; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=haswell -combiner-global-alias-analysis=1 | FileCheck %s -check-prefix=AA + +declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) +declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) +declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) + +%struct.S = type { [16 x i8] } + +; Function Attrs: nounwind uwtable +define dso_local void @_Z4SwapP1SS0_(%struct.S* nocapture %a, %struct.S* nocapture %b) local_unnamed_addr { +; NOAA-LABEL: _Z4SwapP1SS0_: +; NOAA: # %bb.0: # %entry +; NOAA-NEXT: vmovups (%rdi), %xmm0 +; NOAA-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) +; NOAA-NEXT: vmovups (%rsi), %xmm0 +; NOAA-NEXT: vmovups %xmm0, (%rdi) +; NOAA-NEXT: vmovaps -{{[0-9]+}}(%rsp), %xmm0 +; NOAA-NEXT: vmovups %xmm0, (%rsi) +; NOAA-NEXT: retq +; +; AA-LABEL: _Z4SwapP1SS0_: +; AA: # %bb.0: # %entry +; AA-NEXT: vmovups (%rdi), %xmm0 +; AA-NEXT: vmovups (%rsi), %xmm1 +; AA-NEXT: vmovups %xmm1, (%rdi) +; AA-NEXT: vmovups %xmm0, (%rsi) +; AA-NEXT: retq +entry: + %tmp.sroa.0 = alloca [16 x i8], align 1 + %tmp.sroa.0.0..sroa_idx6 = getelementptr inbounds [16 x i8], [16 x i8]* %tmp.sroa.0, i64 0, i64 0 + call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %tmp.sroa.0.0..sroa_idx6) + %tmp.sroa.0.0..sroa_idx1 = getelementptr inbounds %struct.S, %struct.S* %a, i64 0, i32 0, i64 0 + call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 1 %tmp.sroa.0.0..sroa_idx6, i8* align 1 %tmp.sroa.0.0..sroa_idx1, i64 16, i1 false) + %0 = getelementptr inbounds %struct.S, %struct.S* %b, i64 0, i32 0, i64 0 + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %tmp.sroa.0.0..sroa_idx1, i8* align 1 %0, i64 16, i1 false), !tbaa.struct !2 + call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %0, i8* nonnull align 1 
%tmp.sroa.0.0..sroa_idx6, i64 16, i1 false) + call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %tmp.sroa.0.0..sroa_idx6) + ret void +} + +!llvm.module.flags = !{!0} +!llvm.ident = !{!1} + +!0 = !{i32 1, !"wchar_size", i32 4} +!1 = !{!"clang version 9.0.0 (trunk 352631) (llvm/trunk 352632)"} +!2 = !{i64 0, i64 16, !3} +!3 = !{!4, !4, i64 0} +!4 = !{!"omnipotent char", !5, i64 0} +!5 = !{!"Simple C++ TBAA"} + Index: test/CodeGen/X86/vector-reduce-add-widen.ll =================================================================== --- test/CodeGen/X86/vector-reduce-add-widen.ll +++ test/CodeGen/X86/vector-reduce-add-widen.ll @@ -117,7 +117,7 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %xmm0, %rax @@ -180,7 +180,7 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %xmm0, %rax @@ -346,9 +346,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -417,9 +417,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: 
vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -657,11 +657,11 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -741,11 +741,11 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -1137,13 +1137,13 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 +; 
AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 -; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1 ; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpextrb $0, %xmm0, %eax @@ -1253,13 +1253,13 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 -; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1 ; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpextrb $0, %xmm0, %eax Index: test/CodeGen/X86/vector-reduce-add.ll =================================================================== --- test/CodeGen/X86/vector-reduce-add.ll +++ test/CodeGen/X86/vector-reduce-add.ll @@ -117,7 +117,7 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %xmm0, %rax @@ -180,7 +180,7 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; 
AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %xmm0, %rax @@ -346,9 +346,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -417,9 +417,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -655,11 +655,11 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -739,11 +739,11 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddw %xmm1, 
%xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpaddw %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512-NEXT: vpaddw %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -1127,13 +1127,13 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 -; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1 ; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpextrb $0, %xmm0, %eax @@ -1243,13 +1243,13 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 -; AVX512-NEXT: vpaddb %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1 ; AVX512-NEXT: vpaddb %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpextrb $0, %xmm0, %eax Index: test/CodeGen/X86/vector-reduce-and-widen.ll 
=================================================================== --- test/CodeGen/X86/vector-reduce-and-widen.ll +++ test/CodeGen/X86/vector-reduce-and-widen.ll @@ -115,7 +115,7 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %xmm0, %rax @@ -172,7 +172,7 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %xmm0, %rax @@ -336,9 +336,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -401,9 +401,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -639,11 +639,11 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpandq %zmm1, 
%zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -717,11 +717,11 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -1111,13 +1111,13 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1 ; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpextrb $0, %xmm0, %eax @@ -1221,13 +1221,13 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; 
AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1 ; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpextrb $0, %xmm0, %eax Index: test/CodeGen/X86/vector-reduce-and.ll =================================================================== --- test/CodeGen/X86/vector-reduce-and.ll +++ test/CodeGen/X86/vector-reduce-and.ll @@ -115,7 +115,7 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %xmm0, %rax @@ -172,7 +172,7 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %xmm0, %rax @@ -336,9 +336,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; 
AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -401,9 +401,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpandd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -637,11 +637,11 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -715,11 +715,11 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -1101,13 +1101,13 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1 ; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpextrb $0, %xmm0, %eax @@ -1211,13 +1211,13 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 -; AVX512-NEXT: vpandq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1 ; AVX512-NEXT: vpand %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpextrb $0, %xmm0, %eax Index: test/CodeGen/X86/vector-reduce-fadd-fast.ll =================================================================== --- test/CodeGen/X86/vector-reduce-fadd-fast.ll +++ test/CodeGen/X86/vector-reduce-fadd-fast.ll @@ -168,9 +168,9 @@ ; AVX512-NEXT: vextractf64x4 $1, %zmm1, %ymm0 ; AVX512-NEXT: vaddps %zmm0, %zmm1, %zmm0 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vaddps %xmm1, %xmm0, 
%xmm0 ; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper @@ -345,9 +345,9 @@ ; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper @@ -522,9 +522,9 @@ ; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper @@ -616,7 +616,7 @@ ; AVX512-NEXT: vextractf64x4 $1, %zmm1, %ymm0 ; AVX512-NEXT: vaddpd %zmm0, %zmm1, %zmm0 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper @@ -658,7 +658,7 @@ ; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper @@ -753,7 +753,7 @@ ; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 -; 
AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper @@ -795,7 +795,7 @@ ; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper @@ -890,7 +890,7 @@ ; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper @@ -932,7 +932,7 @@ ; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper Index: test/CodeGen/X86/vector-reduce-fmul-fast.ll =================================================================== --- test/CodeGen/X86/vector-reduce-fmul-fast.ll +++ test/CodeGen/X86/vector-reduce-fmul-fast.ll @@ -168,9 +168,9 @@ ; AVX512-NEXT: vextractf64x4 $1, %zmm1, %ymm0 ; AVX512-NEXT: vmulps %zmm0, %zmm1, %zmm0 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vmulps %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512-NEXT: vmulps %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper @@ -345,9 
+345,9 @@ ; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vmulps %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vmulps %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512-NEXT: vmulps %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper @@ -522,9 +522,9 @@ ; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vmulps %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vmulps %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] -; AVX512-NEXT: vmulps %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper @@ -616,7 +616,7 @@ ; AVX512-NEXT: vextractf64x4 $1, %zmm1, %ymm0 ; AVX512-NEXT: vmulpd %zmm0, %zmm1, %zmm0 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vmulpd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vmulpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; AVX512-NEXT: vmulpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper @@ -658,7 +658,7 @@ ; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vmulpd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vmulpd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vmulpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; AVX512-NEXT: vmulpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper @@ -753,7 +753,7 @@ ; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vmulpd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vmulpd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vmulpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; 
AVX512-NEXT: vmulpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper @@ -795,7 +795,7 @@ ; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vmulpd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vmulpd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vmulpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; AVX512-NEXT: vmulpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper @@ -890,7 +890,7 @@ ; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vmulpd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vmulpd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vmulpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; AVX512-NEXT: vmulpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper @@ -932,7 +932,7 @@ ; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vmulpd %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vmulpd %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vmulpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; AVX512-NEXT: vmulpd %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vzeroupper Index: test/CodeGen/X86/vector-reduce-mul-widen.ll =================================================================== --- test/CodeGen/X86/vector-reduce-mul-widen.ll +++ test/CodeGen/X86/vector-reduce-mul-widen.ll @@ -440,7 +440,7 @@ ; AVX512DQVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0 ; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0 +; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0 ; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0 ; AVX512DQVL-NEXT: vmovq %xmm0, %rax @@ -761,7 +761,7 @@ ; AVX512DQVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0 ; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0 +; AVX512DQVL-NEXT: vpmullq 
%xmm1, %xmm0, %xmm0 ; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0 ; AVX512DQVL-NEXT: vmovq %xmm0, %rax @@ -997,9 +997,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -1107,9 +1107,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -1347,11 +1347,11 @@ ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vmovd %xmm0, %eax @@ -1364,11 +1364,11 @@ ; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 ; AVX512BWVL-NEXT: 
vextracti128 $1, %ymm0, %xmm1 -; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BWVL-NEXT: vmovd %xmm0, %eax @@ -1480,11 +1480,11 @@ ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vmovd %xmm0, %eax @@ -1498,11 +1498,11 @@ ; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BWVL-NEXT: vmovd %xmm0, %eax @@ -3107,58 +3107,58 @@ ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: 
vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63] ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] -; AVX512BW-NEXT: vpmullw %zmm2, %zmm3, %zmm3 -; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX512BW-NEXT: vpandq %zmm2, %zmm3, %zmm3 +; AVX512BW-NEXT: vpmullw %zmm2, %zmm3, %zmm2 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX512BW-NEXT: vpandq %zmm3, %zmm2, %zmm2 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] ; AVX512BW-NEXT: vpunpcklbw 
{{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] ; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 -; AVX512BW-NEXT: vpackuswb %zmm3, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0 +; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0 ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 -; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] +; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63] -; AVX512BW-NEXT: vpmullw %zmm4, %zmm3, %zmm3 -; AVX512BW-NEXT: vpandq %zmm2, %zmm3, %zmm3 +; AVX512BW-NEXT: vpmullw %zmm4, %zmm2, %zmm2 +; AVX512BW-NEXT: vpandq %zmm3, %zmm2, %zmm2 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] ; AVX512BW-NEXT: 
vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] ; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 -; AVX512BW-NEXT: vpackuswb %zmm3, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0 +; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] +; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63] -; AVX512BW-NEXT: vpmullw %zmm4, %zmm3, 
%zmm3 -; AVX512BW-NEXT: vpandq %zmm2, %zmm3, %zmm3 +; AVX512BW-NEXT: vpmullw %zmm4, %zmm2, %zmm2 +; AVX512BW-NEXT: vpandq %zmm3, %zmm2, %zmm2 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] ; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 -; AVX512BW-NEXT: vpackuswb %zmm3, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0 +; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = 
zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] ; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0 ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,2,3] +; AVX512BW-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3] ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] -; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0],zmm0[0],zmm3[1],zmm0[1],zmm3[2],zmm0[2],zmm3[3],zmm0[3],zmm3[4],zmm0[4],zmm3[5],zmm0[5],zmm3[6],zmm0[6],zmm3[7],zmm0[7],zmm3[16],zmm0[16],zmm3[17],zmm0[17],zmm3[18],zmm0[18],zmm3[19],zmm0[19],zmm3[20],zmm0[20],zmm3[21],zmm0[21],zmm3[22],zmm0[22],zmm3[23],zmm0[23],zmm3[32],zmm0[32],zmm3[33],zmm0[33],zmm3[34],zmm0[34],zmm3[35],zmm0[35],zmm3[36],zmm0[36],zmm3[37],zmm0[37],zmm3[38],zmm0[38],zmm3[39],zmm0[39],zmm3[48],zmm0[48],zmm3[49],zmm0[49],zmm3[50],zmm0[50],zmm3[51],zmm0[51],zmm3[52],zmm0[52],zmm3[53],zmm0[53],zmm3[54],zmm0[54],zmm3[55],zmm0[55] -; AVX512BW-NEXT: vpmullw %zmm3, %zmm0, %zmm0 -; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm2 = 
zmm2[0],zmm0[0],zmm2[1],zmm0[1],zmm2[2],zmm0[2],zmm2[3],zmm0[3],zmm2[4],zmm0[4],zmm2[5],zmm0[5],zmm2[6],zmm0[6],zmm2[7],zmm0[7],zmm2[16],zmm0[16],zmm2[17],zmm0[17],zmm2[18],zmm0[18],zmm2[19],zmm0[19],zmm2[20],zmm0[20],zmm2[21],zmm0[21],zmm2[22],zmm0[22],zmm2[23],zmm0[23],zmm2[32],zmm0[32],zmm2[33],zmm0[33],zmm2[34],zmm0[34],zmm2[35],zmm0[35],zmm2[36],zmm0[36],zmm2[37],zmm0[37],zmm2[38],zmm0[38],zmm2[39],zmm0[39],zmm2[48],zmm0[48],zmm2[49],zmm0[49],zmm2[50],zmm0[50],zmm2[51],zmm0[51],zmm2[52],zmm0[52],zmm2[53],zmm0[53],zmm2[54],zmm0[54],zmm2[55],zmm0[55] +; AVX512BW-NEXT: vpmullw %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0 ; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm3 +; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm2 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] -; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0],zmm0[0],zmm3[1],zmm0[1],zmm3[2],zmm0[2],zmm3[3],zmm0[3],zmm3[4],zmm0[4],zmm3[5],zmm0[5],zmm3[6],zmm0[6],zmm3[7],zmm0[7],zmm3[16],zmm0[16],zmm3[17],zmm0[17],zmm3[18],zmm0[18],zmm3[19],zmm0[19],zmm3[20],zmm0[20],zmm3[21],zmm0[21],zmm3[22],zmm0[22],zmm3[23],zmm0[23],zmm3[32],zmm0[32],zmm3[33],zmm0[33],zmm3[34],zmm0[34],zmm3[35],zmm0[35],zmm3[36],zmm0[36],zmm3[37],zmm0[37],zmm3[38],zmm0[38],zmm3[39],zmm0[39],zmm3[48],zmm0[48],zmm3[49],zmm0[49],zmm3[50],zmm0[50],zmm3[51],zmm0[51],zmm3[52],zmm0[52],zmm3[53],zmm0[53],zmm3[54],zmm0[54],zmm3[55],zmm0[55] -; AVX512BW-NEXT: vpmullw %zmm3, %zmm0, %zmm0 -; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm2 = 
zmm2[0],zmm0[0],zmm2[1],zmm0[1],zmm2[2],zmm0[2],zmm2[3],zmm0[3],zmm2[4],zmm0[4],zmm2[5],zmm0[5],zmm2[6],zmm0[6],zmm2[7],zmm0[7],zmm2[16],zmm0[16],zmm2[17],zmm0[17],zmm2[18],zmm0[18],zmm2[19],zmm0[19],zmm2[20],zmm0[20],zmm2[21],zmm0[21],zmm2[22],zmm0[22],zmm2[23],zmm0[23],zmm2[32],zmm0[32],zmm2[33],zmm0[33],zmm2[34],zmm0[34],zmm2[35],zmm0[35],zmm2[36],zmm0[36],zmm2[37],zmm0[37],zmm2[38],zmm0[38],zmm2[39],zmm0[39],zmm2[48],zmm0[48],zmm2[49],zmm0[49],zmm2[50],zmm0[50],zmm2[51],zmm0[51],zmm2[52],zmm0[52],zmm2[53],zmm0[53],zmm2[54],zmm0[54],zmm2[55],zmm0[55] +; AVX512BW-NEXT: vpmullw %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0 ; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpsrlw $8, %xmm0, %xmm3 +; AVX512BW-NEXT: vpsrlw $8, %xmm0, %xmm2 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] -; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0],zmm0[0],zmm3[1],zmm0[1],zmm3[2],zmm0[2],zmm3[3],zmm0[3],zmm3[4],zmm0[4],zmm3[5],zmm0[5],zmm3[6],zmm0[6],zmm3[7],zmm0[7],zmm3[16],zmm0[16],zmm3[17],zmm0[17],zmm3[18],zmm0[18],zmm3[19],zmm0[19],zmm3[20],zmm0[20],zmm3[21],zmm0[21],zmm3[22],zmm0[22],zmm3[23],zmm0[23],zmm3[32],zmm0[32],zmm3[33],zmm0[33],zmm3[34],zmm0[34],zmm3[35],zmm0[35],zmm3[36],zmm0[36],zmm3[37],zmm0[37],zmm3[38],zmm0[38],zmm3[39],zmm0[39],zmm3[48],zmm0[48],zmm3[49],zmm0[49],zmm3[50],zmm0[50],zmm3[51],zmm0[51],zmm3[52],zmm0[52],zmm3[53],zmm0[53],zmm3[54],zmm0[54],zmm3[55],zmm0[55] -; AVX512BW-NEXT: vpmullw %zmm3, %zmm0, %zmm0 -; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm2 = 
zmm2[0],zmm0[0],zmm2[1],zmm0[1],zmm2[2],zmm0[2],zmm2[3],zmm0[3],zmm2[4],zmm0[4],zmm2[5],zmm0[5],zmm2[6],zmm0[6],zmm2[7],zmm0[7],zmm2[16],zmm0[16],zmm2[17],zmm0[17],zmm2[18],zmm0[18],zmm2[19],zmm0[19],zmm2[20],zmm0[20],zmm2[21],zmm0[21],zmm2[22],zmm0[22],zmm2[23],zmm0[23],zmm2[32],zmm0[32],zmm2[33],zmm0[33],zmm2[34],zmm0[34],zmm2[35],zmm0[35],zmm2[36],zmm0[36],zmm2[37],zmm0[37],zmm2[38],zmm0[38],zmm2[39],zmm0[39],zmm2[48],zmm0[48],zmm2[49],zmm0[49],zmm2[50],zmm0[50],zmm2[51],zmm0[51],zmm2[52],zmm0[52],zmm2[53],zmm0[53],zmm2[54],zmm0[54],zmm2[55],zmm0[55] +; AVX512BW-NEXT: vpmullw %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0 ; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vpextrb $0, %xmm0, %eax ; AVX512BW-NEXT: # kill: def $al killed $al killed $eax @@ -3169,58 +3169,58 @@ ; AVX512BWVL: # %bb.0: ; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63] ; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] -; AVX512BWVL-NEXT: vpmullw %zmm2, %zmm3, %zmm3 -; AVX512BWVL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm3, %zmm3 +; AVX512BWVL-NEXT: vpmullw 
%zmm2, %zmm3, %zmm2 +; AVX512BWVL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm2, %zmm2 ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] ; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpackuswb %zmm3, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpackuswb %zmm2, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1 -; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] +; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] ; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm4 = 
zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63] -; AVX512BWVL-NEXT: vpmullw %zmm4, %zmm3, %zmm3 -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm3, %zmm3 +; AVX512BWVL-NEXT: vpmullw %zmm4, %zmm2, %zmm2 +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm2, %zmm2 ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] ; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpackuswb %zmm3, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpackuswb %zmm2, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm3 = 
zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] +; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] ; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63] -; AVX512BWVL-NEXT: vpmullw %zmm4, %zmm3, %zmm3 -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm3, %zmm3 +; AVX512BWVL-NEXT: vpmullw %zmm4, %zmm2, %zmm2 +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm2, %zmm2 ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm1 = 
zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] ; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpackuswb %zmm3, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpackuswb %zmm2, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] ; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512BWVL-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,2,3] +; 
AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3] ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] -; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0],zmm0[0],zmm3[1],zmm0[1],zmm3[2],zmm0[2],zmm3[3],zmm0[3],zmm3[4],zmm0[4],zmm3[5],zmm0[5],zmm3[6],zmm0[6],zmm3[7],zmm0[7],zmm3[16],zmm0[16],zmm3[17],zmm0[17],zmm3[18],zmm0[18],zmm3[19],zmm0[19],zmm3[20],zmm0[20],zmm3[21],zmm0[21],zmm3[22],zmm0[22],zmm3[23],zmm0[23],zmm3[32],zmm0[32],zmm3[33],zmm0[33],zmm3[34],zmm0[34],zmm3[35],zmm0[35],zmm3[36],zmm0[36],zmm3[37],zmm0[37],zmm3[38],zmm0[38],zmm3[39],zmm0[39],zmm3[48],zmm0[48],zmm3[49],zmm0[49],zmm3[50],zmm0[50],zmm3[51],zmm0[51],zmm3[52],zmm0[52],zmm3[53],zmm0[53],zmm3[54],zmm0[54],zmm3[55],zmm0[55] -; AVX512BWVL-NEXT: vpmullw %zmm3, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm2 = zmm2[0],zmm0[0],zmm2[1],zmm0[1],zmm2[2],zmm0[2],zmm2[3],zmm0[3],zmm2[4],zmm0[4],zmm2[5],zmm0[5],zmm2[6],zmm0[6],zmm2[7],zmm0[7],zmm2[16],zmm0[16],zmm2[17],zmm0[17],zmm2[18],zmm0[18],zmm2[19],zmm0[19],zmm2[20],zmm0[20],zmm2[21],zmm0[21],zmm2[22],zmm0[22],zmm2[23],zmm0[23],zmm2[32],zmm0[32],zmm2[33],zmm0[33],zmm2[34],zmm0[34],zmm2[35],zmm0[35],zmm2[36],zmm0[36],zmm2[37],zmm0[37],zmm2[38],zmm0[38],zmm2[39],zmm0[39],zmm2[48],zmm0[48],zmm2[49],zmm0[49],zmm2[50],zmm0[50],zmm2[51],zmm0[51],zmm2[52],zmm0[52],zmm2[53],zmm0[53],zmm2[54],zmm0[54],zmm2[55],zmm0[55] +; AVX512BWVL-NEXT: vpmullw %zmm2, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm3 +; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm2 ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm0 = 
zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] -; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0],zmm0[0],zmm3[1],zmm0[1],zmm3[2],zmm0[2],zmm3[3],zmm0[3],zmm3[4],zmm0[4],zmm3[5],zmm0[5],zmm3[6],zmm0[6],zmm3[7],zmm0[7],zmm3[16],zmm0[16],zmm3[17],zmm0[17],zmm3[18],zmm0[18],zmm3[19],zmm0[19],zmm3[20],zmm0[20],zmm3[21],zmm0[21],zmm3[22],zmm0[22],zmm3[23],zmm0[23],zmm3[32],zmm0[32],zmm3[33],zmm0[33],zmm3[34],zmm0[34],zmm3[35],zmm0[35],zmm3[36],zmm0[36],zmm3[37],zmm0[37],zmm3[38],zmm0[38],zmm3[39],zmm0[39],zmm3[48],zmm0[48],zmm3[49],zmm0[49],zmm3[50],zmm0[50],zmm3[51],zmm0[51],zmm3[52],zmm0[52],zmm3[53],zmm0[53],zmm3[54],zmm0[54],zmm3[55],zmm0[55] -; AVX512BWVL-NEXT: vpmullw %zmm3, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm2 = zmm2[0],zmm0[0],zmm2[1],zmm0[1],zmm2[2],zmm0[2],zmm2[3],zmm0[3],zmm2[4],zmm0[4],zmm2[5],zmm0[5],zmm2[6],zmm0[6],zmm2[7],zmm0[7],zmm2[16],zmm0[16],zmm2[17],zmm0[17],zmm2[18],zmm0[18],zmm2[19],zmm0[19],zmm2[20],zmm0[20],zmm2[21],zmm0[21],zmm2[22],zmm0[22],zmm2[23],zmm0[23],zmm2[32],zmm0[32],zmm2[33],zmm0[33],zmm2[34],zmm0[34],zmm2[35],zmm0[35],zmm2[36],zmm0[36],zmm2[37],zmm0[37],zmm2[38],zmm0[38],zmm2[39],zmm0[39],zmm2[48],zmm0[48],zmm2[49],zmm0[49],zmm2[50],zmm0[50],zmm2[51],zmm0[51],zmm2[52],zmm0[52],zmm2[53],zmm0[53],zmm2[54],zmm0[54],zmm2[55],zmm0[55] +; AVX512BWVL-NEXT: vpmullw %zmm2, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpsrlw $8, %xmm0, %xmm3 +; AVX512BWVL-NEXT: vpsrlw $8, %xmm0, %xmm2 ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] -; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm3 = 
zmm3[0],zmm0[0],zmm3[1],zmm0[1],zmm3[2],zmm0[2],zmm3[3],zmm0[3],zmm3[4],zmm0[4],zmm3[5],zmm0[5],zmm3[6],zmm0[6],zmm3[7],zmm0[7],zmm3[16],zmm0[16],zmm3[17],zmm0[17],zmm3[18],zmm0[18],zmm3[19],zmm0[19],zmm3[20],zmm0[20],zmm3[21],zmm0[21],zmm3[22],zmm0[22],zmm3[23],zmm0[23],zmm3[32],zmm0[32],zmm3[33],zmm0[33],zmm3[34],zmm0[34],zmm3[35],zmm0[35],zmm3[36],zmm0[36],zmm3[37],zmm0[37],zmm3[38],zmm0[38],zmm3[39],zmm0[39],zmm3[48],zmm0[48],zmm3[49],zmm0[49],zmm3[50],zmm0[50],zmm3[51],zmm0[51],zmm3[52],zmm0[52],zmm3[53],zmm0[53],zmm3[54],zmm0[54],zmm3[55],zmm0[55] -; AVX512BWVL-NEXT: vpmullw %zmm3, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm2 = zmm2[0],zmm0[0],zmm2[1],zmm0[1],zmm2[2],zmm0[2],zmm2[3],zmm0[3],zmm2[4],zmm0[4],zmm2[5],zmm0[5],zmm2[6],zmm0[6],zmm2[7],zmm0[7],zmm2[16],zmm0[16],zmm2[17],zmm0[17],zmm2[18],zmm0[18],zmm2[19],zmm0[19],zmm2[20],zmm0[20],zmm2[21],zmm0[21],zmm2[22],zmm0[22],zmm2[23],zmm0[23],zmm2[32],zmm0[32],zmm2[33],zmm0[33],zmm2[34],zmm0[34],zmm2[35],zmm0[35],zmm2[36],zmm0[36],zmm2[37],zmm0[37],zmm2[38],zmm0[38],zmm2[39],zmm0[39],zmm2[48],zmm0[48],zmm2[49],zmm0[49],zmm2[50],zmm0[50],zmm2[51],zmm0[51],zmm2[52],zmm0[52],zmm2[53],zmm0[53],zmm2[54],zmm0[54],zmm2[55],zmm0[55] +; AVX512BWVL-NEXT: vpmullw %zmm2, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %eax ; AVX512BWVL-NEXT: # kill: def $al killed $al killed $eax Index: test/CodeGen/X86/vector-reduce-mul.ll =================================================================== --- test/CodeGen/X86/vector-reduce-mul.ll +++ test/CodeGen/X86/vector-reduce-mul.ll @@ -440,7 +440,7 @@ ; AVX512DQVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0 ; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0 +; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0 ; AVX512DQVL-NEXT: vpshufd 
{{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0 ; AVX512DQVL-NEXT: vmovq %xmm0, %rax @@ -761,7 +761,7 @@ ; AVX512DQVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0 ; AVX512DQVL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512DQVL-NEXT: vpmullq %zmm1, %zmm0, %zmm0 +; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0 ; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0 ; AVX512DQVL-NEXT: vmovq %xmm0, %rax @@ -990,9 +990,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -1100,9 +1100,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpmulld %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -1354,11 +1354,11 @@ ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = 
xmm0[1,1,2,3] -; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vmovd %xmm0, %eax @@ -1371,11 +1371,11 @@ ; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BWVL-NEXT: vmovd %xmm0, %eax @@ -1487,11 +1487,11 @@ ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512BW-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BW-NEXT: vmovd %xmm0, %eax @@ -1505,11 +1505,11 @@ ; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpmullw 
%xmm1, %xmm0, %xmm0 ; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512BWVL-NEXT: vpmullw %xmm1, %xmm0, %xmm0 ; AVX512BWVL-NEXT: vmovd %xmm0, %eax @@ -3062,58 +3062,58 @@ ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63] ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] -; AVX512BW-NEXT: vpmullw %zmm2, %zmm3, %zmm3 -; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX512BW-NEXT: vpandq %zmm2, %zmm3, %zmm3 +; AVX512BW-NEXT: vpmullw %zmm2, %zmm3, %zmm2 +; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX512BW-NEXT: vpandq %zmm3, %zmm2, %zmm2 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = 
zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] ; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 -; AVX512BW-NEXT: vpackuswb %zmm3, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0 +; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0 ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1 -; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] +; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = 
zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63] -; AVX512BW-NEXT: vpmullw %zmm4, %zmm3, %zmm3 -; AVX512BW-NEXT: vpandq %zmm2, %zmm3, %zmm3 +; AVX512BW-NEXT: vpmullw %zmm4, %zmm2, %zmm2 +; AVX512BW-NEXT: vpandq %zmm3, %zmm2, %zmm2 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] ; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 -; AVX512BW-NEXT: vpackuswb %zmm3, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0 +; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm3 = 
zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] +; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] ; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63] -; AVX512BW-NEXT: vpmullw %zmm4, %zmm3, %zmm3 -; AVX512BW-NEXT: vpandq %zmm2, %zmm3, %zmm3 +; AVX512BW-NEXT: vpmullw %zmm4, %zmm2, %zmm2 +; AVX512BW-NEXT: vpandq %zmm3, %zmm2, %zmm2 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = 
zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] ; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 -; AVX512BW-NEXT: vpackuswb %zmm3, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0 +; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] ; AVX512BW-NEXT: vpmullw %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0 ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,2,3] +; AVX512BW-NEXT: vpshufd {{.*#+}} xmm2 = 
xmm0[1,1,2,3] ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] -; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0],zmm0[0],zmm3[1],zmm0[1],zmm3[2],zmm0[2],zmm3[3],zmm0[3],zmm3[4],zmm0[4],zmm3[5],zmm0[5],zmm3[6],zmm0[6],zmm3[7],zmm0[7],zmm3[16],zmm0[16],zmm3[17],zmm0[17],zmm3[18],zmm0[18],zmm3[19],zmm0[19],zmm3[20],zmm0[20],zmm3[21],zmm0[21],zmm3[22],zmm0[22],zmm3[23],zmm0[23],zmm3[32],zmm0[32],zmm3[33],zmm0[33],zmm3[34],zmm0[34],zmm3[35],zmm0[35],zmm3[36],zmm0[36],zmm3[37],zmm0[37],zmm3[38],zmm0[38],zmm3[39],zmm0[39],zmm3[48],zmm0[48],zmm3[49],zmm0[49],zmm3[50],zmm0[50],zmm3[51],zmm0[51],zmm3[52],zmm0[52],zmm3[53],zmm0[53],zmm3[54],zmm0[54],zmm3[55],zmm0[55] -; AVX512BW-NEXT: vpmullw %zmm3, %zmm0, %zmm0 -; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm2 = zmm2[0],zmm0[0],zmm2[1],zmm0[1],zmm2[2],zmm0[2],zmm2[3],zmm0[3],zmm2[4],zmm0[4],zmm2[5],zmm0[5],zmm2[6],zmm0[6],zmm2[7],zmm0[7],zmm2[16],zmm0[16],zmm2[17],zmm0[17],zmm2[18],zmm0[18],zmm2[19],zmm0[19],zmm2[20],zmm0[20],zmm2[21],zmm0[21],zmm2[22],zmm0[22],zmm2[23],zmm0[23],zmm2[32],zmm0[32],zmm2[33],zmm0[33],zmm2[34],zmm0[34],zmm2[35],zmm0[35],zmm2[36],zmm0[36],zmm2[37],zmm0[37],zmm2[38],zmm0[38],zmm2[39],zmm0[39],zmm2[48],zmm0[48],zmm2[49],zmm0[49],zmm2[50],zmm0[50],zmm2[51],zmm0[51],zmm2[52],zmm0[52],zmm2[53],zmm0[53],zmm2[54],zmm0[54],zmm2[55],zmm0[55] +; AVX512BW-NEXT: vpmullw %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0 ; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm3 +; AVX512BW-NEXT: vpsrld $16, %xmm0, %xmm2 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] -; 
AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0],zmm0[0],zmm3[1],zmm0[1],zmm3[2],zmm0[2],zmm3[3],zmm0[3],zmm3[4],zmm0[4],zmm3[5],zmm0[5],zmm3[6],zmm0[6],zmm3[7],zmm0[7],zmm3[16],zmm0[16],zmm3[17],zmm0[17],zmm3[18],zmm0[18],zmm3[19],zmm0[19],zmm3[20],zmm0[20],zmm3[21],zmm0[21],zmm3[22],zmm0[22],zmm3[23],zmm0[23],zmm3[32],zmm0[32],zmm3[33],zmm0[33],zmm3[34],zmm0[34],zmm3[35],zmm0[35],zmm3[36],zmm0[36],zmm3[37],zmm0[37],zmm3[38],zmm0[38],zmm3[39],zmm0[39],zmm3[48],zmm0[48],zmm3[49],zmm0[49],zmm3[50],zmm0[50],zmm3[51],zmm0[51],zmm3[52],zmm0[52],zmm3[53],zmm0[53],zmm3[54],zmm0[54],zmm3[55],zmm0[55] -; AVX512BW-NEXT: vpmullw %zmm3, %zmm0, %zmm0 -; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm2 = zmm2[0],zmm0[0],zmm2[1],zmm0[1],zmm2[2],zmm0[2],zmm2[3],zmm0[3],zmm2[4],zmm0[4],zmm2[5],zmm0[5],zmm2[6],zmm0[6],zmm2[7],zmm0[7],zmm2[16],zmm0[16],zmm2[17],zmm0[17],zmm2[18],zmm0[18],zmm2[19],zmm0[19],zmm2[20],zmm0[20],zmm2[21],zmm0[21],zmm2[22],zmm0[22],zmm2[23],zmm0[23],zmm2[32],zmm0[32],zmm2[33],zmm0[33],zmm2[34],zmm0[34],zmm2[35],zmm0[35],zmm2[36],zmm0[36],zmm2[37],zmm0[37],zmm2[38],zmm0[38],zmm2[39],zmm0[39],zmm2[48],zmm0[48],zmm2[49],zmm0[49],zmm2[50],zmm0[50],zmm2[51],zmm0[51],zmm2[52],zmm0[52],zmm2[53],zmm0[53],zmm2[54],zmm0[54],zmm2[55],zmm0[55] +; AVX512BW-NEXT: vpmullw %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0 ; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 -; AVX512BW-NEXT: vpsrlw $8, %xmm0, %xmm3 +; AVX512BW-NEXT: vpsrlw $8, %xmm0, %xmm2 ; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] -; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm3 = 
zmm3[0],zmm0[0],zmm3[1],zmm0[1],zmm3[2],zmm0[2],zmm3[3],zmm0[3],zmm3[4],zmm0[4],zmm3[5],zmm0[5],zmm3[6],zmm0[6],zmm3[7],zmm0[7],zmm3[16],zmm0[16],zmm3[17],zmm0[17],zmm3[18],zmm0[18],zmm3[19],zmm0[19],zmm3[20],zmm0[20],zmm3[21],zmm0[21],zmm3[22],zmm0[22],zmm3[23],zmm0[23],zmm3[32],zmm0[32],zmm3[33],zmm0[33],zmm3[34],zmm0[34],zmm3[35],zmm0[35],zmm3[36],zmm0[36],zmm3[37],zmm0[37],zmm3[38],zmm0[38],zmm3[39],zmm0[39],zmm3[48],zmm0[48],zmm3[49],zmm0[49],zmm3[50],zmm0[50],zmm3[51],zmm0[51],zmm3[52],zmm0[52],zmm3[53],zmm0[53],zmm3[54],zmm0[54],zmm3[55],zmm0[55] -; AVX512BW-NEXT: vpmullw %zmm3, %zmm0, %zmm0 -; AVX512BW-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm2 = zmm2[0],zmm0[0],zmm2[1],zmm0[1],zmm2[2],zmm0[2],zmm2[3],zmm0[3],zmm2[4],zmm0[4],zmm2[5],zmm0[5],zmm2[6],zmm0[6],zmm2[7],zmm0[7],zmm2[16],zmm0[16],zmm2[17],zmm0[17],zmm2[18],zmm0[18],zmm2[19],zmm0[19],zmm2[20],zmm0[20],zmm2[21],zmm0[21],zmm2[22],zmm0[22],zmm2[23],zmm0[23],zmm2[32],zmm0[32],zmm2[33],zmm0[33],zmm2[34],zmm0[34],zmm2[35],zmm0[35],zmm2[36],zmm0[36],zmm2[37],zmm0[37],zmm2[38],zmm0[38],zmm2[39],zmm0[39],zmm2[48],zmm0[48],zmm2[49],zmm0[49],zmm2[50],zmm0[50],zmm2[51],zmm0[51],zmm2[52],zmm0[52],zmm2[53],zmm0[53],zmm2[54],zmm0[54],zmm2[55],zmm0[55] +; AVX512BW-NEXT: vpmullw %zmm2, %zmm0, %zmm0 +; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0 ; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 ; AVX512BW-NEXT: vpextrb $0, %xmm0, %eax ; AVX512BW-NEXT: # kill: def $al killed $al killed $eax @@ -3124,58 +3124,58 @@ ; AVX512BWVL: # %bb.0: ; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm2 = 
zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63] ; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] -; AVX512BWVL-NEXT: vpmullw %zmm2, %zmm3, %zmm3 -; AVX512BWVL-NEXT: vmovdqa64 {{.*#+}} zmm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm3, %zmm3 +; AVX512BWVL-NEXT: vpmullw %zmm2, %zmm3, %zmm2 +; AVX512BWVL-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255] +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm2, %zmm2 ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} 
zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] ; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpackuswb %zmm3, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpackuswb %zmm2, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm1 -; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] +; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] ; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63] -; AVX512BWVL-NEXT: vpmullw %zmm4, %zmm3, %zmm3 -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm3, %zmm3 +; AVX512BWVL-NEXT: vpmullw %zmm4, %zmm2, %zmm2 +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm2, %zmm2 ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] ; 
AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] ; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpackuswb %zmm3, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpackuswb %zmm2, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] +; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63] ; AVX512BWVL-NEXT: vpunpckhbw {{.*#+}} zmm4 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63] -; 
AVX512BWVL-NEXT: vpmullw %zmm4, %zmm3, %zmm3 -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm3, %zmm3 +; AVX512BWVL-NEXT: vpmullw %zmm4, %zmm2, %zmm2 +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm2, %zmm2 ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] ; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpackuswb %zmm3, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpackuswb %zmm2, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm1 = 
zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55] ; AVX512BWVL-NEXT: vpmullw %zmm1, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1 ; AVX512BWVL-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,2,3] +; AVX512BWVL-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,2,3] ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] -; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0],zmm0[0],zmm3[1],zmm0[1],zmm3[2],zmm0[2],zmm3[3],zmm0[3],zmm3[4],zmm0[4],zmm3[5],zmm0[5],zmm3[6],zmm0[6],zmm3[7],zmm0[7],zmm3[16],zmm0[16],zmm3[17],zmm0[17],zmm3[18],zmm0[18],zmm3[19],zmm0[19],zmm3[20],zmm0[20],zmm3[21],zmm0[21],zmm3[22],zmm0[22],zmm3[23],zmm0[23],zmm3[32],zmm0[32],zmm3[33],zmm0[33],zmm3[34],zmm0[34],zmm3[35],zmm0[35],zmm3[36],zmm0[36],zmm3[37],zmm0[37],zmm3[38],zmm0[38],zmm3[39],zmm0[39],zmm3[48],zmm0[48],zmm3[49],zmm0[49],zmm3[50],zmm0[50],zmm3[51],zmm0[51],zmm3[52],zmm0[52],zmm3[53],zmm0[53],zmm3[54],zmm0[54],zmm3[55],zmm0[55] -; AVX512BWVL-NEXT: vpmullw %zmm3, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm2 = 
zmm2[0],zmm0[0],zmm2[1],zmm0[1],zmm2[2],zmm0[2],zmm2[3],zmm0[3],zmm2[4],zmm0[4],zmm2[5],zmm0[5],zmm2[6],zmm0[6],zmm2[7],zmm0[7],zmm2[16],zmm0[16],zmm2[17],zmm0[17],zmm2[18],zmm0[18],zmm2[19],zmm0[19],zmm2[20],zmm0[20],zmm2[21],zmm0[21],zmm2[22],zmm0[22],zmm2[23],zmm0[23],zmm2[32],zmm0[32],zmm2[33],zmm0[33],zmm2[34],zmm0[34],zmm2[35],zmm0[35],zmm2[36],zmm0[36],zmm2[37],zmm0[37],zmm2[38],zmm0[38],zmm2[39],zmm0[39],zmm2[48],zmm0[48],zmm2[49],zmm0[49],zmm2[50],zmm0[50],zmm2[51],zmm0[51],zmm2[52],zmm0[52],zmm2[53],zmm0[53],zmm2[54],zmm0[54],zmm2[55],zmm0[55] +; AVX512BWVL-NEXT: vpmullw %zmm2, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm3 +; AVX512BWVL-NEXT: vpsrld $16, %xmm0, %xmm2 ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] -; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0],zmm0[0],zmm3[1],zmm0[1],zmm3[2],zmm0[2],zmm3[3],zmm0[3],zmm3[4],zmm0[4],zmm3[5],zmm0[5],zmm3[6],zmm0[6],zmm3[7],zmm0[7],zmm3[16],zmm0[16],zmm3[17],zmm0[17],zmm3[18],zmm0[18],zmm3[19],zmm0[19],zmm3[20],zmm0[20],zmm3[21],zmm0[21],zmm3[22],zmm0[22],zmm3[23],zmm0[23],zmm3[32],zmm0[32],zmm3[33],zmm0[33],zmm3[34],zmm0[34],zmm3[35],zmm0[35],zmm3[36],zmm0[36],zmm3[37],zmm0[37],zmm3[38],zmm0[38],zmm3[39],zmm0[39],zmm3[48],zmm0[48],zmm3[49],zmm0[49],zmm3[50],zmm0[50],zmm3[51],zmm0[51],zmm3[52],zmm0[52],zmm3[53],zmm0[53],zmm3[54],zmm0[54],zmm3[55],zmm0[55] -; AVX512BWVL-NEXT: vpmullw %zmm3, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm2 = 
zmm2[0],zmm0[0],zmm2[1],zmm0[1],zmm2[2],zmm0[2],zmm2[3],zmm0[3],zmm2[4],zmm0[4],zmm2[5],zmm0[5],zmm2[6],zmm0[6],zmm2[7],zmm0[7],zmm2[16],zmm0[16],zmm2[17],zmm0[17],zmm2[18],zmm0[18],zmm2[19],zmm0[19],zmm2[20],zmm0[20],zmm2[21],zmm0[21],zmm2[22],zmm0[22],zmm2[23],zmm0[23],zmm2[32],zmm0[32],zmm2[33],zmm0[33],zmm2[34],zmm0[34],zmm2[35],zmm0[35],zmm2[36],zmm0[36],zmm2[37],zmm0[37],zmm2[38],zmm0[38],zmm2[39],zmm0[39],zmm2[48],zmm0[48],zmm2[49],zmm0[49],zmm2[50],zmm0[50],zmm2[51],zmm0[51],zmm2[52],zmm0[52],zmm2[53],zmm0[53],zmm2[54],zmm0[54],zmm2[55],zmm0[55] +; AVX512BWVL-NEXT: vpmullw %zmm2, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpsrlw $8, %xmm0, %xmm3 +; AVX512BWVL-NEXT: vpsrlw $8, %xmm0, %xmm2 ; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55] -; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm3 = zmm3[0],zmm0[0],zmm3[1],zmm0[1],zmm3[2],zmm0[2],zmm3[3],zmm0[3],zmm3[4],zmm0[4],zmm3[5],zmm0[5],zmm3[6],zmm0[6],zmm3[7],zmm0[7],zmm3[16],zmm0[16],zmm3[17],zmm0[17],zmm3[18],zmm0[18],zmm3[19],zmm0[19],zmm3[20],zmm0[20],zmm3[21],zmm0[21],zmm3[22],zmm0[22],zmm3[23],zmm0[23],zmm3[32],zmm0[32],zmm3[33],zmm0[33],zmm3[34],zmm0[34],zmm3[35],zmm0[35],zmm3[36],zmm0[36],zmm3[37],zmm0[37],zmm3[38],zmm0[38],zmm3[39],zmm0[39],zmm3[48],zmm0[48],zmm3[49],zmm0[49],zmm3[50],zmm0[50],zmm3[51],zmm0[51],zmm3[52],zmm0[52],zmm3[53],zmm0[53],zmm3[54],zmm0[54],zmm3[55],zmm0[55] -; AVX512BWVL-NEXT: vpmullw %zmm3, %zmm0, %zmm0 -; AVX512BWVL-NEXT: vpandq %zmm2, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpunpcklbw {{.*#+}} zmm2 = 
zmm2[0],zmm0[0],zmm2[1],zmm0[1],zmm2[2],zmm0[2],zmm2[3],zmm0[3],zmm2[4],zmm0[4],zmm2[5],zmm0[5],zmm2[6],zmm0[6],zmm2[7],zmm0[7],zmm2[16],zmm0[16],zmm2[17],zmm0[17],zmm2[18],zmm0[18],zmm2[19],zmm0[19],zmm2[20],zmm0[20],zmm2[21],zmm0[21],zmm2[22],zmm0[22],zmm2[23],zmm0[23],zmm2[32],zmm0[32],zmm2[33],zmm0[33],zmm2[34],zmm0[34],zmm2[35],zmm0[35],zmm2[36],zmm0[36],zmm2[37],zmm0[37],zmm2[38],zmm0[38],zmm2[39],zmm0[39],zmm2[48],zmm0[48],zmm2[49],zmm0[49],zmm2[50],zmm0[50],zmm2[51],zmm0[51],zmm2[52],zmm0[52],zmm2[53],zmm0[53],zmm2[54],zmm0[54],zmm2[55],zmm0[55] +; AVX512BWVL-NEXT: vpmullw %zmm2, %zmm0, %zmm0 +; AVX512BWVL-NEXT: vpandq %zmm3, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vpackuswb %zmm1, %zmm0, %zmm0 ; AVX512BWVL-NEXT: vpextrb $0, %xmm0, %eax ; AVX512BWVL-NEXT: # kill: def $al killed $al killed $eax Index: test/CodeGen/X86/vector-reduce-or-widen.ll =================================================================== --- test/CodeGen/X86/vector-reduce-or-widen.ll +++ test/CodeGen/X86/vector-reduce-or-widen.ll @@ -115,7 +115,7 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %xmm0, %rax @@ -172,7 +172,7 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %xmm0, %rax @@ -336,9 +336,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd 
{{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -401,9 +401,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -639,11 +639,11 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -717,11 +717,11 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -1111,13 
+1111,13 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1 ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpextrb $0, %xmm0, %eax @@ -1221,13 +1221,13 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1 ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpextrb $0, %xmm0, %eax Index: test/CodeGen/X86/vector-reduce-or.ll =================================================================== --- test/CodeGen/X86/vector-reduce-or.ll +++ test/CodeGen/X86/vector-reduce-or.ll @@ -115,7 +115,7 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512-NEXT: vpor 
%xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %xmm0, %rax @@ -172,7 +172,7 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %xmm0, %rax @@ -336,9 +336,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -401,9 +401,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpord %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -637,11 +637,11 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; 
AVX512-NEXT: vmovd %xmm0, %eax @@ -715,11 +715,11 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -1101,13 +1101,13 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1 ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpextrb $0, %xmm0, %eax @@ -1211,13 +1211,13 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 -; AVX512-NEXT: vporq %zmm1, %zmm0, %zmm0 
+; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1 ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpextrb $0, %xmm0, %eax Index: test/CodeGen/X86/vector-reduce-xor-widen.ll =================================================================== --- test/CodeGen/X86/vector-reduce-xor-widen.ll +++ test/CodeGen/X86/vector-reduce-xor-widen.ll @@ -115,7 +115,7 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %xmm0, %rax @@ -172,7 +172,7 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %xmm0, %rax @@ -336,9 +336,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -401,9 +401,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: 
vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -639,11 +639,11 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -717,11 +717,11 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -1111,13 +1111,13 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor 
%xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1 ; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpextrb $0, %xmm0, %eax @@ -1221,13 +1221,13 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1 ; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpextrb $0, %xmm0, %eax Index: test/CodeGen/X86/vector-reduce-xor.ll =================================================================== --- test/CodeGen/X86/vector-reduce-xor.ll +++ test/CodeGen/X86/vector-reduce-xor.ll @@ -115,7 +115,7 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %xmm0, %rax @@ -172,7 +172,7 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] ; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovq %xmm0, %rax @@ -336,9 +336,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: 
vpxord %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -401,9 +401,9 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpxord %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] ; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -637,11 +637,11 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 ; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -715,11 +715,11 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: 
vpsrld $16, %xmm0, %xmm1 ; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vmovd %xmm0, %eax @@ -1101,13 +1101,13 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1 ; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpextrb $0, %xmm0, %eax @@ -1211,13 +1211,13 @@ ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1 ; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3] -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1 -; AVX512-NEXT: vpxorq %zmm1, %zmm0, %zmm0 +; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpsrlw $8, %xmm0, %xmm1 ; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0 ; AVX512-NEXT: vpextrb $0, %xmm0, %eax Index: test/CodeGen/X86/vector-shuffle-512-v16.ll =================================================================== --- test/CodeGen/X86/vector-shuffle-512-v16.ll +++ test/CodeGen/X86/vector-shuffle-512-v16.ll @@ -338,7 +338,7 @@ define <8 x float> @test_v16f32_0_1_2_3_4_6_7_10 (<16 x float> %v) { ; ALL-LABEL: test_v16f32_0_1_2_3_4_6_7_10: ; ALL: # %bb.0: -; ALL-NEXT: 
vextractf64x4 $1, %zmm0, %ymm1 +; ALL-NEXT: vextractf32x4 $2, %zmm0, %xmm1 ; ALL-NEXT: vmovsldup {{.*#+}} xmm1 = xmm1[0,0,2,2] ; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 ; ALL-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,6,7,u] Index: test/DebugInfo/COFF/inlining.ll =================================================================== --- test/DebugInfo/COFF/inlining.ll +++ test/DebugInfo/COFF/inlining.ll @@ -29,7 +29,6 @@ ; ASM: .cv_loc 1 1 9 5 # t.cpp:9:5 ; ASM: addl $4, "?x@@3HC" ; ASM: .cv_inline_site_id 2 within 1 inlined_at 1 10 3 -; ASM: .cv_loc 2 1 3 7 # t.cpp:3:7 ; ASM: .cv_loc 2 1 4 5 # t.cpp:4:5 ; ASM: addl {{.*}}, "?x@@3HC" ; ASM: .cv_loc 2 1 5 5 # t.cpp:5:5 @@ -172,7 +171,7 @@ ; OBJ: PtrParent: 0x0 ; OBJ: PtrEnd: 0x0 ; OBJ: PtrNext: 0x0 -; OBJ: CodeSize: 0x3C +; OBJ: CodeSize: 0x35 ; OBJ: DbgStart: 0x0 ; OBJ: DbgEnd: 0x0 ; OBJ: FunctionType: baz (0x1004) @@ -189,9 +188,9 @@ ; OBJ: Inlinee: bar (0x1002) ; OBJ: BinaryAnnotations [ ; OBJ-NEXT: ChangeCodeOffsetAndLineOffset: {CodeOffset: 0x8, LineOffset: 1} -; OBJ-NEXT: ChangeCodeOffsetAndLineOffset: {CodeOffset: 0x7, LineOffset: 1} +; OBJ-NEXT: ChangeCodeOffsetAndLineOffset: {CodeOffset: 0x9, LineOffset: 1} ; OBJ-NEXT: ChangeLineOffset: 1 -; OBJ-NEXT: ChangeCodeOffset: 0x1D +; OBJ-NEXT: ChangeCodeOffset: 0x14 ; OBJ-NEXT: ChangeCodeLength: 0x7 ; OBJ: ] ; OBJ: } @@ -200,8 +199,8 @@ ; OBJ: PtrEnd: 0x0 ; OBJ: Inlinee: foo (0x1003) ; OBJ: BinaryAnnotations [ -; OBJ-NEXT: ChangeCodeOffsetAndLineOffset: {CodeOffset: 0xF, LineOffset: 1} -; OBJ-NEXT: ChangeCodeOffsetAndLineOffset: {CodeOffset: 0x9, LineOffset: 1} +; OBJ-NEXT: ChangeLineOffset: 2 +; OBJ-NEXT: ChangeCodeOffset: 0x11 ; OBJ-NEXT: ChangeCodeOffsetAndLineOffset: {CodeOffset: 0x6, LineOffset: 1} ; OBJ-NEXT: ChangeCodeOffsetAndLineOffset: {CodeOffset: 0x7, LineOffset: 1} ; OBJ-NEXT: ChangeCodeLength: 0x7 Index: test/DebugInfo/COFF/lexicalblock.ll =================================================================== --- 
test/DebugInfo/COFF/lexicalblock.ll +++ test/DebugInfo/COFF/lexicalblock.ll @@ -70,9 +70,6 @@ ; CHECK: Kind: S_BLOCK32 {{.*}} ; CHECK: BlockName: ; CHECK: } -; CHECK: LocalSym { -; CHECK: VarName: localC -; CHECK: } ; CHECK: ScopeEndSym { ; CHECK: Kind: S_END {{.*}} ; CHECK: } @@ -80,9 +77,6 @@ ; CHECK: Kind: S_BLOCK32 {{.*}} ; CHECK: BlockName: ; CHECK: } -; CHECK: LocalSym { -; CHECK: VarName: localD -; CHECK: } ; CHECK: ScopeEndSym { ; CHECK: Kind: S_END {{.*}} ; CHECK: } @@ -90,38 +84,12 @@ ; CHECK: Kind: S_BLOCK32 {{.*}} ; CHECK: BlockName: ; CHECK: } -; CHECK: LocalSym { -; CHECK: VarName: localE -; CHECK: } -; CHECK: ScopeEndSym { -; CHECK: } -; CHECK: BlockSym { -; CHECK: Kind: S_BLOCK32 {{.*}} -; CHECK: BlockName: -; CHECK: } -; CHECK: LocalSym { -; CHECK: VarName: localF -; CHECK: } -; CHECK: BlockSym { -; CHECK: Kind: S_BLOCK32 {{.*}} -; CHECK: BlockName: -; CHECK: } -; CHECK: LocalSym { -; CHECK: VarName: localG -; CHECK: } ; CHECK: ScopeEndSym { -; CHECK: Kind: S_END {{.*}} -; CHECK: } -; CHECK: ScopeEndSym { -; CHECK: Kind: S_END {{.*}} ; CHECK: } ; CHECK: BlockSym { ; CHECK: Kind: S_BLOCK32 {{.*}} ; CHECK: BlockName: ; CHECK: } -; CHECK: LocalSym { -; CHECK: VarName: localH -; CHECK: } ; CHECK: ScopeEndSym { ; CHECK: Kind: S_END {{.*}} ; CHECK: } Index: test/DebugInfo/Generic/incorrect-variable-debugloc1.ll =================================================================== --- test/DebugInfo/Generic/incorrect-variable-debugloc1.ll +++ test/DebugInfo/Generic/incorrect-variable-debugloc1.ll @@ -1,8 +1,4 @@ ; REQUIRES: object-emission -; This test is failing for powerpc64, because a location list for the -; variable 'c' is not generated at all. Temporary marking this test as XFAIL -; for powerpc, until PR21881 is fixed. 
-; XFAIL: powerpc64 ; RUN: %llc_dwarf -O2 -dwarf-version 2 -filetype=obj < %s | llvm-dwarfdump - | FileCheck %s --check-prefix=DWARF23 ; RUN: %llc_dwarf -O2 -dwarf-version 3 -filetype=obj < %s | llvm-dwarfdump - | FileCheck %s --check-prefix=DWARF23 Index: test/DebugInfo/NVPTX/dbg-value-const-byref.ll =================================================================== --- test/DebugInfo/NVPTX/dbg-value-const-byref.ll +++ test/DebugInfo/NVPTX/dbg-value-const-byref.ll @@ -17,10 +17,10 @@ ; particularly variables that are described as constants and passed ; by reference. ; -; CHECK: DEBUG_VALUE: foo:i <- [DW_OP_deref] $vrdepot ; CHECK: DEBUG_VALUE: foo:i <- 3 ; CHECK: DEBUG_VALUE: foo:i <- 7 ; CHECK: DEBUG_VALUE: foo:i <- % +; CHECK: DEBUG_VALUE: foo:i <- [DW_OP_deref] $vrdepot ; Function Attrs: nounwind ssp uwtable define i32 @foo() #0 !dbg !4 { Index: test/DebugInfo/X86/dbg-value-frame-index-2.ll =================================================================== --- /dev/null +++ test/DebugInfo/X86/dbg-value-frame-index-2.ll @@ -0,0 +1,79 @@ +; RUN: llc -start-after=codegenprepare -stop-before=expand-isel-pseudos < %s -o - | FileCheck %s + +; Test that stack frame dbg.values are lowered to DBG_VALUEs, in blocks that +; are local to the alloca, and elsewhere. Differs from dbg-value-frame-index.ll +; because this test does not result in the frame-index being in a vreg, +; instead it's exclusively referred to by memory operands of instructions. +; +; Additionally test that we don't re-order with constant values -- both are +; independent of the order the instructions get lowered, but should not +; interleave. 
+ +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-unknown" + +declare void @dud() + +; CHECK: [[BARVAR:![0-9]+]] = !DILocalVariable(name: "bar", + +define i32 @foo() !dbg !6 { +; CHECK-LABEL: body + +; CHECK: DBG_VALUE 0, $noreg, [[BARVAR]] +; CHECK-NEXT: MOV32mi %[[STACKLOC:[a-zA-Z0-9\.]+]], 1, $noreg +; CHECK-NEXT: DBG_VALUE %[[STACKLOC]], $noreg, [[BARVAR]] + + %p1 = alloca i32 + call void @llvm.dbg.value(metadata i32 *null, metadata !17, metadata !DIExpression()), !dbg !18 + store i32 0, i32 *%p1 + call void @llvm.dbg.value(metadata i32 *%p1, metadata !17, metadata !DIExpression()), !dbg !18 + br label %foo + +foo: + +; CHECK-LABEL: bb.1.foo +; CHECK: DBG_VALUE %[[STACKLOC]], $noreg, [[BARVAR]] + + call void @dud() + call void @llvm.dbg.value(metadata i32 *%p1, metadata !17, metadata !DIExpression()), !dbg !18 + br label %bar + +bar: + +; CHECK-LABEL: bb.2.bar +; CHECK: DBG_VALUE %[[STACKLOC]], $noreg, [[BARVAR]] +; CHECK-NEXT: ADJCALLSTACKDOWN +; CHECK-NEXT: CALL +; CHECK-NEXT: ADJCALLSTACKUP +; CHECK-NEXT: DBG_VALUE 0, $noreg, [[BARVAR]] + call void @llvm.dbg.value(metadata i32 *%p1, metadata !17, metadata !DIExpression()), !dbg !18 + call void @dud() + call void @llvm.dbg.value(metadata i32 *null, metadata !17, metadata !DIExpression()), !dbg !18 + %loaded = load i32, i32 *%p1 + ret i32 %loaded, !dbg !19 +} + +; Function Attrs: nounwind readnone speculatable +declare void @llvm.dbg.value(metadata, metadata, metadata) #6 + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!3, !4} +!llvm.ident = !{!5} + +!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !2, globals: !2) +!1 = !DIFile(filename: "a.c", directory: "b") +!2 = !{} +!3 = !{i32 2, !"Dwarf Version", i32 4} +!4 = !{i32 2, !"Debug Info Version", i32 3} +!5 = !{!""} +!6 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 13, type: !7, 
isLocal: false, isDefinition: true, scopeLine: 14, isOptimized: false, unit: !0, retainedNodes: !2) +!7 = !DISubroutineType(types: !8) +!8 = !{!140} +!140 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed) +!15 = !{!16} +!16 = !DISubrange(count: 5) +!17 = !DILocalVariable(name: "bar", scope: !6, line: 13, type: !140) +!18 = !DILocation(line: 13, column: 23, scope: !6) +!19 = !DILocation(line: 15, column: 5, scope: !6) +!20 = !DILocation(line: 16, column: 1, scope: !6) +!21 = !DILocalVariable(name: "baz", scope: !6, line: 13, type: !140) Index: test/DebugInfo/X86/pr40427.ll =================================================================== --- test/DebugInfo/X86/pr40427.ll +++ test/DebugInfo/X86/pr40427.ll @@ -9,6 +9,8 @@ ; CHECK: ![[DBGVAR:[0-9]+]] = !DILocalVariable(name: "bees", +target triple = "x86_64-unknown-linux-gnu" + define i16 @lolwat(i1 %spoons, i64 *%bees, i16 %yellow, i64 *%more) { entry: br i1 %spoons, label %trueb, label %falseb Index: test/MC/AsmParser/debug-only-comments.s =================================================================== --- /dev/null +++ test/MC/AsmParser/debug-only-comments.s @@ -0,0 +1,6 @@ + # RUN: llvm-mc -triple i386-linux-gnu -g -dwarf-version 4 < %s | FileCheck %s + # RUN: llvm-mc -triple i386-linux-gnu -g -dwarf-version 5 < %s | FileCheck %s + # CHECK: .section .debug_info + # CHECK: .section .debug_info + # CHECK-NOT: .section + # CHECK: .ascii "" Index: test/MC/RISCV/align.s =================================================================== --- /dev/null +++ test/MC/RISCV/align.s @@ -0,0 +1,105 @@ +# The file testing Nop insertion with R_RISCV_ALIGN for relaxation. 
+ +# Relaxation enabled: +# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+relax < %s \ +# RUN: | llvm-objdump -d -riscv-no-aliases - \ +# RUN: | FileCheck -check-prefix=RELAX-INST %s +# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+relax < %s \ +# RUN: | llvm-readobj -r | FileCheck -check-prefix=RELAX-RELOC %s + +# Relaxation disabled: +# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=-relax < %s \ +# RUN: | llvm-objdump -d -riscv-no-aliases - \ +# RUN: | FileCheck -check-prefix=NORELAX-INST %s +# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=-relax < %s \ +# RUN: | llvm-readobj -r | FileCheck -check-prefix=NORELAX-RELOC %s + +# Relaxation enabled with C extension: +# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+c,+relax < %s \ +# RUN: | llvm-objdump -d -riscv-no-aliases - \ +# RUN: | FileCheck -check-prefix=C-EXT-RELAX-INST %s +# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+c,+relax < %s \ +# RUN: | llvm-readobj -r | FileCheck -check-prefix=C-EXT-RELAX-RELOC %s + +# Relaxation disabled with C extension: +# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+c,-relax < %s \ +# RUN: | llvm-objdump -d -riscv-no-aliases - \ +# RUN: | FileCheck -check-prefix=C-EXT-NORELAX-INST %s +# RUN: llvm-mc -filetype=obj -triple riscv32 -mattr=+c,-relax < %s \ +# RUN: | llvm-readobj -r | FileCheck -check-prefix=C-EXT-NORELAX-RELOC %s + +# We need to insert N-MinNopSize bytes NOPs and R_RISCV_ALIGN relocation +# type for .align N directive when linker relaxation enabled. +# Linker could satisfy alignment by removing NOPs after linker relaxation. + +# The first R_RISCV_ALIGN come from +# MCELFStreamer::InitSections() EmitCodeAlignment(4). 
+# C-EXT-RELAX-RELOC: R_RISCV_ALIGN - 0x2 +# C-EXT-RELAX-INST: c.nop +test: + .p2align 2 +# C-EXT-RELAX-RELOC: R_RISCV_ALIGN - 0x2 +# C-EXT-RELAX-INST: c.nop + bne zero, a0, .LBB0_2 + mv a0, zero + .p2align 3 +# RELAX-RELOC: R_RISCV_ALIGN - 0x4 +# RELAX-INST: addi zero, zero, 0 +# C-EXT-RELAX-RELOC: R_RISCV_ALIGN - 0x6 +# C-EXT-RELAX-INST: addi zero, zero, 0 +# C-EXT-RELAX-INST: c.nop +# C-EXT-NORELAX-INST: addi zero, zero, 0 + add a0, a0, a1 + .align 4 +.LBB0_2: +# RELAX-RELOC: R_RISCV_ALIGN - 0xC +# RELAX-INST: addi zero, zero, 0 +# RELAX-INST: addi zero, zero, 0 +# RELAX-INST: addi zero, zero, 0 +# NORELAX-INST: addi zero, zero, 0 +# C-EXT-RELAX-RELOC: R_RISCV_ALIGN - 0xE +# C-EXT-RELAX-INST: addi zero, zero, 0 +# C-EXT-RELAX-INST: addi zero, zero, 0 +# C-EXT-RELAX-INST: addi zero, zero, 0 +# C-EXT-RELAX-INST: c.nop +# C-EXT-INST: addi zero, zero, 0 +# C-EXT-INST: c.nop + add a0, a0, a1 + .p2align 3 +.constant_pool: +.long 3126770193 +# RELAX-RELOC: R_RISCV_ALIGN - 0x4 +# RELAX-INST: addi zero, zero, 0 +# NORELAX-INST: addi zero, zero, 0 +# C-EXT-RELAX-RELOC: R_RISCV_ALIGN - 0x6 +# C-EXT-RELAX-INST: addi zero, zero, 0 +# C-EXT-RELAX-INST: c.nop +# C-EXT-INST: addi zero, zero, 0 +# C-EXT-INST: c.nop + add a0, a0, a1 +# Alignment directive with specific padding value 0x01. +# We will not emit R_RISCV_ALIGN in this case. +# The behavior is the same as GNU assembler. + .p2align 4, 1 +# RELAX-RELOC-NOT: R_RISCV_ALIGN - 0xC +# RELAX-INST: 01 01 +# RELAX-INST: 01 01 +# C-EXT-RELAX-RELOC-NOT: R_RISCV_ALIGN - 0xE +# C-EXT-RELAX-INST: 01 01 +# C-EXT-INST: 01 01 + ret +# NORELAX-RELOC-NOT: R_RISCV +# C-EXT-NORELAX-RELOC-NOT: R_RISCV +# We only need to insert R_RISCV_ALIGN for code section +# when the linker relaxation enabled. 
+ .data + .p2align 3 +# RELAX-RELOC-NOT: R_RISCV_ALIGN +# C-EXT-RELAX-RELOC-NOT: R_RISCV_ALIGN +data1: + .word 7 + .p2align 4 +# RELAX-RELOC-NOT: R_RISCV_ALIGN +# C-EXT-RELAX-RELOC-NOT: R_RISCV_ALIGN +data2: + .word 9 Index: test/MC/WebAssembly/annotations.s =================================================================== --- /dev/null +++ test/MC/WebAssembly/annotations.s @@ -0,0 +1,71 @@ +# RUN: llvm-mc -triple=wasm32-unknown-unknown -mattr=+exception-handling < %s | FileCheck %s + +# Tests if block/loop/try/catch/end/branch/rethrow instructions are correctly +# printed with their annotations. + + .text + .section .text.test_annotation,"",@ + .type test_annotation,@function +test_annotation: + .functype test_annotation () -> () + .eventtype __cpp_exception i32 + try + br 0 + catch + block + br_if 0 + loop + br_if 1 + end_loop + end_block + try + rethrow + catch + block + try + br 0 + catch + local.set 0 + block i32 + local.get 0 + br_on_exn 0, __cpp_exception@EVENT + rethrow + end_block + end_try + end_block + rethrow + end_try + end_try + end_function + + +# CHECK: test_annotation: +# CHECK: try +# CHECK-NEXT: br 0 # 0: down to label0 +# CHECK-NEXT: catch # catch0: +# CHECK-NEXT: block +# CHECK-NEXT: br_if 0 # 0: down to label1 +# CHECK-NEXT: loop # label2: +# CHECK-NEXT: br_if 1 # 1: down to label1 +# CHECK-NEXT: end_loop +# CHECK-NEXT: end_block # label1: +# CHECK-NEXT: try +# CHECK-NEXT: rethrow # down to catch1 +# CHECK-NEXT: catch # catch1: +# CHECK-NEXT: block +# CHECK-NEXT: try +# CHECK-NEXT: br 0 # 0: down to label5 +# CHECK-NEXT: catch # catch2: +# CHECK-NEXT: local.set 0 +# CHECK-NEXT: block i32 +# CHECK-NEXT: local.get 0 +# CHECK-NEXT: br_on_exn 0, __cpp_exception@EVENT # 0: down to label6 +# CHECK-NEXT: rethrow # to caller +# CHECK-NEXT: end_block # label6: +# CHECK-NEXT: end_try # label5: +# CHECK-NEXT: end_block # label4: +# CHECK-NEXT: rethrow # to caller +# CHECK-NEXT: end_try # label3: +# CHECK-NEXT: end_try # label0: +# CHECK-NEXT: 
end_function + Index: test/MC/WebAssembly/basic-assembly.s =================================================================== --- test/MC/WebAssembly/basic-assembly.s +++ test/MC/WebAssembly/basic-assembly.s @@ -71,11 +71,18 @@ i32.trunc_f32_s try except_ref .LBB0_3: - i32.catch 0 + catch + local.set 0 + block i32 + local.get 0 + br_on_exn 0, __cpp_exception@EVENT + rethrow .LBB0_4: - catch_all -.LBB0_5: + end_block end_try + i32.const 0 + throw 0 +.LBB0_5: #i32.trunc_sat_f32_s global.get __stack_pointer@GLOBAL end_function @@ -143,11 +150,18 @@ # CHECK-NEXT: i32.trunc_f32_s # CHECK-NEXT: try except_ref # CHECK-NEXT: .LBB0_3: -# CHECK-NEXT: i32.catch 0 +# CHECK-NEXT: catch +# CHECK-NEXT: local.set 0 +# CHECK-NEXT: block i32 +# CHECK-NEXT: local.get 0 +# CHECK-NEXT: br_on_exn 0, __cpp_exception@EVENT +# CHECK-NEXT: rethrow # CHECK-NEXT: .LBB0_4: -# CHECK-NEXT: catch_all -# CHECK-NEXT: .LBB0_5: +# CHECK-NEXT: end_block # CHECK-NEXT: end_try +# CHECK-NEXT: i32.const 0 +# CHECK-NEXT: throw 0 +# CHECK-NEXT: .LBB0_5: # CHECK-NEXT: global.get __stack_pointer@GLOBAL # CHECK-NEXT: end_function Index: test/Transforms/IPConstantProp/arg-count-mismatch.ll =================================================================== --- /dev/null +++ test/Transforms/IPConstantProp/arg-count-mismatch.ll @@ -0,0 +1,72 @@ +; RUN: opt < %s -ipconstprop -S -o - | FileCheck %s + +; The original C source looked like this: +; +; long long a101, b101, e101; +; volatile long c101; +; int d101; +; +; static inline int bar(p1, p2) +; { +; return 0; +; } +; +; void foo(unsigned p1) +; { +; long long *f = &b101, *g = &e101; +; c101 = 0; +; (void)((*f |= a101) - (*g = bar(d101))); +; c101 = (*f |= a101 &= p1) == d101; +; } +; +; When compiled with Clang it gives a warning +; warning: too few arguments in call to 'bar' +; +; This ll reproducer has been reduced to only include tha call. +; +; Note that -lint will report this as UB, but it passes -verify. 
+ +; This test is just to verify that we do not crash/assert due to mismatch in +; argument count between the caller and callee. + +define dso_local void @foo(i16 %a) { +; CHECK-LABEL: @foo( +; CHECK-NEXT: [[CALL:%.*]] = call i16 bitcast (i16 (i16, i16)* @bar to i16 (i16)*)(i16 [[A:%.*]]) +; CHECK-NEXT: ret void +; + %call = call i16 bitcast (i16 (i16, i16) * @bar to i16 (i16) *)(i16 %a) + ret void +} + +define internal i16 @bar(i16 %p1, i16 %p2) { +; CHECK-LABEL: @bar( +; CHECK-NEXT: ret i16 0 +; + ret i16 0 +} + +;------------------------------------------------------------------------------- +; Additional tests to verify that we still optimize when having a mismatch +; in argument count due to varargs (as long as all non-variadic arguments have +; been provided), + +define dso_local void @vararg_tests(i16 %a) { + %call1 = call i16 (i16, ...) @vararg_prop(i16 7, i16 8, i16 %a) + %call2 = call i16 bitcast (i16 (i16, i16, ...) * @vararg_no_prop to i16 (i16) *) (i16 7) + ret void +} + +define internal i16 @vararg_prop(i16 %p1, ...) { +; CHECK-LABEL: define internal i16 @vararg_prop( +; CHECK-NEXT: ret i16 7 +; + ret i16 %p1 +} + +define internal i16 @vararg_no_prop(i16 %p1, i16 %p2, ...) { +; CHECK-LABEL: define internal i16 @vararg_no_prop( +; CHECK-NEXT: ret i16 [[P1:%.*]] +; + ret i16 %p1 +} + Index: test/Transforms/IPConstantProp/arg-type-mismatch.ll =================================================================== --- /dev/null +++ test/Transforms/IPConstantProp/arg-type-mismatch.ll @@ -0,0 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -ipconstprop -S -o - | FileCheck %s + +; This test is just to verify that we do not crash/assert due to mismatch in +; argument type between the caller and callee. 
+ +define dso_local void @foo(i16 %a) { +; CHECK-LABEL: @foo( +; CHECK-NEXT: [[CALL:%.*]] = call i16 bitcast (i16 (i16, i16)* @bar to i16 (i16, i32)*)(i16 [[A:%.*]], i32 7) +; CHECK-NEXT: ret void +; + %call = call i16 bitcast (i16 (i16, i16) * @bar to i16 (i16, i32) *)(i16 %a, i32 7) + ret void +} + +define internal i16 @bar(i16 %p1, i16 %p2) { +; CHECK-LABEL: @bar( +; CHECK-NEXT: ret i16 [[P2:%.*]] +; + ret i16 %p2 +} + + Index: test/Transforms/InstCombine/X86/addcarry.ll =================================================================== --- /dev/null +++ test/Transforms/InstCombine/X86/addcarry.ll @@ -0,0 +1,36 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -instcombine -S | FileCheck %s + +declare { i8, i32 } @llvm.x86.addcarry.32(i8, i32, i32) +declare { i8, i64 } @llvm.x86.addcarry.64(i8, i64, i64) + +define i32 @no_carryin_i32(i32 %x, i32 %y, i8* %p) { +; CHECK-LABEL: @no_carryin_i32( +; CHECK-NEXT: [[S:%.*]] = call { i8, i32 } @llvm.x86.addcarry.32(i8 0, i32 [[X:%.*]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[OV:%.*]] = extractvalue { i8, i32 } [[S]], 0 +; CHECK-NEXT: store i8 [[OV]], i8* [[P:%.*]], align 1 +; CHECK-NEXT: [[R:%.*]] = extractvalue { i8, i32 } [[S]], 1 +; CHECK-NEXT: ret i32 [[R]] +; + %s = call { i8, i32 } @llvm.x86.addcarry.32(i8 0, i32 %x, i32 %y) + %ov = extractvalue { i8, i32 } %s, 0 + store i8 %ov, i8* %p + %r = extractvalue { i8, i32 } %s, 1 + ret i32 %r +} + +define i64 @no_carryin_i64(i64 %x, i64 %y, i8* %p) { +; CHECK-LABEL: @no_carryin_i64( +; CHECK-NEXT: [[S:%.*]] = call { i8, i64 } @llvm.x86.addcarry.64(i8 0, i64 [[X:%.*]], i64 [[Y:%.*]]) +; CHECK-NEXT: [[OV:%.*]] = extractvalue { i8, i64 } [[S]], 0 +; CHECK-NEXT: store i8 [[OV]], i8* [[P:%.*]], align 1 +; CHECK-NEXT: [[R:%.*]] = extractvalue { i8, i64 } [[S]], 1 +; CHECK-NEXT: ret i64 [[R]] +; + %s = call { i8, i64 } @llvm.x86.addcarry.64(i8 0, i64 %x, i64 %y) + %ov = extractvalue { i8, i64 } %s, 0 + store i8 %ov, i8* %p + %r = 
extractvalue { i8, i64 } %s, 1 + ret i64 %r +} + Index: test/Transforms/InstCombine/apint-shift.ll =================================================================== --- test/Transforms/InstCombine/apint-shift.ll +++ test/Transforms/InstCombine/apint-shift.ll @@ -1,11 +1,9 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py -; This test makes sure that shift instructions are properly eliminated -; even with arbitrary precision integers. ; RUN: opt < %s -instcombine -S | FileCheck %s define i55 @test6(i55 %A) { ; CHECK-LABEL: @test6( -; CHECK-NEXT: [[C:%.*]] = mul i55 %A, 6 +; CHECK-NEXT: [[C:%.*]] = mul i55 [[A:%.*]], 6 ; CHECK-NEXT: ret i55 [[C]] ; %B = shl i55 %A, 1 @@ -17,7 +15,7 @@ define i55 @test6a(i55 %A) { ; CHECK-LABEL: @test6a( -; CHECK-NEXT: [[C:%.*]] = mul i55 %A, 6 +; CHECK-NEXT: [[C:%.*]] = mul i55 [[A:%.*]], 6 ; CHECK-NEXT: ret i55 [[C]] ; %B = mul i55 %A, 3 @@ -29,7 +27,7 @@ define <2 x i55> @test6a_vec(<2 x i55> %A) { ; CHECK-LABEL: @test6a_vec( -; CHECK-NEXT: [[C:%.*]] = mul <2 x i55> %A, +; CHECK-NEXT: [[C:%.*]] = mul <2 x i55> [[A:%.*]], ; CHECK-NEXT: ret <2 x i55> [[C]] ; %B = mul <2 x i55> %A, @@ -57,7 +55,7 @@ define i17 @test9(i17 %A) { ; CHECK-LABEL: @test9( -; CHECK-NEXT: [[B:%.*]] = and i17 %A, 1 +; CHECK-NEXT: [[B:%.*]] = and i17 [[A:%.*]], 1 ; CHECK-NEXT: ret i17 [[B]] ; %B = shl i17 %A, 16 @@ -69,7 +67,7 @@ define i19 @test10(i19 %X) { ; CHECK-LABEL: @test10( -; CHECK-NEXT: [[SH1:%.*]] = and i19 %X, -262144 +; CHECK-NEXT: [[SH1:%.*]] = and i19 [[X:%.*]], -262144 ; CHECK-NEXT: ret i19 [[SH1]] ; %sh1 = lshr i19 %X, 18 @@ -82,7 +80,7 @@ define <2 x i19> @lshr_lshr_splat_vec(<2 x i19> %X) { ; CHECK-LABEL: @lshr_lshr_splat_vec( -; CHECK-NEXT: [[SH1:%.*]] = lshr <2 x i19> %X, +; CHECK-NEXT: [[SH1:%.*]] = lshr <2 x i19> [[X:%.*]], ; CHECK-NEXT: ret <2 x i19> [[SH1]] ; %sh1 = lshr <2 x i19> %X, @@ -92,8 +90,8 @@ define i9 @multiuse_lshr_lshr(i9 %x) { ; CHECK-LABEL: @multiuse_lshr_lshr( -; CHECK-NEXT: [[SH1:%.*]] = 
lshr i9 %x, 2 -; CHECK-NEXT: [[SH2:%.*]] = lshr i9 %x, 5 +; CHECK-NEXT: [[SH1:%.*]] = lshr i9 [[X:%.*]], 2 +; CHECK-NEXT: [[SH2:%.*]] = lshr i9 [[X]], 5 ; CHECK-NEXT: [[MUL:%.*]] = mul i9 [[SH1]], [[SH2]] ; CHECK-NEXT: ret i9 [[MUL]] ; @@ -105,8 +103,8 @@ define <2 x i9> @multiuse_lshr_lshr_splat(<2 x i9> %x) { ; CHECK-LABEL: @multiuse_lshr_lshr_splat( -; CHECK-NEXT: [[SH1:%.*]] = lshr <2 x i9> %x, -; CHECK-NEXT: [[SH2:%.*]] = lshr <2 x i9> %x, +; CHECK-NEXT: [[SH1:%.*]] = lshr <2 x i9> [[X:%.*]], +; CHECK-NEXT: [[SH2:%.*]] = lshr <2 x i9> [[X]], ; CHECK-NEXT: [[MUL:%.*]] = mul <2 x i9> [[SH1]], [[SH2]] ; CHECK-NEXT: ret <2 x i9> [[MUL]] ; @@ -121,7 +119,7 @@ define <2 x i19> @shl_shl_splat_vec(<2 x i19> %X) { ; CHECK-LABEL: @shl_shl_splat_vec( -; CHECK-NEXT: [[SH1:%.*]] = shl <2 x i19> %X, +; CHECK-NEXT: [[SH1:%.*]] = shl <2 x i19> [[X:%.*]], ; CHECK-NEXT: ret <2 x i19> [[SH1]] ; %sh1 = shl <2 x i19> %X, @@ -131,8 +129,8 @@ define i42 @multiuse_shl_shl(i42 %x) { ; CHECK-LABEL: @multiuse_shl_shl( -; CHECK-NEXT: [[SH1:%.*]] = shl i42 %x, 8 -; CHECK-NEXT: [[SH2:%.*]] = shl i42 %x, 17 +; CHECK-NEXT: [[SH1:%.*]] = shl i42 [[X:%.*]], 8 +; CHECK-NEXT: [[SH2:%.*]] = shl i42 [[X]], 17 ; CHECK-NEXT: [[MUL:%.*]] = mul i42 [[SH1]], [[SH2]] ; CHECK-NEXT: ret i42 [[MUL]] ; @@ -144,8 +142,8 @@ define <2 x i42> @multiuse_shl_shl_splat(<2 x i42> %x) { ; CHECK-LABEL: @multiuse_shl_shl_splat( -; CHECK-NEXT: [[SH1:%.*]] = shl <2 x i42> %x, -; CHECK-NEXT: [[SH2:%.*]] = shl <2 x i42> %x, +; CHECK-NEXT: [[SH1:%.*]] = shl <2 x i42> [[X:%.*]], +; CHECK-NEXT: [[SH2:%.*]] = shl <2 x i42> [[X]], ; CHECK-NEXT: [[MUL:%.*]] = mul <2 x i42> [[SH1]], [[SH2]] ; CHECK-NEXT: ret <2 x i42> [[MUL]] ; @@ -160,7 +158,7 @@ define <2 x i19> @eq_shl_lshr_splat_vec(<2 x i19> %X) { ; CHECK-LABEL: @eq_shl_lshr_splat_vec( -; CHECK-NEXT: [[SH1:%.*]] = and <2 x i19> %X, +; CHECK-NEXT: [[SH1:%.*]] = and <2 x i19> [[X:%.*]], ; CHECK-NEXT: ret <2 x i19> [[SH1]] ; %sh1 = shl <2 x i19> %X, @@ -173,7 +171,7 @@ define 
<2 x i19> @eq_lshr_shl_splat_vec(<2 x i19> %X) { ; CHECK-LABEL: @eq_lshr_shl_splat_vec( -; CHECK-NEXT: [[SH1:%.*]] = and <2 x i19> %X, +; CHECK-NEXT: [[SH1:%.*]] = and <2 x i19> [[X:%.*]], ; CHECK-NEXT: ret <2 x i19> [[SH1]] ; %sh1 = lshr <2 x i19> %X, @@ -186,7 +184,7 @@ define <2 x i7> @lshr_shl_splat_vec(<2 x i7> %X) { ; CHECK-LABEL: @lshr_shl_splat_vec( -; CHECK-NEXT: [[MUL:%.*]] = mul <2 x i7> %X, +; CHECK-NEXT: [[MUL:%.*]] = mul <2 x i7> [[X:%.*]], ; CHECK-NEXT: [[SH1:%.*]] = lshr exact <2 x i7> [[MUL]], ; CHECK-NEXT: ret <2 x i7> [[SH1]] ; @@ -201,7 +199,7 @@ define <2 x i7> @shl_lshr_splat_vec(<2 x i7> %X) { ; CHECK-LABEL: @shl_lshr_splat_vec( -; CHECK-NEXT: [[DIV:%.*]] = udiv <2 x i7> %X, +; CHECK-NEXT: [[DIV:%.*]] = udiv <2 x i7> [[X:%.*]], ; CHECK-NEXT: [[SH1:%.*]] = shl nuw nsw <2 x i7> [[DIV]], ; CHECK-NEXT: ret <2 x i7> [[SH1]] ; @@ -212,25 +210,26 @@ } ; Don't hide the shl from scalar evolution. DAGCombine will get it. -define i23 @test11(i23 %A) { + +define i23 @test11(i23 %x) { ; CHECK-LABEL: @test11( -; CHECK-NEXT: [[A:%.*]] = mul i23 %A, 3 +; CHECK-NEXT: [[A:%.*]] = mul i23 [[X:%.*]], 3 ; CHECK-NEXT: [[B:%.*]] = lshr i23 [[A]], 11 ; CHECK-NEXT: [[C:%.*]] = shl i23 [[B]], 12 ; CHECK-NEXT: ret i23 [[C]] ; - %a = mul i23 %A, 3 - %B = lshr i23 %a, 11 - %C = shl i23 %B, 12 - ret i23 %C + %a = mul i23 %x, 3 + %b = lshr i23 %a, 11 + %c = shl i23 %b, 12 + ret i23 %c } ; shl (ashr X, C), C --> and X, C' define i47 @test12(i47 %X) { ; CHECK-LABEL: @test12( -; CHECK-NEXT: [[SH11:%.*]] = and i47 %X, -256 -; CHECK-NEXT: ret i47 [[SH11]] +; CHECK-NEXT: [[TMP1:%.*]] = and i47 [[X:%.*]], -256 +; CHECK-NEXT: ret i47 [[TMP1]] ; %sh1 = ashr i47 %X, 8 %sh2 = shl i47 %sh1, 8 @@ -239,7 +238,7 @@ define <2 x i47> @test12_splat_vec(<2 x i47> %X) { ; CHECK-LABEL: @test12_splat_vec( -; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i47> %X, +; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i47> [[X:%.*]], ; CHECK-NEXT: ret <2 x i47> [[TMP1]] ; %sh1 = ashr <2 x i47> %X, @@ -248,22 +247,23 @@ } 
; Don't hide the shl from scalar evolution. DAGCombine will get it. -define i18 @test13(i18 %A) { + +define i18 @test13(i18 %x) { ; CHECK-LABEL: @test13( -; CHECK-NEXT: [[A:%.*]] = mul i18 %A, 3 -; CHECK-NEXT: [[B1:%.*]] = lshr i18 [[A]], 8 -; CHECK-NEXT: [[C:%.*]] = shl i18 [[B1]], 9 +; CHECK-NEXT: [[A:%.*]] = mul i18 [[X:%.*]], 3 +; CHECK-NEXT: [[TMP1:%.*]] = lshr i18 [[A]], 8 +; CHECK-NEXT: [[C:%.*]] = shl i18 [[TMP1]], 9 ; CHECK-NEXT: ret i18 [[C]] ; - %a = mul i18 %A, 3 - %B = ashr i18 %a, 8 - %C = shl i18 %B, 9 - ret i18 %C + %a = mul i18 %x, 3 + %b = ashr i18 %a, 8 + %c = shl i18 %b, 9 + ret i18 %c } define i35 @test14(i35 %A) { ; CHECK-LABEL: @test14( -; CHECK-NEXT: [[B:%.*]] = and i35 %A, -19760 +; CHECK-NEXT: [[B:%.*]] = and i35 [[A:%.*]], -19760 ; CHECK-NEXT: [[C:%.*]] = or i35 [[B]], 19744 ; CHECK-NEXT: ret i35 [[C]] ; @@ -275,7 +275,7 @@ define i79 @test14a(i79 %A) { ; CHECK-LABEL: @test14a( -; CHECK-NEXT: [[C:%.*]] = and i79 %A, 77 +; CHECK-NEXT: [[C:%.*]] = and i79 [[A:%.*]], 77 ; CHECK-NEXT: ret i79 [[C]] ; %B = shl i79 %A, 4 @@ -286,7 +286,7 @@ define i45 @test15(i1 %C) { ; CHECK-LABEL: @test15( -; CHECK-NEXT: [[A:%.*]] = select i1 %C, i45 12, i45 4 +; CHECK-NEXT: [[A:%.*]] = select i1 [[C:%.*]], i45 12, i45 4 ; CHECK-NEXT: ret i45 [[A]] ; %A = select i1 %C, i45 3, i45 1 @@ -296,7 +296,7 @@ define i53 @test15a(i1 %X) { ; CHECK-LABEL: @test15a( -; CHECK-NEXT: [[V:%.*]] = select i1 %X, i53 512, i53 128 +; CHECK-NEXT: [[V:%.*]] = select i1 [[X:%.*]], i53 512, i53 128 ; CHECK-NEXT: ret i53 [[V]] ; %A = select i1 %X, i8 3, i8 1 @@ -307,7 +307,7 @@ define i1 @test16(i84 %X) { ; CHECK-LABEL: @test16( -; CHECK-NEXT: [[AND:%.*]] = and i84 %X, 16 +; CHECK-NEXT: [[AND:%.*]] = and i84 [[X:%.*]], 16 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i84 [[AND]], 0 ; CHECK-NEXT: ret i1 [[CMP]] ; @@ -331,7 +331,7 @@ define i1 @test17(i106 %A) { ; CHECK-LABEL: @test17( -; CHECK-NEXT: [[B_MASK:%.*]] = and i106 %A, -8 +; CHECK-NEXT: [[B_MASK:%.*]] = and i106 [[A:%.*]], -8 ; 
CHECK-NEXT: [[C:%.*]] = icmp eq i106 [[B_MASK]], 9872 ; CHECK-NEXT: ret i1 [[C]] ; @@ -342,7 +342,7 @@ define <2 x i1> @test17vec(<2 x i106> %A) { ; CHECK-LABEL: @test17vec( -; CHECK-NEXT: [[B_MASK:%.*]] = and <2 x i106> %A, +; CHECK-NEXT: [[B_MASK:%.*]] = and <2 x i106> [[A:%.*]], ; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i106> [[B_MASK]], ; CHECK-NEXT: ret <2 x i1> [[C]] ; @@ -362,7 +362,7 @@ define i1 @test19(i37 %A) { ; CHECK-LABEL: @test19( -; CHECK-NEXT: [[C:%.*]] = icmp ult i37 %A, 4 +; CHECK-NEXT: [[C:%.*]] = icmp ult i37 [[A:%.*]], 4 ; CHECK-NEXT: ret i1 [[C]] ; %B = ashr i37 %A, 2 @@ -372,7 +372,7 @@ define <2 x i1> @test19vec(<2 x i37> %A) { ; CHECK-LABEL: @test19vec( -; CHECK-NEXT: [[C:%.*]] = icmp ult <2 x i37> %A, +; CHECK-NEXT: [[C:%.*]] = icmp ult <2 x i37> [[A:%.*]], ; CHECK-NEXT: ret <2 x i1> [[C]] ; %B = ashr <2 x i37> %A, @@ -382,7 +382,7 @@ define i1 @test19a(i39 %A) { ; CHECK-LABEL: @test19a( -; CHECK-NEXT: [[C:%.*]] = icmp ugt i39 %A, -5 +; CHECK-NEXT: [[C:%.*]] = icmp ugt i39 [[A:%.*]], -5 ; CHECK-NEXT: ret i1 [[C]] ; %B = ashr i39 %A, 2 @@ -392,7 +392,7 @@ define <2 x i1> @test19a_vec(<2 x i39> %A) { ; CHECK-LABEL: @test19a_vec( -; CHECK-NEXT: [[C:%.*]] = icmp ugt <2 x i39> %A, +; CHECK-NEXT: [[C:%.*]] = icmp ugt <2 x i39> [[A:%.*]], ; CHECK-NEXT: ret <2 x i1> [[C]] ; %B = ashr <2 x i39> %A, @@ -411,7 +411,7 @@ define i1 @test21(i12 %A) { ; CHECK-LABEL: @test21( -; CHECK-NEXT: [[B_MASK:%.*]] = and i12 %A, 63 +; CHECK-NEXT: [[B_MASK:%.*]] = and i12 [[A:%.*]], 63 ; CHECK-NEXT: [[C:%.*]] = icmp eq i12 [[B_MASK]], 62 ; CHECK-NEXT: ret i1 [[C]] ; @@ -422,7 +422,7 @@ define i1 @test22(i14 %A) { ; CHECK-LABEL: @test22( -; CHECK-NEXT: [[B_MASK:%.*]] = and i14 %A, 127 +; CHECK-NEXT: [[B_MASK:%.*]] = and i14 [[A:%.*]], 127 ; CHECK-NEXT: [[C:%.*]] = icmp eq i14 [[B_MASK]], 0 ; CHECK-NEXT: ret i1 [[C]] ; @@ -433,7 +433,7 @@ define i11 @test23(i44 %A) { ; CHECK-LABEL: @test23( -; CHECK-NEXT: [[D:%.*]] = trunc i44 %A to i11 +; CHECK-NEXT: [[D:%.*]] = trunc 
i44 [[A:%.*]] to i11 ; CHECK-NEXT: ret i11 [[D]] ; %B = shl i44 %A, 33 @@ -446,8 +446,8 @@ define i44 @shl_lshr_eq_amt_multi_use(i44 %A) { ; CHECK-LABEL: @shl_lshr_eq_amt_multi_use( -; CHECK-NEXT: [[B:%.*]] = shl i44 %A, 33 -; CHECK-NEXT: [[C:%.*]] = and i44 %A, 2047 +; CHECK-NEXT: [[B:%.*]] = shl i44 [[A:%.*]], 33 +; CHECK-NEXT: [[C:%.*]] = and i44 [[A]], 2047 ; CHECK-NEXT: [[D:%.*]] = or i44 [[B]], [[C]] ; CHECK-NEXT: ret i44 [[D]] ; @@ -461,8 +461,8 @@ define <2 x i44> @shl_lshr_eq_amt_multi_use_splat_vec(<2 x i44> %A) { ; CHECK-LABEL: @shl_lshr_eq_amt_multi_use_splat_vec( -; CHECK-NEXT: [[B:%.*]] = shl <2 x i44> %A, -; CHECK-NEXT: [[C:%.*]] = and <2 x i44> %A, +; CHECK-NEXT: [[B:%.*]] = shl <2 x i44> [[A:%.*]], +; CHECK-NEXT: [[C:%.*]] = and <2 x i44> [[A]], ; CHECK-NEXT: [[D:%.*]] = or <2 x i44> [[B]], [[C]] ; CHECK-NEXT: ret <2 x i44> [[D]] ; @@ -476,8 +476,8 @@ define i43 @lshr_shl_eq_amt_multi_use(i43 %A) { ; CHECK-LABEL: @lshr_shl_eq_amt_multi_use( -; CHECK-NEXT: [[B:%.*]] = lshr i43 %A, 23 -; CHECK-NEXT: [[C:%.*]] = and i43 %A, -8388608 +; CHECK-NEXT: [[B:%.*]] = lshr i43 [[A:%.*]], 23 +; CHECK-NEXT: [[C:%.*]] = and i43 [[A]], -8388608 ; CHECK-NEXT: [[D:%.*]] = mul i43 [[B]], [[C]] ; CHECK-NEXT: ret i43 [[D]] ; @@ -491,8 +491,8 @@ define <2 x i43> @lshr_shl_eq_amt_multi_use_splat_vec(<2 x i43> %A) { ; CHECK-LABEL: @lshr_shl_eq_amt_multi_use_splat_vec( -; CHECK-NEXT: [[B:%.*]] = lshr <2 x i43> %A, -; CHECK-NEXT: [[C:%.*]] = and <2 x i43> %A, +; CHECK-NEXT: [[B:%.*]] = lshr <2 x i43> [[A:%.*]], +; CHECK-NEXT: [[C:%.*]] = and <2 x i43> [[A]], ; CHECK-NEXT: [[D:%.*]] = mul <2 x i43> [[B]], [[C]] ; CHECK-NEXT: ret <2 x i43> [[D]] ; @@ -504,8 +504,8 @@ define i37 @test25(i37 %tmp.2, i37 %AA) { ; CHECK-LABEL: @test25( -; CHECK-NEXT: [[TMP_3:%.*]] = and i37 %tmp.2, -131072 -; CHECK-NEXT: [[X2:%.*]] = add i37 [[TMP_3]], %AA +; CHECK-NEXT: [[TMP_3:%.*]] = and i37 [[TMP_2:%.*]], -131072 +; CHECK-NEXT: [[X2:%.*]] = add i37 [[TMP_3]], [[AA:%.*]] ; CHECK-NEXT: 
[[TMP_6:%.*]] = and i37 [[X2]], -131072 ; CHECK-NEXT: ret i37 [[TMP_6]] ; @@ -518,7 +518,7 @@ define i40 @test26(i40 %A) { ; CHECK-LABEL: @test26( -; CHECK-NEXT: [[B:%.*]] = and i40 %A, -2 +; CHECK-NEXT: [[B:%.*]] = and i40 [[A:%.*]], -2 ; CHECK-NEXT: ret i40 [[B]] ; %B = lshr i40 %A, 1 Index: test/Transforms/InstCombine/fmul-exp.ll =================================================================== --- /dev/null +++ test/Transforms/InstCombine/fmul-exp.ll @@ -0,0 +1,89 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -S -instcombine < %s | FileCheck %s + +declare double @llvm.exp.f64(double) nounwind readnone speculatable +declare void @use(double) + +; exp(a) * exp(b) no reassoc flags +define double @exp_a_exp_b(double %a, double %b) { +; CHECK-LABEL: @exp_a_exp_b( +; CHECK-NEXT: [[TMP:%.*]] = call double @llvm.exp.f64(double [[A:%.*]]) +; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.exp.f64(double [[B:%.*]]) +; CHECK-NEXT: [[MUL:%.*]] = fmul double [[TMP]], [[TMP1]] +; CHECK-NEXT: ret double [[MUL]] +; + %tmp = call double @llvm.exp.f64(double %a) + %tmp1 = call double @llvm.exp.f64(double %b) + %mul = fmul double %tmp, %tmp1 + ret double %mul +} + +; exp(a) * exp(b) reassoc, multiple uses +define double @exp_a_exp_b_multiple_uses(double %a, double %b) { +; CHECK-LABEL: @exp_a_exp_b_multiple_uses( +; CHECK-NEXT: [[TMP:%.*]] = call double @llvm.exp.f64(double [[A:%.*]]) +; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.exp.f64(double [[B:%.*]]) +; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[TMP]], [[TMP1]] +; CHECK-NEXT: call void @use(double [[TMP1]]) +; CHECK-NEXT: ret double [[MUL]] +; + %tmp = call double @llvm.exp.f64(double %a) + %tmp1 = call double @llvm.exp.f64(double %b) + %mul = fmul reassoc double %tmp, %tmp1 + call void @use(double %tmp1) + ret double %mul +} + +; exp(a) * exp(b) reassoc, both with multiple uses +define double @exp_a_exp_b_multiple_uses_both(double %a, double %b) { +; CHECK-LABEL: 
@exp_a_exp_b_multiple_uses_both( +; CHECK-NEXT: [[TMP:%.*]] = call double @llvm.exp.f64(double [[A:%.*]]) +; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.exp.f64(double [[B:%.*]]) +; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[TMP]], [[TMP1]] +; CHECK-NEXT: call void @use(double [[TMP]]) +; CHECK-NEXT: call void @use(double [[TMP1]]) +; CHECK-NEXT: ret double [[MUL]] +; + %tmp = call double @llvm.exp.f64(double %a) + %tmp1 = call double @llvm.exp.f64(double %b) + %mul = fmul reassoc double %tmp, %tmp1 + call void @use(double %tmp) + call void @use(double %tmp1) + ret double %mul +} + +; exp(a) * exp(b) => exp(a+b) with reassoc +define double @exp_a_exp_b_reassoc(double %a, double %b) { +; CHECK-LABEL: @exp_a_exp_b_reassoc( +; CHECK-NEXT: [[TMP:%.*]] = call double @llvm.exp.f64(double [[A:%.*]]) +; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.exp.f64(double [[B:%.*]]) +; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[TMP]], [[TMP1]] +; CHECK-NEXT: ret double [[MUL]] +; + %tmp = call double @llvm.exp.f64(double %a) + %tmp1 = call double @llvm.exp.f64(double %b) + %mul = fmul reassoc double %tmp, %tmp1 + ret double %mul +} + +; exp(a) * exp(b) * exp(c) * exp(d) => exp(a+b+c+d) with reassoc +define double @exp_a_exp_b_exp_c_exp_d_fast(double %a, double %b, double %c, double %d) { +; CHECK-LABEL: @exp_a_exp_b_exp_c_exp_d_fast( +; CHECK-NEXT: [[TMP:%.*]] = call double @llvm.exp.f64(double [[A:%.*]]) +; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.exp.f64(double [[B:%.*]]) +; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[TMP]], [[TMP1]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.exp.f64(double [[C:%.*]]) +; CHECK-NEXT: [[MUL1:%.*]] = fmul reassoc double [[MUL]], [[TMP2]] +; CHECK-NEXT: [[TMP3:%.*]] = call double @llvm.exp.f64(double [[D:%.*]]) +; CHECK-NEXT: [[MUL2:%.*]] = fmul reassoc double [[MUL1]], [[TMP3]] +; CHECK-NEXT: ret double [[MUL2]] +; + %tmp = call double @llvm.exp.f64(double %a) + %tmp1 = call double @llvm.exp.f64(double %b) + %mul = fmul 
reassoc double %tmp, %tmp1 + %tmp2 = call double @llvm.exp.f64(double %c) + %mul1 = fmul reassoc double %mul, %tmp2 + %tmp3 = call double @llvm.exp.f64(double %d) + %mul2 = fmul reassoc double %mul1, %tmp3 + ret double %mul2 +} Index: test/Transforms/InstCombine/fmul-exp2.ll =================================================================== --- /dev/null +++ test/Transforms/InstCombine/fmul-exp2.ll @@ -0,0 +1,89 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -S -instcombine < %s | FileCheck %s + +declare double @llvm.exp2.f64(double) nounwind readnone speculatable +declare void @use(double) + +; exp2(a) * exp2(b) no reassoc flags +define double @exp2_a_exp2_b(double %a, double %b) { +; CHECK-LABEL: @exp2_a_exp2_b( +; CHECK-NEXT: [[TMP:%.*]] = call double @llvm.exp2.f64(double [[A:%.*]]) +; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.exp2.f64(double [[B:%.*]]) +; CHECK-NEXT: [[MUL:%.*]] = fmul double [[TMP]], [[TMP1]] +; CHECK-NEXT: ret double [[MUL]] +; + %tmp = call double @llvm.exp2.f64(double %a) + %tmp1 = call double @llvm.exp2.f64(double %b) + %mul = fmul double %tmp, %tmp1 + ret double %mul +} + +; exp2(a) * exp2(b) reassoc, multiple uses +define double @exp2_a_exp2_b_multiple_uses(double %a, double %b) { +; CHECK-LABEL: @exp2_a_exp2_b_multiple_uses( +; CHECK-NEXT: [[TMP:%.*]] = call double @llvm.exp2.f64(double [[A:%.*]]) +; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.exp2.f64(double [[B:%.*]]) +; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[TMP]], [[TMP1]] +; CHECK-NEXT: call void @use(double [[TMP1]]) +; CHECK-NEXT: ret double [[MUL]] +; + %tmp = call double @llvm.exp2.f64(double %a) + %tmp1 = call double @llvm.exp2.f64(double %b) + %mul = fmul reassoc double %tmp, %tmp1 + call void @use(double %tmp1) + ret double %mul +} + +; exp2(a) * exp2(b) reassoc, both with multiple uses +define double @exp2_a_exp2_b_multiple_uses_both(double %a, double %b) { +; CHECK-LABEL: @exp2_a_exp2_b_multiple_uses_both( +; 
CHECK-NEXT: [[TMP:%.*]] = call double @llvm.exp2.f64(double [[A:%.*]]) +; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.exp2.f64(double [[B:%.*]]) +; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[TMP]], [[TMP1]] +; CHECK-NEXT: call void @use(double [[TMP]]) +; CHECK-NEXT: call void @use(double [[TMP1]]) +; CHECK-NEXT: ret double [[MUL]] +; + %tmp = call double @llvm.exp2.f64(double %a) + %tmp1 = call double @llvm.exp2.f64(double %b) + %mul = fmul reassoc double %tmp, %tmp1 + call void @use(double %tmp) + call void @use(double %tmp1) + ret double %mul +} + +; exp2(a) * exp2(b) => exp2(a+b) with reassoc +define double @exp2_a_exp2_b_reassoc(double %a, double %b) { +; CHECK-LABEL: @exp2_a_exp2_b_reassoc( +; CHECK-NEXT: [[TMP:%.*]] = call double @llvm.exp2.f64(double [[A:%.*]]) +; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.exp2.f64(double [[B:%.*]]) +; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[TMP]], [[TMP1]] +; CHECK-NEXT: ret double [[MUL]] +; + %tmp = call double @llvm.exp2.f64(double %a) + %tmp1 = call double @llvm.exp2.f64(double %b) + %mul = fmul reassoc double %tmp, %tmp1 + ret double %mul +} + +; exp2(a) * exp2(b) * exp2(c) * exp2(d) => exp2(a+b+c+d) with reassoc +define double @exp2_a_exp2_b_exp2_c_exp2_d(double %a, double %b, double %c, double %d) { +; CHECK-LABEL: @exp2_a_exp2_b_exp2_c_exp2_d( +; CHECK-NEXT: [[TMP:%.*]] = call double @llvm.exp2.f64(double [[A:%.*]]) +; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.exp2.f64(double [[B:%.*]]) +; CHECK-NEXT: [[MUL:%.*]] = fmul reassoc double [[TMP]], [[TMP1]] +; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.exp2.f64(double [[C:%.*]]) +; CHECK-NEXT: [[MUL1:%.*]] = fmul reassoc double [[MUL]], [[TMP2]] +; CHECK-NEXT: [[TMP3:%.*]] = call double @llvm.exp2.f64(double [[D:%.*]]) +; CHECK-NEXT: [[MUL2:%.*]] = fmul reassoc double [[MUL1]], [[TMP3]] +; CHECK-NEXT: ret double [[MUL2]] +; + %tmp = call double @llvm.exp2.f64(double %a) + %tmp1 = call double @llvm.exp2.f64(double %b) + %mul = fmul reassoc double 
%tmp, %tmp1 + %tmp2 = call double @llvm.exp2.f64(double %c) + %mul1 = fmul reassoc double %mul, %tmp2 + %tmp3 = call double @llvm.exp2.f64(double %d) + %mul2 = fmul reassoc double %mul1, %tmp3 + ret double %mul2 +} Index: test/Transforms/InstCombine/saturating-add-sub.ll =================================================================== --- test/Transforms/InstCombine/saturating-add-sub.ll +++ test/Transforms/InstCombine/saturating-add-sub.ll @@ -655,9 +655,9 @@ define i32 @uadd_sat_constant_commute(i32 %x) { ; CHECK-LABEL: @uadd_sat_constant_commute( ; CHECK-NEXT: [[A:%.*]] = add i32 [[X:%.*]], 42 -; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[X]], -43 -; CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 -1 -; CHECK-NEXT: ret i32 [[R]] +; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i32 [[X]], -43 +; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 -1, i32 [[A]] +; CHECK-NEXT: ret i32 [[TMP2]] ; %a = add i32 %x, 42 %c = icmp ult i32 %x, -43 @@ -681,9 +681,9 @@ define <4 x i32> @uadd_sat_constant_vec_commute(<4 x i32> %x) { ; CHECK-LABEL: @uadd_sat_constant_vec_commute( ; CHECK-NEXT: [[A:%.*]] = add <4 x i32> [[X:%.*]], -; CHECK-NEXT: [[C:%.*]] = icmp ult <4 x i32> [[X]], -; CHECK-NEXT: [[R:%.*]] = select <4 x i1> [[C]], <4 x i32> [[A]], <4 x i32> -; CHECK-NEXT: ret <4 x i32> [[R]] +; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt <4 x i32> [[X]], +; CHECK-NEXT: [[TMP2:%.*]] = select <4 x i1> [[TMP1]], <4 x i32> , <4 x i32> [[A]] +; CHECK-NEXT: ret <4 x i32> [[TMP2]] ; %a = add <4 x i32> %x, %c = icmp ult <4 x i32> %x, Index: test/Transforms/InstCombine/zext-bool-add-sub.ll =================================================================== --- test/Transforms/InstCombine/zext-bool-add-sub.ll +++ test/Transforms/InstCombine/zext-bool-add-sub.ll @@ -315,3 +315,90 @@ ret i8 %sub } +define i32 @sextbool_add(i1 %c, i32 %x) { +; CHECK-LABEL: @sextbool_add( +; CHECK-NEXT: [[B:%.*]] = sext i1 [[C:%.*]] to i32 +; CHECK-NEXT: [[S:%.*]] = add i32 [[B]], [[X:%.*]] +; CHECK-NEXT: ret i32 
[[S]] +; + %b = sext i1 %c to i32 + %s = add i32 %b, %x + ret i32 %s +} + +define i32 @sextbool_add_commute(i1 %c, i32 %px) { +; CHECK-LABEL: @sextbool_add_commute( +; CHECK-NEXT: [[X:%.*]] = urem i32 [[PX:%.*]], 42 +; CHECK-NEXT: [[B:%.*]] = sext i1 [[C:%.*]] to i32 +; CHECK-NEXT: [[S:%.*]] = add nsw i32 [[X]], [[B]] +; CHECK-NEXT: ret i32 [[S]] +; + %x = urem i32 %px, 42 ; thwart complexity-based canonicalization + %b = sext i1 %c to i32 + %s = add i32 %x, %b + ret i32 %s +} + +; Negative test - extra use prevents canonicalization. + +declare void @use32(i32) + +define i32 @sextbool_add_uses(i1 %c, i32 %x) { +; CHECK-LABEL: @sextbool_add_uses( +; CHECK-NEXT: [[B:%.*]] = sext i1 [[C:%.*]] to i32 +; CHECK-NEXT: call void @use32(i32 [[B]]) +; CHECK-NEXT: [[S:%.*]] = add i32 [[B]], [[X:%.*]] +; CHECK-NEXT: ret i32 [[S]] +; + %b = sext i1 %c to i32 + call void @use32(i32 %b) + %s = add i32 %b, %x + ret i32 %s +} + +define <4 x i32> @sextbool_add_vector(<4 x i1> %c, <4 x i32> %x) { +; CHECK-LABEL: @sextbool_add_vector( +; CHECK-NEXT: [[B:%.*]] = sext <4 x i1> [[C:%.*]] to <4 x i32> +; CHECK-NEXT: [[S:%.*]] = add <4 x i32> [[B]], [[X:%.*]] +; CHECK-NEXT: ret <4 x i32> [[S]] +; + %b = sext <4 x i1> %c to <4 x i32> + %s = add <4 x i32> %x, %b + ret <4 x i32> %s +} + +define i32 @zextbool_sub(i1 %c, i32 %x) { +; CHECK-LABEL: @zextbool_sub( +; CHECK-NEXT: [[B:%.*]] = zext i1 [[C:%.*]] to i32 +; CHECK-NEXT: [[S:%.*]] = sub i32 [[B]], [[X:%.*]] +; CHECK-NEXT: ret i32 [[S]] +; + %b = zext i1 %c to i32 + %s = sub i32 %b, %x + ret i32 %s +} + +define i32 @zextbool_sub_uses(i1 %c, i32 %x) { +; CHECK-LABEL: @zextbool_sub_uses( +; CHECK-NEXT: [[B:%.*]] = zext i1 [[C:%.*]] to i32 +; CHECK-NEXT: call void @use32(i32 [[B]]) +; CHECK-NEXT: [[S:%.*]] = sub i32 [[X:%.*]], [[B]] +; CHECK-NEXT: ret i32 [[S]] +; + %b = zext i1 %c to i32 + call void @use32(i32 %b) + %s = sub i32 %x, %b + ret i32 %s +} + +define <4 x i32> @zextbool_sub_vector(<4 x i1> %c, <4 x i32> %x) { +; CHECK-LABEL: 
@zextbool_sub_vector( +; CHECK-NEXT: [[B:%.*]] = zext <4 x i1> [[C:%.*]] to <4 x i32> +; CHECK-NEXT: [[S:%.*]] = sub <4 x i32> [[X:%.*]], [[B]] +; CHECK-NEXT: ret <4 x i32> [[S]] +; + %b = zext <4 x i1> %c to <4 x i32> + %s = sub <4 x i32> %x, %b + ret <4 x i32> %s +} + Index: test/Transforms/LoopVectorize/no_switch_disable_vectorization.ll =================================================================== --- /dev/null +++ test/Transforms/LoopVectorize/no_switch_disable_vectorization.ll @@ -0,0 +1,95 @@ +; RUN: opt < %s -loop-vectorize -force-vector-width=4 -transform-warning -S 2>&1 | FileCheck %s +; RUN: opt < %s -loop-vectorize -force-vector-width=1 -transform-warning -S 2>&1 | FileCheck %s -check-prefix=NOANALYSIS +; RUN: opt < %s -loop-vectorize -force-vector-width=4 -transform-warning -pass-remarks-missed='loop-vectorize' -S 2>&1 | FileCheck %s -check-prefix=MOREINFO + +; This test is a copy of no_switch.ll, with the "llvm.loop.vectorize.enable" metadata set to false. +; It tests that vectorization is explicitly disabled and no warnings are emitted. 
+ +; CHECK-NOT: remark: source.cpp:4:5: loop not vectorized: loop contains a switch statement +; CHECK-NOT: warning: source.cpp:4:5: loop not vectorized: the optimizer was unable to perform the requested transformation; the transformation might be disabled or specified as part of an unsupported transformation ordering + +; NOANALYSIS-NOT: remark: {{.*}} +; NOANALYSIS-NOT: warning: source.cpp:4:5: loop not vectorized: the optimizer was unable to perform the requested transformation; the transformation might be disabled or specified as part of an unsupported transformation ordering + +; MOREINFO: remark: source.cpp:4:5: loop not vectorized: vectorization is explicitly disabled +; MOREINFO-NOT: warning: source.cpp:4:5: loop not vectorized: the optimizer was unable to perform the requested transformation; the transformation might be disabled or specified as part of an unsupported transformation ordering + +; CHECK: _Z11test_switchPii +; CHECK-NOT: x i32> +; CHECK: ret + +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" + +; Function Attrs: nounwind optsize ssp uwtable +define void @_Z11test_switchPii(i32* nocapture %A, i32 %Length) #0 !dbg !4 { +entry: + %cmp18 = icmp sgt i32 %Length, 0, !dbg !10 + br i1 %cmp18, label %for.body.preheader, label %for.end, !dbg !10, !llvm.loop !12 + +for.body.preheader: ; preds = %entry + br label %for.body, !dbg !14 + +for.body: ; preds = %for.body.preheader, %for.inc + %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %for.body.preheader ] + %arrayidx = getelementptr inbounds i32, i32* %A, i64 %indvars.iv, !dbg !14 + %0 = load i32, i32* %arrayidx, align 4, !dbg !14, !tbaa !16 + switch i32 %0, label %for.inc [ + i32 0, label %sw.bb + i32 1, label %sw.bb3 + ], !dbg !14 + +sw.bb: ; preds = %for.body + %1 = trunc i64 %indvars.iv to i32, !dbg !20 + %mul = shl nsw i32 %1, 1, !dbg !20 + br label %for.inc, !dbg !22 + +sw.bb3: ; preds = %for.body + %2 = trunc i64 %indvars.iv to i32, !dbg !23 + store i32 %2, i32* %arrayidx, 
align 4, !dbg !23, !tbaa !16 + br label %for.inc, !dbg !23 + +for.inc: ; preds = %sw.bb3, %for.body, %sw.bb + %storemerge = phi i32 [ %mul, %sw.bb ], [ 0, %for.body ], [ 0, %sw.bb3 ] + store i32 %storemerge, i32* %arrayidx, align 4, !dbg !20, !tbaa !16 + %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1, !dbg !10 + %lftr.wideiv = trunc i64 %indvars.iv.next to i32, !dbg !10 + %exitcond = icmp eq i32 %lftr.wideiv, %Length, !dbg !10 + br i1 %exitcond, label %for.end.loopexit, label %for.body, !dbg !10, !llvm.loop !12 + +for.end.loopexit: ; preds = %for.inc + br label %for.end + +for.end: ; preds = %for.end.loopexit, %entry + ret void, !dbg !24 +} + +attributes #0 = { nounwind } + +!llvm.dbg.cu = !{!0} +!llvm.module.flags = !{!7, !8} +!llvm.ident = !{!9} + +!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus, producer: "clang version 3.5.0", isOptimized: true, runtimeVersion: 6, emissionKind: LineTablesOnly, file: !1, enums: !2, retainedTypes: !2, globals: !2, imports: !2) +!1 = !DIFile(filename: "source.cpp", directory: ".") +!2 = !{} +!4 = distinct !DISubprogram(name: "test_switch", line: 1, isLocal: false, isDefinition: true, virtualIndex: 6, flags: DIFlagPrototyped, isOptimized: true, unit: !0, scopeLine: 1, file: !1, scope: !5, type: !6, retainedNodes: !2) +!5 = !DIFile(filename: "source.cpp", directory: ".") +!6 = !DISubroutineType(types: !2) +!7 = !{i32 2, !"Dwarf Version", i32 2} +!8 = !{i32 2, !"Debug Info Version", i32 3} +!9 = !{!"clang version 3.5.0"} +!10 = !DILocation(line: 3, column: 8, scope: !11) +!11 = distinct !DILexicalBlock(line: 3, column: 3, file: !1, scope: !4) +!12 = !{!12, !13, !13} +!13 = !{!"llvm.loop.vectorize.enable", i1 false} +!14 = !DILocation(line: 4, column: 5, scope: !15) +!15 = distinct !DILexicalBlock(line: 3, column: 36, file: !1, scope: !11) +!16 = !{!17, !17, i64 0} +!17 = !{!"int", !18, i64 0} +!18 = !{!"omnipotent char", !19, i64 0} +!19 = !{!"Simple C/C++ TBAA"} +!20 = !DILocation(line: 6, column: 7, scope: !21) +!21 = 
distinct !DILexicalBlock(line: 4, column: 18, file: !1, scope: !15) +!22 = !DILocation(line: 7, column: 5, scope: !21) +!23 = !DILocation(line: 9, column: 7, scope: !21) +!24 = !DILocation(line: 14, column: 1, scope: !4) Index: test/Verifier/test_g_addrspacecast.mir =================================================================== --- /dev/null +++ test/Verifier/test_g_addrspacecast.mir @@ -0,0 +1,57 @@ +#RUN: not llc -o - -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s +# REQUIRES: global-isel, aarch64-registered-target + +--- +name: test_addrspacecast +legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: +body: | + bb.0: + + %0:_(s64) = G_IMPLICIT_DEF + %1:_(p0) = G_IMPLICIT_DEF + %2:_(<2 x s64>) = G_IMPLICIT_DEF + %3:_(<2 x p0>) = G_IMPLICIT_DEF + + ; CHECK: Bad machine code: Too few operands + %4:_(s64) = G_ADDRSPACE_CAST + + ; CHECK: Bad machine code: Too few operands + ; CHECK: Bad machine code: Explicit definition marked as use + G_ADDRSPACE_CAST %1 + + ; CHECK: Bad machine code: addrspacecast types must be pointers + %5:_(p0) = G_ADDRSPACE_CAST %0 + + ; CHECK: Bad machine code: addrspacecast types must be pointers + %6:_(s64) = G_ADDRSPACE_CAST %1 + + ; CHECK: Bad machine code: addrspacecast types must be pointers + %7:_(<2 x s64>) = G_ADDRSPACE_CAST %1 + + ; CHECK: Bad machine code: addrspacecast types must be pointers + %8:_(<2 x p0>) = G_ADDRSPACE_CAST %2 + + ; CHECK: Bad machine code: pointer casts must be all-vector or all-scalar + %9:_(<2 x p1>) = G_ADDRSPACE_CAST %1 + + ; CHECK: Bad machine code: pointer casts must be all-vector or all-scalar + %10:_(p1) = G_ADDRSPACE_CAST %3 + + ; CHECK: Bad machine code: pointer casts must preserve number of elements + %11:_(<4 x p1>) = G_ADDRSPACE_CAST %3 + + ; CHECK: Bad machine code: pointer casts must preserve number of elements + %12:_(<4 x p1>) = G_IMPLICIT_DEF + %13:_(<2 x p0>) = G_ADDRSPACE_CAST %12 + + ; CHECK: Bad machine code: addrspacecast must 
convert different address spaces + %14:_(p0) = G_ADDRSPACE_CAST %1 + + ; CHECK: Bad machine code: addrspacecast must convert different address spaces + %15:_(<2 x p0>) = G_ADDRSPACE_CAST %3 + +... Index: test/Verifier/test_g_inttoptr.mir =================================================================== --- /dev/null +++ test/Verifier/test_g_inttoptr.mir @@ -0,0 +1,45 @@ +#RUN: not llc -o - -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s +# REQUIRES: global-isel, aarch64-registered-target + +--- +name: test_inttoptr +legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: +body: | + bb.0: + + %0:_(s64) = G_IMPLICIT_DEF + %1:_(p0) = G_IMPLICIT_DEF + %2:_(<2 x s64>) = G_IMPLICIT_DEF + %3:_(<2 x p0>) = G_IMPLICIT_DEF + + ; CHECK: Bad machine code: Too few operands + %4:_(p0) = G_INTTOPTR + + ; CHECK: Bad machine code: Too few operands + ; CHECK: Bad machine code: Explicit definition marked as use + G_INTTOPTR %0 + + ; CHECK: Bad machine code: inttoptr result type must be a pointer + %5:_(s64) = G_INTTOPTR %0 + + ; CHECK: Bad machine code: inttoptr result type must be a pointer + %6:_(<2 x s64>) = G_INTTOPTR %2 + + ; CHECK: Bad machine code: pointer casts must be all-vector or all-scalar + %7:_(<2 x p0>) = G_INTTOPTR %0 + + ; CHECK: Bad machine code: pointer casts must be all-vector or all-scalar + %8:_(p0) = G_INTTOPTR %2 + + ; CHECK: Bad machine code: pointer casts must preserve number of elements + %9:_(<4 x p0>) = G_INTTOPTR %2 + + ; CHECK: Bad machine code: pointer casts must preserve number of elements + %10:_(<4 x s64>) = G_IMPLICIT_DEF + %11:_(<2 x p0>) = G_INTTOPTR %10 + +... 
Index: test/Verifier/test_g_load.mir =================================================================== --- test/Verifier/test_g_load.mir +++ test/Verifier/test_g_load.mir @@ -1,4 +1,4 @@ -#RUN: not llc -o - -global-isel -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s +#RUN: not llc -o - -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s # REQUIRES: global-isel, aarch64-registered-target --- @@ -15,4 +15,9 @@ %0:_(s64) = G_CONSTANT i32 0 %1:_(s32) = G_LOAD %0 :: (load 4) + %2:_(p0) = G_IMPLICIT_DEF + + ; CHECK: Bad machine code: load memory size cannot exceed result size + %3:_(s8) = G_LOAD %2 :: (load 2) + ... Index: test/Verifier/test_g_ptrtoint.mir =================================================================== --- /dev/null +++ test/Verifier/test_g_ptrtoint.mir @@ -0,0 +1,45 @@ +#RUN: not llc -o - -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s +# REQUIRES: global-isel, aarch64-registered-target + +--- +name: test_ptrtoint +legalized: true +regBankSelected: false +selected: false +tracksRegLiveness: true +liveins: +body: | + bb.0: + + %0:_(s64) = G_IMPLICIT_DEF + %1:_(p0) = G_IMPLICIT_DEF + %2:_(<2 x s64>) = G_IMPLICIT_DEF + %3:_(<2 x p0>) = G_IMPLICIT_DEF + + ; CHECK: Bad machine code: Too few operands + %4:_(s64) = G_PTRTOINT + + ; CHECK: Bad machine code: Too few operands + ; CHECK: Bad machine code: Explicit definition marked as use + G_PTRTOINT %1 + + ; CHECK: Bad machine code: ptrtoint result type must not be a pointer + %5:_(p0) = G_PTRTOINT %1 + + ; CHECK: Bad machine code: ptrtoint result type must not be a pointer + %6:_(<2 x p0>) = G_PTRTOINT %0 + + ; CHECK: Bad machine code: ptrtoint source type must be a pointer + %7:_(<2 x s64>) = G_PTRTOINT %2 + + ; CHECK: Bad machine code: pointer casts must be all-vector or all-scalar + %8:_(s64) = G_PTRTOINT %3 + + ; CHECK: Bad machine code: pointer casts must preserve number of elements + %9:_(<4 x s64>) = G_INTTOPTR %3 + + ; CHECK: Bad machine code: pointer casts must 
preserve number of elements + %10:_(<4 x p0>) = G_IMPLICIT_DEF + %11:_(<2 x s64>) = G_PTRTOINT %10 + +... Index: test/Verifier/test_g_store.mir =================================================================== --- test/Verifier/test_g_store.mir +++ test/Verifier/test_g_store.mir @@ -16,4 +16,9 @@ %1:_(s32) = G_CONSTANT i32 1 G_STORE %1, %0 :: (store 4) + %2:_(p0) = G_IMPLICIT_DEF + %3:_(s8) = G_IMPLICIT_DEF + ; CHECK: Bad machine code: store memory size cannot exceed value size + G_STORE %3, %2 :: (store 2) + ... Index: test/tools/llvm-exegesis/X86/inverse_throughput-by-opcode-name.s =================================================================== --- /dev/null +++ test/tools/llvm-exegesis/X86/inverse_throughput-by-opcode-name.s @@ -0,0 +1,8 @@ +# RUN: llvm-exegesis -mode=inverse_throughput -opcode-name=ADD32rr | FileCheck %s + +CHECK: --- +CHECK-NEXT: mode: inverse_throughput +CHECK-NEXT: key: +CHECK-NEXT: instructions: +CHECK-NEXT: ADD32rr +CHECK: key: inverse_throughput Index: test/tools/llvm-mca/X86/BtVer2/int-to-fpu-forwarding-2.s =================================================================== --- test/tools/llvm-mca/X86/BtVer2/int-to-fpu-forwarding-2.s +++ test/tools/llvm-mca/X86/BtVer2/int-to-fpu-forwarding-2.s @@ -1,7 +1,8 @@ # NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py # RUN: llvm-mca -mtriple=x86_64-unknown-unknown -mcpu=btver2 -iterations=500 < %s | FileCheck %s -# Throughput for all the code snippet below should tend to 1.00 IPC. +# Throughput for the AVX code snippets below should tend to 0.25 IPC. +# Throughput for the SSE code snippets below should tend to 1.00 IPC. 
# LLVM-MCA-BEGIN vcvtsi2ss %ecx, %xmm0, %xmm0 @@ -31,12 +32,12 @@ # CHECK: Iterations: 500 # CHECK-NEXT: Instructions: 500 -# CHECK-NEXT: Total Cycles: 4503 +# CHECK-NEXT: Total Cycles: 2003 # CHECK-NEXT: Total uOps: 1000 # CHECK: Dispatch Width: 2 -# CHECK-NEXT: uOps Per Cycle: 0.22 -# CHECK-NEXT: IPC: 0.11 +# CHECK-NEXT: uOps Per Cycle: 0.50 +# CHECK-NEXT: IPC: 0.25 # CHECK-NEXT: Block RThroughput: 1.0 # CHECK: Instruction Info: @@ -48,7 +49,7 @@ # CHECK-NEXT: [6]: HasSideEffects (U) # CHECK: [1] [2] [3] [4] [5] [6] Instructions: -# CHECK-NEXT: 2 9 1.00 vcvtsi2ssl %ecx, %xmm0, %xmm0 +# CHECK-NEXT: 2 10 1.00 vcvtsi2ssl %ecx, %xmm0, %xmm0 # CHECK: Resources: # CHECK-NEXT: [0] - JALU0 @@ -78,12 +79,12 @@ # CHECK: Iterations: 500 # CHECK-NEXT: Instructions: 500 -# CHECK-NEXT: Total Cycles: 4503 +# CHECK-NEXT: Total Cycles: 2003 # CHECK-NEXT: Total uOps: 1000 # CHECK: Dispatch Width: 2 -# CHECK-NEXT: uOps Per Cycle: 0.22 -# CHECK-NEXT: IPC: 0.11 +# CHECK-NEXT: uOps Per Cycle: 0.50 +# CHECK-NEXT: IPC: 0.25 # CHECK-NEXT: Block RThroughput: 1.0 # CHECK: Instruction Info: @@ -95,7 +96,7 @@ # CHECK-NEXT: [6]: HasSideEffects (U) # CHECK: [1] [2] [3] [4] [5] [6] Instructions: -# CHECK-NEXT: 2 9 1.00 vcvtsi2sdl %ecx, %xmm0, %xmm0 +# CHECK-NEXT: 2 10 1.00 vcvtsi2sdl %ecx, %xmm0, %xmm0 # CHECK: Resources: # CHECK-NEXT: [0] - JALU0 @@ -125,12 +126,12 @@ # CHECK: Iterations: 500 # CHECK-NEXT: Instructions: 500 -# CHECK-NEXT: Total Cycles: 511 +# CHECK-NEXT: Total Cycles: 506 # CHECK-NEXT: Total uOps: 1000 # CHECK: Dispatch Width: 2 -# CHECK-NEXT: uOps Per Cycle: 1.96 -# CHECK-NEXT: IPC: 0.98 +# CHECK-NEXT: uOps Per Cycle: 1.98 +# CHECK-NEXT: IPC: 0.99 # CHECK-NEXT: Block RThroughput: 1.0 # CHECK: Instruction Info: @@ -142,7 +143,7 @@ # CHECK-NEXT: [6]: HasSideEffects (U) # CHECK: [1] [2] [3] [4] [5] [6] Instructions: -# CHECK-NEXT: 2 9 1.00 cvtsi2ssl %ecx, %xmm0 +# CHECK-NEXT: 2 10 1.00 cvtsi2ssl %ecx, %xmm0 # CHECK: Resources: # CHECK-NEXT: [0] - JALU0 @@ -172,12 +173,12 @@ # 
CHECK: Iterations: 500 # CHECK-NEXT: Instructions: 500 -# CHECK-NEXT: Total Cycles: 511 +# CHECK-NEXT: Total Cycles: 506 # CHECK-NEXT: Total uOps: 1000 # CHECK: Dispatch Width: 2 -# CHECK-NEXT: uOps Per Cycle: 1.96 -# CHECK-NEXT: IPC: 0.98 +# CHECK-NEXT: uOps Per Cycle: 1.98 +# CHECK-NEXT: IPC: 0.99 # CHECK-NEXT: Block RThroughput: 1.0 # CHECK: Instruction Info: @@ -189,7 +190,7 @@ # CHECK-NEXT: [6]: HasSideEffects (U) # CHECK: [1] [2] [3] [4] [5] [6] Instructions: -# CHECK-NEXT: 2 9 1.00 cvtsi2sdl %ecx, %xmm0 +# CHECK-NEXT: 2 10 1.00 cvtsi2sdl %ecx, %xmm0 # CHECK: Resources: # CHECK-NEXT: [0] - JALU0 Index: test/tools/llvm-mca/X86/BtVer2/resources-avx1.s =================================================================== --- test/tools/llvm-mca/X86/BtVer2/resources-avx1.s +++ test/tools/llvm-mca/X86/BtVer2/resources-avx1.s @@ -1144,14 +1144,14 @@ # CHECK-NEXT: 2 12 1.00 * vcvtsd2si (%rax), %rcx # CHECK-NEXT: 2 7 2.00 vcvtsd2ss %xmm0, %xmm1, %xmm2 # CHECK-NEXT: 2 12 2.00 * vcvtsd2ss (%rax), %xmm1, %xmm2 -# CHECK-NEXT: 2 9 1.00 vcvtsi2sdl %ecx, %xmm0, %xmm2 -# CHECK-NEXT: 2 9 1.00 vcvtsi2sdq %rcx, %xmm0, %xmm2 -# CHECK-NEXT: 2 14 1.00 * vcvtsi2sdl (%rax), %xmm0, %xmm2 -# CHECK-NEXT: 2 14 1.00 * vcvtsi2sdq (%rax), %xmm0, %xmm2 -# CHECK-NEXT: 2 9 1.00 vcvtsi2ssl %ecx, %xmm0, %xmm2 -# CHECK-NEXT: 2 9 1.00 vcvtsi2ssq %rcx, %xmm0, %xmm2 -# CHECK-NEXT: 2 14 1.00 * vcvtsi2ssl (%rax), %xmm0, %xmm2 -# CHECK-NEXT: 2 14 1.00 * vcvtsi2ssq (%rax), %xmm0, %xmm2 +# CHECK-NEXT: 2 10 1.00 vcvtsi2sdl %ecx, %xmm0, %xmm2 +# CHECK-NEXT: 2 10 1.00 vcvtsi2sdq %rcx, %xmm0, %xmm2 +# CHECK-NEXT: 1 9 1.00 * vcvtsi2sdl (%rax), %xmm0, %xmm2 +# CHECK-NEXT: 1 9 1.00 * vcvtsi2sdq (%rax), %xmm0, %xmm2 +# CHECK-NEXT: 2 10 1.00 vcvtsi2ssl %ecx, %xmm0, %xmm2 +# CHECK-NEXT: 2 10 1.00 vcvtsi2ssq %rcx, %xmm0, %xmm2 +# CHECK-NEXT: 1 9 1.00 * vcvtsi2ssl (%rax), %xmm0, %xmm2 +# CHECK-NEXT: 1 9 1.00 * vcvtsi2ssq (%rax), %xmm0, %xmm2 # CHECK-NEXT: 2 7 2.00 vcvtss2sd %xmm0, %xmm1, %xmm2 # CHECK-NEXT: 2 12 2.00 
* vcvtss2sd (%rax), %xmm1, %xmm2 # CHECK-NEXT: 2 7 1.00 vcvtss2si %xmm0, %ecx Index: test/tools/llvm-mca/X86/BtVer2/resources-sse1.s =================================================================== --- test/tools/llvm-mca/X86/BtVer2/resources-sse1.s +++ test/tools/llvm-mca/X86/BtVer2/resources-sse1.s @@ -212,10 +212,10 @@ # CHECK-NEXT: 1 8 1.00 * cvtpi2ps (%rax), %xmm2 # CHECK-NEXT: 1 3 1.00 cvtps2pi %xmm0, %mm2 # CHECK-NEXT: 1 8 1.00 * cvtps2pi (%rax), %mm2 -# CHECK-NEXT: 2 9 1.00 cvtsi2ssl %ecx, %xmm2 -# CHECK-NEXT: 2 9 1.00 cvtsi2ssq %rcx, %xmm2 -# CHECK-NEXT: 2 14 1.00 * cvtsi2ssl (%rax), %xmm2 -# CHECK-NEXT: 2 14 1.00 * cvtsi2ssl (%rax), %xmm2 +# CHECK-NEXT: 2 10 1.00 cvtsi2ssl %ecx, %xmm2 +# CHECK-NEXT: 2 10 1.00 cvtsi2ssq %rcx, %xmm2 +# CHECK-NEXT: 1 9 1.00 * cvtsi2ssl (%rax), %xmm2 +# CHECK-NEXT: 1 9 1.00 * cvtsi2ssl (%rax), %xmm2 # CHECK-NEXT: 2 7 1.00 cvtss2si %xmm0, %ecx # CHECK-NEXT: 2 7 1.00 cvtss2si %xmm0, %rcx # CHECK-NEXT: 2 12 1.00 * cvtss2si (%rax), %ecx Index: test/tools/llvm-mca/X86/BtVer2/resources-sse2.s =================================================================== --- test/tools/llvm-mca/X86/BtVer2/resources-sse2.s +++ test/tools/llvm-mca/X86/BtVer2/resources-sse2.s @@ -444,10 +444,10 @@ # CHECK-NEXT: 2 12 1.00 * cvtsd2si (%rax), %rcx # CHECK-NEXT: 2 7 2.00 cvtsd2ss %xmm0, %xmm2 # CHECK-NEXT: 2 12 2.00 * cvtsd2ss (%rax), %xmm2 -# CHECK-NEXT: 2 9 1.00 cvtsi2sdl %ecx, %xmm2 -# CHECK-NEXT: 2 9 1.00 cvtsi2sdq %rcx, %xmm2 -# CHECK-NEXT: 2 14 1.00 * cvtsi2sdl (%rax), %xmm2 -# CHECK-NEXT: 2 14 1.00 * cvtsi2sdl (%rax), %xmm2 +# CHECK-NEXT: 2 10 1.00 cvtsi2sdl %ecx, %xmm2 +# CHECK-NEXT: 2 10 1.00 cvtsi2sdq %rcx, %xmm2 +# CHECK-NEXT: 1 9 1.00 * cvtsi2sdl (%rax), %xmm2 +# CHECK-NEXT: 1 9 1.00 * cvtsi2sdl (%rax), %xmm2 # CHECK-NEXT: 2 7 2.00 cvtss2sd %xmm0, %xmm2 # CHECK-NEXT: 2 12 2.00 * cvtss2sd (%rax), %xmm2 # CHECK-NEXT: 1 3 1.00 cvttpd2dq %xmm0, %xmm2 Index: test/tools/llvm-nm/lit.local.cfg 
=================================================================== --- test/tools/llvm-nm/lit.local.cfg +++ test/tools/llvm-nm/lit.local.cfg @@ -1,4 +1,4 @@ if not 'X86' in config.root.targets: config.unsupported = True -config.suffixes = ['.s', '.test', '.yaml'] +config.suffixes = ['.ll', '.s', '.test', '.yaml'] Index: test/tools/llvm-nm/wasm/extern-only.ll =================================================================== --- /dev/null +++ test/tools/llvm-nm/wasm/extern-only.ll @@ -0,0 +1,23 @@ +; RUN: llc -filetype=obj -mtriple=wasm32-unknown-unknown -o %t.o %s +; RUN: llvm-nm --extern-only %t.o | FileCheck %s + +; Verify that hidden symbols are listed even when --extern-only is passed + +define hidden i32 @foo() { +entry: + ret i32 42 +} + +define i32 @bar() { +entry: + ret i32 43 +} + +define internal i32 @baz() { +entry: + ret i32 44 +} + +; CHECK: 00000006 T bar +; CHECK-NOT: baz +; CHECK: 00000001 T foo Index: test/tools/llvm-nm/wasm/lit.local.cfg =================================================================== --- /dev/null +++ test/tools/llvm-nm/wasm/lit.local.cfg @@ -0,0 +1,2 @@ +if not 'WebAssembly' in config.root.targets: + config.unsupported = True Index: test/tools/llvm-objcopy/ELF/bad-build-id.test =================================================================== --- test/tools/llvm-objcopy/ELF/bad-build-id.test +++ test/tools/llvm-objcopy/ELF/bad-build-id.test @@ -1,7 +1,7 @@ # RUN: yaml2obj %s > %t # RUN: not llvm-objcopy --build-id-link-dir=%t-dir --build-id-link-input=.debug %t 2>&1 >/dev/null | FileCheck %s -# CHECK: build ID in file {{.*}} is smaller than two bytes. +# CHECK: build ID is smaller than two bytes.
--- !ELF FileHeader: Index: test/tools/llvm-objcopy/ELF/discard-locals-rel.test =================================================================== --- /dev/null +++ test/tools/llvm-objcopy/ELF/discard-locals-rel.test @@ -0,0 +1,27 @@ +# RUN: yaml2obj %s > %t +# RUN: not llvm-objcopy --discard-locals %t %t2 2>&1 | FileCheck %s + +!ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_REL + Machine: EM_X86_64 +Sections: + - Name: .text + Type: SHT_PROGBITS + - Name: .rel.text + Type: SHT_REL + Link: .symtab + Info: .text + Relocations: + - Offset: 0x1000 + Symbol: .L.rel + Type: R_X86_64_PC32 +Symbols: + Local: + - Name: .L.rel + Type: STT_FUNC + Section: .text + +# CHECK: not stripping symbol '.L.rel' because it is named in a relocation. Index: test/tools/llvm-objcopy/ELF/discard-locals.test =================================================================== --- /dev/null +++ test/tools/llvm-objcopy/ELF/discard-locals.test @@ -0,0 +1,121 @@ +# RUN: yaml2obj %s > %t +# RUN: cp %t %t1 +# RUN: llvm-objcopy --discard-locals %t %t2 +# Verify that llvm-objcopy has not modified the input. +# RUN: cmp %t %t1 +# RUN: llvm-readobj --symbols %t2 | FileCheck %s + +# RUN: llvm-objcopy -X %t %t3 +# Verify that llvm-objcopy has not modified the input. +# RUN: cmp %t %t1 +# RUN: cmp %t2 %t3 + +# Verify that llvm-strip modifies the symbol table the same way. 
+ +# RUN: cp %t %t4 +# RUN: llvm-strip --discard-locals %t4 +# RUN: cmp %t2 %t4 + +# RUN: cp %t %t5 +# RUN: llvm-strip -X %t5 +# RUN: cmp %t2 %t5 + +!ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_REL + Machine: EM_X86_64 +Sections: + - Name: .text + Type: SHT_PROGBITS + - Name: .LLVM.Custom.Section + Type: SHT_PROGBITS +Symbols: + Local: + - Name: Local + Type: STT_FUNC + Section: .text + - Name: .L.LocalSection + Type: STT_SECTION + Section: .text + - Type: STT_SECTION + Section: .LLVM.Custom.Section + - Name: .L.LocalFile + Type: STT_FILE + - Name: .L.str + Type: STT_OBJECT + Section: .text + - Name: .L.undefined + - Name: .L.abs + Index: SHN_ABS + Global: + - Name: .L.Global + Type: STT_FUNC + Section: .text + +# CHECK: Symbols [ +# CHECK-NEXT: Symbol { +# CHECK-NEXT: Name: +# CHECK-NEXT: Value: 0x0 +# CHECK-NEXT: Size: 0 +# CHECK-NEXT: Binding: Local +# CHECK-NEXT: Type: None +# CHECK-NEXT: Other: 0 +# CHECK-NEXT: Section: Undefined +# CHECK-NEXT: } +# CHECK-NEXT: Symbol { +# CHECK-NEXT: Name: Local +# CHECK-NEXT: Value: +# CHECK-NEXT: Size: +# CHECK-NEXT: Binding: Local +# CHECK-NEXT: Type: Function +# CHECK-NEXT: Other: +# CHECK-NEXT: Section: .text +# CHECK-NEXT: } +# CHECK-NEXT: Symbol { +# CHECK-NEXT: Name: .L.LocalSection +# CHECK-NEXT: Value: +# CHECK-NEXT: Size: +# CHECK-NEXT: Binding: Local +# CHECK-NEXT: Type: Section +# CHECK-NEXT: Other: +# CHECK-NEXT: Section: .text +# CHECK-NEXT: } +# CHECK-NEXT: Symbol { +# CHECK-NEXT: Name: +# CHECK-NEXT: Value: +# CHECK-NEXT: Size: +# CHECK-NEXT: Binding: Local +# CHECK-NEXT: Type: Section +# CHECK-NEXT: Other: +# CHECK-NEXT: Section: .LLVM.Custom.Section +# CHECK-NEXT: } +# CHECK-NEXT: Symbol { +# CHECK-NEXT: Name: .L.LocalFile +# CHECK-NEXT: Value: +# CHECK-NEXT: Size: +# CHECK-NEXT: Binding: Local +# CHECK-NEXT: Type: File +# CHECK-NEXT: Other: +# CHECK-NEXT: Section: Undefined +# CHECK-NEXT: } +# CHECK-NEXT: Symbol { +# CHECK-NEXT: Name: .L.undefined +# CHECK-NEXT: Value: +# 
CHECK-NEXT: Size: +# CHECK-NEXT: Binding: Local +# CHECK-NEXT: Type: None +# CHECK-NEXT: Other: +# CHECK-NEXT: Section: Undefined +# CHECK-NEXT: } +# CHECK-NEXT: Symbol { +# CHECK-NEXT: Name: .L.Global +# CHECK-NEXT: Value: +# CHECK-NEXT: Size: +# CHECK-NEXT: Binding: Global +# CHECK-NEXT: Type: Function +# CHECK-NEXT: Other: +# CHECK-NEXT: Section: .text +# CHECK-NEXT: } +# CHECK-NEXT: ] Index: test/tools/llvm-objcopy/ELF/discard-mix-local-and-all.test =================================================================== --- /dev/null +++ test/tools/llvm-objcopy/ELF/discard-mix-local-and-all.test @@ -0,0 +1,107 @@ +# RUN: yaml2obj %s > %t +# Establish baseline objects for further checks. --discard-locals only discards +# compiler-generated local symbols (starting with .L), --discard-all discards +# all regular local symbols. +# RUN: llvm-objcopy %t %t-discard-none +# RUN: llvm-readobj --symbols %t-discard-none | FileCheck %s --check-prefixes=CHECK,LOCAL,COMPILER-LOCAL +# RUN: llvm-objcopy --discard-all %t %t-discard-all +# RUN: llvm-readobj --symbols %t-discard-all | FileCheck %s +# RUN: llvm-objcopy --discard-locals %t %t-discard-locals +# RUN: llvm-readobj --symbols %t-discard-locals | FileCheck %s --check-prefixes=CHECK,LOCAL + +# When mixing --discard-all and --discard-locals, the last one wins. 
+# RUN: llvm-objcopy --discard-all --discard-locals %t %t.1.o +# RUN: cmp %t.1.o %t-discard-locals +# RUN: llvm-objcopy --discard-locals --discard-all %t %t.2.o +# RUN: cmp %t.2.o %t-discard-all +# RUN: llvm-objcopy -x -X %t %t.3.o +# RUN: cmp %t.3.o %t-discard-locals +# RUN: llvm-objcopy -X -x %t %t.4.o +# RUN: cmp %t.4.o %t-discard-all +# RUN: llvm-objcopy -x -X -x -X %t %t.5.o +# RUN: cmp %t.5.o %t-discard-locals +# RUN: llvm-objcopy -X -x -X -x %t %t.6.o +# RUN: cmp %t.6.o %t-discard-all +# RUN: llvm-objcopy -X -x -X -x --discard-locals %t %t.7.o +# RUN: cmp %t.7.o %t-discard-locals +# RUN: llvm-objcopy -X -x -X -x --discard-all %t %t.8.o +# RUN: cmp %t.8.o %t-discard-all + +# llvm-strip works in the same way. +# RUN: llvm-strip --discard-all --discard-locals %t -o %t.9.o +# RUN: cmp %t.9.o %t-discard-locals +# RUN: llvm-strip --discard-locals --discard-all %t -o %t.10.o +# RUN: cmp %t.10.o %t-discard-all +# RUN: llvm-strip -x -X %t -o %t.11.o +# RUN: cmp %t.11.o %t-discard-locals +# RUN: llvm-strip -X -x %t -o %t.12.o +# RUN: cmp %t.12.o %t-discard-all +# RUN: llvm-strip -x -X -x -X %t -o %t.13.o +# RUN: cmp %t.13.o %t-discard-locals +# RUN: llvm-strip -X -x -X -x %t -o %t.14.o +# RUN: cmp %t.14.o %t-discard-all +# RUN: llvm-strip -X -x -X -x --discard-locals %t -o %t.15.o +# RUN: cmp %t.15.o %t-discard-locals +# RUN: llvm-strip -X -x -X -x --discard-all %t -o %t.16.o +# RUN: cmp %t.16.o %t-discard-all + +!ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_REL + Machine: EM_X86_64 +Sections: + - Name: .text + Type: SHT_PROGBITS +Symbols: + Local: + - Name: Local + Type: STT_FUNC + Section: .text + - Name: .L.str + Type: STT_OBJECT + Section: .text + Global: + - Name: Global + Type: STT_FUNC + Section: .text + +# CHECK: Symbols [ +# CHECK-NEXT: Symbol { +# CHECK-NEXT: Name: +# CHECK-NEXT: Value: 0x0 +# CHECK-NEXT: Size: 0 +# CHECK-NEXT: Binding: Local +# CHECK-NEXT: Type: None +# CHECK-NEXT: Other: 0 +# CHECK-NEXT: Section: Undefined +# 
CHECK-NEXT: } +# LOCAL-NEXT: Symbol { +# LOCAL-NEXT: Name: Local +# LOCAL-NEXT: Value: +# LOCAL-NEXT: Size: +# LOCAL-NEXT: Binding: Local +# LOCAL-NEXT: Type: Function +# LOCAL-NEXT: Other: +# LOCAL-NEXT: Section: .text +# LOCAL-NEXT: } +# COMPILER-LOCAL-NEXT: Symbol { +# COMPILER-LOCAL-NEXT: Name: .L.str +# COMPILER-LOCAL-NEXT: Value: +# COMPILER-LOCAL-NEXT: Size: +# COMPILER-LOCAL-NEXT: Binding: Local +# COMPILER-LOCAL-NEXT: Type: Object +# COMPILER-LOCAL-NEXT: Other: +# COMPILER-LOCAL-NEXT: Section: .text +# COMPILER-LOCAL-NEXT: } +# CHECK-NEXT: Symbol { +# CHECK-NEXT: Name: Global +# CHECK-NEXT: Value: +# CHECK-NEXT: Size: +# CHECK-NEXT: Binding: Global +# CHECK-NEXT: Type: Function +# CHECK-NEXT: Other: +# CHECK-NEXT: Section: .text +# CHECK-NEXT: } +# CHECK-NEXT: ] Index: test/tools/llvm-objcopy/ELF/set-section-flags-and-rename.test =================================================================== --- /dev/null +++ test/tools/llvm-objcopy/ELF/set-section-flags-and-rename.test @@ -0,0 +1,14 @@ +# RUN: yaml2obj %s > %t + +# RUN: not llvm-objcopy --rename-section=.foo=.bar --set-section-flags=.foo=alloc %t %t.2 2>&1 | FileCheck %s --check-prefix=SET-FOO +# RUN: not llvm-objcopy --rename-section=.foo=.bar --set-section-flags=.bar=alloc %t %t.2 2>&1 | FileCheck %s --check-prefix=SET-BAR + +!ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_REL + Machine: EM_X86_64 + +# SET-FOO: --set-section-flags=.foo conflicts with --rename-section=.foo=.bar. +# SET-BAR: --set-section-flags=.bar conflicts with --rename-section=.foo=.bar. 
Index: test/tools/llvm-objcopy/ELF/set-section-flags-multiple.test =================================================================== --- /dev/null +++ test/tools/llvm-objcopy/ELF/set-section-flags-multiple.test @@ -0,0 +1,32 @@ +# RUN: yaml2obj %s > %t + +# RUN: llvm-objcopy --set-section-flags=.foo=alloc --set-section-flags=.bar=code %t %t.2 +# RUN: llvm-readobj --sections %t.2 | FileCheck %s --check-prefixes=CHECK,ALLOC,WRITE + +!ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_REL + Machine: EM_X86_64 +Sections: + - Name: .foo + Type: SHT_PROGBITS + Flags: [ ] + - Name: .bar + Type: SHT_PROGBITS + Flags: [ ] + +# CHECK: Name: .foo +# CHECK-NEXT: Type: SHT_PROGBITS +# CHECK-NEXT: Flags [ +# CHECK-NEXT: SHF_ALLOC (0x2) +# CHECK-NEXT: SHF_WRITE (0x1) +# CHECK-NEXT: ] + +# CHECK: Name: .bar +# CHECK-NEXT: Type: SHT_PROGBITS +# CHECK-NEXT: Flags [ +# CHECK-NEXT: SHF_EXECINSTR (0x4) +# CHECK-NEXT: SHF_WRITE (0x1) +# CHECK-NEXT: ] Index: test/tools/llvm-objcopy/ELF/set-section-flags.test =================================================================== --- /dev/null +++ test/tools/llvm-objcopy/ELF/set-section-flags.test @@ -0,0 +1,68 @@ +# RUN: yaml2obj %s > %t + +# Single flags on a section with no flags: +# RUN: llvm-objcopy --set-section-flags=.foo=alloc %t %t.alloc +# RUN: llvm-readobj --sections %t.alloc | FileCheck %s --check-prefixes=CHECK,ALLOC,WRITE +# RUN: llvm-objcopy --set-section-flags=.foo=load %t %t.load +# RUN: llvm-readobj --sections %t.load | FileCheck %s --check-prefixes=CHECK,WRITE +# RUN: llvm-objcopy --set-section-flags=.foo=noload %t %t.noload +# RUN: llvm-readobj --sections %t.noload | FileCheck %s --check-prefixes=CHECK,WRITE +# RUN: llvm-objcopy --set-section-flags=.foo=readonly %t %t.readonly +# RUN: llvm-readobj --sections %t.readonly | FileCheck %s --check-prefixes=CHECK +# RUN: llvm-objcopy --set-section-flags=.foo=debug %t %t.debug +# RUN: llvm-readobj --sections %t.debug | FileCheck %s --check-prefixes=CHECK,WRITE 
+# RUN: llvm-objcopy --set-section-flags=.foo=code %t %t.code +# RUN: llvm-readobj --sections %t.code | FileCheck %s --check-prefixes=CHECK,EXEC,WRITE +# RUN: llvm-objcopy --set-section-flags=.foo=data %t %t.data +# RUN: llvm-readobj --sections %t.data | FileCheck %s --check-prefixes=CHECK,WRITE +# RUN: llvm-objcopy --set-section-flags=.foo=rom %t %t.rom +# RUN: llvm-readobj --sections %t.rom | FileCheck %s --check-prefixes=CHECK,WRITE +# RUN: llvm-objcopy --set-section-flags=.foo=contents %t %t.contents +# RUN: llvm-readobj --sections %t.contents | FileCheck %s --check-prefixes=CHECK,WRITE +# RUN: llvm-objcopy --set-section-flags=.foo=merge %t %t.merge +# RUN: llvm-readobj --sections %t.merge | FileCheck %s --check-prefixes=CHECK,MERGE,WRITE +# RUN: llvm-objcopy --set-section-flags=.foo=strings %t %t.strings +# RUN: llvm-readobj --sections %t.strings | FileCheck %s --check-prefixes=CHECK,STRINGS,WRITE +# RUN: llvm-objcopy --set-section-flags=.foo=share %t %t.share +# RUN: llvm-readobj --sections %t.share | FileCheck %s --check-prefixes=CHECK,WRITE + +# Multiple flags: +# RUN: llvm-objcopy --set-section-flags=.foo=alloc,readonly,strings %t %t.alloc_ro_strings +# RUN: llvm-readobj --sections %t.alloc_ro_strings | FileCheck %s --check-prefixes=CHECK,ALLOC,STRINGS +# RUN: llvm-objcopy --set-section-flags=.foo=alloc,code %t %t.alloc_code +# RUN: llvm-readobj --sections %t.alloc_code | FileCheck %s --check-prefixes=CHECK,ALLOC,EXEC,WRITE + +# Invalid flags: +# RUN: not llvm-objcopy --set-section-flags=.foo=xyzzy %t %t.xyzzy 2>&1 | FileCheck %s --check-prefix=BAD-FLAG + +# Bad flag format: +# RUN: not llvm-objcopy --set-section-flags=.foo %t %t2 2>&1 | FileCheck %s --check-prefix=BAD-FORMAT + +# Setting flags for the same section multiple times: +# RUN: not llvm-objcopy --set-section-flags=.foo=alloc --set-section-flags=.foo=load %t %t2 2>&1 | FileCheck %s --check-prefix=MULTIPLE-SETS + +!ELF +FileHeader: + Class: ELFCLASS64 + Data: ELFDATA2LSB + Type: ET_REL + Machine: 
EM_X86_64 +Sections: + - Name: .foo + Type: SHT_PROGBITS + Flags: [ ] + +# CHECK: Name: .foo +# CHECK-NEXT: Type: SHT_PROGBITS +# CHECK-NEXT: Flags [ +# ALLOC-NEXT: SHF_ALLOC (0x2) +# EXEC-NEXT: SHF_EXECINSTR (0x4) +# MERGE-NEXT: SHF_MERGE (0x10) +# STRINGS-NEXT: SHF_STRINGS (0x20) +# WRITE-NEXT: SHF_WRITE (0x1) +# CHECK-NEXT: ] + +# BAD-FORMAT: Bad format for --set-section-flags: missing '=' +# MULTIPLE-SETS: --set-section-flags set multiple times for section .foo + +# BAD-FLAG: Unrecognized section flag 'xyzzy'. Flags supported for GNU compatibility: alloc, load, noload, readonly, debug, code, data, rom, share, contents, merge, strings. Index: test/tools/llvm-pdbdump/fpo-data.test =================================================================== --- /dev/null +++ test/tools/llvm-pdbdump/fpo-data.test @@ -0,0 +1,14 @@ +; RUN: llvm-pdbutil dump -fpo %p/Inputs/FPOTest.pdb \ +; RUN: | FileCheck %s + +CHECK: Old FPO Data +CHECK-NEXT: ============================================================ +CHECK-NEXT: RVA | Code | Locals | Params | Prolog | Saved Regs | Use BP | Has SEH | Frame Type +CHECK-NEXT: 0000004E | 19 | 0 | 0 | 0 | 0 | false | false | FPO + +CHECK: New FPO Data +CHECK-NEXT: ============================================================ +CHECK-NEXT: RVA | Code | Locals | Params | Stack | Prolog | Saved Regs | Has SEH | Has C++EH | Start | Program +CHECK-NEXT: 00001010 | 18 | 0 | 0 | 0 | 4 | 0 | false | false | true | $T0 .raSearch = $eip $T0 ^ = $esp $T0 4 + = +CHECK-NEXT: 00001011 | 17 | 0 | 0 | 0 | 3 | 4 | false | false | false | $T0 .raSearch = $eip $T0 ^ = $esp $T0 4 + = $ebp $T0 4 - ^ = +CHECK-NEXT: 00001013 | 15 | 0 | 0 | 0 | 1 | 4 | false | false | false | $T0 $ebp 4 + = $eip $T0 ^ = $esp $T0 4 + = $ebp $T0 4 - ^ = \ No newline at end of file Index: tools/llvm-exegesis/lib/BenchmarkResult.h =================================================================== --- tools/llvm-exegesis/lib/BenchmarkResult.h +++ tools/llvm-exegesis/lib/BenchmarkResult.h 
@@ -57,7 +57,7 @@ // The result of an instruction benchmark. struct InstructionBenchmark { InstructionBenchmarkKey Key; - enum ModeE { Unknown, Latency, Uops }; + enum ModeE { Unknown, Latency, Uops, InverseThroughput }; ModeE Mode; std::string CpuName; std::string LLVMTriple; Index: tools/llvm-exegesis/lib/BenchmarkResult.cpp =================================================================== --- tools/llvm-exegesis/lib/BenchmarkResult.cpp +++ tools/llvm-exegesis/lib/BenchmarkResult.cpp @@ -209,6 +209,8 @@ Io.enumCase(Value, "", exegesis::InstructionBenchmark::Unknown); Io.enumCase(Value, "latency", exegesis::InstructionBenchmark::Latency); Io.enumCase(Value, "uops", exegesis::InstructionBenchmark::Uops); + Io.enumCase(Value, "inverse_throughput", + exegesis::InstructionBenchmark::InverseThroughput); } }; Index: tools/llvm-exegesis/lib/BenchmarkRunner.h =================================================================== --- tools/llvm-exegesis/lib/BenchmarkRunner.h +++ tools/llvm-exegesis/lib/BenchmarkRunner.h @@ -75,6 +75,7 @@ protected: const LLVMState &State; + const InstructionBenchmark::ModeE Mode; private: virtual llvm::Expected> @@ -84,7 +85,6 @@ writeObjectFile(const BenchmarkCode &Configuration, llvm::ArrayRef Code) const; - const InstructionBenchmark::ModeE Mode; const std::unique_ptr Scratch; }; Index: tools/llvm-exegesis/lib/Latency.h =================================================================== --- tools/llvm-exegesis/lib/Latency.h +++ tools/llvm-exegesis/lib/Latency.h @@ -32,8 +32,8 @@ class LatencyBenchmarkRunner : public BenchmarkRunner { public: - LatencyBenchmarkRunner(const LLVMState &State) - : BenchmarkRunner(State, InstructionBenchmark::Latency) {} + LatencyBenchmarkRunner(const LLVMState &State, + InstructionBenchmark::ModeE Mode); ~LatencyBenchmarkRunner() override; private: Index: tools/llvm-exegesis/lib/Latency.cpp =================================================================== --- tools/llvm-exegesis/lib/Latency.cpp +++ 
tools/llvm-exegesis/lib/Latency.cpp @@ -165,6 +165,14 @@ return std::move(Results); } +LatencyBenchmarkRunner::LatencyBenchmarkRunner(const LLVMState &State, + InstructionBenchmark::ModeE Mode) + : BenchmarkRunner(State, Mode) { + assert((Mode == InstructionBenchmark::Latency || + Mode == InstructionBenchmark::InverseThroughput) && + "invalid mode"); +} + LatencyBenchmarkRunner::~LatencyBenchmarkRunner() = default; llvm::Expected> @@ -184,8 +192,17 @@ if (*ExpectedCounterValue < MinValue) MinValue = *ExpectedCounterValue; } - std::vector Result = { - BenchmarkMeasure::Create("latency", MinValue)}; + std::vector Result; + switch (Mode) { + case InstructionBenchmark::Latency: + Result = {BenchmarkMeasure::Create("latency", MinValue)}; + break; + case InstructionBenchmark::InverseThroughput: + Result = {BenchmarkMeasure::Create("inverse_throughput", MinValue)}; + break; + default: + break; + } return std::move(Result); } Index: tools/llvm-exegesis/lib/Target.h =================================================================== --- tools/llvm-exegesis/lib/Target.h +++ tools/llvm-exegesis/lib/Target.h @@ -130,7 +130,7 @@ std::unique_ptr virtual createUopsSnippetGenerator( const LLVMState &State) const; std::unique_ptr virtual createLatencyBenchmarkRunner( - const LLVMState &State) const; + const LLVMState &State, InstructionBenchmark::ModeE Mode) const; std::unique_ptr virtual createUopsBenchmarkRunner( const LLVMState &State) const; Index: tools/llvm-exegesis/lib/Target.cpp =================================================================== --- tools/llvm-exegesis/lib/Target.cpp +++ tools/llvm-exegesis/lib/Target.cpp @@ -45,6 +45,7 @@ case InstructionBenchmark::Latency: return createLatencySnippetGenerator(State); case InstructionBenchmark::Uops: + case InstructionBenchmark::InverseThroughput: return createUopsSnippetGenerator(State); } return nullptr; @@ -57,7 +58,8 @@ case InstructionBenchmark::Unknown: return nullptr; case InstructionBenchmark::Latency: - return 
createLatencyBenchmarkRunner(State); + case InstructionBenchmark::InverseThroughput: + return createLatencyBenchmarkRunner(State, Mode); case InstructionBenchmark::Uops: return createUopsBenchmarkRunner(State); } @@ -74,9 +76,9 @@ return llvm::make_unique(State); } -std::unique_ptr -ExegesisTarget::createLatencyBenchmarkRunner(const LLVMState &State) const { - return llvm::make_unique(State); +std::unique_ptr ExegesisTarget::createLatencyBenchmarkRunner( + const LLVMState &State, InstructionBenchmark::ModeE Mode) const { + return llvm::make_unique(State, Mode); } std::unique_ptr Index: tools/llvm-exegesis/llvm-exegesis.cpp =================================================================== --- tools/llvm-exegesis/llvm-exegesis.cpp +++ tools/llvm-exegesis/llvm-exegesis.cpp @@ -56,16 +56,19 @@ static cl::opt BenchmarkFile("benchmarks-file", cl::desc(""), cl::init("")); -static cl::opt - BenchmarkMode("mode", cl::desc("the mode to run"), - cl::values(clEnumValN(exegesis::InstructionBenchmark::Latency, - "latency", "Instruction Latency"), - clEnumValN(exegesis::InstructionBenchmark::Uops, - "uops", "Uop Decomposition"), - // When not asking for a specific benchmark mode, - // we'll analyse the results. - clEnumValN(exegesis::InstructionBenchmark::Unknown, - "analysis", "Analysis"))); +static cl::opt BenchmarkMode( + "mode", cl::desc("the mode to run"), + cl::values(clEnumValN(exegesis::InstructionBenchmark::Latency, "latency", + "Instruction Latency"), + clEnumValN(exegesis::InstructionBenchmark::InverseThroughput, + "inverse_throughput", + "Instruction Inverse Throughput"), + clEnumValN(exegesis::InstructionBenchmark::Uops, "uops", + "Uop Decomposition"), + // When not asking for a specific benchmark mode, + // we'll analyse the results. 
+ clEnumValN(exegesis::InstructionBenchmark::Unknown, "analysis", + "Analysis"))); static cl::opt NumRepetitions("num-repetitions", Index: tools/llvm-objcopy/COFF/COFFObjcopy.h =================================================================== --- tools/llvm-objcopy/COFF/COFFObjcopy.h +++ tools/llvm-objcopy/COFF/COFFObjcopy.h @@ -10,6 +10,7 @@ #define LLVM_TOOLS_OBJCOPY_COFFOBJCOPY_H namespace llvm { +class Error; namespace object { class COFFObjectFile; @@ -20,8 +21,8 @@ class Buffer; namespace coff { -void executeObjcopyOnBinary(const CopyConfig &Config, - object::COFFObjectFile &In, Buffer &Out); +Error executeObjcopyOnBinary(const CopyConfig &Config, + object::COFFObjectFile &In, Buffer &Out); } // end namespace coff } // end namespace objcopy Index: tools/llvm-objcopy/COFF/COFFObjcopy.cpp =================================================================== --- tools/llvm-objcopy/COFF/COFFObjcopy.cpp +++ tools/llvm-objcopy/COFF/COFFObjcopy.cpp @@ -97,7 +97,7 @@ return true; if (Config.StripDebug || Config.StripAll || Config.StripAllGNU || - Config.DiscardAll || Config.StripUnneeded) { + Config.DiscardMode == DiscardType::All || Config.StripUnneeded) { if (isDebugSection(Sec) && (Sec.Header.Characteristics & IMAGE_SCN_MEM_DISCARDABLE) != 0) return true; @@ -125,7 +125,7 @@ Sec.Relocs.clear(); // If we need to do per-symbol removals, initialize the Referenced field. - if (Config.StripUnneeded || Config.DiscardAll || + if (Config.StripUnneeded || Config.DiscardMode == DiscardType::All || !Config.SymbolsToRemove.empty()) if (Error E = Obj.markSymbols()) return E; @@ -159,7 +159,8 @@ // GNU objcopy keeps referenced local symbols and external symbols // if --discard-all is set, similar to what --strip-unneeded does, // but undefined local symbols are kept when --discard-all is set. 
- if (Config.DiscardAll && Sym.Sym.StorageClass == IMAGE_SYM_CLASS_STATIC && + if (Config.DiscardMode == DiscardType::All && + Sym.Sym.StorageClass == IMAGE_SYM_CLASS_STATIC && Sym.Sym.SectionNumber != 0) return true; } @@ -177,10 +178,11 @@ !Config.SymbolsToGlobalize.empty() || !Config.SymbolsToKeep.empty() || !Config.SymbolsToLocalize.empty() || !Config.SymbolsToWeaken.empty() || !Config.SymbolsToKeepGlobal.empty() || !Config.SectionsToRename.empty() || - !Config.SymbolsToRename.empty() || Config.ExtractDWO || - Config.KeepFileSymbols || Config.LocalizeHidden || Config.PreserveDates || - Config.StripDWO || Config.StripNonAlloc || Config.StripSections || - Config.Weaken || Config.DecompressDebugSections) { + !Config.SetSectionFlags.empty() || !Config.SymbolsToRename.empty() || + Config.ExtractDWO || Config.KeepFileSymbols || Config.LocalizeHidden || + Config.PreserveDates || Config.StripDWO || Config.StripNonAlloc || + Config.StripSections || Config.Weaken || Config.DecompressDebugSections || + Config.DiscardMode == DiscardType::Locals) { return createStringError(llvm::errc::invalid_argument, "Option not supported by llvm-objcopy for COFF"); } @@ -188,19 +190,20 @@ return Error::success(); } -void executeObjcopyOnBinary(const CopyConfig &Config, - COFFObjectFile &In, Buffer &Out) { +Error executeObjcopyOnBinary(const CopyConfig &Config, COFFObjectFile &In, + Buffer &Out) { COFFReader Reader(In); Expected> ObjOrErr = Reader.create(); if (!ObjOrErr) - reportError(Config.InputFilename, ObjOrErr.takeError()); + return createFileError(Config.InputFilename, ObjOrErr.takeError()); Object *Obj = ObjOrErr->get(); assert(Obj && "Unable to deserialize COFF object"); if (Error E = handleArgs(Config, *Obj)) - reportError(Config.InputFilename, std::move(E)); + return createFileError(Config.InputFilename, std::move(E)); COFFWriter Writer(*Obj, Out); if (Error E = Writer.write()) - reportError(Config.OutputFilename, std::move(E)); + return createFileError(Config.OutputFilename, 
std::move(E)); + return Error::success(); } } // end namespace coff Index: tools/llvm-objcopy/CopyConfig.h =================================================================== --- tools/llvm-objcopy/CopyConfig.h +++ tools/llvm-objcopy/CopyConfig.h @@ -37,6 +37,17 @@ Optional NewFlags; }; +struct SectionFlagsUpdate { + StringRef Name; + uint64_t NewFlags; +}; + +enum class DiscardType { + None, // Default + All, // --discard-all (-x) + Locals, // --discard-locals (-X) +}; + // Configuration for copying/stripping a single file. struct CopyConfig { // Main input/output options @@ -57,6 +68,7 @@ Optional BuildIdLinkOutput; StringRef SplitDWO; StringRef SymbolsPrefix; + DiscardType DiscardMode = DiscardType::None; // Repeated options std::vector AddSection; @@ -73,11 +85,11 @@ // Map options StringMap SectionsToRename; + StringMap SetSectionFlags; StringMap SymbolsToRename; // Boolean options bool DeterministicArchives = true; - bool DiscardAll = false; bool ExtractDWO = false; bool KeepFileSymbols = false; bool LocalizeHidden = false; Index: tools/llvm-objcopy/CopyConfig.cpp =================================================================== --- tools/llvm-objcopy/CopyConfig.cpp +++ tools/llvm-objcopy/CopyConfig.cpp @@ -128,6 +128,32 @@ .Default(SectionFlag::SecNone); } +static uint64_t parseSectionFlagSet(ArrayRef SectionFlags) { + SectionFlag ParsedFlags = SectionFlag::SecNone; + for (StringRef Flag : SectionFlags) { + SectionFlag ParsedFlag = parseSectionRenameFlag(Flag); + if (ParsedFlag == SectionFlag::SecNone) + error("Unrecognized section flag '" + Flag + + "'. 
Flags supported for GNU compatibility: alloc, load, noload, " + "readonly, debug, code, data, rom, share, contents, merge, " + "strings."); + ParsedFlags |= ParsedFlag; + } + + uint64_t NewFlags = 0; + if (ParsedFlags & SectionFlag::SecAlloc) + NewFlags |= ELF::SHF_ALLOC; + if (!(ParsedFlags & SectionFlag::SecReadonly)) + NewFlags |= ELF::SHF_WRITE; + if (ParsedFlags & SectionFlag::SecCode) + NewFlags |= ELF::SHF_EXECINSTR; + if (ParsedFlags & SectionFlag::SecMerge) + NewFlags |= ELF::SHF_MERGE; + if (ParsedFlags & SectionFlag::SecStrings) + NewFlags |= ELF::SHF_STRINGS; + return NewFlags; +} + static SectionRename parseRenameSectionValue(StringRef FlagValue) { if (!FlagValue.contains('=')) error("Bad format for --rename-section: missing '='"); @@ -142,34 +168,29 @@ Old2New.second.split(NameAndFlags, ','); SR.NewName = NameAndFlags[0]; - if (NameAndFlags.size() > 1) { - SectionFlag Flags = SectionFlag::SecNone; - for (size_t I = 1, Size = NameAndFlags.size(); I < Size; ++I) { - SectionFlag Flag = parseSectionRenameFlag(NameAndFlags[I]); - if (Flag == SectionFlag::SecNone) - error("Unrecognized section flag '" + NameAndFlags[I] + - "'. 
Flags supported for GNU compatibility: alloc, load, noload, " - "readonly, debug, code, data, rom, share, contents, merge, " - "strings."); - Flags |= Flag; - } - - SR.NewFlags = 0; - if (Flags & SectionFlag::SecAlloc) - *SR.NewFlags |= ELF::SHF_ALLOC; - if (!(Flags & SectionFlag::SecReadonly)) - *SR.NewFlags |= ELF::SHF_WRITE; - if (Flags & SectionFlag::SecCode) - *SR.NewFlags |= ELF::SHF_EXECINSTR; - if (Flags & SectionFlag::SecMerge) - *SR.NewFlags |= ELF::SHF_MERGE; - if (Flags & SectionFlag::SecStrings) - *SR.NewFlags |= ELF::SHF_STRINGS; - } + if (NameAndFlags.size() > 1) + SR.NewFlags = parseSectionFlagSet(makeArrayRef(NameAndFlags).drop_front()); return SR; } +static SectionFlagsUpdate parseSetSectionFlagValue(StringRef FlagValue) { + if (!StringRef(FlagValue).contains('=')) + error("Bad format for --set-section-flags: missing '='"); + + // Initial split: ".foo" = "f1,f2,..." + auto Section2Flags = StringRef(FlagValue).split('='); + SectionFlagsUpdate SFU; + SFU.Name = Section2Flags.first; + + // Flags split: "f1" "f2" ... + SmallVector SectionFlags; + Section2Flags.second.split(SectionFlags, ','); + SFU.NewFlags = parseSectionFlagSet(SectionFlags); + + return SFU; +} + static const StringMap ArchMap{ // Name, {EMachine, 64bit, LittleEndian} {"aarch64", {ELF::EM_AARCH64, true, true}}, @@ -327,6 +348,24 @@ if (!Config.SectionsToRename.try_emplace(SR.OriginalName, SR).second) error("Multiple renames of section " + SR.OriginalName); } + for (auto Arg : InputArgs.filtered(OBJCOPY_set_section_flags)) { + SectionFlagsUpdate SFU = parseSetSectionFlagValue(Arg->getValue()); + if (!Config.SetSectionFlags.try_emplace(SFU.Name, SFU).second) + error("--set-section-flags set multiple times for section " + SFU.Name); + } + // Prohibit combinations of --set-section-flags when the section name is used + // by --rename-section, either as a source or a destination. 
+ for (const auto &E : Config.SectionsToRename) { + const SectionRename &SR = E.second; + if (Config.SetSectionFlags.count(SR.OriginalName)) + error("--set-section-flags=" + SR.OriginalName + + " conflicts with --rename-section=" + SR.OriginalName + "=" + + SR.NewName); + if (Config.SetSectionFlags.count(SR.NewName)) + error("--set-section-flags=" + SR.NewName + + " conflicts with --rename-section=" + SR.OriginalName + "=" + + SR.NewName); + } for (auto Arg : InputArgs.filtered(OBJCOPY_remove_section)) Config.ToRemove.push_back(Arg->getValue()); @@ -348,7 +387,11 @@ Config.ExtractDWO = InputArgs.hasArg(OBJCOPY_extract_dwo); Config.LocalizeHidden = InputArgs.hasArg(OBJCOPY_localize_hidden); Config.Weaken = InputArgs.hasArg(OBJCOPY_weaken); - Config.DiscardAll = InputArgs.hasArg(OBJCOPY_discard_all); + if (InputArgs.hasArg(OBJCOPY_discard_all, OBJCOPY_discard_locals)) + Config.DiscardMode = + InputArgs.hasFlag(OBJCOPY_discard_all, OBJCOPY_discard_locals) + ? DiscardType::All + : DiscardType::Locals; Config.OnlyKeepDebug = InputArgs.hasArg(OBJCOPY_only_keep_debug); Config.KeepFileSymbols = InputArgs.hasArg(OBJCOPY_keep_file_symbols); Config.DecompressDebugSections = @@ -428,13 +471,17 @@ CopyConfig Config; Config.StripDebug = InputArgs.hasArg(STRIP_strip_debug); - Config.DiscardAll = InputArgs.hasArg(STRIP_discard_all); + if (InputArgs.hasArg(STRIP_discard_all, STRIP_discard_locals)) + Config.DiscardMode = + InputArgs.hasFlag(STRIP_discard_all, STRIP_discard_locals) + ? 
DiscardType::All + : DiscardType::Locals; Config.StripUnneeded = InputArgs.hasArg(STRIP_strip_unneeded); Config.StripAll = InputArgs.hasArg(STRIP_strip_all); Config.StripAllGNU = InputArgs.hasArg(STRIP_strip_all_gnu); - if (!Config.StripDebug && !Config.StripUnneeded && !Config.DiscardAll && - !Config.StripAllGNU) + if (!Config.StripDebug && !Config.StripUnneeded && + Config.DiscardMode == DiscardType::None && !Config.StripAllGNU) Config.StripAll = true; for (auto Arg : InputArgs.filtered(STRIP_keep_section)) Index: tools/llvm-objcopy/ELF/ELFObjcopy.h =================================================================== --- tools/llvm-objcopy/ELF/ELFObjcopy.h +++ tools/llvm-objcopy/ELF/ELFObjcopy.h @@ -10,6 +10,7 @@ #define LLVM_TOOLS_OBJCOPY_ELFOBJCOPY_H namespace llvm { +class Error; class MemoryBuffer; namespace object { @@ -21,10 +22,10 @@ class Buffer; namespace elf { -void executeObjcopyOnRawBinary(const CopyConfig &Config, MemoryBuffer &In, - Buffer &Out); -void executeObjcopyOnBinary(const CopyConfig &Config, - object::ELFObjectFileBase &In, Buffer &Out); +Error executeObjcopyOnRawBinary(const CopyConfig &Config, MemoryBuffer &In, + Buffer &Out); +Error executeObjcopyOnBinary(const CopyConfig &Config, + object::ELFObjectFileBase &In, Buffer &Out); } // end namespace elf } // end namespace objcopy Index: tools/llvm-objcopy/ELF/ELFObjcopy.cpp =================================================================== --- tools/llvm-objcopy/ELF/ELFObjcopy.cpp +++ tools/llvm-objcopy/ELF/ELFObjcopy.cpp @@ -70,6 +70,17 @@ return !isDWOSection(Sec); } +static uint64_t setSectionFlagsPreserveMask(uint64_t OldFlags, + uint64_t NewFlags) { + // Preserve some flags which should not be dropped when setting flags. + // Also, preserve anything OS/processor dependant. 
+ const uint64_t PreserveMask = ELF::SHF_COMPRESSED | ELF::SHF_EXCLUDE | + ELF::SHF_GROUP | ELF::SHF_LINK_ORDER | + ELF::SHF_MASKOS | ELF::SHF_MASKPROC | + ELF::SHF_TLS | ELF::SHF_INFO_LINK; + return (OldFlags & PreserveMask) | (NewFlags & ~PreserveMask); +} + static ElfType getOutputElfType(const Binary &Bin) { // Infer output ELF type from the input ELF object if (isa>(Bin)) @@ -167,8 +178,8 @@ } } -static void splitDWOToFile(const CopyConfig &Config, const Reader &Reader, - StringRef File, ElfType OutputElfType) { +static Error splitDWOToFile(const CopyConfig &Config, const Reader &Reader, + StringRef File, ElfType OutputElfType) { auto DWOFile = Reader.create(); DWOFile->removeSections( [&](const SectionBase &Sec) { return onlyKeepDWOPred(*DWOFile, Sec); }); @@ -177,9 +188,8 @@ FileBuffer FB(File); auto Writer = createWriter(Config, *DWOFile, FB, OutputElfType); if (Error E = Writer->finalize()) - error(std::move(E)); - if (Error E = Writer->write()) - error(std::move(E)); + return E; + return Writer->write(); } static Error dumpSectionToFile(StringRef SecName, StringRef Filename, @@ -258,12 +268,14 @@ // any previous removals. Lastly whether or not something is removed shouldn't // depend a) on the order the options occur in or b) on some opaque priority // system. The only priority is that keeps/copies overrule removes. 
-static void handleArgs(const CopyConfig &Config, Object &Obj, - const Reader &Reader, ElfType OutputElfType) { +static Error handleArgs(const CopyConfig &Config, Object &Obj, + const Reader &Reader, ElfType OutputElfType) { + + if (!Config.SplitDWO.empty()) + if (Error E = + splitDWOToFile(Config, Reader, Config.SplitDWO, OutputElfType)) + return E; - if (!Config.SplitDWO.empty()) { - splitDWOToFile(Config, Reader, Config.SplitDWO, OutputElfType); - } if (Config.OutputArch) Obj.Machine = Config.OutputArch.getValue().EMachine; @@ -325,9 +337,11 @@ (Config.KeepFileSymbols && Sym.Type == STT_FILE)) return false; - if (Config.DiscardAll && Sym.Binding == STB_LOCAL && - Sym.getShndx() != SHN_UNDEF && Sym.Type != STT_FILE && - Sym.Type != STT_SECTION) + if ((Config.DiscardMode == DiscardType::All || + (Config.DiscardMode == DiscardType::Locals && + StringRef(Sym.Name).startswith(".L"))) && + Sym.Binding == STB_LOCAL && Sym.getShndx() != SHN_UNDEF && + Sym.Type != STT_FILE && Sym.Type != STT_SECTION) return true; if (Config.StripAll || Config.StripAllGNU) @@ -484,16 +498,19 @@ if (Iter != Config.SectionsToRename.end()) { const SectionRename &SR = Iter->second; Sec.Name = SR.NewName; - if (SR.NewFlags.hasValue()) { - // Preserve some flags which should not be dropped when setting flags. - // Also, preserve anything OS/processor dependant. 
- const uint64_t PreserveMask = ELF::SHF_COMPRESSED | ELF::SHF_EXCLUDE | - ELF::SHF_GROUP | ELF::SHF_LINK_ORDER | - ELF::SHF_MASKOS | ELF::SHF_MASKPROC | - ELF::SHF_TLS | ELF::SHF_INFO_LINK; - Sec.Flags = (Sec.Flags & PreserveMask) | - (SR.NewFlags.getValue() & ~PreserveMask); - } + if (SR.NewFlags.hasValue()) + Sec.Flags = + setSectionFlagsPreserveMask(Sec.Flags, SR.NewFlags.getValue()); + } + } + } + + if (!Config.SetSectionFlags.empty()) { + for (auto &Sec : Obj.sections()) { + const auto Iter = Config.SetSectionFlags.find(Sec.Name); + if (Iter != Config.SetSectionFlags.end()) { + const SectionFlagsUpdate &SFU = Iter->second; + Sec.Flags = setSectionFlagsPreserveMask(Sec.Flags, SFU.NewFlags); } } } @@ -506,7 +523,7 @@ ErrorOr> BufOrErr = MemoryBuffer::getFile(File); if (!BufOrErr) - reportError(File, BufOrErr.getError()); + return createFileError(File, errorCodeToError(BufOrErr.getError())); std::unique_ptr Buf = std::move(*BufOrErr); ArrayRef Data( reinterpret_cast(Buf->getBufferStart()), @@ -524,16 +541,18 @@ StringRef SecName = SecPair.first; StringRef File = SecPair.second; if (Error E = dumpSectionToFile(SecName, File, Obj)) - reportError(Config.InputFilename, std::move(E)); + return createFileError(Config.InputFilename, std::move(E)); } } if (!Config.AddGnuDebugLink.empty()) Obj.addSection(Config.AddGnuDebugLink); + + return Error::success(); } -void executeObjcopyOnRawBinary(const CopyConfig &Config, MemoryBuffer &In, - Buffer &Out) { +Error executeObjcopyOnRawBinary(const CopyConfig &Config, MemoryBuffer &In, + Buffer &Out) { BinaryReader Reader(Config.BinaryArch, &In); std::unique_ptr Obj = Reader.create(); @@ -541,17 +560,17 @@ // (-B). const ElfType OutputElfType = getOutputElfType( Config.OutputArch ? 
Config.OutputArch.getValue() : Config.BinaryArch); - handleArgs(Config, *Obj, Reader, OutputElfType); + if (Error E = handleArgs(Config, *Obj, Reader, OutputElfType)) + return E; std::unique_ptr Writer = createWriter(Config, *Obj, Out, OutputElfType); if (Error E = Writer->finalize()) - error(std::move(E)); - if (Error E = Writer->write()) - error(std::move(E)); + return E; + return Writer->write(); } -void executeObjcopyOnBinary(const CopyConfig &Config, - object::ELFObjectFileBase &In, Buffer &Out) { +Error executeObjcopyOnBinary(const CopyConfig &Config, + object::ELFObjectFileBase &In, Buffer &Out) { ELFReader Reader(&In); std::unique_ptr Obj = Reader.create(); // Prefer OutputArch (-O) if set, otherwise infer it from the input. @@ -563,25 +582,29 @@ if (!Config.BuildIdLinkDir.empty()) { BuildIdBytes = unwrapOrError(findBuildID(In)); if (BuildIdBytes.size() < 2) - error("build ID in file '" + Config.InputFilename + - "' is smaller than two bytes"); + return createFileError( + Config.InputFilename, + createStringError(object_error::parse_failed, + "build ID is smaller than two bytes.")); } if (!Config.BuildIdLinkDir.empty() && Config.BuildIdLinkInput) { linkToBuildIdDir(Config, Config.InputFilename, Config.BuildIdLinkInput.getValue(), BuildIdBytes); } - handleArgs(Config, *Obj, Reader, OutputElfType); + if (Error E = handleArgs(Config, *Obj, Reader, OutputElfType)) + return E; std::unique_ptr Writer = createWriter(Config, *Obj, Out, OutputElfType); if (Error E = Writer->finalize()) - error(std::move(E)); + return E; if (Error E = Writer->write()) - error(std::move(E)); + return E; if (!Config.BuildIdLinkDir.empty() && Config.BuildIdLinkOutput) { linkToBuildIdDir(Config, Config.OutputFilename, Config.BuildIdLinkOutput.getValue(), BuildIdBytes); } + return Error::success(); } } // end namespace elf Index: tools/llvm-objcopy/ObjcopyOpts.td =================================================================== --- tools/llvm-objcopy/ObjcopyOpts.td +++ 
tools/llvm-objcopy/ObjcopyOpts.td @@ -86,6 +86,13 @@ "Make a section named
with the contents of .">, MetaVarName<"section=file">; +defm set_section_flags + : Eq<"set-section-flags", + "Set section flags for a given section. Flags supported for GNU " + "compatibility: alloc, load, noload, readonly, debug, code, data, " + "rom, share, contents, merge, strings.">, + MetaVarName<"section=flag1[,flag2,...]">; + def strip_all : Flag<["-", "--"], "strip-all">, HelpText< @@ -142,6 +149,12 @@ def W : JoinedOrSeparate<["-"], "W">, Alias; def weaken : Flag<["-", "--"], "weaken">, HelpText<"Mark all global symbols as weak">; + +def discard_locals : Flag<["-", "--"], "discard-locals">, + HelpText<"Remove compiler-generated local symbols, (e.g. " + "symbols starting with .L)">; +def X : Flag<["-"], "X">, Alias; + def discard_all : Flag<["-", "--"], "discard-all">, HelpText<"Remove all local symbols except file and section symbols">; Index: tools/llvm-objcopy/StripOpts.td =================================================================== --- tools/llvm-objcopy/StripOpts.td +++ tools/llvm-objcopy/StripOpts.td @@ -57,6 +57,11 @@ MetaVarName<"symbol">; def K : JoinedOrSeparate<["-"], "K">, Alias; +def discard_locals : Flag<["-", "--"], "discard-locals">, + HelpText<"Remove compiler-generated local symbols, (e.g. " + "symbols starting with .L)">; +def X : Flag<["-"], "X">, Alias; + def discard_all : Flag<["-", "--"], "discard-all">, HelpText<"Remove all local symbols except file and section symbols">; Index: tools/llvm-objcopy/llvm-objcopy.cpp =================================================================== --- tools/llvm-objcopy/llvm-objcopy.cpp +++ tools/llvm-objcopy/llvm-objcopy.cpp @@ -122,8 +122,8 @@ /// The function executeObjcopyOnRawBinary does the dispatch based on the format /// of the output specified by the command line options. 
-static void executeObjcopyOnRawBinary(const CopyConfig &Config, - MemoryBuffer &In, Buffer &Out) { +static Error executeObjcopyOnRawBinary(const CopyConfig &Config, + MemoryBuffer &In, Buffer &Out) { // TODO: llvm-objcopy should parse CopyConfig.OutputFormat to recognize // formats other than ELF / "binary" and invoke // elf::executeObjcopyOnRawBinary, macho::executeObjcopyOnRawBinary or @@ -133,18 +133,19 @@ /// The function executeObjcopyOnBinary does the dispatch based on the format /// of the input binary (ELF, MachO or COFF). -static void executeObjcopyOnBinary(const CopyConfig &Config, object::Binary &In, - Buffer &Out) { +static Error executeObjcopyOnBinary(const CopyConfig &Config, + object::Binary &In, Buffer &Out) { if (auto *ELFBinary = dyn_cast(&In)) return elf::executeObjcopyOnBinary(Config, *ELFBinary, Out); else if (auto *COFFBinary = dyn_cast(&In)) return coff::executeObjcopyOnBinary(Config, *COFFBinary, Out); else - error("Unsupported object file format"); + return createStringError(object_error::invalid_file_type, + "Unsupported object file format"); } -static void executeObjcopyOnArchive(const CopyConfig &Config, - const Archive &Ar) { +static Error executeObjcopyOnArchive(const CopyConfig &Config, + const Archive &Ar) { std::vector NewArchiveMembers; Error Err = Error::success(); for (const Archive::Child &Child : Ar.children(Err)) { @@ -158,7 +159,8 @@ reportError(Ar.getFileName(), ChildNameOrErr.takeError()); MemBuffer MB(ChildNameOrErr.get()); - executeObjcopyOnBinary(Config, *Bin, MB); + if (Error E = executeObjcopyOnBinary(Config, *Bin, MB)) + return E; Expected Member = NewArchiveMember::getOldMember(Child, Config.DeterministicArchives); @@ -175,6 +177,7 @@ Ar.hasSymbolTable(), Ar.kind(), Config.DeterministicArchives, Ar.isThin())) reportError(Config.OutputFilename, std::move(E)); + return Error::success(); } static void restoreDateOnFile(StringRef Filename, @@ -207,7 +210,8 @@ if (!BufOrErr) reportError(Config.InputFilename, 
BufOrErr.getError()); FileBuffer FB(Config.OutputFilename); - executeObjcopyOnRawBinary(Config, *BufOrErr->get(), FB); + if (Error E = executeObjcopyOnRawBinary(Config, *BufOrErr->get(), FB)) + error(std::move(E)); } else { Expected> BinaryOrErr = createBinary(Config.InputFilename); @@ -215,10 +219,13 @@ reportError(Config.InputFilename, BinaryOrErr.takeError()); if (Archive *Ar = dyn_cast(BinaryOrErr.get().getBinary())) { - executeObjcopyOnArchive(Config, *Ar); + if (Error E = executeObjcopyOnArchive(Config, *Ar)) + error(std::move(E)); } else { FileBuffer FB(Config.OutputFilename); - executeObjcopyOnBinary(Config, *BinaryOrErr.get().getBinary(), FB); + if (Error E = executeObjcopyOnBinary(Config, + *BinaryOrErr.get().getBinary(), FB)) + error(std::move(E)); } } Index: tools/llvm-pdbutil/DumpOutputStyle.cpp =================================================================== --- tools/llvm-pdbutil/DumpOutputStyle.cpp +++ tools/llvm-pdbutil/DumpOutputStyle.cpp @@ -1010,17 +1010,12 @@ ExitOnError Err("Error dumping old fpo data:"); auto &Dbi = Err(File.getPDBDbiStream()); - uint32_t Index = Dbi.getDebugStreamIndex(DbgHeaderType::FPO); - if (Index == kInvalidStreamIndex) { + if (!Dbi.hasOldFpoRecords()) { printStreamNotPresent("FPO"); return Error::success(); } - std::unique_ptr OldFpo = File.createIndexedStream(Index); - BinaryStreamReader Reader(*OldFpo); - FixedStreamArray Records; - Err(Reader.readArray(Records, - Reader.bytesRemaining() / sizeof(object::FpoData))); + const FixedStreamArray& Records = Dbi.getOldFpoRecords(); P.printLine(" RVA | Code | Locals | Params | Prolog | Saved Regs | Use " "BP | Has SEH | Frame Type"); @@ -1042,18 +1037,12 @@ ExitOnError Err("Error dumping new fpo data:"); auto &Dbi = Err(File.getPDBDbiStream()); - uint32_t Index = Dbi.getDebugStreamIndex(DbgHeaderType::NewFPO); - if (Index == kInvalidStreamIndex) { + if (!Dbi.hasNewFpoRecords()) { printStreamNotPresent("New FPO"); return Error::success(); } - std::unique_ptr NewFpo = 
File.createIndexedStream(Index); - - DebugFrameDataSubsectionRef FDS; - if (auto EC = FDS.initialize(*NewFpo)) - return make_error(raw_error_code::corrupt_file, - "Invalid new fpo stream"); + const DebugFrameDataSubsectionRef& FDS = Dbi.getNewFpoRecords(); P.printLine(" RVA | Code | Locals | Params | Stack | Prolog | Saved Regs " "| Has SEH | Has C++EH | Start | Program"); Index: tools/llvm-readobj/ELFDumper.cpp =================================================================== --- tools/llvm-readobj/ELFDumper.cpp +++ tools/llvm-readobj/ELFDumper.cpp @@ -381,11 +381,11 @@ private: struct Field { - StringRef Str; + std::string Str; unsigned Column; Field(StringRef S, unsigned Col) : Str(S), Column(Col) {} - Field(unsigned Col) : Str(""), Column(Col) {} + Field(unsigned Col) : Column(Col) {} }; template @@ -2700,18 +2700,14 @@ template void GNUStyle::printRelocation(const ELFO *Obj, const Elf_Shdr *SymTab, const Elf_Rela &R, bool IsRela) { - std::string Offset, Info, Addend, Value; - SmallString<32> RelocName; - std::string TargetName; - const Elf_Sym *Sym = nullptr; - unsigned Width = ELFT::Is64Bits ? 16 : 8; - unsigned Bias = ELFT::Is64Bits ? 8 : 0; - // First two fields are bit width dependent. The rest of them are after are // fixed width. + unsigned Bias = ELFT::Is64Bits ? 8 : 0; Field Fields[5] = {0, 10 + Bias, 19 + 2 * Bias, 42 + 2 * Bias, 53 + 2 * Bias}; + SmallString<32> RelocName; Obj->getRelocationTypeName(R.getType(Obj->isMips64EL()), RelocName); - Sym = unwrapOrError(Obj->getRelocationSymbol(&R, SymTab)); + const Elf_Sym *Sym = unwrapOrError(Obj->getRelocationSymbol(&R, SymTab)); + std::string TargetName; if (Sym && Sym->getType() == ELF::STT_SECTION) { const Elf_Shdr *Sec = unwrapOrError( Obj->getSection(Sym, SymTab, this->dumper()->getShndxTable())); @@ -2721,6 +2717,17 @@ TargetName = maybeDemangle(unwrapOrError(Sym->getName(StrTable))); } + unsigned Width = ELFT::Is64Bits ? 
16 : 8; + Fields[0].Str = to_string(format_hex_no_prefix(R.r_offset, Width)); + Fields[1].Str = to_string(format_hex_no_prefix(R.r_info, Width)); + Fields[2].Str = RelocName.str(); + if (Sym) + Fields[3].Str = to_string(format_hex_no_prefix(Sym->getValue(), Width)); + Fields[4].Str = TargetName; + for (auto &F : Fields) + printField(F); + + std::string Addend; if (Sym && IsRela) { if (R.r_addend < 0) Addend = " - "; @@ -2728,25 +2735,9 @@ Addend = " + "; } - Offset = to_string(format_hex_no_prefix(R.r_offset, Width)); - Info = to_string(format_hex_no_prefix(R.r_info, Width)); - - int64_t RelAddend = R.r_addend; if (IsRela) - Addend += to_hexString(std::abs(RelAddend), false); - - if (Sym) - Value = to_string(format_hex_no_prefix(Sym->getValue(), Width)); - - Fields[0].Str = Offset; - Fields[1].Str = Info; - Fields[2].Str = RelocName; - Fields[3].Str = Value; - Fields[4].Str = TargetName; - for (auto &field : Fields) - printField(field); - OS << Addend; - OS << "\n"; + Addend += to_hexString(std::abs(R.r_addend), false); + OS << Addend << "\n"; } template void GNUStyle::printRelocHeader(unsigned SType) { @@ -2944,62 +2935,37 @@ template void GNUStyle::printSectionHeaders(const ELFO *Obj) { - size_t SectionIndex = 0; - std::string Number, Type, Size, Address, Offset, Flags, Link, Info, EntrySize, - Alignment; - unsigned Bias; - unsigned Width; - - if (ELFT::Is64Bits) { - Bias = 0; - Width = 16; - } else { - Bias = 8; - Width = 8; - } - + unsigned Bias = ELFT::Is64Bits ? 
0 : 8; ArrayRef Sections = unwrapOrError(Obj->sections()); OS << "There are " << to_string(Sections.size()) << " section headers, starting at offset " << "0x" << to_hexString(Obj->getHeader()->e_shoff, false) << ":\n\n"; OS << "Section Headers:\n"; - Field Fields[11] = {{"[Nr]", 2}, - {"Name", 7}, - {"Type", 25}, - {"Address", 41}, - {"Off", 58 - Bias}, - {"Size", 65 - Bias}, - {"ES", 72 - Bias}, - {"Flg", 75 - Bias}, - {"Lk", 79 - Bias}, - {"Inf", 82 - Bias}, - {"Al", 86 - Bias}}; - for (auto &f : Fields) - printField(f); + Field Fields[11] = { + {"[Nr]", 2}, {"Name", 7}, {"Type", 25}, + {"Address", 41}, {"Off", 58 - Bias}, {"Size", 65 - Bias}, + {"ES", 72 - Bias}, {"Flg", 75 - Bias}, {"Lk", 79 - Bias}, + {"Inf", 82 - Bias}, {"Al", 86 - Bias}}; + for (auto &F : Fields) + printField(F); OS << "\n"; + size_t SectionIndex = 0; for (const Elf_Shdr &Sec : Sections) { - Number = to_string(SectionIndex); - Fields[0].Str = Number; + Fields[0].Str = to_string(SectionIndex); Fields[1].Str = unwrapOrError(Obj->getSectionName(&Sec)); - Type = getSectionTypeString(Obj->getHeader()->e_machine, Sec.sh_type); - Fields[2].Str = Type; - Address = to_string(format_hex_no_prefix(Sec.sh_addr, Width)); - Fields[3].Str = Address; - Offset = to_string(format_hex_no_prefix(Sec.sh_offset, 6)); - Fields[4].Str = Offset; - Size = to_string(format_hex_no_prefix(Sec.sh_size, 6)); - Fields[5].Str = Size; - EntrySize = to_string(format_hex_no_prefix(Sec.sh_entsize, 2)); - Fields[6].Str = EntrySize; - Flags = getGNUFlags(Sec.sh_flags); - Fields[7].Str = Flags; - Link = to_string(Sec.sh_link); - Fields[8].Str = Link; - Info = to_string(Sec.sh_info); - Fields[9].Str = Info; - Alignment = to_string(Sec.sh_addralign); - Fields[10].Str = Alignment; + Fields[2].Str = + getSectionTypeString(Obj->getHeader()->e_machine, Sec.sh_type); + Fields[3].Str = + to_string(format_hex_no_prefix(Sec.sh_addr, ELFT::Is64Bits ? 
16 : 8)); + Fields[4].Str = to_string(format_hex_no_prefix(Sec.sh_offset, 6)); + Fields[5].Str = to_string(format_hex_no_prefix(Sec.sh_size, 6)); + Fields[6].Str = to_string(format_hex_no_prefix(Sec.sh_entsize, 2)); + Fields[7].Str = getGNUFlags(Sec.sh_flags); + Fields[8].Str = to_string(Sec.sh_link); + Fields[9].Str = to_string(Sec.sh_info); + Fields[10].Str = to_string(Sec.sh_addralign); + OS.PadToColumn(Fields[0].Column); OS << "[" << right_justify(Fields[0].Str, 2) << "]"; for (int i = 1; i < 7; i++) @@ -3081,7 +3047,6 @@ bool IsDynamic) { static int Idx = 0; static bool Dynamic = true; - size_t Width; // If this function was called with a different value from IsDynamic // from last call, happens when we move from dynamic to static symbol @@ -3090,84 +3055,63 @@ Idx = 0; Dynamic = false; } - std::string Num, Name, Value, Size, Binding, Type, Visibility, Section; - unsigned Bias = 0; - if (ELFT::Is64Bits) { - Bias = 8; - Width = 16; - } else { - Bias = 0; - Width = 8; - } + + unsigned Bias = ELFT::Is64Bits ? 8 : 0; Field Fields[8] = {0, 8, 17 + Bias, 23 + Bias, 31 + Bias, 38 + Bias, 47 + Bias, 51 + Bias}; - Num = to_string(format_decimal(Idx++, 6)) + ":"; - Value = to_string(format_hex_no_prefix(Symbol->st_value, Width)); - Size = to_string(format_decimal(Symbol->st_size, 5)); + Fields[0].Str = to_string(format_decimal(Idx++, 6)) + ":"; + Fields[1].Str = to_string( + format_hex_no_prefix(Symbol->st_value, ELFT::Is64Bits ? 
16 : 8)); + Fields[2].Str = to_string(format_decimal(Symbol->st_size, 5)); + unsigned char SymbolType = Symbol->getType(); if (Obj->getHeader()->e_machine == ELF::EM_AMDGPU && SymbolType >= ELF::STT_LOOS && SymbolType < ELF::STT_HIOS) - Type = printEnum(SymbolType, makeArrayRef(AMDGPUSymbolTypes)); + Fields[3].Str = printEnum(SymbolType, makeArrayRef(AMDGPUSymbolTypes)); else - Type = printEnum(SymbolType, makeArrayRef(ElfSymbolTypes)); - unsigned Vis = Symbol->getVisibility(); - Binding = printEnum(Symbol->getBinding(), makeArrayRef(ElfSymbolBindings)); - Visibility = printEnum(Vis, makeArrayRef(ElfSymbolVisibilities)); - Section = getSymbolSectionNdx(Obj, Symbol, FirstSym); - Name = this->dumper()->getFullSymbolName(Symbol, StrTable, IsDynamic); - Fields[0].Str = Num; - Fields[1].Str = Value; - Fields[2].Str = Size; - Fields[3].Str = Type; - Fields[4].Str = Binding; - Fields[5].Str = Visibility; - Fields[6].Str = Section; - Fields[7].Str = Name; + Fields[3].Str = printEnum(SymbolType, makeArrayRef(ElfSymbolTypes)); + + Fields[4].Str = + printEnum(Symbol->getBinding(), makeArrayRef(ElfSymbolBindings)); + Fields[5].Str = + printEnum(Symbol->getVisibility(), makeArrayRef(ElfSymbolVisibilities)); + Fields[6].Str = getSymbolSectionNdx(Obj, Symbol, FirstSym); + Fields[7].Str = + this->dumper()->getFullSymbolName(Symbol, StrTable, IsDynamic); for (auto &Entry : Fields) printField(Entry); OS << "\n"; } + template void GNUStyle::printHashedSymbol(const ELFO *Obj, const Elf_Sym *FirstSym, uint32_t Sym, StringRef StrTable, uint32_t Bucket) { - std::string Num, Buc, Name, Value, Size, Binding, Type, Visibility, Section; - unsigned Width, Bias = 0; - if (ELFT::Is64Bits) { - Bias = 8; - Width = 16; - } else { - Bias = 0; - Width = 8; - } + unsigned Bias = ELFT::Is64Bits ? 
8 : 0; Field Fields[9] = {0, 6, 11, 20 + Bias, 25 + Bias, 34 + Bias, 41 + Bias, 49 + Bias, 53 + Bias}; - Num = to_string(format_decimal(Sym, 5)); - Buc = to_string(format_decimal(Bucket, 3)) + ":"; + Fields[0].Str = to_string(format_decimal(Sym, 5)); + Fields[1].Str = to_string(format_decimal(Bucket, 3)) + ":"; const auto Symbol = FirstSym + Sym; - Value = to_string(format_hex_no_prefix(Symbol->st_value, Width)); - Size = to_string(format_decimal(Symbol->st_size, 5)); + Fields[2].Str = to_string( + format_hex_no_prefix(Symbol->st_value, ELFT::Is64Bits ? 18 : 8)); + Fields[3].Str = to_string(format_decimal(Symbol->st_size, 5)); + unsigned char SymbolType = Symbol->getType(); if (Obj->getHeader()->e_machine == ELF::EM_AMDGPU && SymbolType >= ELF::STT_LOOS && SymbolType < ELF::STT_HIOS) - Type = printEnum(SymbolType, makeArrayRef(AMDGPUSymbolTypes)); + Fields[4].Str = printEnum(SymbolType, makeArrayRef(AMDGPUSymbolTypes)); else - Type = printEnum(SymbolType, makeArrayRef(ElfSymbolTypes)); - unsigned Vis = Symbol->getVisibility(); - Binding = printEnum(Symbol->getBinding(), makeArrayRef(ElfSymbolBindings)); - Visibility = printEnum(Vis, makeArrayRef(ElfSymbolVisibilities)); - Section = getSymbolSectionNdx(Obj, Symbol, FirstSym); - Name = this->dumper()->getFullSymbolName(Symbol, StrTable, true); - Fields[0].Str = Num; - Fields[1].Str = Buc; - Fields[2].Str = Value; - Fields[3].Str = Size; - Fields[4].Str = Type; - Fields[5].Str = Binding; - Fields[6].Str = Visibility; - Fields[7].Str = Section; - Fields[8].Str = Name; + Fields[4].Str = printEnum(SymbolType, makeArrayRef(ElfSymbolTypes)); + + Fields[5].Str = + printEnum(Symbol->getBinding(), makeArrayRef(ElfSymbolBindings)); + Fields[6].Str = + printEnum(Symbol->getVisibility(), makeArrayRef(ElfSymbolVisibilities)); + Fields[7].Str = getSymbolSectionNdx(Obj, Symbol, FirstSym); + Fields[8].Str = this->dumper()->getFullSymbolName(Symbol, StrTable, true); + for (auto &Entry : Fields) printField(Entry); OS << "\n"; @@ 
-3189,10 +3133,9 @@ return; auto StringTable = this->dumper()->getDynamicStringTable(); auto DynSyms = this->dumper()->dynamic_symbols(); - auto SysVHash = this->dumper()->getHashTable(); // Try printing .hash - if (SysVHash) { + if (auto SysVHash = this->dumper()->getHashTable()) { OS << "\n Symbol table of .hash for image:\n"; if (ELFT::Is64Bits) OS << " Num Buc: Value Size Type Bind Vis Ndx Name"; @@ -3200,14 +3143,12 @@ OS << " Num Buc: Value Size Type Bind Vis Ndx Name"; OS << "\n"; - uint32_t NBuckets = SysVHash->nbucket; - uint32_t NChains = SysVHash->nchain; auto Buckets = SysVHash->buckets(); auto Chains = SysVHash->chains(); - for (uint32_t Buc = 0; Buc < NBuckets; Buc++) { + for (uint32_t Buc = 0; Buc < SysVHash->nbucket; Buc++) { if (Buckets[Buc] == ELF::STN_UNDEF) continue; - for (uint32_t Ch = Buckets[Buc]; Ch < NChains; Ch = Chains[Ch]) { + for (uint32_t Ch = Buckets[Buc]; Ch < SysVHash->nchain; Ch = Chains[Ch]) { if (Ch == ELF::STN_UNDEF) break; printHashedSymbol(Obj, &DynSyms[0], Ch, StringTable, Buc); @@ -3216,17 +3157,15 @@ } // Try printing .gnu.hash - auto GnuHash = this->dumper()->getGnuHashTable(); - if (GnuHash) { + if (auto GnuHash = this->dumper()->getGnuHashTable()) { OS << "\n Symbol table of .gnu.hash for image:\n"; if (ELFT::Is64Bits) OS << " Num Buc: Value Size Type Bind Vis Ndx Name"; else OS << " Num Buc: Value Size Type Bind Vis Ndx Name"; OS << "\n"; - uint32_t NBuckets = GnuHash->nbuckets; auto Buckets = GnuHash->buckets(); - for (uint32_t Buc = 0; Buc < NBuckets; Buc++) { + for (uint32_t Buc = 0; Buc < GnuHash->nbuckets; Buc++) { if (Buckets[Buc] == ELF::STN_UNDEF) continue; uint32_t Index = Buckets[Buc]; @@ -3312,10 +3251,6 @@ template void GNUStyle::printProgramHeaders(const ELFO *Obj) { unsigned Bias = ELFT::Is64Bits ? 8 : 0; - unsigned Width = ELFT::Is64Bits ? 18 : 10; - unsigned SizeWidth = ELFT::Is64Bits ? 
8 : 7; - std::string Type, Offset, VMA, LMA, FileSz, MemSz, Flag, Align; - const Elf_Ehdr *Header = Obj->getHeader(); Field Fields[8] = {2, 17, 26, 37 + Bias, 48 + Bias, 56 + Bias, 64 + Bias, 68 + Bias}; @@ -3331,23 +3266,18 @@ else OS << " Type Offset VirtAddr PhysAddr FileSiz " << "MemSiz Flg Align\n"; + + unsigned Width = ELFT::Is64Bits ? 18 : 10; + unsigned SizeWidth = ELFT::Is64Bits ? 8 : 7; for (const auto &Phdr : unwrapOrError(Obj->program_headers())) { - Type = getElfPtType(Header->e_machine, Phdr.p_type); - Offset = to_string(format_hex(Phdr.p_offset, 8)); - VMA = to_string(format_hex(Phdr.p_vaddr, Width)); - LMA = to_string(format_hex(Phdr.p_paddr, Width)); - FileSz = to_string(format_hex(Phdr.p_filesz, SizeWidth)); - MemSz = to_string(format_hex(Phdr.p_memsz, SizeWidth)); - Flag = printPhdrFlags(Phdr.p_flags); - Align = to_string(format_hex(Phdr.p_align, 1)); - Fields[0].Str = Type; - Fields[1].Str = Offset; - Fields[2].Str = VMA; - Fields[3].Str = LMA; - Fields[4].Str = FileSz; - Fields[5].Str = MemSz; - Fields[6].Str = Flag; - Fields[7].Str = Align; + Fields[0].Str = getElfPtType(Header->e_machine, Phdr.p_type); + Fields[1].Str = to_string(format_hex(Phdr.p_offset, 8)); + Fields[2].Str = to_string(format_hex(Phdr.p_vaddr, Width)); + Fields[3].Str = to_string(format_hex(Phdr.p_paddr, Width)); + Fields[4].Str = to_string(format_hex(Phdr.p_filesz, SizeWidth)); + Fields[5].Str = to_string(format_hex(Phdr.p_memsz, SizeWidth)); + Fields[6].Str = printPhdrFlags(Phdr.p_flags); + Fields[7].Str = to_string(format_hex(Phdr.p_align, 1)); for (auto Field : Fields) printField(Field); if (Phdr.p_type == ELF::PT_INTERP) { @@ -3382,24 +3312,33 @@ template void GNUStyle::printDynamicRelocation(const ELFO *Obj, Elf_Rela R, bool IsRela) { - SmallString<32> RelocName; - std::string SymbolName; - unsigned Width = ELFT::Is64Bits ? 16 : 8; unsigned Bias = ELFT::Is64Bits ? 8 : 0; // First two fields are bit width dependent. The rest of them are after are // fixed width. 
Field Fields[5] = {0, 10 + Bias, 19 + 2 * Bias, 42 + 2 * Bias, 53 + 2 * Bias}; + unsigned Width = ELFT::Is64Bits ? 16 : 8; + Fields[0].Str = to_string(format_hex_no_prefix(R.r_offset, Width)); + Fields[1].Str = to_string(format_hex_no_prefix(R.r_info, Width)); + uint32_t SymIndex = R.getSymbol(Obj->isMips64EL()); const Elf_Sym *Sym = this->dumper()->dynamic_symbols().begin() + SymIndex; + SmallString<32> RelocName; Obj->getRelocationTypeName(R.getType(Obj->isMips64EL()), RelocName); - SymbolName = maybeDemangle( + Fields[2].Str = RelocName.c_str(); + + std::string SymbolName = maybeDemangle( unwrapOrError(Sym->getName(this->dumper()->getDynamicStringTable()))); - std::string Addend, Info, Offset, Value; - Offset = to_string(format_hex_no_prefix(R.r_offset, Width)); - Info = to_string(format_hex_no_prefix(R.r_info, Width)); - Value = to_string(format_hex_no_prefix(Sym->getValue(), Width)); + + if (!SymbolName.empty() || Sym->getValue() != 0) + Fields[3].Str = to_string(format_hex_no_prefix(Sym->getValue(), Width)); + + Fields[4].Str = SymbolName; + for (auto &Field : Fields) + printField(Field); + int64_t RelAddend = R.r_addend; + std::string Addend; if (!SymbolName.empty() && IsRela) { if (R.r_addend < 0) Addend = " - "; @@ -3407,22 +3346,9 @@ Addend = " + "; } - if (SymbolName.empty() && Sym->getValue() == 0) - Value = ""; - if (IsRela) Addend += to_string(format_hex_no_prefix(std::abs(RelAddend), 1)); - - - Fields[0].Str = Offset; - Fields[1].Str = Info; - Fields[2].Str = RelocName.c_str(); - Fields[3].Str = Value; - Fields[4].Str = SymbolName; - for (auto &Field : Fields) - printField(Field); - OS << Addend; - OS << "\n"; + OS << Addend << "\n"; } template @@ -3494,12 +3420,8 @@ // Additionally cumulative coverage of symbols for each set of buckets. 
template void GNUStyle::printHashHistogram(const ELFFile *Obj) { - - const Elf_Hash *HashTable = this->dumper()->getHashTable(); - const Elf_GnuHash *GnuHashTable = this->dumper()->getGnuHashTable(); - // Print histogram for .hash section - if (HashTable) { + if (const Elf_Hash *HashTable = this->dumper()->getHashTable()) { size_t NBucket = HashTable->nbucket; size_t NChain = HashTable->nchain; ArrayRef Buckets = HashTable->buckets(); @@ -3543,7 +3465,7 @@ } // Print histogram for .gnu.hash section - if (GnuHashTable) { + if (const Elf_GnuHash *GnuHashTable = this->dumper()->getGnuHashTable()) { size_t NBucket = GnuHashTable->nbuckets; ArrayRef Buckets = GnuHashTable->buckets(); unsigned NumSyms = this->dumper()->dynamic_symbols().size(); @@ -3912,9 +3834,6 @@ template void GNUStyle::printNotes(const ELFFile *Obj) { - const Elf_Ehdr *e = Obj->getHeader(); - bool IsCore = e->e_type == ELF::ET_CORE; - auto PrintHeader = [&](const typename ELFT::Off Offset, const typename ELFT::Addr Size) { OS << "Displaying notes found at file offset " << format_hex(Offset, 10) @@ -3951,7 +3870,7 @@ OS << '\n'; }; - if (IsCore) { + if (Obj->getHeader()->e_type == ELF::ET_CORE) { for (const auto &P : unwrapOrError(Obj->program_headers())) { if (P.p_type != PT_NOTE) continue; @@ -4095,21 +4014,21 @@ } template void LLVMStyle::printFileHeaders(const ELFO *Obj) { - const Elf_Ehdr *e = Obj->getHeader(); + const Elf_Ehdr *E = Obj->getHeader(); { DictScope D(W, "ElfHeader"); { DictScope D(W, "Ident"); - W.printBinary("Magic", makeArrayRef(e->e_ident).slice(ELF::EI_MAG0, 4)); - W.printEnum("Class", e->e_ident[ELF::EI_CLASS], makeArrayRef(ElfClass)); - W.printEnum("DataEncoding", e->e_ident[ELF::EI_DATA], + W.printBinary("Magic", makeArrayRef(E->e_ident).slice(ELF::EI_MAG0, 4)); + W.printEnum("Class", E->e_ident[ELF::EI_CLASS], makeArrayRef(ElfClass)); + W.printEnum("DataEncoding", E->e_ident[ELF::EI_DATA], makeArrayRef(ElfDataEncoding)); - W.printNumber("FileVersion", 
e->e_ident[ELF::EI_VERSION]); + W.printNumber("FileVersion", E->e_ident[ELF::EI_VERSION]); auto OSABI = makeArrayRef(ElfOSABI); - if (e->e_ident[ELF::EI_OSABI] >= ELF::ELFOSABI_FIRST_ARCH && - e->e_ident[ELF::EI_OSABI] <= ELF::ELFOSABI_LAST_ARCH) { - switch (e->e_machine) { + if (E->e_ident[ELF::EI_OSABI] >= ELF::ELFOSABI_FIRST_ARCH && + E->e_ident[ELF::EI_OSABI] <= ELF::ELFOSABI_LAST_ARCH) { + switch (E->e_machine) { case ELF::EM_AMDGPU: OSABI = makeArrayRef(AMDGPUElfOSABI); break; @@ -4121,32 +4040,32 @@ break; } } - W.printEnum("OS/ABI", e->e_ident[ELF::EI_OSABI], OSABI); - W.printNumber("ABIVersion", e->e_ident[ELF::EI_ABIVERSION]); - W.printBinary("Unused", makeArrayRef(e->e_ident).slice(ELF::EI_PAD)); + W.printEnum("OS/ABI", E->e_ident[ELF::EI_OSABI], OSABI); + W.printNumber("ABIVersion", E->e_ident[ELF::EI_ABIVERSION]); + W.printBinary("Unused", makeArrayRef(E->e_ident).slice(ELF::EI_PAD)); } - W.printEnum("Type", e->e_type, makeArrayRef(ElfObjectFileType)); - W.printEnum("Machine", e->e_machine, makeArrayRef(ElfMachineType)); - W.printNumber("Version", e->e_version); - W.printHex("Entry", e->e_entry); - W.printHex("ProgramHeaderOffset", e->e_phoff); - W.printHex("SectionHeaderOffset", e->e_shoff); - if (e->e_machine == EM_MIPS) - W.printFlags("Flags", e->e_flags, makeArrayRef(ElfHeaderMipsFlags), + W.printEnum("Type", E->e_type, makeArrayRef(ElfObjectFileType)); + W.printEnum("Machine", E->e_machine, makeArrayRef(ElfMachineType)); + W.printNumber("Version", E->e_version); + W.printHex("Entry", E->e_entry); + W.printHex("ProgramHeaderOffset", E->e_phoff); + W.printHex("SectionHeaderOffset", E->e_shoff); + if (E->e_machine == EM_MIPS) + W.printFlags("Flags", E->e_flags, makeArrayRef(ElfHeaderMipsFlags), unsigned(ELF::EF_MIPS_ARCH), unsigned(ELF::EF_MIPS_ABI), unsigned(ELF::EF_MIPS_MACH)); - else if (e->e_machine == EM_AMDGPU) - W.printFlags("Flags", e->e_flags, makeArrayRef(ElfHeaderAMDGPUFlags), + else if (E->e_machine == EM_AMDGPU) + W.printFlags("Flags", 
E->e_flags, makeArrayRef(ElfHeaderAMDGPUFlags), unsigned(ELF::EF_AMDGPU_MACH)); - else if (e->e_machine == EM_RISCV) - W.printFlags("Flags", e->e_flags, makeArrayRef(ElfHeaderRISCVFlags)); + else if (E->e_machine == EM_RISCV) + W.printFlags("Flags", E->e_flags, makeArrayRef(ElfHeaderRISCVFlags)); else - W.printFlags("Flags", e->e_flags); - W.printNumber("HeaderSize", e->e_ehsize); - W.printNumber("ProgramHeaderEntrySize", e->e_phentsize); - W.printNumber("ProgramHeaderCount", e->e_phnum); - W.printNumber("SectionHeaderEntrySize", e->e_shentsize); + W.printFlags("Flags", E->e_flags); + W.printNumber("HeaderSize", E->e_ehsize); + W.printNumber("ProgramHeaderEntrySize", E->e_phentsize); + W.printNumber("ProgramHeaderCount", E->e_phnum); + W.printNumber("SectionHeaderEntrySize", E->e_shentsize); W.printString("SectionHeaderCount", getSectionHeadersNumString(Obj)); W.printString("StringTableSectionIndex", getSectionHeaderTableIndexString(Obj)); } @@ -4593,8 +4512,6 @@ template void LLVMStyle::printNotes(const ELFFile *Obj) { ListScope L(W, "Notes"); - const Elf_Ehdr *e = Obj->getHeader(); - bool IsCore = e->e_type == ELF::ET_CORE; auto PrintHeader = [&](const typename ELFT::Off Offset, const typename ELFT::Addr Size) { @@ -4630,7 +4547,7 @@ } }; - if (IsCore) { + if (Obj->getHeader()->e_type == ELF::ET_CORE) { for (const auto &P : unwrapOrError(Obj->program_headers())) { if (P.p_type != PT_NOTE) continue; Index: tools/llvm-shlib/CMakeLists.txt =================================================================== --- tools/llvm-shlib/CMakeLists.txt +++ tools/llvm-shlib/CMakeLists.txt @@ -137,20 +137,13 @@ list(APPEND FULL_LIB_NAMES ${CMAKE_BINARY_DIR}/${CMAKE_CFG_INTDIR}/lib/${lib}.lib) endforeach() - # Need to seperate lib names with newlines. - string(REPLACE ";" "\n" FILE_CONTENT "${FULL_LIB_NAMES}") - - # Write out the full lib names into file to be read by the python script. 
- set(LIBSFILE ${CMAKE_BINARY_DIR}/${CMAKE_CFG_INTDIR}/libllvm-c.args) - file(WRITE ${LIBSFILE} "${FILE_CONTENT}") - # Generate the exports file dynamically. set(GEN_SCRIPT ${CMAKE_CURRENT_SOURCE_DIR}/gen-msvc-exports.py) set(LLVM_EXPORTED_SYMBOL_FILE ${CMAKE_BINARY_DIR}/${CMAKE_CFG_INTDIR}/libllvm-c.exports) add_custom_command(OUTPUT ${LLVM_EXPORTED_SYMBOL_FILE} - COMMAND ${PYTHON_EXECUTABLE} ${GEN_SCRIPT} --libsfile ${LIBSFILE} ${GEN_UNDERSCORE} --nm ${LLVM_TOOLS_BINARY_DIR}/llvm-nm -o ${LLVM_EXPORTED_SYMBOL_FILE} + COMMAND ${PYTHON_EXECUTABLE} ${GEN_SCRIPT} ${FULL_LIB_NAMES} ${GEN_UNDERSCORE} --nm ${LLVM_TOOLS_BINARY_DIR}/llvm-nm -o ${LLVM_EXPORTED_SYMBOL_FILE} DEPENDS ${LIB_NAMES} llvm-nm COMMENT "Generating export list for LLVM-C" VERBATIM ) Index: tools/llvm-shlib/gen-msvc-exports.py =================================================================== --- tools/llvm-shlib/gen-msvc-exports.py +++ tools/llvm-shlib/gen-msvc-exports.py @@ -82,10 +82,6 @@ def main(): parser = argparse.ArgumentParser('gen-msvc-exports') - parser.add_argument( - '-i', '--libsfile', help='file with list of libs, new line separated', - action='store', default=None - ) parser.add_argument( '-o', '--output', help='output filename', default='LLVM-C.exports' ) @@ -97,19 +93,12 @@ '--nm', help='path to the llvm-nm executable', default='llvm-nm' ) parser.add_argument( - 'libs', metavar='LIBS', nargs='*', help='list of libraries to generate export from' + 'libs', metavar='LIBS', nargs='+', help='list of libraries to generate export from' ) ns = parser.parse_args() - libs = ns.libs - - # Add if we where given a libsfile add it to the libs. 
- if ns.libsfile: - with open(ns.libsfile) as f: - libs.extend(f.read().splitlines()) - - gen_llvm_c_export(ns.output, ns.underscore, libs, ns.nm) + gen_llvm_c_export(ns.output, ns.underscore, ns.libs, ns.nm) if __name__ == '__main__': Index: unittests/Support/VirtualFileSystemTest.cpp =================================================================== --- unittests/Support/VirtualFileSystemTest.cpp +++ unittests/Support/VirtualFileSystemTest.cpp @@ -776,7 +776,7 @@ bool Local = true; ASSERT_FALSE(PFS.isLocal("/a", Local)); - ASSERT_EQ(false, Local); + EXPECT_FALSE(Local); } class InMemoryFileSystemTest : public ::testing::Test { Index: unittests/Target/WebAssembly/WebAssemblyExceptionInfoTest.cpp =================================================================== --- unittests/Target/WebAssembly/WebAssemblyExceptionInfoTest.cpp +++ unittests/Target/WebAssembly/WebAssemblyExceptionInfoTest.cpp @@ -74,7 +74,7 @@ declare i32 @__gxx_wasm_personality_v0(...) - define hidden void @test0() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { + define void @test0() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { unreachable } @@ -100,14 +100,14 @@ ; predecessors: %bb.0 successors: %bb.3, %bb.9 liveins: $value_stack - CATCH_ALL implicit-def $arguments + %0:except_ref = CATCH implicit-def $arguments CLEANUPRET implicit-def dead $arguments bb.3 (landing-pad): ; predecessors: %bb.2 successors: %bb.4, %bb.6 liveins: $value_stack - CATCH_ALL implicit-def $arguments + %1:except_ref = CATCH implicit-def $arguments BR_IF %bb.4, %58:i32, implicit-def $arguments, implicit-def $value_stack, implicit $value_stack BR %bb.6, implicit-def $arguments @@ -138,13 +138,13 @@ ; predecessors: %bb.4 successors: %bb.9 liveins: $value_stack - CATCH_ALL implicit-def $arguments + %2:except_ref = CATCH implicit-def $arguments CLEANUPRET implicit-def dead $arguments bb.9 (landing-pad): ; predecessors: %bb.2, %bb.6, %bb.8 liveins: $value_stack - CATCH_ALL 
implicit-def $arguments + %3:except_ref = CATCH implicit-def $arguments CLEANUPRET implicit-def dead $arguments bb.10: @@ -237,7 +237,7 @@ declare i32 @__gxx_wasm_personality_v0(...) - define hidden void @test1() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { + define void @test1() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { unreachable } @@ -257,7 +257,7 @@ ; predecessors: %bb.0 successors: %bb.2, %bb.8 liveins: $value_stack - %52:i32 = CATCH_I32 0, implicit-def dead $arguments + %0:except_ref = CATCH implicit-def $arguments BR_IF %bb.2, %32:i32, implicit-def $arguments, implicit-def $value_stack, implicit $value_stack BR %bb.8, implicit-def $arguments @@ -271,7 +271,7 @@ ; predecessors: %bb.2 successors: %bb.4, %bb.6 liveins: $value_stack - CATCH_ALL implicit-def $arguments + %1:except_ref = CATCH implicit-def $arguments BR_IF %bb.4, %43:i32, implicit-def $arguments, implicit-def $value_stack, implicit $value_stack BR %bb.6, implicit-def $arguments @@ -313,13 +313,13 @@ ; predecessors: %bb.4 successors: %bb.11 liveins: $value_stack - CATCH_ALL implicit-def $arguments + %2:except_ref = CATCH implicit-def $arguments CLEANUPRET implicit-def dead $arguments bb.11 (landing-pad): ; predecessors: %bb.2, %bb.6, %bb.10 liveins: $value_stack - CATCH_ALL implicit-def $arguments + %3:except_ref = CATCH implicit-def $arguments CLEANUPRET implicit-def dead $arguments bb.12: @@ -415,135 +415,3 @@ EXPECT_EQ(WE0_1->getParentException(), WE0); EXPECT_EQ(WE0_1->getExceptionDepth(), (unsigned)2); } - -// Terminate pad test -TEST(WebAssemblyExceptionInfoTest, TEST2) { - std::unique_ptr TM = createTargetMachine(); - ASSERT_TRUE(TM); - - StringRef MIRString = R"MIR( ---- | - target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" - target triple = "wasm32-unknown-unknown" - - declare i32 @__gxx_wasm_personality_v0(...) 
- declare void @_ZSt9terminatev() - declare void @__clang_call_terminate(i8*) - - define hidden void @test2() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) { - unreachable - } - -... ---- -name: test2 -liveins: - - { reg: '$arguments' } - - { reg: '$value_stack' } -body: | - bb.0: - successors: %bb.3, %bb.1 - BR %bb.3, implicit-def dead $arguments - - bb.1 (landing-pad): - ; predecessors: %bb.0 - successors: %bb.2, %bb.4 - %3:i32 = CATCH_I32 0, implicit-def dead $arguments - BR %bb.2, implicit-def dead $arguments - - bb.2: - ; predecessors: %bb.1 - successors: %bb.3(0x80000000); %bb.3(200.00%) - CATCHRET %bb.3, %bb.0, implicit-def dead $arguments - - bb.3: - ; predecessors: %bb.0, %bb.2 - RETURN_VOID implicit-def $arguments - - bb.4 (landing-pad): - ; predecessors: %bb.1 - successors: %bb.5, %bb.6 - CATCH_ALL implicit-def $arguments - BR %bb.5, implicit-def dead $arguments - - bb.5: - ; predecessors: %bb.4 - CLEANUPRET implicit-def dead $arguments - - bb.6 (landing-pad): - ; predecessors: %bb.4 - successors: %bb.7(0x80000000); %bb.7(200.00%) - %6:i32 = CATCH_I32 0, implicit-def dead $arguments - CALL_VOID @__clang_call_terminate, %7:i32, implicit-def $arguments - UNREACHABLE implicit-def $arguments - - bb.7 (landing-pad): - ; predecessors: %bb.6 - CATCH_ALL implicit-def $arguments - CALL_VOID @_ZSt9terminatev, implicit-def $arguments - UNREACHABLE implicit-def $arguments -)MIR"; - - LLVMContext Context; - std::unique_ptr MIR; - MachineModuleInfo MMI(TM.get()); - std::unique_ptr M = - parseMIR(Context, MIR, *TM, MIRString, "test2", MMI); - ASSERT_TRUE(M); - - Function *F = M->getFunction("test2"); - auto *MF = MMI.getMachineFunction(*F); - ASSERT_TRUE(MF); - - WebAssemblyExceptionInfo WEI; - MachineDominatorTree MDT; - MachineDominanceFrontier MDF; - MDT.runOnMachineFunction(*MF); - MDF.getBase().analyze(MDT.getBase()); - WEI.recalculate(MDT, MDF); - - // Exception info structure: - // |- bb1 (ehpad), bb2, bb4, bb5, bb6, bb7 - // |- bb4 
(ehpad), bb5, bb6, bb7 - // |- bb6 (ehpad), bb7 - // - // Here, bb6 is a terminate pad with a 'catch' instruction, and bb7 is a - // terminate pad with a 'catch_all' instruction, In this case we put bb6 and - // bb7 into one exception. - - auto *MBB1 = MF->getBlockNumbered(1); - auto *WE0 = WEI.getExceptionFor(MBB1); - ASSERT_TRUE(WE0); - EXPECT_EQ(WE0->getEHPad(), MBB1); - EXPECT_EQ(WE0->getParentException(), nullptr); - EXPECT_EQ(WE0->getExceptionDepth(), (unsigned)1); - - auto *MBB2 = MF->getBlockNumbered(2); - WE0 = WEI.getExceptionFor(MBB2); - ASSERT_TRUE(WE0); - EXPECT_EQ(WE0->getEHPad(), MBB1); - - auto *MBB4 = MF->getBlockNumbered(4); - auto *WE0_0 = WEI.getExceptionFor(MBB4); - ASSERT_TRUE(WE0_0); - EXPECT_EQ(WE0_0->getEHPad(), MBB4); - EXPECT_EQ(WE0_0->getParentException(), WE0); - EXPECT_EQ(WE0_0->getExceptionDepth(), (unsigned)2); - - auto *MBB5 = MF->getBlockNumbered(5); - WE0_0 = WEI.getExceptionFor(MBB5); - ASSERT_TRUE(WE0_0); - EXPECT_EQ(WE0_0->getEHPad(), MBB4); - - auto *MBB6 = MF->getBlockNumbered(6); - auto *WE0_0_0 = WEI.getExceptionFor(MBB6); - ASSERT_TRUE(WE0_0_0); - EXPECT_EQ(WE0_0_0->getEHPad(), MBB6); - EXPECT_EQ(WE0_0_0->getParentException(), WE0_0); - EXPECT_EQ(WE0_0_0->getExceptionDepth(), (unsigned)3); - - auto *MBB7 = MF->getBlockNumbered(7); - WE0_0_0 = WEI.getExceptionFor(MBB7); - ASSERT_TRUE(WE0_0_0); - EXPECT_EQ(WE0_0_0->getEHPad(), MBB6); -} Index: utils/gn/secondary/llvm/lib/Target/AArch64/BUILD.gn =================================================================== --- utils/gn/secondary/llvm/lib/Target/AArch64/BUILD.gn +++ utils/gn/secondary/llvm/lib/Target/AArch64/BUILD.gn @@ -67,6 +67,7 @@ "AArch64AsmPrinter.cpp", "AArch64BranchTargets.cpp", "AArch64CallLowering.cpp", + "AArch64CallingConvention.cpp", "AArch64CleanupLocalDynamicTLSPass.cpp", "AArch64CollectLOH.cpp", "AArch64CompressJumpTables.cpp", Index: utils/gn/secondary/llvm/lib/Target/ARM/BUILD.gn =================================================================== --- 
utils/gn/secondary/llvm/lib/Target/ARM/BUILD.gn +++ utils/gn/secondary/llvm/lib/Target/ARM/BUILD.gn @@ -66,6 +66,7 @@ "ARMBaseInstrInfo.cpp", "ARMBaseRegisterInfo.cpp", "ARMCallLowering.cpp", + "ARMCallingConv.cpp", "ARMCodeGenPrepare.cpp", "ARMComputeBlockSize.cpp", "ARMConstantIslandPass.cpp", Index: utils/gn/secondary/llvm/lib/Target/PowerPC/BUILD.gn =================================================================== --- utils/gn/secondary/llvm/lib/Target/PowerPC/BUILD.gn +++ utils/gn/secondary/llvm/lib/Target/PowerPC/BUILD.gn @@ -45,6 +45,7 @@ "PPCBranchSelector.cpp", "PPCCCState.cpp", "PPCCTRLoops.cpp", + "PPCCallingConv.cpp", "PPCEarlyReturn.cpp", "PPCExpandISEL.cpp", "PPCFastISel.cpp", Index: utils/lit/setup.py =================================================================== --- utils/lit/setup.py +++ utils/lit/setup.py @@ -52,8 +52,8 @@ Source ====== -The *lit* source is available as part of LLVM, in the LLVM SVN repository: -http://llvm.org/svn/llvm-project/llvm/trunk/utils/lit. +The *lit* source is available as part of LLVM, in the LLVM source repository: +https://github.com/llvm/llvm-project/tree/master/llvm/utils/lit """, classifiers=[ Index: utils/release/build_llvm_package.bat =================================================================== --- utils/release/build_llvm_package.bat +++ utils/release/build_llvm_package.bat @@ -52,17 +52,7 @@ REM Setting CMAKE_CL_SHOWINCLUDES_PREFIX to work around PR27226. 
-set cmake_flags=^ - -DCMAKE_BUILD_TYPE=Release ^ - -DLLVM_ENABLE_ASSERTIONS=ON ^ - -DLLVM_INSTALL_TOOLCHAIN_ONLY=ON ^ - -DLLVM_BUILD_LLVM_C_DYLIB=ON ^ - -DCMAKE_INSTALL_UCRT_LIBRARIES=ON ^ - -DCLANG_FORMAT_VS_VERSION=%clang_format_vs_version% ^ - -DPACKAGE_VERSION=%package_version% ^ - -DLLDB_RELOCATABLE_PYTHON=1 ^ - -DLLDB_TEST_COMPILER=%cd%\build32_stage0\bin\clang.exe ^ - -DCMAKE_CL_SHOWINCLUDES_PREFIX="Note: including file: " +set cmake_flags=-DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_ASSERTIONS=ON -DLLVM_INSTALL_TOOLCHAIN_ONLY=ON -DCMAKE_INSTALL_UCRT_LIBRARIES=ON -DCLANG_FORMAT_VS_VERSION=%clang_format_vs_version% -DPACKAGE_VERSION=%package_version% -DLLDB_RELOCATABLE_PYTHON=1 -DLLDB_TEST_COMPILER=%cd%\build32_stage0\bin\clang.exe -DCMAKE_CL_SHOWINCLUDES_PREFIX="Note: including file: " REM TODO: Run the "check-all" tests. Index: utils/release/merge-request.sh =================================================================== --- utils/release/merge-request.sh +++ utils/release/merge-request.sh @@ -100,6 +100,9 @@ 7.0) release_metabug="39106" ;; + 8.0) + release_metabug="40331" + ;; *) echo "error: invalid stable version" exit 1