diff --git a/llvm/include/llvm/CodeGen/FastISel.h b/llvm/include/llvm/CodeGen/FastISel.h
--- a/llvm/include/llvm/CodeGen/FastISel.h
+++ b/llvm/include/llvm/CodeGen/FastISel.h
@@ -224,10 +224,6 @@
   /// makes sense (for example, on function calls)
   MachineInstr *EmitStartPt;
 
-  /// Last local value flush point. On a subsequent flush, no local value will
-  /// sink past this point.
-  MachineBasicBlock::iterator LastFlushPoint;
-
 public:
   virtual ~FastISel();
 
diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -215,7 +215,6 @@
   LastLocalValue = EmitStartPt;
   recomputeInsertPt();
   SavedInsertPt = FuncInfo.InsertPt;
-  LastFlushPoint = FuncInfo.InsertPt;
 }
 
 bool FastISel::hasTrivialKill(const Value *V) {
@@ -437,8 +436,6 @@
   assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
          "Invalid iterator!");
   while (I != E) {
-    if (LastFlushPoint == I)
-      LastFlushPoint = E;
     if (SavedInsertPt == I)
       SavedInsertPt = E;
     if (EmitStartPt == I)
@@ -1189,11 +1186,6 @@
 
   // Handle simple inline asms.
   if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
-    // If the inline asm has side effects, then make sure that no local value
-    // lives across by flushing the local value map.
-    if (IA->hasSideEffects())
-      flushLocalValueMap();
-
     // Don't attempt to handle constraints.
     if (!IA->getConstraintString().empty())
       return false;
@@ -1223,15 +1215,6 @@
   if (const auto *II = dyn_cast<IntrinsicInst>(Call))
     return selectIntrinsicCall(II);
 
-  // Usually, it does not make sense to initialize a value,
-  // make an unrelated function call and use the value, because
-  // it tends to be spilled on the stack. So, we move the pointer
-  // to the last local value to the beginning of the block, so that
-  // all the values which have already been materialized,
-  // appear after the call. It also makes sense to skip intrinsics
-  // since they tend to be inlined.
-  flushLocalValueMap();
-
   return lowerCall(Call);
 }
 
@@ -1388,20 +1371,6 @@
     return selectXRayCustomEvent(II);
   case Intrinsic::xray_typedevent:
     return selectXRayTypedEvent(II);
-
-  case Intrinsic::memcpy:
-  case Intrinsic::memcpy_element_unordered_atomic:
-  case Intrinsic::memcpy_inline:
-  case Intrinsic::memmove:
-  case Intrinsic::memmove_element_unordered_atomic:
-  case Intrinsic::memset:
-  case Intrinsic::memset_element_unordered_atomic:
-    // Flush the local value map just like we do for regular calls,
-    // to avoid excessive spills and reloads.
-    // These intrinsics mostly turn into library calls at O0; and
-    // even memcpy_inline should be treated like one for this purpose.
-    flushLocalValueMap();
-    break;
   }
 
   return fastLowerIntrinsicCall(II);