Changeset View
Standalone View
llvm/lib/Transforms/Scalar/EarlyCSE.cpp
Show First 20 Lines • Show All 126 Lines • ▼ Show 20 Lines | if (CallInst *CI = dyn_cast<CallInst>(Inst)) { | ||||
// the rounding mode to change. | // the rounding mode to change. | ||||
if (CFP->getRoundingMode() && | if (CFP->getRoundingMode() && | ||||
CFP->getRoundingMode() == RoundingMode::Dynamic) | CFP->getRoundingMode() == RoundingMode::Dynamic) | ||||
return false; | return false; | ||||
return true; | return true; | ||||
} | } | ||||
} | } | ||||
} | } | ||||
return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy(); | return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy() && | ||||
// FIXME: Currently the calls which may access the thread id may | |||||
// be considered as not accessing the memory. But this is | |||||
// problematic for coroutines, since coroutines may resume in a | |||||
// different thread. So we disable the optimization here for the | |||||
// correctness. However, it may block many other correct | |||||
// optimizations. Revert this one when we detect the memory | |||||
// accessing kind more precisely. | |||||
!CI->getFunction()->isPresplitCoroutine(); | |||||
} | } | ||||
return isa<CastInst>(Inst) || isa<UnaryOperator>(Inst) || | return isa<CastInst>(Inst) || isa<UnaryOperator>(Inst) || | ||||
isa<BinaryOperator>(Inst) || isa<GetElementPtrInst>(Inst) || | isa<BinaryOperator>(Inst) || isa<GetElementPtrInst>(Inst) || | ||||
isa<CmpInst>(Inst) || isa<SelectInst>(Inst) || | isa<CmpInst>(Inst) || isa<SelectInst>(Inst) || | ||||
isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) || | isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) || | ||||
isa<ShuffleVectorInst>(Inst) || isa<ExtractValueInst>(Inst) || | isa<ShuffleVectorInst>(Inst) || isa<ExtractValueInst>(Inst) || | ||||
isa<InsertValueInst>(Inst) || isa<FreezeInst>(Inst); | isa<InsertValueInst>(Inst) || isa<FreezeInst>(Inst); | ||||
} | } | ||||
▲ Show 20 Lines • Show All 314 Lines • ▼ Show 20 Lines | struct CallValue { | ||||
} | } | ||||
/// Decide whether CallValue-based CSE can value-number this instruction.
/// Only read-only, non-void calls qualify; everything else is rejected.
static bool canHandle(Instruction *Inst) {
  // Void-returning instructions produce no value to number.
  if (Inst->getType()->isVoidTy())
    return false;

  auto *Call = dyn_cast<CallInst>(Inst);
  if (!Call)
    return false;

  // Only calls that at most read memory are safe to deduplicate.
  if (!Call->onlyReadsMemory())
    return false;

  // FIXME: Currently the calls which may access the thread id may
  // be considered as not accessing the memory. But this is
  // problematic for coroutines, since coroutines may resume in a
  // different thread. So we disable the optimization here for the
  // correctness. However, it may block many other correct
  // optimizations. Revert this one when we detect the memory
  // accessing kind more precisely.
  return !Call->getFunction()->isPresplitCoroutine();
}
}; | }; | ||||
} // end anonymous namespace | } // end anonymous namespace | ||||
namespace llvm { | namespace llvm { | ||||
▲ Show 20 Lines • Show All 1,316 Lines • Show Last 20 Lines |