//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on. It builds on
// alias analysis information, and tries to provide a lazy, caching interface to
// a common kind of alias information query.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "memdep"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Function.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/PredIteratorCache.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");
STATISTIC(NumCacheNonLocalPtr,
"Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
"Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr,
"Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
"Number of block queries that were completely cached");
char MemoryDependenceAnalysis::ID = 0;
// Register this pass...
static RegisterPass<MemoryDependenceAnalysis> X("memdep",
"Memory Dependence Analysis", false, true);
MemoryDependenceAnalysis::MemoryDependenceAnalysis()
: FunctionPass(&ID), PredCache(0) {
}
MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
}
/// Clean up memory in between runs
void MemoryDependenceAnalysis::releaseMemory() {
LocalDeps.clear();
NonLocalDeps.clear();
NonLocalPointerDeps.clear();
ReverseLocalDeps.clear();
ReverseNonLocalDeps.clear();
ReverseNonLocalPtrDeps.clear();
PredCache->clear();
}
/// getAnalysisUsage - Does not modify anything. It uses Alias Analysis.
///
void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequiredTransitive<AliasAnalysis>();
}
bool MemoryDependenceAnalysis::runOnFunction(Function &) {
AA = &getAnalysis<AliasAnalysis>();
if (PredCache == 0)
PredCache.reset(new PredIteratorCache());
return false;
}
/// RemoveFromReverseMap - This is a helper function that removes Val from
/// 'Inst's set in ReverseMap. If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void RemoveFromReverseMap(DenseMap<Instruction*,
SmallPtrSet<KeyTy, 4> > &ReverseMap,
Instruction *Inst, KeyTy Val) {
typename DenseMap<Instruction*, SmallPtrSet<KeyTy, 4> >::iterator
InstIt = ReverseMap.find(Inst);
assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
bool Found = InstIt->second.erase(Val);
assert(Found && "Invalid reverse map!");
Found = Found; // Self-assignment silences 'unused variable' warnings when asserts are disabled.
if (InstIt->second.empty())
ReverseMap.erase(InstIt);
}
/// getCallSiteDependencyFrom - Private helper for finding the local
/// dependencies of a call site.
MemDepResult MemoryDependenceAnalysis::
getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
BasicBlock::iterator ScanIt, BasicBlock *BB) {
// Walk backwards through the block, looking for dependencies
while (ScanIt != BB->begin()) {
Instruction *Inst = --ScanIt;
// If this inst is a memory op, get the pointer it accessed
Value *Pointer = 0;
uint64_t PointerSize = 0;
if (StoreInst *S = dyn_cast<StoreInst>(Inst)) {
Pointer = S->getPointerOperand();
PointerSize = AA->getTypeStoreSize(S->getOperand(0)->getType());
} else if (VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
Pointer = V->getOperand(0);
PointerSize = AA->getTypeStoreSize(V->getType());
} else if (isFreeCall(Inst)) {
Pointer = Inst->getOperand(1);
// calls to free() erase the entire structure
PointerSize = ~0ULL;
} else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
// Debug intrinsics don't cause dependences.
if (isa<DbgInfoIntrinsic>(Inst)) continue;
CallSite InstCS = CallSite::get(Inst);
// If these two calls do not interfere, look past it.
switch (AA->getModRefInfo(CS, InstCS)) {
case AliasAnalysis::NoModRef:
// If the two calls don't interact (e.g. InstCS is readnone) keep
// scanning.
continue;
case AliasAnalysis::Ref:
// If the two calls read the same memory locations and CS is a readonly
// function, then we have two cases: 1) the calls may not interfere with
// each other at all. 2) the calls may produce the same value. In case
// #1 we want to ignore the values, in case #2, we want to return Inst
// as a Def dependence. This allows us to CSE in cases like:
// X = strlen(P);
// memchr(...);
// Y = strlen(P); // Y = X
if (isReadOnlyCall) {
if (CS.getCalledFunction() != 0 &&
CS.getCalledFunction() == InstCS.getCalledFunction())
return MemDepResult::getDef(Inst);
// Ignore unrelated read/read call dependences.
continue;
}
// FALL THROUGH
default:
return MemDepResult::getClobber(Inst);
}
} else {
// Non-memory instruction.
continue;
}
// If the call site may read or write the location accessed by this
// instruction, it depends on it.
if (AA->getModRefInfo(CS, Pointer, PointerSize) != AliasAnalysis::NoModRef)
return MemDepResult::getClobber(Inst);
}
// No dependence found. If this is the entry block of the function, it is a
// clobber, otherwise it is non-local.
if (BB != &BB->getParent()->getEntryBlock())
return MemDepResult::getNonLocal();
return MemDepResult::getClobber(ScanIt);
}
/// getPointerDependencyFrom - Return the instruction on which a memory
/// location depends. If isLoad is true, this routine ignores may-aliases with
/// read-only operations.
MemDepResult MemoryDependenceAnalysis::
getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
BasicBlock::iterator ScanIt, BasicBlock *BB) {
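// When non-null, invariantTag holds the llvm.invariant.start token of an
// invariant region we are currently scanning through: we entered it by
// passing the matching llvm.invariant.end below, so stores inside the region
// cannot affect the query and are skipped until the tag itself is reached.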
Value *invariantTag = 0;
// Walk backwards through the basic block, looking for dependencies.
while (ScanIt != BB->begin()) {
Instruction *Inst = --ScanIt;
// If we're in an invariant region, no dependencies can be found before
// we pass an invariant-begin marker.
if (invariantTag == Inst) {
invariantTag = 0;
continue;
} else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
// If we pass an invariant-end marker, then we've just entered an
// invariant region and can start ignoring dependencies.
if (II->getIntrinsicID() == Intrinsic::invariant_end) {
uint64_t invariantSize = ~0ULL;
if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getOperand(2)))
invariantSize = CI->getZExtValue();
AliasAnalysis::AliasResult R =
AA->alias(II->getOperand(3), invariantSize, MemPtr, MemSize);
if (R == AliasAnalysis::MustAlias) {
invariantTag = II->getOperand(1);
continue;
}
// If we reach a lifetime begin or end marker, then the query ends here
// because the value is undefined.
} else if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
II->getIntrinsicID() == Intrinsic::lifetime_end) {
uint64_t invariantSize = ~0ULL;
if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getOperand(1)))
invariantSize = CI->getZExtValue();
AliasAnalysis::AliasResult R =
AA->alias(II->getOperand(2), invariantSize, MemPtr, MemSize);
if (R == AliasAnalysis::MustAlias)
return MemDepResult::getDef(II);
}
}
// If we're querying on a load and we're in an invariant region, we're done
// at this point. Nothing a load depends on can live in an invariant region.
if (isLoad && invariantTag) continue;
// Debug intrinsics don't cause dependences.
if (isa<DbgInfoIntrinsic>(Inst)) continue;
// Values depend on loads if the pointers are must aliased. This means that
// a load depends on another must aliased load from the same value.
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
Value *Pointer = LI->getPointerOperand();
uint64_t PointerSize = AA->getTypeStoreSize(LI->getType());
// If we found a pointer, check if it could be the same as our pointer.
AliasAnalysis::AliasResult R =
AA->alias(Pointer, PointerSize, MemPtr, MemSize);
if (R == AliasAnalysis::NoAlias)
continue;
// Loads don't depend on other loads that only may-alias them.
if (isLoad && R == AliasAnalysis::MayAlias)
continue;
// Stores depend on may and must aliased loads, loads depend on must-alias
// loads.
return MemDepResult::getDef(Inst);
}
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
// There can't be stores to the value we care about inside an
// invariant region.
if (invariantTag) continue;
// If alias analysis can tell that this store is guaranteed to not modify
// the query pointer, ignore it. Use getModRefInfo to handle cases where
// the query pointer points to constant memory etc.
if (AA->getModRefInfo(SI, MemPtr, MemSize) == AliasAnalysis::NoModRef)
continue;
// Ok, this store might clobber the query pointer. Check to see if it is
// a must alias: in this case, we want to return this as a def.
Value *Pointer = SI->getPointerOperand();
uint64_t PointerSize = AA->getTypeStoreSize(SI->getOperand(0)->getType());
// If we found a pointer, check if it could be the same as our pointer.
AliasAnalysis::AliasResult R =
AA->alias(Pointer, PointerSize, MemPtr, MemSize);
if (R == AliasAnalysis::NoAlias)
continue;
if (R == AliasAnalysis::MayAlias)
return MemDepResult::getClobber(Inst);
return MemDepResult::getDef(Inst);
}
// If this is an allocation, and if we know that the accessed pointer is to
// the allocation, return Def. This means that there is no dependence and
// the access can be optimized based on that. For example, a load could
// turn into undef.
// Note: Only determine this to be a malloc if Inst is the malloc call, not
// a subsequent bitcast of the malloc call result. There can be stores to
// the malloced memory between the malloc call and its bitcast uses, and we
// need to continue scanning until the malloc call.
if (isa<AllocaInst>(Inst) || extractMallocCall(Inst)) {
Value *AccessPtr = MemPtr->getUnderlyingObject();
if (AccessPtr == Inst ||
AA->alias(Inst, 1, AccessPtr, 1) == AliasAnalysis::MustAlias)
return MemDepResult::getDef(Inst);
continue;
}
// See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
switch (AA->getModRefInfo(Inst, MemPtr, MemSize)) {
case AliasAnalysis::NoModRef:
// If the call has no effect on the queried pointer, just ignore it.
continue;
case AliasAnalysis::Mod:
// If we're in an invariant region, we can ignore calls that ONLY
// modify the pointer.
if (invariantTag) continue;
return MemDepResult::getClobber(Inst);
case AliasAnalysis::Ref:
// If the call is known to never store to the pointer, and if this is a
// load query, we can safely ignore it (scan past it).
if (isLoad)
continue;
default:
// Otherwise, there is a potential dependence. Return a clobber.
return MemDepResult::getClobber(Inst);
}
}
// No dependence found. If this is the entry block of the function, it is a
// clobber, otherwise it is non-local.
if (BB != &BB->getParent()->getEntryBlock())
return MemDepResult::getNonLocal();
return MemDepResult::getClobber(ScanIt);
}
/// getDependency - Return the instruction on which a memory operation
/// depends.
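/// A typical client query looks roughly like this (illustrative sketch only;
/// 'MD' stands for a MemoryDependenceAnalysis reference the client holds):
///   MemDepResult Res = MD.getDependency(QueryInst);
///   if (Res.isDef())          { /* Res.getInst() produces the value. */ }
///   else if (Res.isClobber()) { /* Res.getInst() may overwrite it.   */ }
///   else                      { /* Non-local: use the non-local APIs. */ }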
MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
Instruction *ScanPos = QueryInst;
// Check for a cached result
MemDepResult &LocalCache = LocalDeps[QueryInst];
// If the cached entry is non-dirty, just return it. Note that this depends
// on MemDepResult's default constructing to 'dirty'.
if (!LocalCache.isDirty())
return LocalCache;
// Otherwise, if we have a dirty entry, we know we can start the scan at that
// instruction, which may save us some work.
if (Instruction *Inst = LocalCache.getInst()) {
ScanPos = Inst;
RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
}
BasicBlock *QueryParent = QueryInst->getParent();
Value *MemPtr = 0;
uint64_t MemSize = 0;
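// MemPtr/MemSize, when set by the cases below, describe the memory location
// the query instruction accesses; they stay null for volatile accesses,
// calls handled by getCallSiteDependencyFrom, and non-memory instructions.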
// Do the scan.
if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
// No dependence found. If this is the entry block of the function, it is a
// clobber, otherwise it is non-local.
if (QueryParent != &QueryParent->getParent()->getEntryBlock())
LocalCache = MemDepResult::getNonLocal();
else
LocalCache = MemDepResult::getClobber(QueryInst);
} else if (StoreInst *SI = dyn_cast<StoreInst>(QueryInst)) {
// If this is a volatile store, don't mess around with it. Just return the
// previous instruction as a clobber.
if (SI->isVolatile())
LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
else {
MemPtr = SI->getPointerOperand();
MemSize = AA->getTypeStoreSize(SI->getOperand(0)->getType());
}
} else if (LoadInst *LI = dyn_cast<LoadInst>(QueryInst)) {
// If this is a volatile load, don't mess around with it. Just return the
// previous instruction as a clobber.
if (LI->isVolatile())
LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
else {
MemPtr = LI->getPointerOperand();
MemSize = AA->getTypeStoreSize(LI->getType());
}
} else if (isFreeCall(QueryInst)) {
MemPtr = QueryInst->getOperand(1);
// calls to free() erase the entire structure, not just a field.
MemSize = ~0UL;
} else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
int IntrinsicID = 0; // Intrinsic IDs start at 1.
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
IntrinsicID = II->getIntrinsicID();
switch (IntrinsicID) {
case Intrinsic::lifetime_start:
case Intrinsic::lifetime_end:
case Intrinsic::invariant_start:
MemPtr = QueryInst->getOperand(2);
MemSize = cast<ConstantInt>(QueryInst->getOperand(1))->getZExtValue();
break;
case Intrinsic::invariant_end:
MemPtr = QueryInst->getOperand(3);
MemSize = cast<ConstantInt>(QueryInst->getOperand(2))->getZExtValue();
break;
default:
CallSite QueryCS = CallSite::get(QueryInst);
bool isReadOnly = AA->onlyReadsMemory(QueryCS);
LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
QueryParent);
}
} else {
// Non-memory instruction.
LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
}
// If we need to do a pointer scan, make it happen.
if (MemPtr) {
bool isLoad = !QueryInst->mayWriteToMemory();
if (IntrinsicInst *II = dyn_cast<MemoryUseIntrinsic>(QueryInst)) {
isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_end;
}
LocalCache = getPointerDependencyFrom(MemPtr, MemSize, isLoad, ScanPos,
QueryParent);
}
// Remember the result!
if (Instruction *I = LocalCache.getInst())
ReverseLocalDeps[I].insert(QueryInst);
return LocalCache;
}
#ifndef NDEBUG
/// AssertSorted - This method is used when -debug is specified to verify that
/// cache arrays are properly kept sorted.
static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
int Count = -1) {
if (Count == -1) Count = Cache.size();
if (Count == 0) return;
for (unsigned i = 1; i != unsigned(Count); ++i)
assert(Cache[i-1] <= Cache[i] && "Cache isn't sorted!");
}
#endif
/// getNonLocalCallDependency - Perform a full dependency query for the
/// specified call, returning the set of blocks that the value is
/// potentially live across. The returned set of results will include a
/// "NonLocal" result for all blocks where the value is live across.
///
/// This method assumes the instruction returns a "NonLocal" dependency
/// within its own block.
///
/// This returns a reference to an internal data structure that may be
/// invalidated on the next non-local query or when an instruction is
/// removed. Clients must copy this data if they want it around longer than
/// that.
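/// A client would typically walk the returned entries like this (sketch only;
/// 'CS' is the query call site and 'MD' the analysis reference):
///   const NonLocalDepInfo &Deps = MD.getNonLocalCallDependency(CS);
///   for (unsigned i = 0, e = Deps.size(); i != e; ++i)
///     if (Instruction *D = Deps[i].second.getInst())
///       ; // D defines or clobbers the call's memory at the end of the
///         // basic block Deps[i].first.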
const MemoryDependenceAnalysis::NonLocalDepInfo &
MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
"getNonLocalCallDependency should only be used on calls with non-local deps!");
PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
NonLocalDepInfo &Cache = CacheP.first;
/// DirtyBlocks - This is the set of blocks that need to be recomputed. In
/// the cached case, this can happen due to instructions being deleted etc. In
/// the uncached case, this starts out as the set of predecessors we care
/// about.
SmallVector<BasicBlock*, 32> DirtyBlocks;
if (!Cache.empty()) {
// Okay, we have a cache entry. If we know it is not dirty, just return it
// with no computation.
if (!CacheP.second) {
NumCacheNonLocal++;
return Cache;
}
// If we already have a partially computed set of results, scan them to
// determine what is dirty, seeding our initial DirtyBlocks worklist.
for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end();
I != E; ++I)
if (I->second.isDirty())
DirtyBlocks.push_back(I->first);
// Sort the cache so that we can do fast binary search lookups below.
std::sort(Cache.begin(), Cache.end());
++NumCacheDirtyNonLocal;
//cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
// << Cache.size() << " cached: " << *QueryInst;
} else {
// Seed DirtyBlocks with each of the preds of QueryInst's block.
BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI)
DirtyBlocks.push_back(*PI);
NumUncacheNonLocal++;
}
// isReadonlyCall - If this is a read-only call, we can be more aggressive.
bool isReadonlyCall = AA->onlyReadsMemory(QueryCS);
SmallPtrSet<BasicBlock*, 64> Visited;
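// Only the first NumSortedEntries elements of Cache are known to be sorted;
// entries appended while processing dirty blocks land after this prefix, so
// the binary searches below stay valid.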
unsigned NumSortedEntries = Cache.size();
DEBUG(AssertSorted(Cache));
// Iterate while we still have blocks to update.
while (!DirtyBlocks.empty()) {
BasicBlock *DirtyBB = DirtyBlocks.back();
DirtyBlocks.pop_back();
// Already processed this block?
if (!Visited.insert(DirtyBB))
continue;
// Do a binary search to see if we already have an entry for this block in
// the cache set. If so, find it.
DEBUG(AssertSorted(Cache, NumSortedEntries));
NonLocalDepInfo::iterator Entry =
std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
std::make_pair(DirtyBB, MemDepResult()));
if (Entry != Cache.begin() && prior(Entry)->first == DirtyBB)
--Entry;
MemDepResult *ExistingResult = 0;
if (Entry != Cache.begin()+NumSortedEntries &&
Entry->first == DirtyBB) {
// If we already have an entry, and if it isn't already dirty, the block
// is done.
if (!Entry->second.isDirty())
continue;
// Otherwise, remember this slot so we can update the value.
ExistingResult = &Entry->second;
}
// If the dirty entry has a pointer, start scanning from it so we don't have
// to rescan the entire block.
BasicBlock::iterator ScanPos = DirtyBB->end();
if (ExistingResult) {
if (Instruction *Inst = ExistingResult->getInst()) {
ScanPos = Inst;
// We're removing QueryInst's use of Inst.
RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
QueryCS.getInstruction());
}
}
// Find out if this block has a local dependency for QueryInst.
MemDepResult Dep;
if (ScanPos != DirtyBB->begin()) {
Dep = getCallSiteDependencyFrom(QueryCS, isReadonlyCall, ScanPos, DirtyBB);
} else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
// No dependence found. If this is the entry block of the function, it is
// a clobber, otherwise it is non-local.
Dep = MemDepResult::getNonLocal();
} else {
Dep = MemDepResult::getClobber(ScanPos);
}
// If we had a dirty entry for the block, update it. Otherwise, just add
// a new entry.
if (ExistingResult)
*ExistingResult = Dep;
else
Cache.push_back(std::make_pair(DirtyBB, Dep));
// If the block has a dependency (i.e. it isn't completely transparent to
// the value), remember the association!
if (!Dep.isNonLocal()) {
// Keep the ReverseNonLocalDeps map up to date so we can efficiently
// update this when we remove instructions.
if (Instruction *Inst = Dep.getInst())
ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
} else {
// If the block *is* completely transparent to the load, we need to check
// the predecessors of this block. Add them to our worklist.
for (BasicBlock **PI = PredCache->GetPreds(DirtyBB); *PI; ++PI)
DirtyBlocks.push_back(*PI);
}
}
return Cache;
}
/// getNonLocalPointerDependency - Perform a full dependency query for an
/// access to the specified (non-volatile) memory location, returning the
/// set of instructions that either define or clobber the value.
///
/// This method assumes the pointer has a "NonLocal" dependency within its
/// own block.
///
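/// The entries pushed into 'Result' are (BasicBlock, MemDepResult) pairs and
/// can be inspected the same way as in the call form above; a sketch:
///   SmallVector<NonLocalDepEntry, 16> Result;
///   MD.getNonLocalPointerDependency(Ptr, /*isLoad=*/true, LoadBB, Result);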
void MemoryDependenceAnalysis::
getNonLocalPointerDependency(Value *Pointer, bool isLoad, BasicBlock *FromBB,
SmallVectorImpl<NonLocalDepEntry> &Result) {
assert(isa<PointerType>(Pointer->getType()) &&
"Can't get pointer deps of a non-pointer!");
Result.clear();
// We know that the pointer value is live into FromBB; find the def/clobbers
// from its predecessors.
const Type *EltTy = cast<PointerType>(Pointer->getType())->getElementType();
uint64_t PointeeSize = AA->getTypeStoreSize(EltTy);
// This is the set of blocks we've inspected, and the pointer we consider in
// each block. Because of critical edges, we currently bail out if querying
// a block with multiple different pointers. This can happen during PHI
// translation.
DenseMap<BasicBlock*, Value*> Visited;
if (!getNonLocalPointerDepFromBB(Pointer, PointeeSize, isLoad, FromBB,
Result, Visited, true))
return;
Result.clear();
Result.push_back(std::make_pair(FromBB,
MemDepResult::getClobber(FromBB->begin())));
}
/// GetNonLocalInfoForBlock - Compute the memdep value for BB with
/// Pointer/PointeeSize using either cached information in Cache or by doing a
/// lookup (which may use dirty cache info if available). If we do a lookup,
/// add the result to the cache.
MemDepResult MemoryDependenceAnalysis::
GetNonLocalInfoForBlock(Value *Pointer, uint64_t PointeeSize,
bool isLoad, BasicBlock *BB,
NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
// Do a binary search to see if we already have an entry for this block in
// the cache set. If so, find it.
NonLocalDepInfo::iterator Entry =
std::upper_bound(Cache->begin(), Cache->begin()+NumSortedEntries,
std::make_pair(BB, MemDepResult()));
if (Entry != Cache->begin() && prior(Entry)->first == BB)
--Entry;
MemDepResult *ExistingResult = 0;
if (Entry != Cache->begin()+NumSortedEntries && Entry->first == BB)
ExistingResult = &Entry->second;
// If we have a cached entry, and it is non-dirty, use it as the value for
// this dependency.
if (ExistingResult && !ExistingResult->isDirty()) {
++NumCacheNonLocalPtr;
return *ExistingResult;
}
// Otherwise, we have to scan for the value. If we have a dirty cache
// entry, start scanning from its position, otherwise we scan from the end
// of the block.
BasicBlock::iterator ScanPos = BB->end();
if (ExistingResult && ExistingResult->getInst()) {
assert(ExistingResult->getInst()->getParent() == BB &&
"Instruction invalidated?");
++NumCacheDirtyNonLocalPtr;
ScanPos = ExistingResult->getInst();
// Eliminating the dirty entry from 'Cache', so update the reverse info.
ValueIsLoadPair CacheKey(Pointer, isLoad);
RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
} else {
++NumUncacheNonLocalPtr;
}
// Scan the block for the dependency.
MemDepResult Dep = getPointerDependencyFrom(Pointer, PointeeSize, isLoad,
ScanPos, BB);
// If we had a dirty entry for the block, update it. Otherwise, just add
// a new entry.
if (ExistingResult)
*ExistingResult = Dep;
else
Cache->push_back(std::make_pair(BB, Dep));
// If the block has a dependency (i.e. it isn't completely transparent to
// the value), remember the reverse association because we just added it
// to Cache!
if (Dep.isNonLocal())
return Dep;
// Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
// update MemDep when we remove instructions.
Instruction *Inst = Dep.getInst();
assert(Inst && "Didn't depend on anything?");
ValueIsLoadPair CacheKey(Pointer, isLoad);
ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
return Dep;
}
/// SortNonLocalDepInfoCache - Sort a NonLocalDepInfo cache, given a certain
/// number of elements in the array that are already properly ordered. This is
/// optimized for the case when only a few entries are added.
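/// (For instance, after a query that visits only one or two new blocks we can
/// merge those entries into the sorted prefix directly instead of paying for
/// a full std::sort of the whole cache.)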
static void
SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
unsigned NumSortedEntries) {
switch (Cache.size() - NumSortedEntries) {
case 0:
// done, no new entries.
break;
case 2: {
// Two new entries, insert the last one into place.
MemoryDependenceAnalysis::NonLocalDepEntry Val = Cache.back();
Cache.pop_back();
MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
std::upper_bound(Cache.begin(), Cache.end()-1, Val);
Cache.insert(Entry, Val);
// FALL THROUGH.
}
case 1:
// One new entry, just insert the new value at the appropriate position.
if (Cache.size() != 1) {
MemoryDependenceAnalysis::NonLocalDepEntry Val = Cache.back();
Cache.pop_back();
MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
std::upper_bound(Cache.begin(), Cache.end(), Val);
Cache.insert(Entry, Val);
}
break;
default:
// Added many values, do a full scale sort.
std::sort(Cache.begin(), Cache.end());
break;
}
}
/// isPHITranslatable - Return true if the specified computation is derived from
/// a PHI node in the current block and if it is simple enough for us to handle.
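/// For example, if the current block contains
///   %p = phi i8* [ %a, %pred1 ], [ %b, %pred2 ]
///   %c = bitcast i8* %p to i32*
/// then %c is PHI translatable: in %pred1 it corresponds to a bitcast of %a.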
static bool isPHITranslatable(Instruction *Inst) {
if (isa<PHINode>(Inst))
return true;
// We can handle bitcast of a PHI, but the PHI needs to be in the same block
// as the bitcast.
if (BitCastInst *BC = dyn_cast<BitCastInst>(Inst)) {
Instruction *OpI = dyn_cast<Instruction>(BC->getOperand(0));
if (OpI == 0 || OpI->getParent() != Inst->getParent())
return true;
return isPHITranslatable(OpI);
}
// We can translate a GEP if all of its operands defined in this block are phi
// translatable.
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
Instruction *OpI = dyn_cast<Instruction>(GEP->getOperand(i));
if (OpI == 0 || OpI->getParent() != Inst->getParent())
continue;
if (!isPHITranslatable(OpI))
return false;
}
return true;
}
// We can handle "add X, C" if X is live-in or itself PHI translatable.
if (Inst->getOpcode() == Instruction::Add &&
isa<ConstantInt>(Inst->getOperand(1))) {
Instruction *OpI = dyn_cast<Instruction>(Inst->getOperand(0));
if (OpI == 0 || OpI->getParent() != Inst->getParent())
return true;
return isPHITranslatable(OpI);
}
// cerr << "MEMDEP: Could not PHI translate: " << *Pointer;
// if (isa<BitCastInst>(PtrInst) || isa<GetElementPtrInst>(PtrInst))
//   cerr << "OP:\t\t\t\t" << *PtrInst->getOperand(0);
return false;
}
/// GetPHITranslatedValue - Given a computation that satisfies the
/// isPHITranslatable predicate, see if we can translate the computation into
/// the specified predecessor block. If so, return that value.
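/// Continuing the example above, translating %c from the current block into
/// %pred1 first translates the PHI operand %p to %a and then looks for an
/// existing 'bitcast i8* %a to i32*'; this routine only reuses values that
/// are already available and never creates new instructions (see
/// InsertPHITranslatedPointer for the inserting variant).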
Value *MemoryDependenceAnalysis::
GetPHITranslatedValue(Value *InVal, BasicBlock *CurBB, BasicBlock *Pred,
const TargetData *TD) const {
// If the input value is not an instruction, or if it is not defined in CurBB,
// then we don't need to phi translate it.
Instruction *Inst = dyn_cast<Instruction>(InVal);
if (Inst == 0 || Inst->getParent() != CurBB)
return InVal;
if (PHINode *PN = dyn_cast<PHINode>(Inst))
return PN->getIncomingValueForBlock(Pred);
// Handle bitcast of PHI.
if (BitCastInst *BC = dyn_cast<BitCastInst>(Inst)) {
// PHI translate the input operand.
Value *PHIIn = GetPHITranslatedValue(BC->getOperand(0), CurBB, Pred, TD);
if (PHIIn == 0) return 0;
// Constants are trivial to phi translate.
if (Constant *C = dyn_cast<Constant>(PHIIn))
return ConstantExpr::getBitCast(C, BC->getType());
// Otherwise we have to see if a bitcasted version of the incoming pointer
// is available. If so, we can use it, otherwise we have to fail.
for (Value::use_iterator UI = PHIIn->use_begin(), E = PHIIn->use_end();
UI != E; ++UI) {
if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI))
if (BCI->getType() == BC->getType())
return BCI;
}
return 0;
}
// Handle getelementptr with at least one PHI translatable operand.
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
SmallVector<Value*, 8> GEPOps;
for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
Value *GEPOp = GEP->getOperand(i);
// No PHI translation is needed for operands whose values are live in to
// the predecessor block.
if (!isa<Instruction>(GEPOp) ||
cast<Instruction>(GEPOp)->getParent() != CurBB) {
GEPOps.push_back(GEPOp);
continue;
}
// If the operand is a phi node, do phi translation.
Value *InOp = GetPHITranslatedValue(GEPOp, CurBB, Pred, TD);
if (InOp == 0) return 0;
GEPOps.push_back(InOp);
}
// Simplify the GEP to handle 'gep x, 0' -> x etc.
if (Value *V = SimplifyGEPInst(&GEPOps[0], GEPOps.size(), TD))
return V;
// Scan to see if we have this GEP available.
Value *APHIOp = GEPOps[0];
for (Value::use_iterator UI = APHIOp->use_begin(), E = APHIOp->use_end();
UI != E; ++UI) {
if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI))
if (GEPI->getNumOperands() == GEPOps.size() &&
GEPI->getParent()->getParent() == CurBB->getParent()) {
// Check to see if all of the operands of the GEP are the same.
bool Mismatch = false;
for (unsigned i = 0, e = GEPOps.size(); i != e; ++i)
if (GEPI->getOperand(i) != GEPOps[i]) {
Mismatch = true;
break;
}
if (!Mismatch)
return GEPI;
}
}
return 0;
}
// Handle add with a constant RHS.
if (Inst->getOpcode() == Instruction::Add &&
isa<ConstantInt>(Inst->getOperand(1))) {
// PHI translate the LHS.
Value *LHS;
Constant *RHS = cast<ConstantInt>(Inst->getOperand(1));
Instruction *OpI = dyn_cast<Instruction>(Inst->getOperand(0));
bool isNSW = cast<BinaryOperator>(Inst)->hasNoSignedWrap();
bool isNUW = cast<BinaryOperator>(Inst)->hasNoUnsignedWrap();
if (OpI == 0 || OpI->getParent() != Inst->getParent())
LHS = Inst->getOperand(0);
else {
LHS = GetPHITranslatedValue(Inst->getOperand(0), CurBB, Pred, TD);
if (LHS == 0)
return 0;
}
// If the PHI translated LHS is an add of a constant, fold the immediates.
if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(LHS))
if (BOp->getOpcode() == Instruction::Add)
if (ConstantInt *CI = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
LHS = BOp->getOperand(0);
RHS = ConstantExpr::getAdd(RHS, CI);
isNSW = isNUW = false;
}
// See if the add simplifies away.
if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, TD))
return Res;
// Otherwise, see if we have this add available somewhere.
for (Value::use_iterator UI = LHS->use_begin(), E = LHS->use_end();
UI != E; ++UI) {
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(*UI))
if (BO->getOpcode() == Instruction::Add &&
BO->getOperand(0) == LHS && BO->getOperand(1) == RHS &&
BO->getParent()->getParent() == CurBB->getParent())
return BO;
}
return 0;
}
// Otherwise, we failed.
return 0;
}
/// GetAvailablePHITranslatedValue - Return the value computed by
/// GetPHITranslatedValue if it dominates PredBB, otherwise return null.
Value *MemoryDependenceAnalysis::
GetAvailablePHITranslatedValue(Value *V,
BasicBlock *CurBB, BasicBlock *PredBB,
const TargetData *TD,
const DominatorTree &DT) const {
// See if PHI translation succeeds.
V = GetPHITranslatedValue(V, CurBB, PredBB, TD);
if (V == 0) return 0;
// Make sure the value is live in the predecessor.
if (Instruction *Inst = dyn_cast_or_null<Instruction>(V))
if (!DT.dominates(Inst->getParent(), PredBB))
return 0;
return V;
}
/// InsertPHITranslatedPointer - Insert a computation of the PHI translated
/// version of 'V' for the edge PredBB->CurBB into the end of the PredBB
/// block. All newly created instructions are added to the NewInsts list.
///
Value *MemoryDependenceAnalysis::
InsertPHITranslatedPointer(Value *InVal, BasicBlock *CurBB,
BasicBlock *PredBB, const TargetData *TD,
const DominatorTree &DT,
SmallVectorImpl<Instruction*> &NewInsts) const {
// See if we have a version of this value already available and dominating
// PredBB. If so, there is no need to insert a new copy.
if (Value *Res = GetAvailablePHITranslatedValue(InVal, CurBB, PredBB, TD, DT))
return Res;
// If we don't have an available version of this value, it must be an
// instruction.
Instruction *Inst = cast<Instruction>(InVal);
// Handle bitcast of PHI translatable value.
if (BitCastInst *BC = dyn_cast<BitCastInst>(Inst)) {
Value *OpVal = InsertPHITranslatedPointer(BC->getOperand(0),
CurBB, PredBB, TD, DT, NewInsts);
if (OpVal == 0) return 0;
// Otherwise insert a bitcast at the end of PredBB.
BitCastInst *New = new BitCastInst(OpVal, InVal->getType(),
InVal->getName()+".phi.trans.insert",
PredBB->getTerminator());
NewInsts.push_back(New);
return New;
}
// Handle getelementptr with at least one PHI operand.
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
SmallVector<Value*, 8> GEPOps;
BasicBlock *CurBB = GEP->getParent();
for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
Value *OpVal = InsertPHITranslatedPointer(GEP->getOperand(i),
CurBB, PredBB, TD, DT, NewInsts);
if (OpVal == 0) return 0;
GEPOps.push_back(OpVal);
}
GetElementPtrInst *Result =
GetElementPtrInst::Create(GEPOps[0], GEPOps.begin()+1, GEPOps.end(),
InVal->getName()+".phi.trans.insert",
PredBB->getTerminator());
Result->setIsInBounds(GEP->isInBounds());
NewInsts.push_back(Result);
return Result;
}
#if 0
// FIXME: This code works, but it is unclear that we actually want to insert
// a big chain of computation in order to make a value available in a block.
// This needs to be evaluated carefully to consider its cost trade offs.
// Handle add with a constant RHS.
if (Inst->getOpcode() == Instruction::Add &&
isa<ConstantInt>(Inst->getOperand(1))) {
// PHI translate the LHS.
Value *OpVal = InsertPHITranslatedPointer(Inst->getOperand(0),
CurBB, PredBB, TD, DT, NewInsts);
if (OpVal == 0) return 0;
BinaryOperator *Res = BinaryOperator::CreateAdd(OpVal, Inst->getOperand(1),
InVal->getName()+".phi.trans.insert",
PredBB->getTerminator());
Res->setHasNoSignedWrap(cast<BinaryOperator>(Inst)->hasNoSignedWrap());
Res->setHasNoUnsignedWrap(cast<BinaryOperator>(Inst)->hasNoUnsignedWrap());
NewInsts.push_back(Res);
return Res;
}
#endif
return 0;
}
/// getNonLocalPointerDepFromBB - Perform a dependency query based on
/// pointer/pointeesize starting at the end of StartBB. Add any clobber/def
/// results to the results vector and keep track of which blocks are visited in
/// 'Visited'.
///
/// This has special behavior for the first block queries (when SkipFirstBlock
/// is true). In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns false on success, or true to indicate that it could
/// not compute dependence information for some reason. This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
bool MemoryDependenceAnalysis::
getNonLocalPointerDepFromBB(Value *Pointer, uint64_t PointeeSize,
bool isLoad, BasicBlock *StartBB,
SmallVectorImpl<NonLocalDepEntry> &Result,
DenseMap<BasicBlock*, Value*> &Visited,
bool SkipFirstBlock) {
// Look up the cached info for Pointer.
ValueIsLoadPair CacheKey(Pointer, isLoad);