GT-2369 - Merge remote-tracking branch 'github/caheckman_loadguard'

caheckman 2019-06-19 17:14:02 -04:00
commit 4563ac9d38
20 changed files with 3811 additions and 915 deletions

View file

@ -696,6 +696,34 @@ int4 mostsigbit_set(uintb val)
return res;
}
/// Count the number of zero bits that are more significant than the most
/// significant one bit in the representation of the given value.
/// \param val is the given value
/// \return the number of zero bits
int4 count_leading_zeros(uintb val)
{
if (val == 0)
return 8*sizeof(uintb);
uintb mask = ~((uintb)0);
int4 maskSize = 4*sizeof(uintb);
mask &= (mask << maskSize);
int4 bit = 0;
do {
if ((mask & val)==0) {
bit += maskSize;
maskSize >>= 1;
mask |= (mask >> maskSize);
}
else {
maskSize >>= 1;
mask &= (mask << maskSize);
}
} while(maskSize != 0);
return bit;
}
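A minimal usage sketch of the new helper (the header name is an assumption; the expected values assume an 8-byte uintb and are worked out from the binary-search loop above):
#include <cassert>
#include "address.hh"          // assumed location of the count_leading_zeros() declaration
void sketch_count_leading_zeros(void)
{
  assert(count_leading_zeros(0) == 64);                  // no one bits at all: full bit width
  assert(count_leading_zeros(1) == 63);                  // only bit 0 is set
  assert(count_leading_zeros((uintb)1 << 63) == 0);      // most significant bit already set
}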
/// Return the smallest number of the form 2^n-1 that is greater than or equal to the given value
/// \param val is the given value
/// \return the mask

View file

@ -482,7 +482,7 @@ inline uintb pcode_left(uintb val,int4 sa) {
return val << sa;
}
extern bool signbit_negative(uintb val,int4 size); ///< Return true if the sign-big is set
extern bool signbit_negative(uintb val,int4 size); ///< Return true if the sign-bit is set
extern uintb calc_mask(int4 size); ///< Calculate a mask for a given byte size
extern uintb uintb_negate(uintb in,int4 size); ///< Negate the \e sized value
extern uintb sign_extend(uintb in,int4 sizein,int4 sizeout); ///< Sign-extend a value between two byte sizes
@ -493,7 +493,8 @@ extern void byte_swap(intb &val,int4 size); ///< Swap bytes in the given value
extern uintb byte_swap(uintb val,int4 size); ///< Return the given value with bytes swapped
extern int4 leastsigbit_set(uintb val); ///< Return index of least significant bit set in given value
extern int4 mostsigbit_set(uintb val); ///< Return index of most significant bit set in given val
extern int4 mostsigbit_set(uintb val); ///< Return index of most significant bit set in given value
extern int4 count_leading_zeros(uintb val); ///< Return the number of leading zero bits in the given value
extern uintb coveringmask(uintb val); ///< Return a mask that \e covers the given value
extern int4 bit_transitions(uintb val,int4 sz); ///< Calculate the number of bit transitions in the sized value

View file

@ -26,7 +26,7 @@
vector<ArchitectureCapability *> ArchitectureCapability::thelist;
const uint4 ArchitectureCapability::majorversion = 3;
const uint4 ArchitectureCapability::minorversion = 4;
const uint4 ArchitectureCapability::minorversion = 5;
/// This builds a list of just the ArchitectureCapability extensions
void ArchitectureCapability::initialize(void)

View file

@ -369,6 +369,31 @@ bool FlowBlock::dominates(const FlowBlock *subBlock) const
return false;
}
/// \brief Check if the condition from the given block holds for \b this block
///
/// We assume the given block has 2 out-edges and that \b this block is immediately reached by
/// one of these two edges. Some condition holds when traversing the out-edge to \b this, and the complement
/// of the condition holds for traversing the other out-edge. We verify that the condition holds for
/// this entire block. More specifically, we check that there is no path to \b this through the
/// sibling edge, where the complement of the condition holds (unless we loop back through the conditional block).
/// \param cond is the conditional block with 2 out-edges
/// \return \b true if the condition holds for this block
bool FlowBlock::restrictedByConditional(const FlowBlock *cond) const
{
if (sizeIn() == 1) return true; // It's impossible for any path to come through the sibling to this
if (getImmedDom() != cond) return false; // This is not dominated by conditional block at all
for(int4 i=0;i<sizeIn();++i) {
const FlowBlock *inBlock = getIn(i);
if (inBlock == cond) continue; // The unique edge from cond to this
while(inBlock != this) {
if (inBlock == cond) return false; // Must have come through sibling
inBlock = inBlock->getImmedDom();
}
}
return true;
}
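A hedged illustration of what the walk accepts and rejects; the block names are hypothetical and not taken from the commit:
// Suppose cond branches to block A on its true edge and block B on its false
// edge, and A and B both fall through to a join block C.
//   A->restrictedByConditional(cond): sizeIn() == 1, so the true-edge
//     condition trivially holds and the method returns true.
//   C->restrictedByConditional(cond): walking immediate dominators from the
//     in-edge out of B reaches cond, proving a path through the sibling
//     (false) edge, so the method returns false.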
/// \return \b true if \b this is the top of a loop
bool FlowBlock::hasLoopIn(void) const

View file

@ -215,6 +215,7 @@ public:
FlowBlock *getFrontLeaf(void); ///< Get the first leaf FlowBlock
int4 calcDepth(const FlowBlock *leaf) const; ///< Get the depth of the given component FlowBlock
bool dominates(const FlowBlock *subBlock) const; ///< Does \b this block dominate the given block
bool restrictedByConditional(const FlowBlock *cond) const;
int4 sizeOut(void) const { return outofthis.size(); } ///< Get the number of out edges
int4 sizeIn(void) const { return intothis.size(); } ///< Get the number of in edges
bool hasLoopIn(void) const; ///< Is there a looping edge coming into \b this block

View file

@ -3246,7 +3246,7 @@ int4 ActionConditionalConst::apply(Funcdata &data)
if (flipEdge)
constEdge = 1 - constEdge;
FlowBlock *constBlock = bl->getOut(constEdge);
if (constBlock->sizeIn() != 1) continue; // Must only be one path to constant block directly through CBRANCH
if (!constBlock->restrictedByConditional(bl)) continue; // Make sure condition holds
propagateConstant(varVn,constVn,constBlock,data);
}
return 0;
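A hedged source-level sketch of what the relaxed test enables; the variable names are illustrative only:
// if (x == 0) {
//   if (y)
//     doSomething();
//   use(x);          // this block has two in-edges, but both stay on the
// }                  // "x == 0" side, so x can still be replaced with 0
//
// Under the old sizeIn() != 1 test the block containing use(x) would have been
// skipped; restrictedByConditional() accepts it because no path into it passes
// through the sibling (x != 0) edge.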

View file

@ -232,6 +232,9 @@ public:
/// \return \b true if the Varnode is fully linked
bool isHeritaged(Varnode *vn) { return (heritage.heritagePass(vn->getAddr())>=0); }
const list<LoadGuard> &getLoadGuards(void) const { return heritage.getLoadGuards(); } ///< Get the list of guarded LOADs
const list<LoadGuard> &getStoreGuards(void) const { return heritage.getStoreGuards(); } ///< Get the list of guarded STOREs
// Function prototype and call specification routines
int4 numCalls(void) const { return qlst.size(); } ///< Get the number of calls made by \b this function
FuncCallSpecs *getCallSpecs(int4 i) const { return qlst[i]; } ///< Get the i-th call specification

View file

@ -144,11 +144,8 @@ void Heritage::clearInfoList(void)
{
vector<HeritageInfo>::iterator iter;
for(iter=infolist.begin();iter!=infolist.end();++iter) {
(*iter).deadremoved = 0;
(*iter).deadcodedelay = (*iter).delay;
(*iter).warningissued = false;
}
for(iter=infolist.begin();iter!=infolist.end();++iter)
(*iter).reset();
}
/// \brief Collect free reads, writes, and inputs in the given address range
@ -454,6 +451,403 @@ void Heritage::splitPieces(const vector<Varnode *> &vnlist,PcodeOp *insertop,
}
}
/// \brief Find the last PcodeOps that write to specific addresses that flow to specific sites
///
/// Given a set of sites for which data-flow needs to be preserved at a specific address, find
/// the \e last ops that write to the address such that data flows to the site
/// only through \e artificial COPYs and MULTIEQUALs. A COPY/MULTIEQUAL is artificial if all
/// of its input and output Varnodes have the same storage address. The specific sites are
/// presented as artificial COPY ops. The final set of ops that are not artificial will all
/// have an output Varnode that matches the specific address of a COPY sink and will need to
/// be marked address forcing. The original set of COPY sinks will be extended to all artificial
/// COPY/MULTIEQUALs encountered. Every PcodeOp encountered will have its mark set.
/// \param copySinks is the list of sinks that we are trying to find flow to
/// \param forces is the final list of address forcing PcodeOps
void Heritage::findAddressForces(vector<PcodeOp *> &copySinks,vector<PcodeOp *> &forces)
{
// Mark the sinks
for(int4 i=0;i<copySinks.size();++i) {
PcodeOp *op = copySinks[i];
op->setMark();
}
// Mark everything back-reachable from a sink, trimming at non-artificial ops
int4 pos = 0;
while(pos < copySinks.size()) {
PcodeOp *op = copySinks[pos];
Address addr = op->getOut()->getAddr(); // Address being flowed to
pos += 1;
int4 maxIn = op->numInput();
for(int4 i=0;i<maxIn;++i) {
Varnode *vn = op->getIn(i);
if (!vn->isWritten()) continue;
if (vn->isAddrForce()) continue; // Already marked address forced
PcodeOp *newOp = vn->getDef();
if (newOp->isMark()) continue; // Already visited this op
newOp->setMark();
OpCode opc = newOp->code();
bool isArtificial = (opc == CPUI_COPY || opc == CPUI_MULTIEQUAL);
if (isArtificial) {
int4 maxInNew = newOp->numInput();
for(int4 j=0;j<maxInNew;++j) {
Varnode *inVn = newOp->getIn(j);
if (addr != inVn->getAddr()) {
isArtificial = false;
break;
}
}
}
if (isArtificial)
copySinks.push_back(newOp);
else
forces.push_back(newOp);
}
}
}
/// \brief Eliminate a COPY sink preserving its data-flow
///
/// Given a COPY from a storage location to itself, propagate the input Varnode
/// version of the storage location to all the ops reading the output Varnode, so
/// the output no longer has any descendants. Then eliminate the COPY.
/// \param op is the given COPY sink
void Heritage::propagateCopyAway(PcodeOp *op)
{
Varnode *inVn = op->getIn(0);
while(inVn->isWritten()) { // Follow any COPY chain to earliest input
PcodeOp *nextOp = inVn->getDef();
if (nextOp->code() != CPUI_COPY) break;
Varnode *nextIn = nextOp->getIn(0);
if (nextIn->getAddr() != inVn->getAddr()) break;
inVn = nextIn;
}
fd->totalReplace(op->getOut(),inVn);
fd->opDestroy(op);
}
/// \brief Mark the boundary of artificial ops introduced by load guards
///
/// Having just completed renaming, run through all new COPY sinks from load guards
/// and mark boundary Varnodes (Varnodes whose data-flow along all paths traverses only
/// COPY/INDIRECT/MULTIEQUAL ops and hits a load guard). This lets dead code removal
/// run forward from the boundary while still preserving the address force on the load guard.
void Heritage::handleNewLoadCopies(void)
{
if (loadCopyOps.empty()) return;
vector<PcodeOp *> forces;
int4 copySinkSize = loadCopyOps.size();
findAddressForces(loadCopyOps, forces);
if (!forces.empty()) {
RangeList loadRanges;
for(list<LoadGuard>::const_iterator iter=loadGuard.begin();iter!=loadGuard.end();++iter) {
const LoadGuard &guard( *iter );
loadRanges.insertRange(guard.spc, guard.minimumOffset, guard.maximumOffset);
}
// Mark everything on the boundary as address forced to prevent dead-code removal
for(int4 i=0;i<forces.size();++i) {
PcodeOp *op = forces[i];
Varnode *vn = op->getOut();
if (loadRanges.inRange(vn->getAddr(), 1)) // If we are within one of the guarded ranges
vn->setAddrForce(); // then consider the output address forced
op->clearMark();
}
}
// Eliminate or propagate away original COPY sinks
for(int4 i=0;i<copySinkSize;++i) {
PcodeOp *op = loadCopyOps[i];
propagateCopyAway(op); // Make sure load guard COPYs no longer exist
}
// Clear marks on remaining artificial COPYs
for(int4 i=copySinkSize;i<loadCopyOps.size();++i) {
PcodeOp *op = loadCopyOps[i];
op->clearMark();
}
loadCopyOps.clear(); // We have handled all the load guard COPY ops
}
/// Make some determination of the range of possible values for a LOAD based
/// on a partial value set analysis. This can sometimes establish
/// - minimumOffset - otherwise the original constant pulled with the LOAD is used
/// - step - if the partial analysis shows a consistent step and direction
/// - maximumOffset - in rare cases
///
/// \b analysisState is set (to 1) if no further range analysis is needed
/// \param valueSet is the calculated value set as seen by the LOAD operation
void LoadGuard::establishRange(const ValueSetRead &valueSet)
{
const CircleRange &range( valueSet.getRange() );
uintb rangeSize = range.getSize();
uintb size;
if (range.isEmpty()) {
minimumOffset = pointerBase;
size = 0x1000;
}
else if (range.isFull() || rangeSize > 0xffffff) {
minimumOffset = pointerBase;
size = 0x1000;
analysisState = 1; // Don't bother doing more analysis
}
else {
step = (rangeSize == 3) ? range.getStep() : 0; // Check for consistent step
size = 0x1000;
if (valueSet.isLeftStable()) {
minimumOffset = range.getMin();
}
else if (valueSet.isRightStable()) {
if (pointerBase < range.getEnd()) {
minimumOffset = pointerBase;
size = (range.getEnd() - pointerBase);
}
else {
minimumOffset = range.getMin();
size = rangeSize * range.getStep();
}
}
else
minimumOffset = pointerBase;
}
uintb max = spc->getHighest();
if (minimumOffset > max) {
minimumOffset = max;
maximumOffset = minimumOffset; // Something is seriously wrong
}
else {
uintb maxSize = (max - minimumOffset) + 1;
if (size > maxSize)
size = maxSize;
maximumOffset = minimumOffset + size -1;
}
}
void LoadGuard::finalizeRange(const ValueSetRead &valueSet)
{
analysisState = 1; // In all cases the settings determined here are final
const CircleRange &range( valueSet.getRange() );
uintb rangeSize = range.getSize();
if (rangeSize == 0x100 || rangeSize == 0x10000) {
// These sizes likely result from the storage size of the index
if (step == 0) // If we didn't see signs of iteration
rangeSize = 0; // don't use this range
}
if (rangeSize > 1 && rangeSize < 0xffffff) { // Did we converge to something reasonable
analysisState = 2; // Mark that we got a definitive result
if (rangeSize > 2)
step = range.getStep();
minimumOffset = range.getMin();
maximumOffset = (range.getEnd() - 1) & range.getMask(); // NOTE: Don't subtract a whole step
if (maximumOffset < minimumOffset) { // Values extend into what is usually stack parameters
maximumOffset = spc->getHighest();
analysisState = 1; // Remove the lock as we have likely overflowed
}
}
if (minimumOffset > spc->getHighest())
minimumOffset = spc->getHighest();
if (maximumOffset > spc->getHighest())
maximumOffset = spc->getHighest();
}
void Heritage::analyzeNewLoadGuards(void)
{
bool nothingToDo = true;
if (!loadGuard.empty()) {
if (loadGuard.back().analysisState == 0) // Check if unanalyzed
nothingToDo = false;
}
if (!storeGuard.empty()) {
if (storeGuard.back().analysisState == 0)
nothingToDo = false;
}
if (nothingToDo) return;
vector<Varnode *> sinks;
vector<PcodeOp *> reads;
list<LoadGuard>::iterator loadIter = loadGuard.end();
while(loadIter != loadGuard.begin()) {
--loadIter;
LoadGuard &guard( *loadIter );
if (guard.analysisState != 0) break;
reads.push_back(guard.op);
sinks.push_back(guard.op->getIn(1)); // The CPUI_LOAD pointer
}
list<LoadGuard>::iterator storeIter = storeGuard.end();
while(storeIter != storeGuard.begin()) {
--storeIter;
LoadGuard &guard( *storeIter );
if (guard.analysisState != 0) break;
reads.push_back(guard.op);
sinks.push_back(guard.op->getIn(1)); // The CPUI_STORE pointer
}
AddrSpace *stackSpc = fd->getArch()->getStackSpace();
Varnode *stackReg = (Varnode *)0;
if (stackSpc != (AddrSpace *)0 && stackSpc->numSpacebase() > 0)
stackReg = fd->findSpacebaseInput(stackSpc);
ValueSetSolver vsSolver;
vsSolver.establishValueSets(sinks, reads, stackReg, false);
WidenerNone widener;
vsSolver.solve(10000,widener);
list<LoadGuard>::iterator iter;
bool runFullAnalysis = false;
for(iter=loadIter;iter!=loadGuard.end(); ++iter) {
LoadGuard &guard( *iter );
guard.establishRange(vsSolver.getValueSetRead(guard.op->getSeqNum()));
if (guard.analysisState == 0)
runFullAnalysis = true;
}
for(iter=storeIter;iter!=storeGuard.end(); ++iter) {
LoadGuard &guard( *iter );
guard.establishRange(vsSolver.getValueSetRead(guard.op->getSeqNum()));
if (guard.analysisState == 0)
runFullAnalysis = true;
}
if (runFullAnalysis) {
WidenerFull fullWidener;
vsSolver.solve(10000, fullWidener);
for (iter = loadIter; iter != loadGuard.end(); ++iter) {
LoadGuard &guard(*iter);
guard.finalizeRange(vsSolver.getValueSetRead(guard.op->getSeqNum()));
}
for (iter = storeIter; iter != storeGuard.end(); ++iter) {
LoadGuard &guard(*iter);
guard.finalizeRange(vsSolver.getValueSetRead(guard.op->getSeqNum()));
}
}
}
/// \brief Generate a guard record given an indexed LOAD into a stack space
///
/// Record the LOAD op and the (likely) range of addresses in the stack space that
/// might be loaded from.
/// \param node is the path element containing the constructed Address
/// \param op is the LOAD PcodeOp
/// \param spc is the stack space
void Heritage::generateLoadGuard(StackNode &node,PcodeOp *op,AddrSpace *spc)
{
loadGuard.push_back(LoadGuard());
loadGuard.back().set(op,spc,node.offset);
}
/// \brief Generate a guard record given an indexed STORE to a stack space
///
/// Record the STORE op and the (likely) range of addresses in the stack space that
/// might be stored to.
/// \param node is the path element containing the constructed Address
/// \param op is the STORE PcodeOp
/// \param spc is the stack space
void Heritage::generateStoreGuard(StackNode &node,PcodeOp *op,AddrSpace *spc)
{
storeGuard.push_back(LoadGuard());
storeGuard.back().set(op,spc,node.offset);
}
/// \brief Trace input stackpointer to any indexed loads
///
/// Look for expressions of the form val = *(SP(i) + vn + #c), where the base stack
/// pointer has an (optional) constant added to it and a non-constant index, then a
/// value is loaded from the resulting address. The LOAD operations are added to the list
/// of ops that potentially need to be guarded during a heritage pass.
/// \param spc is the particular address space with a stackpointer (into it)
void Heritage::discoverIndexedStackPointers(AddrSpace *spc)
{
// We need to be careful of exponential ladders, so we mark Varnodes independently of
// the depth first path we are traversing.
vector<Varnode *> markedVn;
vector<StackNode> path;
for(int4 i=0;i<spc->numSpacebase();++i) {
const VarnodeData &stackPointer(spc->getSpacebase(i));
Varnode *spInput = fd->findVarnodeInput(stackPointer.size, stackPointer.getAddr());
if (spInput == (Varnode *)0) continue;
path.push_back(StackNode(spInput,0,0));
while(!path.empty()) {
StackNode &curNode(path.back());
if (curNode.iter == curNode.vn->endDescend()) {
path.pop_back();
continue;
}
PcodeOp *op = *curNode.iter;
++curNode.iter;
Varnode *outVn = op->getOut();
if (outVn != (Varnode *)0 && outVn->isMark()) continue; // Don't revisit Varnodes
switch(op->code()) {
case CPUI_INT_ADD:
{
Varnode *otherVn = op->getIn(1-op->getSlot(curNode.vn));
if (otherVn->isConstant()) {
uintb newOffset = spc->wrapOffset(curNode.offset + otherVn->getOffset());
StackNode nextNode(outVn,newOffset,curNode.traversals);
if (nextNode.iter != nextNode.vn->endDescend()) {
outVn->setMark();
path.push_back(nextNode);
markedVn.push_back(outVn);
}
}
else {
StackNode nextNode(outVn,curNode.offset,curNode.traversals | StackNode::nonconstant_index);
if (nextNode.iter != nextNode.vn->endDescend()) {
outVn->setMark();
path.push_back(nextNode);
markedVn.push_back(outVn);
}
}
break;
}
case CPUI_INDIRECT:
case CPUI_COPY:
{
StackNode nextNode(outVn,curNode.offset,curNode.traversals);
if (nextNode.iter != nextNode.vn->endDescend()) {
outVn->setMark();
path.push_back(nextNode);
markedVn.push_back(outVn);
}
break;
}
case CPUI_MULTIEQUAL:
{
StackNode nextNode(outVn,curNode.offset,curNode.traversals | StackNode::multiequal);
if (nextNode.iter != nextNode.vn->endDescend()) {
outVn->setMark();
path.push_back(nextNode);
markedVn.push_back(outVn);
}
break;
}
case CPUI_LOAD:
{
// Note that if ANY path has one of the traversals (non-constant ADD or MULTIEQUAL), then
// THIS path must have one of the traversals, because the only other acceptable path elements
// (INDIRECT/COPY/constant ADD) have only one path through.
if (curNode.traversals != 0) {
generateLoadGuard(curNode,op,spc);
}
break;
}
case CPUI_STORE:
{
if (curNode.traversals != 0) {
generateStoreGuard(curNode, op, spc);
}
break;
}
default:
break;
}
}
}
for(int4 i=0;i<markedVn.size();++i)
markedVn[i]->clearMark();
}
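A hedged example of the source pattern this traversal is meant to flag; the function and array are hypothetical:
// int f(int i) {
//   int local[16];          // lives on the stack
//   return local[i];        // lowers to  v = LOAD(SP + #offset + i*4)
// }
//
// The LOAD pointer is built from the stack-pointer input plus a non-constant
// index, so the depth-first walk reaches it with the nonconstant_index flag
// set and generateLoadGuard() records a LoadGuard for it.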
/// \brief Normalize p-code ops so that phi-node placement and renaming works
///
/// The traditional phi-node placement and renaming algorithms don't expect
@ -516,7 +910,7 @@ void Heritage::guard(const Address &addr,int4 size,vector<Varnode *> &read,vecto
guardReturns(flags,addr,size,write);
if (fd->getArch()->highPtrPossible(addr,size)) {
guardStores(addr,size,write);
// guardLoads(flags,addr,size,write);
guardLoads(flags,addr,size,write);
}
}
}
@ -632,6 +1026,48 @@ void Heritage::guardStores(const Address &addr,int4 size,vector<Varnode *> &writ
}
}
/// \brief Guard LOAD ops in preparation for the renaming algorithm
///
/// Each op in the loadGuard list may pull values from an indexed range on the stack.
/// A COPY guard for the given address range is inserted before any guarded LOAD whose
/// indexed range intersects it.
/// \param flags is boolean properties associated with the address
/// \param addr is the first address of the given range
/// \param size is the number of bytes in the given range
/// \param write is the list of written Varnodes in the range (may be updated)
void Heritage::guardLoads(uint4 flags,const Address &addr,int4 size,vector<Varnode *> &write)
{
PcodeOp *copyop;
list<LoadGuard>::iterator iter;
if ((flags & Varnode::addrtied)==0) return; // If not address tied, don't consider for index alias
iter = loadGuard.begin();
while(iter!=loadGuard.end()) {
LoadGuard &guardRec(*iter);
if (!guardRec.isValid(CPUI_LOAD)) {
list<LoadGuard>::iterator copyIter = iter;
++iter;
loadGuard.erase(copyIter);
continue;
}
++iter;
if (guardRec.spc != addr.getSpace()) continue;
if (addr.getOffset() < guardRec.minimumOffset) continue;
if (addr.getOffset() > guardRec.maximumOffset) continue;
copyop = fd->newOp(1,guardRec.op->getAddr());
Varnode *vn = fd->newVarnodeOut(size,addr,copyop);
vn->setActiveHeritage();
vn->setAddrForce();
fd->opSetOpcode(copyop,CPUI_COPY);
Varnode *invn = fd->newVarnode(size,addr);
invn->setActiveHeritage();
fd->opSetInput(copyop,invn,0);
fd->opInsertBefore(copyop,guardRec.op);
loadCopyOps.push_back(copyop);
}
}
/// \brief Guard global data-flow at RETURN ops in preparation for renaming
///
/// For the given global (persistent) address range, data-flow must persist up to
@ -682,31 +1118,6 @@ void Heritage::guardReturns(uint4 flags,const Address &addr,int4 size,vector<Var
}
}
// void Heritage::guardLoads(uint4 flags,const Address &addr,int4 size,vector<Varnode *> &write)
// {
// list<PcodeOp *>::const_iterator iter,iterend;
// PcodeOp *op,*copyop;
// iterend = fd->endOp(CPUI_LOAD);
// for(iter=fd->beginOp(CPUI_LOAD);iter!=iterend;++iter) {
// op = *iter;
// if (op->isDead()) continue;
// // Check if load could possible read from this addr
// if (!Address::getSpaceFromConst(op->getIn(0)->getAddr())->contain(addr.getSpace()))
// continue;
// copyop = fd->newOp(1,op->getAddr());
// Varnode *vn = fd->newVarnodeOut(size,addr,copyop);
// vn->setActiveHeritage();
// vn->setAddrForce();
// fd->opSetOpcode(copyop,CPUI_COPY);
// Varnode *invn = fd->newVarnode(size,addr);
// vn->setActiveHeritage();
// fd->opSetInput(copyop,invn,0);
// fd->opInsertBefore(copyop,op);
// }
// }
/// \brief Build a refinement array given an address range and a list of Varnodes
///
/// The array is a preallocated array of ints, one for each byte in the address
@ -1674,6 +2085,10 @@ void Heritage::heritage(void)
if (!space->isHeritaged()) continue;
info = getInfo(space);
if (pass < info->delay) continue; // It is too soon to heritage this space
if (!info->loadGuardSearch) {
info->loadGuardSearch = true;
discoverIndexedStackPointers(info->space);
}
needwarning = false;
iter = fd->beginLoc(space);
enditer = fd->endLoc(space);
@ -1727,6 +2142,8 @@ void Heritage::heritage(void)
}
placeMultiequals();
rename();
analyzeNewLoadGuards();
handleNewLoadCopies();
if (pass == 0)
splitmanage.splitAdditional();
pass += 1;
@ -1835,6 +2252,8 @@ void Heritage::clear(void)
depth.clear();
merge.clear();
clearInfoList();
loadGuard.clear();
storeGuard.clear();
maxdepth = -1;
pass = 0;
}

View file

@ -87,9 +87,46 @@ class HeritageInfo {
int4 delay; ///< How many passes to delay heritage of this space
int4 deadcodedelay; ///< How many passes to delay deadcode removal of this space
int4 deadremoved; ///< >0 if Varnodes in this space have been eliminated
bool loadGuardSearch; ///< \b true if the search for LOAD ops to guard has been performed
bool warningissued; ///< \b true if warning issued previously
HeritageInfo(AddrSpace *spc,int4 dl,int4 dcdl) {
space=spc; delay=dl; deadcodedelay=dcdl; deadremoved=0; warningissued=false; } ///< Constructor
space=spc; delay=dl; deadcodedelay=dcdl; deadremoved=0; loadGuardSearch=false; warningissued=false; } ///< Constructor
void reset(void) {
deadremoved = 0; deadcodedelay = delay; warningissued = false; loadGuardSearch = false; } ///< Reset
};
/// \brief Description of a LOAD operation that needs to be guarded
///
/// Heritage maintains a list of CPUI_LOAD ops that reference the stack dynamically. These
/// can potentially alias stack Varnodes, so we maintain what (possibly limited) information
/// we known about the range of stack addresses that can be referenced.
class LoadGuard {
friend class Heritage;
PcodeOp *op; ///< The LOAD op
AddrSpace *spc; ///< The stack space being loaded from
uintb pointerBase; ///< Base offset of the pointer
uintb minimumOffset; ///< Minimum offset of the LOAD
uintb maximumOffset; ///< Maximum offset of the LOAD
int4 step; ///< Step of any access into this range (0=unknown)
int4 analysisState; ///< 0=unanalyzed, 1=analyzed(partial result), 2=analyzed(full result)
void establishRange(const ValueSetRead &valueSet); ///< Convert partial value set analysis into guard range
void finalizeRange(const ValueSetRead &valueSet); ///< Convert value set analysis to final guard range
/// \brief Set a new unanalyzed LOAD guard that initially guards everything
///
/// \param o is the LOAD op
/// \param s is the (stack) space it is loading from
/// \param off is the base offset that is indexed from
void set(PcodeOp *o,AddrSpace *s,uintb off) {
op = o; spc = s; pointerBase=off; minimumOffset=0; maximumOffset=s->getHighest(); step=0; analysisState=0;
}
public:
PcodeOp *getOp(void) const { return op; } ///< Get the PcodeOp being guarded
uintb getMinimum(void) const { return minimumOffset; } ///< Get minimum offset of the guarded range
uintb getMaximum(void) const { return maximumOffset; } ///< Get maximum offset of the guarded range
int4 getStep(void) const { return step; } ///< Get the calculated step associated with the range (or 0)
bool isRangeLocked(void) const { return (analysisState == 2); } ///< Return \b true if the range is fully determined
bool isValid(OpCode opc) const { return (!op->isDead() && op->code() == opc); } ///< Return \b true if the record still describes an active LOAD
};
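A minimal consumer sketch using the accessors declared above together with the new Funcdata::getLoadGuards() method; the header name and the helper function are assumptions, not part of the commit:
#include "funcdata.hh"       // assumed header providing Funcdata and LoadGuard
// Hypothetical helper: print the stack range guarded for each LOAD
void dumpLoadGuards(const Funcdata &fd,ostream &s)
{
  const list<LoadGuard> &guards( fd.getLoadGuards() );
  for(list<LoadGuard>::const_iterator iter=guards.begin();iter!=guards.end();++iter) {
    const LoadGuard &guard( *iter );
    s << "LOAD at 0x" << hex << guard.getOp()->getAddr().getOffset();
    s << " guards [0x" << guard.getMinimum() << ",0x" << guard.getMaximum() << ']';
    if (guard.getStep() != 0)
      s << " step=" << dec << guard.getStep();
    s << endl;
  }
}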
/// \brief Manage the construction of Static Single Assignment (SSA) form
@ -134,6 +171,25 @@ class Heritage {
mark_node = 2, ///< Node has already been in queue
merged_node = 4 ///< Node has already been merged
};
/// \brief Node for depth-first traversal of stack references
struct StackNode {
enum {
nonconstant_index = 1,
multiequal = 2
};
Varnode *vn; ///< Varnode being traversed
uintb offset; ///< Offset relative to base
uint4 traversals; ///< What kind of operations has this pointer accumulated
list<PcodeOp *>::const_iterator iter; ///< Next PcodeOp to follow
StackNode(Varnode *v,uintb o,uint4 trav) {
vn = v;
offset = o;
iter = v->beginDescend();
traversals = trav;
}
};
Funcdata *fd; ///< The function \b this is controlling SSA construction
LocationMap globaldisjoint; ///< Disjoint cover of every heritaged memory location
LocationMap disjoint; ///< Disjoint cover of memory locations currently being heritaged
@ -147,6 +203,9 @@ class Heritage {
PriorityQueue pq; ///< Priority queue for phi-node placement
vector<FlowBlock *> merge; ///< Calculate merge points (blocks containing phi-nodes)
vector<HeritageInfo> infolist; ///< Heritage status for individual address spaces
list<LoadGuard> loadGuard; ///< List of LOAD operations that need to be guarded
list<LoadGuard> storeGuard; ///< List of STORE operations taking an indexed pointer to the stack
vector<PcodeOp *> loadCopyOps; ///< List of COPY ops generated by load guards
void clearInfoList(void); ///< Reset heritage status for all address spaces
/// \brief Get the heritage status for the given address space
@ -168,12 +227,19 @@ class Heritage {
Varnode *normalizeWriteSize(Varnode *vn,const Address &addr,int4 size);
Varnode *concatPieces(const vector<Varnode *> &vnlist,PcodeOp *insertop,Varnode *finalvn);
void splitPieces(const vector<Varnode *> &vnlist,PcodeOp *insertop,const Address &addr,int4 size,Varnode *startvn);
void findAddressForces(vector<PcodeOp *> &copySinks,vector<PcodeOp *> &forces);
void propagateCopyAway(PcodeOp *op);
void handleNewLoadCopies(void);
void analyzeNewLoadGuards(void);
void generateLoadGuard(StackNode &node,PcodeOp *op,AddrSpace *spc);
void generateStoreGuard(StackNode &node,PcodeOp *op,AddrSpace *spc);
void discoverIndexedStackPointers(AddrSpace *spc);
void guard(const Address &addr,int4 size,vector<Varnode *> &read,vector<Varnode *> &write,vector<Varnode *> &inputvars);
void guardInput(const Address &addr,int4 size,vector<Varnode *> &input);
void guardCalls(uint4 flags,const Address &addr,int4 size,vector<Varnode *> &write);
void guardStores(const Address &addr,int4 size,vector<Varnode *> &write);
void guardLoads(uint4 flags,const Address &addr,int4 size,vector<Varnode *> &write);
void guardReturns(uint4 flags,const Address &addr,int4 size,vector<Varnode *> &write);
// void guardLoads(uint4 flags,const Address &addr,int4 size,vector<Varnode *> &write);
static void buildRefinement(vector<int4> &refine,const Address &addr,int4 size,const vector<Varnode *> &vnlist);
void splitByRefinement(Varnode *vn,const Address &addr,const vector<int4> &refine,vector<Varnode *> &split);
void refineRead(Varnode *vn,const Address &addr,const vector<int4> &refine,vector<Varnode *> &newvn);
@ -185,6 +251,8 @@ class Heritage {
void calcMultiequals(const vector<Varnode *> &write);
void renameRecurse(BlockBasic *bl,VariableStack &varstack);
void bumpDeadcodeDelay(Varnode *vn);
void placeMultiequals(void);
void rename(void);
public:
Heritage(Funcdata *data); ///< Constructor
@ -202,9 +270,9 @@ public:
void buildInfoList(void); ///< Initialize information for each space
void forceRestructure(void) { maxdepth = -1; } ///< Force regeneration of basic block structures
void clear(void); ///< Reset all analysis of heritage
void placeMultiequals(void);
void rename(void);
void heritage(void); ///< Perform one pass of heritage
const list<LoadGuard> &getLoadGuards(void) const { return loadGuard; } ///< Get list of LOAD ops that are guarded
const list<LoadGuard> &getStoreGuards(void) const { return storeGuard; } ///< Get list of STORE ops that are guarded
};
#endif

View file

@ -129,6 +129,7 @@ void IfaceDecompCapability::registerCommands(IfaceStatus *status)
status->registerCom(new IfcVolatile(),"volatile");
status->registerCom(new IfcPreferSplit(),"prefersplit");
status->registerCom(new IfcStructureBlocks(),"structure","blocks");
status->registerCom(new IfcAnalyzeRange(), "analyze","range");
#ifdef CPUI_RULECOMPILE
status->registerCom(new IfcParseRule(),"parse","rule");
status->registerCom(new IfcExperimentalRules(),"experimental","rules");
@ -2474,6 +2475,56 @@ void IfcCountPcode::execute(istream &s)
*status->optr << "Count - pcode = " << dec << count << endl;
}
void IfcAnalyzeRange::execute(istream &s)
{
if (dcp->conf == (Architecture *)0)
throw IfaceExecutionError("Image not loaded");
if (dcp->fd == (Funcdata *)0)
throw IfaceExecutionError("No function selected");
bool useFullWidener;
string token;
s >> ws >> token;
if (token == "full")
useFullWidener = true;
else if (token == "partial") {
useFullWidener = false;
}
else
throw IfaceParseError("Must specify \"full\" or \"partial\" widening");
Varnode *vn = iface_read_varnode(dcp,s);
vector<Varnode *> sinks;
vector<PcodeOp *> reads;
sinks.push_back(vn);
for(list<PcodeOp *>::const_iterator iter=vn->beginDescend();iter!=vn->endDescend();++iter) {
PcodeOp *op = *iter;
if (op->code() == CPUI_LOAD || op->code() == CPUI_STORE)
reads.push_back(op);
}
Varnode *stackReg = dcp->fd->findSpacebaseInput(dcp->conf->getStackSpace());
ValueSetSolver vsSolver;
vsSolver.establishValueSets(sinks, reads, stackReg, false);
if (useFullWidener) {
WidenerFull widener;
vsSolver.solve(10000,widener);
}
else {
WidenerNone widener;
vsSolver.solve(10000,widener);
}
list<ValueSet>::const_iterator iter;
for(iter=vsSolver.beginValueSets();iter!=vsSolver.endValueSets();++iter) {
(*iter).printRaw(*status->optr);
*status->optr << endl;
}
map<SeqNum,ValueSetRead>::const_iterator riter;
for(riter=vsSolver.beginValueSetReads();riter!=vsSolver.endValueSetReads();++riter) {
(*riter).second.printRaw(*status->optr);
*status->optr << endl;
}
}
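An assumed console interaction for the new command; the exact varnode-specifier syntax follows whatever iface_read_varnode() already accepts and is not spelled out here:
// With an image loaded and a function selected:
//
//   [decomp]> analyze range partial <varnode-specifier>
//
// prints the raw ValueSet of every Varnode that can flow into the given one
// (using the freezing WidenerNone strategy), followed by the ValueSetRead
// computed at each LOAD or STORE that reads it.  Substituting "full" selects
// the WidenerFull strategy instead.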
#ifdef OPACTION_DEBUG
void IfcDebugAction::execute(istream &s)

View file

@ -541,6 +541,11 @@ public:
virtual void execute(istream &s);
};
class IfcAnalyzeRange : public IfaceDecompCommand {
public:
virtual void execute(istream &s);
};
#ifdef CPUI_RULECOMPILE
class IfcParseRule : public IfaceDecompCommand {
public:

View file

@ -231,11 +231,19 @@ void EmulateFunction::collectLoadPoints(vector<LoadTable> &res) const
}
}
/// The starting value and the step of the range are preserved. The
/// ending value is set so there are exactly the given number of elements
/// in the range.
/// \param nm is the given number
void JumpValuesRange::truncate(int4 nm)
{
// FIXME: This doesn't work if there is a stride
range = CircleRange(range.getMin(),range.getMin() + (nm-1),range.getMask());
int4 rangeSize = 8*sizeof(uintb) - count_leading_zeros(range.getMask());
rangeSize >>= 3;
uintb left = range.getMin();
int4 step = range.getStep();
uintb right = (left + step * nm) & range.getMask();
range.setRange(left, right, rangeSize, step);
}
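A hedged worked example of the stride-aware truncation (the concrete numbers are made up):
// Suppose the jump-table range sits in a 4-byte space (mask 0xffffffff) with
// getMin() == 0x100 and a step of 4.  truncate(8) now computes
//   rangeSize = (64 - count_leading_zeros(0xffffffff)) >> 3 = 4 bytes
//   right     = 0x100 + 4*8 = 0x120
// yielding the 8 values 0x100,0x104,...,0x11c.  The replaced code (see the
// FIXME it carried) ignored the step and would have produced the 8
// consecutive values 0x100..0x107 instead.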
uintb JumpValuesRange::getSize(void) const
@ -403,18 +411,21 @@ bool JumpBasic::ispoint(Varnode *vn)
return true;
}
void JumpBasic::setStride(Varnode *vn,CircleRange &rng)
/// If some of the least significant bits of the given Varnode are known to
/// be zero, translate this into a stride for the jumptable range.
/// \param vn is the given Varnode
/// \return the calculated stride = 1,2,4,...
int4 JumpBasic::getStride(Varnode *vn)
{
uintb mask = vn->getNZMask();
int4 stride = 0;
int4 stride = 1;
while((mask&1)==0) {
mask >>= 1;
stride += 1;
stride <<= 1;
}
if (stride==0) return;
if (stride > 6) return;
rng.setStride(stride);
if (stride > 32) return 1;
return stride;
}
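A hedged illustration of the mask-to-stride conversion; the mask value is made up:
// A switch index Varnode whose non-zero mask is 0xfffc has its two least
// significant bits provably zero, so the loop shifts twice and getStride()
// returns 4.  A mask implying a stride greater than 32 is treated as
// unreliable and the function falls back to returning 1.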
uintb JumpBasic::backup2Switch(Funcdata *fd,uintb output,Varnode *outvn,Varnode *invn)
@ -915,23 +926,25 @@ void JumpBasic::calcRange(Varnode *vn,CircleRange &rng) const
// by using the precalculated guard ranges.
// Get an initial range, based on the size/type of -vn-
int4 stride = 1;
if (vn->isConstant())
rng = CircleRange(vn->getOffset(),vn->getSize());
else if (vn->isWritten() && vn->getDef()->isBoolOutput())
rng = CircleRange(0,1,1); // Only 0 or 1 possible
rng = CircleRange(0,2,1,1); // Only 0 or 1 possible
else { // Should we go ahead and use nzmask in all cases?
uintb mask = calc_mask(vn->getSize());
uintb maxValue = 0; // Every possible value
if (vn->isWritten()) {
PcodeOp *andop = vn->getDef();
if (andop->code() == CPUI_INT_AND) {
Varnode *constvn = andop->getIn(1);
if (constvn->isConstant()) {
mask = coveringmask( constvn->getOffset() );
maxValue = coveringmask( constvn->getOffset() );
maxValue = (maxValue + 1) & calc_mask(vn->getSize());
}
}
}
rng = CircleRange(0,mask,mask);
setStride(vn,rng);
stride = getStride(vn);
rng = CircleRange(0,maxValue,vn->getSize(),stride);
}
// Intersect any guard ranges which apply to -vn-
@ -950,7 +963,7 @@ void JumpBasic::calcRange(Varnode *vn,CircleRange &rng) const
// in which case the guard might not check for it. If the
// size is too big, we try only positive values
if (rng.getSize() > 0x10000) {
CircleRange positive(0,rng.getMask()>>1,rng.getMask());
CircleRange positive(0,(rng.getMask()>>1)+1,vn->getSize(),stride);
positive.intersect(rng);
if (!positive.isEmpty())
rng = positive;

View file

@ -145,7 +145,7 @@ public:
void setRange(const CircleRange &rng) { range = rng; }
void setStartVn(Varnode *vn) { normqvn = vn; }
void setStartOp(PcodeOp *op) { startop = op; }
virtual void truncate(int4 nm);
virtual void truncate(int4 nm); ///< Truncate the number of values to the given number
virtual uintb getSize(void) const;
virtual bool contains(uintb val) const;
virtual bool initializeForReading(void) const;
@ -233,7 +233,7 @@ protected:
Varnode *switchvn; // The unnormalized switch varnode
static bool isprune(Varnode *vn);
static bool ispoint(Varnode *vn);
static void setStride(Varnode *vn,CircleRange &rng);
static int4 getStride(Varnode *vn); ///< Get the step/stride associated with the Varnode
static uintb backup2Switch(Funcdata *fd,uintb output,Varnode *outvn,Varnode *invn);
void findDeterminingVarnodes(PcodeOp *op,int4 slot);
void analyzeGuards(BlockBasic *bl,int4 pathout);

File diff suppressed because it is too large

View file

@ -51,35 +51,289 @@ class CircleRange {
uintb mask; ///< Bit mask defining the size (modulus) and stop of the range
bool isempty; ///< \b true if set is empty
int4 step; ///< Explicit step size
int4 shift; ///< Number of bits in step. Equal to log2(step)
static const char arrange[]; ///< Map from raw overlaps to normalized overlap code
void calcStepShift(void); ///< Calculate explicit \b step and \b skip from \b mask
void normalize(void); ///< Normalize the representation of full sets
void complement(void); ///< Set \b this to the complement of itself
void convertToBoolean(void); ///< Convert \b this to boolean.
static bool newStride(uintb newmask,uintb &myleft,uintb &myright); ///< Recalculate range based on new size and stride
bool convertToBoolean(void); ///< Convert \b this to boolean.
static bool newStride(uintb mask,int4 step,int4 oldStep,uint4 rem,uintb &myleft,uintb &myright);
static bool newDomain(uintb newMask,int4 newStep,uintb &myleft,uintb &myright);
static char encodeRangeOverlaps(uintb op1left,uintb op1right,uintb op2left,uintb op2right); ///< Calculate overlap code
public:
CircleRange(void) { isempty=true; } ///< Construct an empty range
CircleRange(uintb mn,uintb mx,uintb m); ///< Construct given specific boundaries.
CircleRange(uintb lft,uintb rgt,int4 size,int4 stp); ///< Construct given specific boundaries.
CircleRange(bool val); ///< Construct a boolean range
CircleRange(uintb val,int4 size); ///< Construct range with single value
void setRange(uintb lft,uintb rgt,int4 size,int4 step); ///< Set directly to a specific range
void setRange(uintb val,int4 size); ///< Set range with a single value
void setFull(int4 size); ///< Set a completely full range
bool isEmpty(void) const { return isempty; } ///< Return \b true if \b this range is empty
bool isFull(void) const { return ((!isempty) && (step == 1) && (left == right)); } ///< Return \b true if \b this contains all possible values
bool isSingle(void) const { return (!isempty) && (right == ((left + step)& mask)); } ///< Return \b true if \b this contains single value
uintb getMin(void) const { return left; } ///< Get the left boundary of the range
uintb getMax(void) const { return (right-step)&mask; } ///< Get the right-most integer contained in the range
uintb getEnd(void) const { return right; } ///< Get the right boundary of the range
uintb getMask(void) const { return mask; } ///< Get the mask
uintb getSize(void) const; ///< Get the size of this range
int4 getStep(void) const { return step; } ///< Get the step for \b this range
int4 getMaxInfo(void) const; ///< Get maximum information content of range
bool operator==(const CircleRange &op2) const; ///< Equals operator
bool getNext(uintb &val) const { val = (val+step)&mask; return (val!=right); } ///< Advance an integer within the range
bool contains(const CircleRange &op2) const; ///< Check containment of another range in \b this.
bool contains(uintb val) const; ///< Check containment of a specific integer.
int4 intersect(const CircleRange &op2); ///< Intersect \b this with another range
bool setNZMask(uintb nzmask,int4 size); ///< Set the range based on a putative mask.
int4 circleUnion(const CircleRange &op2); ///< Union two ranges.
void setStride(int4 newshift); ///< Set a new stride on \b this range.
bool minimalContainer(const CircleRange &op2,int4 maxStep); ///< Construct minimal range that contains both \b this and another range
int4 invert(void); ///< Convert to complementary range
void setStride(int4 newStep,uintb rem); ///< Set a new step on \b this range.
bool pullBackUnary(OpCode opc,int4 inSize,int4 outSize); ///< Pull-back \b this through the given unary operator
bool pullBackBinary(OpCode opc,uintb val,int4 slot,int4 inSize,int4 outSize); ///< Pull-back \b this thru binary operator
Varnode *pullBack(PcodeOp *op,Varnode **constMarkup,bool usenzmask); ///< Pull-back \b this range through given PcodeOp.
bool pushForwardUnary(OpCode opc,const CircleRange &in1,int4 inSize,int4 outSize); ///< Push-forward thru given unary operator
bool pushForwardBinary(OpCode opc,const CircleRange &in1,const CircleRange &in2,int4 inSize,int4 outSize,int4 maxStep);
bool pushForwardTrinary(OpCode opc,const CircleRange &in1,const CircleRange &in2,const CircleRange &in3,
int4 inSize,int4 outSize,int4 maxStep);
void widen(const CircleRange &op2,bool leftIsStable); ///< Widen the unstable bound to match containing range
int4 translate2Op(OpCode &opc,uintb &c,int4 &cslot) const; ///< Translate range to a comparison op
void printRaw(ostream &s) const; ///< Write a text representation of \b this to stream
};
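A small usage sketch for the reworked left/right/size/step constructor; the header name and the concrete values are assumptions:
#include "rangeutil.hh"      // assumed header declaring CircleRange
void sketch_circlerange(void)
{
  CircleRange rng(0x10,0x20,4,4);   // the values 0x10,0x14,0x18,0x1c in a 4-byte space
  rng.getSize();                    // 4 values in the set
  rng.getMin();                     // 0x10, the first value
  rng.getMax();                     // 0x1c, the last value actually contained
  rng.getEnd();                     // 0x20, the exclusive right boundary
  rng.contains(0x14);               // true
}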
class Partition; // Forward declaration
class Widener; // Forward declaration
/// \brief A range of values attached to a Varnode within a data-flow subsystem
///
/// This class acts as both the set of values for the Varnode and as a node in a
/// sub-graph overlaying the full data-flow of the function containing the Varnode.
/// The values are stored in the CircleRange field and can be interpreted either as
/// absolute values (if \b typeCode is 0) or as values relative to a stack pointer
/// or some other register (if \b typeCode is non-zero).
class ValueSet {
public:
static const int4 MAX_STEP; ///< Maximum step inferred for a value set
/// \brief An external constraint that can be applied to a ValueSet
///
/// An Equation is attached to a particular ValueSet and its underlying Varnode
/// providing additional restriction on the ValueSet of an input parameter of the
/// operation producing the Varnode.
class Equation {
friend class ValueSet;
int4 slot; ///< The input parameter slot to which the constraint is attached
int4 typeCode; ///< The constraint characteristic 0=absolute 1=relative to a spacebase register
CircleRange range; ///< The range constraint
public:
Equation(int4 s,int4 tc,const CircleRange &rng) { slot=s; typeCode = tc; range = rng; } ///< Constructor
};
private:
friend class ValueSetSolver;
int4 typeCode; ///< 0=pure constant 1=stack relative
int4 numParams; ///< Number of input parameters to defining operation
int4 count; ///< Depth first numbering / widening count
OpCode opCode; ///< Op-code defining Varnode
bool leftIsStable; ///< Set to \b true if left boundary of range didn't change (last iteration)
bool rightIsStable; ///< Set to \b true if right boundary of range didn't change (last iteration)
Varnode *vn; ///< Varnode whose set this represents
CircleRange range; ///< Range of values or offsets in this set
vector<Equation> equations; ///< Any equations associated with this value set
Partition *partHead; ///< If Varnode is a component head, pointer to corresponding Partition
ValueSet *next; ///< Next ValueSet to iterate
bool doesEquationApply(int4 num,int4 slot) const; ///< Does the indicated equation apply for the given input slot
void setFull(void) { range.setFull(vn->getSize()); typeCode = 0; } ///< Mark value set as possibly containing any value
void setVarnode(Varnode *v,int4 tCode); ///< Attach \b this to given Varnode and set initial values
void addEquation(int4 slot,int4 type,const CircleRange &constraint); ///< Insert an equation restricting \b this value set
void addLandmark(int4 type,const CircleRange &constraint) { addEquation(numParams,type,constraint); } ///< Add a widening landmark
bool computeTypeCode(void); ///< Figure out if \b this value set is absolute or relative
bool iterate(Widener &widener); ///< Regenerate \b this value set from operator inputs
public:
int4 getCount(void) const { return count; } ///< Get the current iteration count
const CircleRange *getLandMark(void) const; ///< Get any \e landmark range
int4 getTypeCode(void) const { return typeCode; } ///< Return '0' for normal constant, '1' for spacebase relative
Varnode *getVarnode(void) const { return vn; } ///< Get the Varnode attached to \b this ValueSet
const CircleRange &getRange(void) const { return range; } ///< Get the actual range of values
bool isLeftStable(void) const { return leftIsStable; } ///< Return \b true if the left boundary hasn't been changing
bool isRightStable(void) const { return rightIsStable; } ///< Return \b true if the right boundary hasn't been changing
void printRaw(ostream &s) const; ///< Write a text description of \b this to the given stream
};
/// \brief A range of nodes (within the weak topological ordering) that are iterated together
class Partition {
friend class ValueSetSolver;
ValueSet *startNode; ///< Starting node of component
ValueSet *stopNode; ///< Ending node of component
bool isDirty; ///< Set to \b true if a node in \b this component has changed this iteration
public:
Partition(void) {
startNode = (ValueSet *)0; stopNode = (ValueSet *)0; isDirty = false;
} ///< Construct empty partition
};
/// \brief A special form of ValueSet associated with the \e read \e point of a Varnode
///
/// When a Varnode is read, it may have a more restricted range at the point of the read
/// compared to the full scope. This class officially stores the value set at the point
/// of the read (specified by PcodeOp and slot). It is computed as a final step after
/// the main iteration has completed.
class ValueSetRead {
friend class ValueSetSolver;
int4 typeCode; ///< 0=pure constant 1=stack relative
int4 slot; ///< The slot being read
PcodeOp *op; ///< The PcodeOp at the point of the value set read
CircleRange range; ///< Range of values or offsets in this set
CircleRange equationConstraint; ///< Constraint associated with the equation
int4 equationTypeCode; ///< Type code of the associated equation
bool leftIsStable; ///< Set to \b true if left boundary of range didn't change (last iteration)
bool rightIsStable; ///< Set to \b true if right boundary of range didn't change (last iteration)
void setPcodeOp(PcodeOp *o,int4 slt); ///< Establish \e read this value set corresponds to
void addEquation(int4 slt,int4 type,const CircleRange &constraint); ///< Insert an equation restricting \b this value set
public:
int4 getTypeCode(void) const { return typeCode; } ///< Return '0' for normal constant, '1' for spacebase relative
const CircleRange &getRange(void) const { return range; } ///< Get the actual range of values
bool isLeftStable(void) const { return leftIsStable; } ///< Return \b true if the left boundary hasn't been changing
bool isRightStable(void) const { return rightIsStable; } ///< Return \b true if the right boundary hasn't been changing
void compute(void); ///< Compute \b this value set
void printRaw(ostream &s) const; ///< Write a text description of \b this to the given stream
};
/// \brief Class holding a particular widening strategy for the ValueSetSolver iteration algorithm
///
/// This object gets to decide when a value set gets \e frozen (checkFreeze()), meaning the set
/// doesn't change for the remaining iteration steps. It also gets to decide when and by how much
/// value sets get artificially increased in size to accelerate reaching their stable state (doWidening()).
class Widener {
public:
virtual ~Widener(void) {} ///< Destructor
/// \brief Upon entering a fresh partition, determine how the given ValueSet count should be reset
///
/// \param valueSet is the given value set
/// \return the value of the iteration counter to reset to
virtual int4 determineIterationReset(const ValueSet &valueSet)=0;
/// \brief Check if the given value set has been frozen for the remainder of the iteration process
///
/// \param valueSet is the given value set
/// \return \b true if the valueSet will no longer change
virtual bool checkFreeze(const ValueSet &valueSet)=0;
/// \brief For an iteration that isn't stabilizing attempt to widen the given ValueSet
///
/// Change the given range based on its previous iteration so that it stabilizes more
/// rapidly on future iterations.
/// \param valueSet is the given value set
/// \param range is the previous form of the given range (and storage for the widening result)
/// \param newRange is the current iteration of the given range
/// \return \b true if widening succeeded
virtual bool doWidening(const ValueSet &valueSet,CircleRange &range,const CircleRange &newRange)=0;
};
/// \brief Class for doing normal widening
///
/// Widening is attempted at a specific iteration. If a landmark is available, it is used
/// to do a controlled widening, holding the stable range boundary constant. Otherwise a
/// full range is produced. At a later iteration, a full range is produced automatically.
class WidenerFull : public Widener {
int4 widenIteration; ///< The iteration at which widening is attempted
int4 fullIteration; ///< The iteration at which a full range is produced
public:
WidenerFull(void) { widenIteration = 2; fullIteration = 5; } ///< Constructor with default iterations
WidenerFull(int4 wide,int4 full) { widenIteration = wide; fullIteration = full; } ///< Constructor specifying iterations
virtual int4 determineIterationReset(const ValueSet &valueSet);
virtual bool checkFreeze(const ValueSet &valueSet);
virtual bool doWidening(const ValueSet &valueSet,CircleRange &range,const CircleRange &newRange);
};
/// \brief Class for freezing value sets at a specific iteration (to accelerate convergence)
///
/// The value sets don't reach a true stable state but instead lock in a description of the
/// first few values that \e reach a given Varnode. The ValueSetSolver does normal iteration,
/// but individual ValueSets \e freeze after a specific number of iterations (3 by default),
/// instead of growing to a true stable state. This gives evidence of iteration in the underlying
/// code, showing the initial value and frequently the step size.
class WidenerNone : public Widener {
int4 freezeIteration; ///< The iteration at which all change ceases
public:
WidenerNone(void) { freezeIteration = 3; }
virtual int4 determineIterationReset(const ValueSet &valueSet);
virtual bool checkFreeze(const ValueSet &valueSet);
virtual bool doWidening(const ValueSet &valueSet,CircleRange &range,const CircleRange &newRange);
};
/// \brief Class that determines a ValueSet for each Varnode in a data-flow system
///
/// This class uses \e value \e set \e analysis to calculate (an overestimation of)
/// the range of values that can reach each Varnode. The system is formed by providing
/// a set of Varnodes for which the range is desired (the sinks) via establishValueSets().
/// This creates a system of Varnodes (within the single function) that can flow to the sinks.
/// Running the method solve() does the analysis, and the caller can examine the results
/// by examining the ValueSet attached to any of the Varnodes in the system (via Varnode::getValueSet()).
/// The ValueSetSolver::solve() starts with minimal value sets and does iteration steps by pushing
/// them through the PcodeOps until stability is reached. A Widener object is passed to solve()
/// which selects the specific strategy for accelerating convergence.
class ValueSetSolver {
/// \brief An iterator over out-bound edges for a single ValueSet node in a data-flow system
///
/// This is a helper class for walking a collection of ValueSets as a graph.
/// Mostly the graph mirrors the data-flow of the Varnodes underlying the ValueSets, but
/// there is support for a simulated root node. This class acts as an iterator over the outgoing
/// edges of a particular ValueSet in the graph.
class ValueSetEdge {
const vector<ValueSet *> *rootEdges; ///< The list of nodes attached to the simulated root node (or NULL)
int4 rootPos; ///< The iterator position for the simulated root node
Varnode *vn; ///< The Varnode attached to a normal ValueSet node (or NULL)
list<PcodeOp *>::const_iterator iter; ///< The iterator position for a normal ValueSet node
public:
ValueSetEdge(ValueSet *node,const vector<ValueSet *> &roots);
ValueSet *getNext(void);
};
list<ValueSet> valueNodes; ///< Storage for all the current value sets
map<SeqNum,ValueSetRead> readNodes; ///< Value sets at read points, computed as an add-on after the main iteration
Partition orderPartition; ///< Value sets in iteration order
list<Partition> recordStorage; ///< Storage for the Partitions establishing components
vector<ValueSet *> rootNodes; ///< Values treated as inputs
vector<ValueSet *> nodeStack; ///< Stack used to generate the topological ordering
int4 depthFirstIndex; ///< (Global) depth first numbering for topological ordering
int4 numIterations; ///< Count of individual ValueSet iterations
int4 maxIterations; ///< Maximum number of iterations before forcing termination
void newValueSet(Varnode *vn,int4 tCode); ///< Allocate storage for a new ValueSet
static void partitionPrepend(ValueSet *vertex,Partition &part); ///< Prepend a vertex to a partition
static void partitionPrepend(const Partition &head,Partition &part); ///< Prepend full Partition to given Partition
void partitionSurround(Partition &part); ///< Create a full partition component
void component(ValueSet *vertex,Partition &part); ///< Generate a partition component given its head
int4 visit(ValueSet *vertex,Partition &part); ///< Recursively walk the data-flow graph finding partitions
void establishTopologicalOrder(void); ///< Find the optimal order for iterating through the ValueSets
void generateTrueEquation(Varnode *vn,PcodeOp *op,int4 slot,int4 type,const CircleRange &range);
void generateFalseEquation(Varnode *vn,PcodeOp *op,int4 slot,int4 type,const CircleRange &range);
void applyConstraints(Varnode *vn,int4 type,const CircleRange &range,PcodeOp *cbranch);
void constraintsFromPath(int4 type,CircleRange &lift,Varnode *startVn,Varnode *endVn,PcodeOp *cbranch);
void constraintsFromCBranch(PcodeOp *cbranch); ///< Generate constraints arising from the given branch
void generateConstraints(const vector<Varnode *> &worklist,const vector<PcodeOp *> &reads); ///< Generate constraints given a system of Varnodes
bool checkRelativeConstant(Varnode *vn,int4 &typeCode,uintb &value) const; ///< Check if the given Varnode is a \e relative constant
void generateRelativeConstraint(PcodeOp *compOp,PcodeOp *cbranch); ///< Try to find a \e relative constraint
public:
void establishValueSets(const vector<Varnode *> &sinks,const vector<PcodeOp *> &reads,Varnode *stackReg,bool indirectAsCopy);
int4 getNumIterations(void) const { return numIterations; } ///< Get the current number of iterations
void solve(int4 max,Widener &widener); ///< Iterate the ValueSet system until it stabilizes
list<ValueSet>::const_iterator beginValueSets(void) const { return valueNodes.begin(); } ///< Start of all ValueSets in the system
list<ValueSet>::const_iterator endValueSets(void) const { return valueNodes.end(); } ///< End of all ValueSets in the system
map<SeqNum,ValueSetRead>::const_iterator beginValueSetReads(void) const { return readNodes.begin(); } ///< Start of ValueSetReads
map<SeqNum,ValueSetRead>::const_iterator endValueSetReads(void) const { return readNodes.end(); } ///< End of ValueSetReads
const ValueSetRead &getValueSetRead(const SeqNum &seq) { return (*readNodes.find(seq)).second; } ///< Get ValueSetRead by SeqNum
#ifdef CPUI_DEBUG
void dumpValueSets(ostream &s) const;
#endif
};
/// \param op2 is the range to compare \b this to
/// \return \b true if the two ranges are equal
inline bool CircleRange::operator==(const CircleRange &op2) const
{
if (isempty != op2.isempty) return false;
if (isempty) return true;
return (left == op2.left) && (right == op2.right) && (mask == op2.mask) && (step == op2.step);
}
/// If two ranges are labeled [l , r) and [op2.l, op2.r), the
/// overlap of the ranges can be characterized by listing the four boundary
/// values in order, as the circle is traversed in a clock-wise direction. This characterization can be
@ -111,4 +365,43 @@ inline char CircleRange::encodeRangeOverlaps(uintb op1left, uintb op1right, uint
return arrange[val];
}
/// Perform basic checks that the selected Equation exists and applies
/// to the indicated input slot.
/// \param num is the index selecting an Equation
/// \param slot is the indicated slot
/// \return \b true if the Equation exists and applies
inline bool ValueSet::doesEquationApply(int4 num,int4 slot) const
{
if (num < equations.size()) {
if (equations[num].slot == slot) {
if (equations[num].typeCode == typeCode)
return true;
}
}
return false;
}
/// \param vertex is the node that will be prepended
/// \param part is the Partition being modified
inline void ValueSetSolver::partitionPrepend(ValueSet *vertex,Partition &part)
{
vertex->next = part.startNode; // Attach new vertex to beginning of list
part.startNode = vertex; // Change the first value set to be the new vertex
if (part.stopNode == (ValueSet *)0)
part.stopNode = vertex;
}
/// \param head is the partition to be prepended
/// \param part is the given partition being modified (prepended to)
inline void ValueSetSolver::partitionPrepend(const Partition &head,Partition &part)
{
head.stopNode->next = part.startNode;
part.startNode = head.startNode;
if (part.stopNode == (ValueSet *)0)
part.stopNode = head.stopNode;
}
#endif

View file

@ -3347,7 +3347,8 @@ int4 RulePropagateCopy::applyOp(PcodeOp *op,Funcdata &data)
if (invn == vn)
throw LowlevelError("Self-defined varnode");
if (op->isMarker()) {
if (invn->isConstant()) continue; // Don't propagate constants into markers
if (invn->isConstant()) continue; // Don't propagate constants into markers
if (vn->isAddrForce()) continue; // Don't propagate if we are keeping the COPY anyway
if (invn->isAddrTied() && op->getOut()->isAddrTied() &&
(op->getOut()->getAddr() != invn->getAddr()))
continue; // We must not allow merging of different addrtieds

View file

@ -308,10 +308,10 @@ uintb AddrSpace::read(const string &s,int4 &size) const
offset = addressToByte(offset,wordsize);
enddata = (const char *) tmpdata;
if (enddata - s.c_str() == s.size()) { // If no size or offset override
size = getAddrSize(); // Return "natural" size
size = manage->getDefaultSize(); // Return "natural" size
return offset;
}
size = getAddrSize();
size = manage->getDefaultSize();
}
if (append != string::npos) {
enddata = s.c_str()+append;

File diff suppressed because it is too large

View file

@ -13,84 +13,122 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Structure for dealing with local variables
/// \file varmap.hh
/// \brief Classes for keeping track of local variables and reconstructing stack layout
#ifndef __CPUI_VARMAP__
#define __CPUI_VARMAP__
#include "database.hh"
class AddressSorter {
Address addr;
Address useaddr;
int4 size; // Recommended size
/// \brief An Address pair with a point of use
///
/// A storage location and a point in the code where the storage is referenced.
/// This object sorts first based on the storage address then on the use point.
class AddressUsePointPair {
Address addr; ///< The starting address of the storage location
Address useaddr; ///< The code address at the point of use
int4 size; ///< An optional/recommended size for the variable being stored
public:
AddressSorter(const Address &ad,const Address &use,int4 sz);
AddressSorter(const AddressSorter &op2) : addr(op2.addr), useaddr(op2.useaddr) { size = op2.size; }
const Address &getAddr(void) const { return addr; }
const Address &getUseAddr(void) const { return useaddr; }
int4 getSize(void) const { return size; }
bool operator<(const AddressSorter &op2) const;
bool operator==(const AddressSorter &op2) const;
bool operator!=(const AddressSorter &op2) const;
AddressUsePointPair(const Address &ad,const Address &use,int4 sz); ///< Constructor
AddressUsePointPair(const AddressUsePointPair &op2) : addr(op2.addr), useaddr(op2.useaddr) {
size = op2.size; } ///< Copy constructor
const Address &getAddr(void) const { return addr; } ///< Get the storage address
const Address &getUseAddr(void) const { return useaddr; } ///< Get the use point address
int4 getSize(void) const { return size; } ///< Get the optional size
bool operator<(const AddressUsePointPair &op2) const; ///< Compare operation
bool operator==(const AddressUsePointPair &op2) const; ///< Test for equality
};
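// --- Illustrative sketch only, not part of this commit ---
// One plausible implementation of the ordering documented above: compare the
// storage address first, then break ties with the use point.  The recommended
// size is deliberately left out of the comparison.  The real operator< is
// defined in the corresponding .cc file and may differ in detail.
inline bool sketchLessThan(const AddressUsePointPair &a,const AddressUsePointPair &b)

{
  if (a.getAddr() != b.getAddr())
    return (a.getAddr() < b.getAddr());
  return (a.getUseAddr() < b.getUseAddr());
}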
// All offsets are byte based (as opposed to unit based)
// Structure for internal map layout
struct MapRange {
uintb start; // Start of range
int4 size;
intb sstart; // Signed version of start point
Datatype *type;
uint4 flags;
bool arrayyes;
int4 lowind; // Lower bound of index
int4 highind; // Upper bound of index
MapRange(void) {}
MapRange(uintb st,int4 sz,intb sst,Datatype *ct,uint4 fl,bool ay,int4 lo,int4 hi) {
start=st; size=sz; sstart=sst; type=ct; flags=fl; arrayyes=ay; lowind=lo; highind=hi; }
/// \brief Partial data-type information mapped to a specific range of bytes
///
/// This object gives a hint about the data-type for a sequence of bytes
/// starting at a specific address offset (typically on the stack). It describes
/// where the data-type starts, what data-type it might be, and how far it extends
/// from the start point (possibly as an array).
class RangeHint {
friend class ScopeLocal;
public:
/// \brief The basic categorization of the range
enum RangeType {
fixed = 0, ///< A data-type with a fixed size
open = 1, ///< An array with a (possibly unknown) number of elements
endpoint = 2 ///< An (artificial) boundary to the range of bytes getting analyzed
};
private:
uintb start; ///< Starting offset of \b this range of bytes
int4 size; ///< Number of bytes in a single element of this range
intb sstart; ///< A signed version of the starting offset
Datatype *type; ///< Putative data-type for a single element of this range
uint4 flags; ///< Additional boolean properties of this range
RangeType rangeType; ///< The type of range
int4 highind; ///< Minimum upper bound on the array index (if \b this is \e open)
public:
RangeHint(void) {} ///< Uninitialized constructor
RangeHint(uintb st,int4 sz,intb sst,Datatype *ct,uint4 fl,RangeType rt,int4 hi) {
start=st; size=sz; sstart=sst; type=ct; flags=fl; rangeType = rt; highind=hi; } ///< Initialized constructor
bool reconcile(const RangeHint *b) const;
bool contain(const RangeHint *b) const;
bool preferred(const RangeHint *b,bool reconcile) const;
bool absorb(RangeHint *b); ///< Try to absorb the other RangeHint into \b this
bool merge(RangeHint *b,AddrSpace *space,TypeFactory *typeFactory); ///< Try to form the union of \b this with another RangeHint
static bool compareRanges(const RangeHint *a,const RangeHint *b); ///< Compare two RangeHint pointers
};
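// --- Illustrative sketch only, not part of this commit ---
// Hypothetical construction of two hints for a stack frame: a fixed 4-byte
// integer at offset 0x8, and an open-ended array of 1-byte elements at 0x10
// whose index has been observed to reach at least 3.  TypeFactory::getBase()
// is assumed from the decompiler's type API; the flag argument is left at 0
// and the final index argument of the fixed hint is a placeholder.
inline void sketchBuildHints(TypeFactory *typeFactory,vector<RangeHint *> &maplist)

{
  Datatype *int4type = typeFactory->getBase(4,TYPE_INT);
  Datatype *bytetype = typeFactory->getBase(1,TYPE_UINT);
  maplist.push_back(new RangeHint(0x8,4,0x8,int4type,0,RangeHint::fixed,-1));
  maplist.push_back(new RangeHint(0x10,1,0x10,bytetype,0,RangeHint::open,3));
  stable_sort(maplist.begin(),maplist.end(),RangeHint::compareRanges);	// Order hints by position
}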
class ProtoModel;
class LoadGuard;
/// \brief A light-weight class for analyzing pointers and aliasing on the stack
///
/// The gather() method looks for pointer references into a specific AddressSpace
/// (usually the stack). Then hasLocalAlias() checks if a specific Varnode within
/// the AddressSpace is (possibly) aliased by one of the gathered pointer references.
class AliasChecker {
public:
/// \brief A helper class holding a Varnode pointer reference and a possible index added to it
struct AddBase {
Varnode *base;
Varnode *index;
AddBase(Varnode *b,Varnode *i) { base=b; index=i; }
Varnode *base; ///< The Varnode holding the base pointer
Varnode *index; ///< The index value or NULL
AddBase(Varnode *b,Varnode *i) { base=b; index=i; } ///< Constructor
};
private:
const Funcdata *fd; // getFuncdata to search for aliases
AddrSpace *spaceid; // Space in which to search
mutable vector<AddBase> addbase; // Collection of pointer exprs
mutable vector<uintb> alias; // List of aliased addresses
mutable bool calculated; // Are aliases cached
uintb localextreme; // Local variable which is deepest on stack
uintb localboundary; // Boundary between locals and parameters
mutable uintb aliasboundary; // Shallowest alias
int4 direction; // 1=stack grows negative, -1=positive
void deriveBoundaries(const FuncProto &proto);
void gatherInternal(void) const;
const Funcdata *fd; ///< Function being searched for aliases
AddrSpace *space; ///< AddressSpace in which to search
mutable vector<AddBase> addBase; ///< Collection of pointers into the AddressSpace
mutable vector<uintb> alias; ///< List of aliased addresses (as offsets)
mutable bool calculated; ///< Have aliases been calculated
uintb localExtreme; ///< Largest possible offset for a local variable
uintb localBoundary; ///< Boundary offset separating locals and parameters
mutable uintb aliasBoundary; ///< Shallowest alias
int4 direction; ///< 1=stack grows negative, -1=positive
void deriveBoundaries(const FuncProto &proto); ///< Set up basic boundaries for the stack layout
void gatherInternal(void) const; ///< Run through Varnodes looking for pointers into the stack
public:
AliasChecker() { fd = (const Funcdata *)0; spaceid = (AddrSpace *)0; calculated=false; }
void gather(const Funcdata *f,AddrSpace *spc,bool defer);
bool hasLocalAlias(Varnode *vn) const;
void sortAlias(void) const;
const vector<AddBase> &getAddBase(void) const { return addbase; }
const vector<uintb> &getAlias(void) const { return alias; }
static void gatherAdditiveBase(Varnode *startvn,vector<AddBase> &addbase); // Gather result varnodes for all \e sums that \b startvn is involved in
static uintb gatherOffset(Varnode *vn); // If \b vn is a sum result, return the constant portion of this sum
AliasChecker() { fd = (const Funcdata *)0; space = (AddrSpace *)0; calculated=false; } ///< Constructor
void gather(const Funcdata *f,AddrSpace *spc,bool defer); ///< Gather Varnodes that point into the stack
bool hasLocalAlias(Varnode *vn) const; ///< Return \b true if it looks like the given Varnode is aliased by a pointer
void sortAlias(void) const; ///< Sort the alias starting offsets
const vector<AddBase> &getAddBase(void) const { return addBase; } ///< Get the collection of pointer Varnodes
const vector<uintb> &getAlias(void) const { return alias; } ///< Get the list of alias starting offsets
static void gatherAdditiveBase(Varnode *startvn,vector<AddBase> &addbase); ///< Gather result Varnodes for all \e sums that \b startvn is involved in
static uintb gatherOffset(Varnode *vn); ///< If \b vn is a sum result, return the constant portion of the sum
};
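// --- Illustrative sketch only, not part of this commit ---
// Hypothetical use of the alias checker: gather every pointer expression into
// the stack space once, then test individual stack Varnodes against the
// collected aliases.  The defer parameter is assumed to postpone the actual
// gathering when set to true, so we pass false here.
inline bool sketchIsAliased(const Funcdata *fd,AddrSpace *stackSpace,Varnode *stackVn)

{
  AliasChecker checker;
  checker.gather(fd,stackSpace,false);		// Collect pointers into the stack space
  return checker.hasLocalAlias(stackVn);	// true if some gathered pointer may reach stackVn
}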
/// \brief A container for hints about the data-type layout of an address space
///
/// A collection of data-type hints for the address space (as RangeHint objects) can
/// be collected from Varnodes, HighVariables or other sources, using the
/// gatherVarnodes(), gatherHighs(), and gatherOpen() methods. This class can then sort
/// and iterate through the RangeHint objects.
class MapState {
AddrSpace *spaceid;
RangeList range;
vector<MapRange *> maplist;
vector<MapRange *>::iterator iter;
Datatype *default_type;
AliasChecker checker;
AddrSpace *spaceid; ///< The address space being analyzed
RangeList range; ///< The subset of ranges, within the whole address space to analyze
vector<RangeHint *> maplist; ///< The list of collected RangeHints
vector<RangeHint *>::iterator iter; ///< The current iterator into the RangeHints
Datatype *defaultType; ///< The default data-type to use for RangeHints
AliasChecker checker; ///< A collection of pointer Varnodes into our address space
void addGuard(const LoadGuard &guard,OpCode opc,TypeFactory *typeFactory); ///< Add LoadGuard record as a hint to the collection
void addRange(uintb st,Datatype *ct,uint4 fl,RangeHint::RangeType rt,int4 hi); ///< Add a hint to the collection
public:
#ifdef OPACTION_DEBUG
mutable bool debugon;
@ -98,43 +136,51 @@ public:
void turnOnDebug(Architecture *g) const { debugon = true; glb=g; }
void turnOffDebug(void) const { debugon = false; }
#endif
MapState(AddrSpace *spc,const RangeList &rn,const RangeList &pm,Datatype *dt);
~MapState(void);
void addRange(uintb st,Datatype *ct,uint4 fl,bool ay,int4 lo,int4 hi);
void addRange(const EntryMap *rangemap);
bool initialize(void);
void sortAlias(void) { checker.sortAlias(); }
const vector<uintb> &getAlias(void) { return checker.getAlias(); }
void gatherVarnodes(const Funcdata &fd);
void gatherHighs(const Funcdata &fd);
void gatherOpen(const Funcdata &fd);
MapRange *next(void) { return *iter; }
bool getNext(void) { ++iter; if (iter==maplist.end()) return false; return true; }
MapState(AddrSpace *spc,const RangeList &rn,const RangeList &pm,Datatype *dt); ///< Constructor
~MapState(void); ///< Destructor
bool initialize(void); ///< Initialize the hint collection for iteration
void sortAlias(void) { checker.sortAlias(); } ///< Sort the alias starting offsets
const vector<uintb> &getAlias(void) { return checker.getAlias(); } ///< Get the list of alias starting offsets
void gatherSymbols(const EntryMap *rangemap); ///< Add Symbol information as hints to the collection
void gatherVarnodes(const Funcdata &fd); ///< Add stack Varnodes as hints to the collection
void gatherHighs(const Funcdata &fd); ///< Add HighVariables as hints to the collection
void gatherOpen(const Funcdata &fd); ///< Add pointer references as hints to the collection
RangeHint *next(void) { return *iter; } ///< Get the current RangeHint in the collection
bool getNext(void) { ++iter; if (iter==maplist.end()) return false; return true; } ///< Advance the iterator, return \b true if another hint is available
};
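// --- Illustrative sketch only, not part of this commit ---
// Hypothetical walk over the collected hints, mirroring the iteration protocol
// declared above: seed the collection, call initialize() (assumed to return
// false when there is nothing to lay out), then step with next()/getNext().
inline void sketchWalkHints(MapState &state,const Funcdata &fd)

{
  state.gatherVarnodes(fd);			// Hints from stack Varnodes
  state.gatherOpen(fd);				// Hints from pointer references (possible arrays)
  if (!state.initialize())
    return;					// Nothing was collected
  do {
    RangeHint *hint = state.next();		// The current hint
    // ... merge hint into the evolving symbol layout ...
    (void)hint;
  } while(state.getNext());			// Stop when the hints are exhausted
}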
/// \brief A Symbol scope for \e local variables of a particular function.
///
/// This acts like any other variable Scope, but is associated with a specific function
/// and the address space where the function maps its local variables and parameters, typically
/// the \e stack space. In addition to managing the local Symbols, this object builds up information
/// about the \e stack address space: what portions of it are used for mapped local variables, what
/// portions are used for temporary storage (not mapped), and what portion is for parameters.
class ScopeLocal : public ScopeInternal {
enum { range_locked=1 };
AddrSpace *spaceid; // Space containing main local stack
bool stackgrowsnegative;
RangeList localrange; // Address ranges that might hold mapped locals (not parameters)
bool overlapproblems; // Cached problem flag
uint4 qflags;
map<AddressSorter,string> name_recommend;
bool adjustFit(MapRange &a) const;
void createEntry(const MapRange &a);
bool rangeAbsorb(MapRange *a,MapRange *b);
void rangeUnion(MapRange *a,MapRange *b,bool warning);
void restructure(MapState &state,bool warning);
void markUnaliased(const vector<uintb> &alias);
void fakeInputSymbols(void);
void collectNameRecs(void);
AddrSpace *space; ///< Address space containing the local stack
RangeList localRange; ///< The set of addresses that might hold mapped locals (not parameters)
map<AddressUsePointPair,string> nameRecommend; ///< Symbol name recommendations for specific addresses
bool stackGrowsNegative; ///< Marked \b true if the stack is considered to \e grow towards smaller offsets
bool rangeLocked; ///< True if the subset of addresses \e mapped to \b this scope has been locked
bool adjustFit(RangeHint &a) const; ///< Make the given RangeHint fit in the current Symbol map
void createEntry(const RangeHint &a); ///< Create a Symbol entry corresponding to the given (fitted) RangeHint
bool restructure(MapState &state); ///< Merge hints into a formal Symbol layout of the address space
void markUnaliased(const vector<uintb> &alias); ///< Mark all local symbols for which there are no aliases
void fakeInputSymbols(void); ///< Make sure all stack inputs have an associated Symbol
void collectNameRecs(void); ///< Collect names of unlocked Symbols on the stack
public:
ScopeLocal(AddrSpace *spc,Funcdata *fd,Architecture *g);
virtual ~ScopeLocal(void) {}
ScopeLocal(AddrSpace *spc,Funcdata *fd,Architecture *g); ///< Constructor
virtual ~ScopeLocal(void) {} ///< Destructor
AddrSpace *getSpaceId(void) const { return spaceid; }
bool isUnaffectedStorage(Varnode *vn) const { return (vn->getSpace() == spaceid); }
void markNotMapped(AddrSpace *spc,uintb first,int4 sz,bool param);
AddrSpace *getSpaceId(void) const { return space; } ///< Get the associated (stack) address space
/// \brief Is this a storage location for \e unaffected registers
///
/// \param vn is the Varnode storing an \e unaffected register
/// \return \b true if the Varnode can be used as unaffected storage
bool isUnaffectedStorage(Varnode *vn) const { return (vn->getSpace() == space); }
void markNotMapped(AddrSpace *spc,uintb first,int4 sz,bool param); ///< Mark that a specific address range is not mapped
// Routines that are specific to one address space
virtual void saveXml(ostream &s) const;
@ -143,10 +189,9 @@ public:
const Address &pc,
Datatype *ct,
int4 &index,uint4 flags) const;
void resetLocalWindow(void);
void restructureVarnode(bool aliasyes);
void restructureHigh(void);
bool makeNameRecommendation(string &res,const Address &addr,const Address &usepoint) const;
void resetLocalWindow(void); ///< Reset the set of addresses that are considered mapped by the scope to the default
void restructureVarnode(bool aliasyes); ///< Layout mapped symbols based on Varnode information
void restructureHigh(void); ///< Layout mapped symbols based on HighVariable information
void makeNameRecommendationsForSymbols(vector<string> &resname,vector<Symbol *> &ressym) const; ///< Collect recommended names and the Symbols they apply to
void addRecommendName(const Address &addr,const Address &usepoint,const string &nm,int4 sz); ///< Add a name recommendation for a storage location and use point
};
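// --- Illustrative sketch only, not part of this commit ---
// Hypothetical flow for name recommendations: a name is registered for a
// storage location and use point, and later matched back up with whatever
// Symbols ended up at those locations.  The name, size, and addresses here
// are placeholders.
inline void sketchRecommendNames(ScopeLocal *scope,const Address &stackAddr,const Address &usePoint)

{
  scope->addRecommendName(stackAddr,usePoint,"saved_reg",4);	// Placeholder recommendation
  vector<string> names;
  vector<Symbol *> syms;
  scope->makeNameRecommendationsForSymbols(names,syms);		// Parallel lists: names[i] applies to syms[i]
  // names[i] can now be applied to syms[i] if that Symbol is still unnamed
}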

View file

@ -28,6 +28,7 @@ class VarnodeBank;
class Merge;
class Funcdata;
class SymbolEntry;
class ValueSet;
/// \brief Compare two Varnode pointers by location then definition
struct VarnodeCompareLocDef {
@ -134,7 +135,10 @@ private:
VarnodeDefSet::iterator defiter; ///< Iterator into VarnodeBank sorted by definition
list<PcodeOp *> descend; ///< List of every op using this varnode as input
mutable Cover *cover; ///< Addresses covered by the def->use of this Varnode
mutable Datatype *temptype; ///< For type propagate algorithm
mutable union {
Datatype *dataType; ///< For type propagate algorithm
ValueSet *valueSet;
} temp;
uintb consumed; ///< What parts of this varnode are used
uintb nzm; ///< Which bits do we know are zero
friend class VarnodeBank;
@ -167,8 +171,10 @@ public:
SymbolEntry *getSymbolEntry(void) const { return mapentry; } ///< Get symbol and scope information associated with this Varnode
uint4 getFlags(void) const { return flags; } ///< Get all the boolean attributes
Datatype *getType(void) const { return type; } ///< Get the Datatype associated with this Varnode
void setTempType(Datatype *t) const { temptype = t; } ///< Set the temporary Datatype
Datatype *getTempType(void) const { return temptype; } ///< Get the temporary Datatype (used during type propagation)
void setTempType(Datatype *t) const { temp.dataType = t; } ///< Set the temporary Datatype
Datatype *getTempType(void) const { return temp.dataType; } ///< Get the temporary Datatype (used during type propagation)
void setValueSet(ValueSet *v) const { temp.valueSet = v; } ///< Set the temporary ValueSet record
ValueSet *getValueSet(void) const { return temp.valueSet; } ///< Get the temporary ValueSet record
uint4 getCreateIndex(void) const { return create_index; } ///< Get the creation index
Cover *getCover(void) const { updateCover(); return cover; } ///< Get Varnode coverage information
list<PcodeOp *>::const_iterator beginDescend(void) const { return descend.begin(); } ///< Get iterator to list of syntax tree descendants (reads)