diff --git a/Ghidra/Features/Decompiler/src/decompile/cpp/coreaction.cc b/Ghidra/Features/Decompiler/src/decompile/cpp/coreaction.cc
index 02188ff79b..3f1f9a9ef0 100644
--- a/Ghidra/Features/Decompiler/src/decompile/cpp/coreaction.cc
+++ b/Ghidra/Features/Decompiler/src/decompile/cpp/coreaction.cc
@@ -495,23 +495,18 @@ int4 ActionStackPtrFlow::apply(Funcdata &data)
   return 0;
 }
 
-/// \brief Try to divide a single Varnode into lanes
+/// \brief Examine the PcodeOps using the given Varnode to determine possible lane sizes
 ///
-/// Look for a CPUI_SUBPIECE op that takes the given Varnode as the input or a
-/// CPUI_PIECE op that defines it. The smallest piece involved in this op is considered the
-/// putative lane size. If this lane size is acceptable, try to split data-flow through
-/// this Varnode using this lane scheme. Try a split for every CPUI_SUBPIECE/CPUI_PIECE the
-/// given Varnode is involved in until one succeeds.
-/// \param data is the function being transformed
-/// \param vn is the given single Varnode
-/// \param lanedRegister is acceptable set of lane sizes for the Varnode
-/// \param allowDowncast is \b true if we allow lane systems with SUBPIECE terminators
-/// \return \b true if the Varnode (and its data-flow) was successfully split
-bool ActionLaneDivide::processVarnode(Funcdata &data,Varnode *vn,const LanedRegister &lanedRegister,bool allowDowncast)
+/// Run through the defining op and any descendant ops of the given Varnode, looking for
+/// CPUI_PIECE and CPUI_SUBPIECE. Use these to determine possible lane sizes and
+/// register them with the given LanedRegister object.
+/// \param vn is the given Varnode
+/// \param allowedLanes is used to determine if a putative lane size is allowed
+/// \param checkLanes collects the possible lane sizes
+void ActionLaneDivide::collectLaneSizes(Varnode *vn,const LanedRegister &allowedLanes,LanedRegister &checkLanes)
 
 {
   list<PcodeOp *>::const_iterator iter = vn->beginDescend();
-  LanedRegister checkedLanes;
   int4 step = 0;		// 0 = descendants, 1 = def, 2 = done
   if (iter == vn->endDescend()) {
     step = 1;
@@ -536,16 +531,46 @@ bool ActionLaneDivide::processVarnode(Funcdata &data,Varnode *vn,const LanedRegi
       if (tmpSize < curSize)
         curSize = tmpSize;
     }
-    if (lanedRegister.allowedLane(curSize)) {
-      if (checkedLanes.allowedLane(curSize)) continue;
-      checkedLanes.addLaneSize(curSize);	// Only check this scheme once
-      LaneDescription description(lanedRegister.getWholeSize(),curSize);	// Lane scheme dictated by curSize
-      LaneDivide laneDivide(&data,vn,description,allowDowncast);
-      if (laneDivide.doTrace()) {
-        laneDivide.apply();
-        count += 1;		// Indicate a change was made
-        return true;
-      }
+    if (allowedLanes.allowedLane(curSize))
+      checkLanes.addLaneSize(curSize);		// Register this possible size
+  }
+}
+
+/// \brief Search for a likely lane size and try to divide a single Varnode into these lanes
+///
+/// There are different ways to search for a lane size:
+///
+/// Mode 0: Collect putative lane sizes based on the local ops using the Varnode. Attempt
+/// to divide based on each of those lane sizes in turn.
+///
+/// Mode 1: Similar to mode 0, except we allow for SUBPIECE operations that truncate to
+/// variables that are smaller than the lane size.
+///
+/// Mode 2: Attempt to divide based on a default lane size.
+/// \param data is the function being transformed
+/// \param vn is the given single Varnode
+/// \param lanedRegister is acceptable set of lane sizes for the Varnode
+/// \param mode is the lane size search mode (0, 1, or 2)
+/// \return \b true if the Varnode (and its data-flow) was successfully split
+bool ActionLaneDivide::processVarnode(Funcdata &data,Varnode *vn,const LanedRegister &lanedRegister,int4 mode)
+
+{
+  LanedRegister checkLanes;		// Lanes we are going to try, initialized to no lanes
+  bool allowDowncast = (mode > 0);
+  if (mode < 2)
+    collectLaneSizes(vn,lanedRegister,checkLanes);
+  else {
+    checkLanes.addLaneSize(4);		// Default lane size
+  }
+  LanedRegister::const_iterator enditer = checkLanes.end();
+  for(LanedRegister::const_iterator iter=checkLanes.begin();iter!=enditer;++iter) {
+    int4 curSize = *iter;
+    LaneDescription description(lanedRegister.getWholeSize(),curSize);	// Lane scheme dictated by curSize
+    LaneDivide laneDivide(&data,vn,description,allowDowncast);
+    if (laneDivide.doTrace()) {
+      laneDivide.apply();
+      count += 1;		// Indicate a change was made
+      return true;
     }
   }
   return false;
@@ -555,25 +580,31 @@ int4 ActionLaneDivide::apply(Funcdata &data)
 
 {
   map<VarnodeData,const LanedRegister *>::const_iterator iter;
-  bool allowDowncast = false;
-  for(int4 i=0;i<2;++i) {
+  for(int4 mode=0;mode<3;++mode) {
+    bool allStorageProcessed = true;
     for(iter=data.beginLaneAccess();iter!=data.endLaneAccess();++iter) {
       const LanedRegister *lanedReg = (*iter).second;
       Address addr = (*iter).first.getAddr();
      int4 sz = (*iter).first.size;
       VarnodeLocSet::const_iterator viter = data.beginLoc(sz,addr);
       VarnodeLocSet::const_iterator venditer = data.endLoc(sz,addr);
+      bool allVarnodesProcessed = true;
       while(viter != venditer) {
         Varnode *vn = *viter;
-        if (processVarnode(data, vn, *lanedReg, allowDowncast)) {
+        if (processVarnode(data, vn, *lanedReg, mode)) {
           viter = data.beginLoc(sz,addr);
           venditer = data.endLoc(sz, addr);	// Recalculate bounds
+          allVarnodesProcessed = true;
         }
-        else
+        else {
           ++viter;
+          allVarnodesProcessed = false;
+        }
       }
+      if (!allVarnodesProcessed)
+        allStorageProcessed = false;
     }
-    allowDowncast = true;
+    if (allStorageProcessed) break;
   }
   data.clearLanedAccessMap();
   return 0;
diff --git a/Ghidra/Features/Decompiler/src/decompile/cpp/coreaction.hh b/Ghidra/Features/Decompiler/src/decompile/cpp/coreaction.hh
index dfec92b102..deeabd785d 100644
--- a/Ghidra/Features/Decompiler/src/decompile/cpp/coreaction.hh
+++ b/Ghidra/Features/Decompiler/src/decompile/cpp/coreaction.hh
@@ -104,7 +104,8 @@ public:
 /// if a particular lane scheme makes sense in terms of the function's data-flow, and then
 /// rewrites the data-flow so that the lanes become explicit Varnodes.
 class ActionLaneDivide : public Action {
-  bool processVarnode(Funcdata &data,Varnode *vn,const LanedRegister &lanedRegister,bool allowDowncast);
+  void collectLaneSizes(Varnode *vn,const LanedRegister &allowedLanes,LanedRegister &checkLanes);
+  bool processVarnode(Funcdata &data,Varnode *vn,const LanedRegister &lanedRegister,int4 mode);
 public:
   ActionLaneDivide(const string &g) : Action(rule_onceperfunc,"lanedivide",g) {}	///< Constructor
   virtual Action *clone(const ActionGroupList &grouplist) const {
diff --git a/Ghidra/Features/Decompiler/src/decompile/cpp/transform.cc b/Ghidra/Features/Decompiler/src/decompile/cpp/transform.cc
index 484d615075..010d8f54db 100644
--- a/Ghidra/Features/Decompiler/src/decompile/cpp/transform.cc
+++ b/Ghidra/Features/Decompiler/src/decompile/cpp/transform.cc
@@ -261,6 +261,19 @@ bool TransformOp::attemptInsertion(Funcdata *fd)
   return true;			// Already inserted
 }
 
+void LanedRegister::LanedIterator::normalize(void)
+
+{
+  uint4 flag = 1;
+  flag <<= size;
+  while(flag <= mask) {
+    if ((flag & mask) != 0) return;	// Found a valid lane size
+    size += 1;
+    flag <<= 1;
+  }
+  size = -1;		// Indicate ending iterator
+}
+
 /// Read XML of the form \<register\>
 /// \param el is the particular \e register tag
 /// \param manage is used to map register names to storage info
diff --git a/Ghidra/Features/Decompiler/src/decompile/cpp/transform.hh b/Ghidra/Features/Decompiler/src/decompile/cpp/transform.hh
index c7f194b3eb..671ffd70ba 100644
--- a/Ghidra/Features/Decompiler/src/decompile/cpp/transform.hh
+++ b/Ghidra/Features/Decompiler/src/decompile/cpp/transform.hh
@@ -86,6 +86,24 @@ public:
 
 /// \brief Describes a (register) storage location and the ways it might be split into lanes
 class LanedRegister {
+  friend class LanedIterator;
+public:
+  /// \brief Class for iterating over possible lane sizes
+  class LanedIterator {
+    int4 size;		///< Current lane size
+    uint4 mask;		///< Collection being iterated over
+    void normalize(void);	///< Normalize the iterator, after increment or initialization
+  public:
+    LanedIterator(const LanedRegister *lanedR) { size = 0; mask = lanedR->sizeBitMask; normalize(); }	///< Constructor
+    LanedIterator(void) { size = -1; mask = 0; }	///< Constructor for ending iterator
+    LanedIterator &operator++(void) { size += 1; normalize(); return *this; }	///< Preincrement operator
+    int4 operator*(void) const { return size; }	///< Dereference operator
+    LanedIterator &operator=(const LanedIterator &op2) { size = op2.size; mask = op2.mask; return *this; }	///< Assignment
+    bool operator==(const LanedIterator &op2) const { return (size == op2.size); }	///< Equal operator
+    bool operator!=(const LanedIterator &op2) const { return (size != op2.size); }	///< Not-equal operator
+  };
+  typedef LanedIterator const_iterator;
+private:
   int4 wholeSize;		///< Size of the whole register
   uint4 sizeBitMask;		///< A 1-bit for every permissible lane size
 public:
@@ -96,6 +114,8 @@ public:
   uint4 getSizeBitMask(void) const { return sizeBitMask; }	///< Get the bit mask of possible lane sizes
   void addLaneSize(int4 size) { sizeBitMask |= ((uint4)1 << size); }	///< Add a new \e size to the allowed list
   bool allowedLane(int4 size) const { return (((sizeBitMask >> size) & 1) != 0); }	///< Is \e size among the allowed lane sizes
+  const_iterator begin(void) const { return LanedIterator(this); }	///< Starting iterator over possible lane sizes
+  const_iterator end(void) const { return LanedIterator(); }	///< Ending iterator over possible lane sizes
 };
 
 /// \brief Description of logical lanes within a \b big Varnode
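
The LanedIterator added to transform.hh walks the set bits of sizeBitMask: bit i being set means a lane size of i bytes is permitted, and normalize() advances the iterator to the next set bit at or above its current position, or to the ending state (size == -1). Below is a minimal standalone sketch of that bit-walk, assuming only standard C++; nextLaneSize, main and the example mask are illustrative stand-ins, not part of the patch.

    #include <cstdint>
    #include <iostream>

    // Same loop as LanedRegister::LanedIterator::normalize(), written as a free
    // function: return the lowest set bit of 'mask' at or above 'size', or -1
    // when no set bit remains (the ending state).
    int nextLaneSize(uint32_t mask,int size)

    {
      uint32_t flag = 1;
      flag <<= size;
      while(flag <= mask) {
        if ((flag & mask) != 0) return size;	// Found a valid lane size
        size += 1;
        flag <<= 1;
      }
      return -1;				// Indicate ending iterator
    }

    int main(void)

    {
      uint32_t mask = (1u << 2) | (1u << 4) | (1u << 8);	// Permit lanes of 2, 4 and 8 bytes
      for(int size=nextLaneSize(mask,0);size != -1;size=nextLaneSize(mask,size+1))
        std::cout << size << ' ';		// Prints: 2 4 8
      std::cout << std::endl;
      return 0;
    }

With the real class, the same sequence 2, 4, 8 is what begin()/end() yield after addLaneSize(2), addLaneSize(4) and addLaneSize(8), and that iteration is how the reworked processVarnode() walks its candidate lane sizes.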
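
ActionLaneDivide::apply() now makes up to three passes over the laned storage locations and escalates only when the previous pass left some Varnode unsplit: mode 0 tries lane sizes collected from PIECE/SUBPIECE ops, mode 1 repeats that while also allowing truncating SUBPIECE terminators (allowDowncast), and mode 2 falls back to a default 4-byte lane. A rough sketch of that retry structure, again in plain standard C++; divideAll, tryDivide and the integer items are hypothetical stand-ins for the walk over storage locations and for processVarnode(), not Ghidra APIs.

    #include <functional>
    #include <iostream>
    #include <vector>

    // Escalating retry: run the cheapest mode first and move to a more permissive
    // mode only if some item could not be divided on the current pass.
    void divideAll(const std::vector<int> &items,const std::function<bool(int,int)> &tryDivide)

    {
      for(int mode=0;mode<3;++mode) {
        bool allStorageProcessed = true;
        for(int item : items) {
          if (!tryDivide(item,mode))
            allStorageProcessed = false;	// This pass left something unsplit
        }
        if (allStorageProcessed)
          break;				// Nothing left over for a more permissive mode
      }
    }

    int main(void)

    {
      // Pretend multiples of 3 split in any mode, while everything else needs the
      // default-lane fallback (mode 2)
      divideAll({3,7,12},[](int item,int mode) {
        bool success = (mode == 2) || (item % 3 == 0);
        std::cout << "item " << item << " mode " << mode << (success ? ": split" : ": skipped") << '\n';
        return success;
      });
      return 0;
    }

The early break is what keeps the later, more permissive modes from running at all when the first pass succeeds for every laned storage location.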