Mirror of https://github.com/NationalSecurityAgency/ghidra.git, synced 2025-10-05 10:49:34 +02:00

Commit 22e5c1a48b: Merge remote-tracking branch 'origin/patch'

5 changed files with 32 additions and 15 deletions
@@ -2085,11 +2085,11 @@ bool SplitDatatype::generateConstants(Varnode *vn,vector<Varnode *> &inVarnodes)
 /// based on the input offsets in \b dataTypePieces.
 /// \param rootVn is the given root constant
 /// \param inVarnodes is the container for the new Varnodes
-void SplitDatatype::buildInConstants(Varnode *rootVn,vector<Varnode *> &inVarnodes)
+/// \param bigEndian is \b true if the output address space is big endian
+void SplitDatatype::buildInConstants(Varnode *rootVn,vector<Varnode *> &inVarnodes,bool bigEndian)
 {
   uintb baseVal = rootVn->getOffset();
-  bool bigEndian = rootVn->getSpace()->isBigEndian();
   for(int4 i=0;i<dataTypePieces.size();++i) {
     Datatype *dt = dataTypePieces[i].inType;
     int4 off = dataTypePieces[i].offset;
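The deleted line explains the refactor: a constant Varnode carries no address space of its own, so rootVn->getSpace() is not a meaningful place to read byte order from, and the caller must now pass in the endianness of the destination. A minimal Java sketch of the arithmetic such a flag controls when a constant is cut into pieces (illustrative names only, not the Ghidra decompiler API):

    // Illustrative sketch, not Ghidra code: extract the piece at byte
    // offset 'off' (of 'size' bytes) from a 'total'-byte constant.
    public class PieceExtract {
        static long piece(long baseVal, int off, int size, int total, boolean bigEndian) {
            // Big-endian offsets count from the most-significant byte,
            // little-endian offsets from the least-significant byte.
            int lsbOff = bigEndian ? (total - off - size) : off;
            long shifted = baseVal >>> (8 * lsbOff);
            return size >= 8 ? shifted : shifted & ((1L << (8 * size)) - 1);
        }

        public static void main(String[] args) {
            // Splitting 0x11223344 into two 2-byte pieces at offset 0:
            System.out.printf("%x%n", piece(0x11223344L, 0, 2, 4, true));  // 1122
            System.out.printf("%x%n", piece(0x11223344L, 0, 2, 4, false)); // 3344
        }
    }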
@@ -2344,7 +2344,7 @@ bool SplitDatatype::splitCopy(PcodeOp *copyOp,Datatype *inType,Datatype *outType
   vector<Varnode *> inVarnodes;
   vector<Varnode *> outVarnodes;
   if (inVn->isConstant())
-    buildInConstants(inVn,inVarnodes);
+    buildInConstants(inVn,inVarnodes,outVn->getSpace()->isBigEndian());
   else
     buildInSubpieces(inVn,copyOp,inVarnodes);
   buildOutVarnodes(outVn,outVarnodes);
@@ -2459,9 +2459,10 @@ bool SplitDatatype::splitStore(PcodeOp *storeOp,Datatype *outType)
     return false;
   }

+  AddrSpace *storeSpace = storeOp->getIn(0)->getSpaceFromConst();
   vector<Varnode *> inVarnodes;
   if (inVn->isConstant())
-    buildInConstants(inVn,inVarnodes);
+    buildInConstants(inVn,inVarnodes,storeSpace->isBigEndian());
   else if (loadOp != (PcodeOp *)0) {
     vector<Varnode *> loadPtrs;
     buildPointers(loadRoot.pointer, loadRoot.ptrType, loadRoot.baseOffset, loadOp, loadPtrs, true);
@@ -2483,7 +2484,6 @@ bool SplitDatatype::splitStore(PcodeOp *storeOp,Datatype *outType)

   vector<Varnode *> storePtrs;
   buildPointers(storeRoot.pointer, storeRoot.ptrType, storeRoot.baseOffset, storeOp, storePtrs, false);
-  AddrSpace *storeSpace = storeOp->getIn(0)->getSpaceFromConst();
   // Preserve original STORE object, so that INDIRECT references are still valid
   // but convert it into the first of the smaller STOREs
   data.opSetInput(storeOp,storePtrs[0],1);
@@ -188,7 +188,7 @@ class SplitDatatype {
   bool testDatatypeCompatibility(Datatype *inBase,Datatype *outBase,bool inConstant);
   bool testCopyConstraints(PcodeOp *copyOp);
   bool generateConstants(Varnode *vn,vector<Varnode *> &inVarnodes);
-  void buildInConstants(Varnode *rootVn,vector<Varnode *> &inVarnodes);
+  void buildInConstants(Varnode *rootVn,vector<Varnode *> &inVarnodes,bool bigEndian);
   void buildInSubpieces(Varnode *rootVn,PcodeOp *followOp,vector<Varnode *> &inVarnodes);
   void buildOutVarnodes(Varnode *rootVn,vector<Varnode *> &outVarnodes);
   void buildOutConcats(Varnode *rootVn,PcodeOp *previousOp,vector<Varnode *> &outVarnodes);
@@ -150,6 +150,12 @@ public abstract class AbstractMsf implements Msf {
   this.pdbOptions = Objects.requireNonNull(pdbOptions, "PdbOptions may not be null");
   // Do initial configuration with largest possible page size. ConfigureParameters will
   // be called again later with the proper pageSize set.
+  // GP-3603... considered changing the pagesize for the initial header read from
+  // 0x1000 to 0x2000, but I don't think there is anything needed beyond 0x1000 offset
+  // in terms of header information, and if we did change it, then we'd run the risk
+  // of an extremely small PDB (pagesize of 0x200 and less than 16 pages) not being
+  // able to be read (initial header read would fail). Determining if such a small
+  // PDB is possible would be more work than I feel necessary at this time.
   pageSize = 0x1000;
   configureParameters();
   // Create components.
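The bound described in the added comment is easy to verify: at a 0x200-byte page size, a PDB with fewer than 16 pages occupies less than 0x2000 bytes, so a fixed 0x2000-byte initial read could run past end-of-file, whereas a 0x1000-byte read needs only 8 such pages. A sketch of that check (hypothetical helper, not part of the Msf API):

    // Hypothetical helper, not part of the Msf API: can a file of
    // numPages pages of pageSize bytes satisfy an initial header read?
    static boolean initialReadFits(int pageSize, int numPages, int initialRead) {
        return (long) pageSize * numPages >= initialRead;
    }
    // initialReadFits(0x200, 15, 0x2000) == false  (15 * 0x200 = 0x1E00)
    // initialReadFits(0x200, 15, 0x1000) == true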
@@ -217,6 +223,7 @@ public abstract class AbstractMsf implements Msf {
   * Returns the file reader
   * @return the file reader
   */
+  @Override
   public MsfFileReader getFileReader() {
     return fileReader;
   }
@@ -92,6 +92,10 @@ public class Msf700 extends AbstractMsf {
       log2PageSize = 12;
       freePageMapNumSequentialPage = 1;
       break;
+    case 0x2000:
+      log2PageSize = 13;
+      freePageMapNumSequentialPage = 1;
+      break;
     default:
       throw new PdbException(String.format("Unknown page size: 0X%08X", pageSize));
   }
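Every page size this switch accepts is a power of two, so the log2 values follow mechanically; a compact equivalent is sketched below (illustrative only; the explicit switch in the real code also pins down exactly which sizes are supported):

    // Sketch: log2 of a power-of-two page size is its trailing-zero count.
    static int log2PageSize(int pageSize) {
        if (Integer.bitCount(pageSize) != 1) {
            throw new IllegalArgumentException(
                String.format("Unknown page size: 0X%08X", pageSize));
        }
        return Integer.numberOfTrailingZeros(pageSize); // 0x2000 -> 13
    }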
@@ -87,7 +87,7 @@ class X86_64_ElfRelocationContext extends ElfRelocationContext {
   // NOTE: GOT allocation calculation assumes all GOT entries correspond to a specific
   // symbol and not a computed offset. This assumption may need to be revised based upon
   // uses of getGotEntryAddress method
-  Set<Long> uniqueSymbolValues = new HashSet<>();
+  Set<Object> uniqueSymbolValues = new HashSet<>();
   for (ElfRelocationTable rt : getElfHeader().getRelocationTables()) {
     ElfSymbolTable st = rt.getAssociatedSymbolTable();
     if (st == null) {
@@ -102,7 +102,9 @@ class X86_64_ElfRelocationContext extends ElfRelocationContext {
       if (elfSymbol == null) {
         continue;
       }
-      uniqueSymbolValues.add(elfSymbol.getValue());
+      long value = elfSymbol.getValue();
+      Object uniqueValue = value == 0 ? elfSymbol.getNameAsString() : Long.valueOf(value);
+      uniqueSymbolValues.add(uniqueValue);
     }
   }
   return Math.max(8, uniqueSymbolValues.size() * 8);
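The point of the Set<Long> to Set<Object> change: undefined (external) symbols all report value 0, so keying uniqueness on the raw value alone collapses every one of them into a single entry and undersizes the GOT allocation; hashing such symbols by name instead keeps them distinct. A self-contained sketch with stand-in types (not the Ghidra ElfSymbol API):

    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class GotSizeEstimate {
        // Stand-in for ElfSymbol: undefined symbols carry value 0.
        record Sym(String name, long value) {}

        static int estimateGotSize(List<Sym> symbols) {
            Set<Object> unique = new HashSet<>();
            for (Sym s : symbols) {
                unique.add(s.value() == 0 ? s.name() : Long.valueOf(s.value()));
            }
            return Math.max(8, unique.size() * 8); // 8 bytes per x86-64 GOT entry
        }

        public static void main(String[] args) {
            List<Sym> syms = List.of(new Sym("malloc", 0), new Sym("free", 0));
            // Keyed by value alone these two would count as one entry;
            // keyed by name the estimate is correctly 16 bytes.
            System.out.println(estimateGotSize(syms)); // 16
        }
    }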
@@ -130,13 +132,14 @@ class X86_64_ElfRelocationContext extends ElfRelocationContext {
   nextAllocatedGotEntryAddress = Address.NO_ADDRESS;

   ElfSymbol gotElfSymbol = findGotElfSymbol();
-  if (gotElfSymbol == null) {
+  // TODO: may need to support cases where GOT symbol not defined
+  if (gotElfSymbol == null && !getElfHeader().isRelocatable()) {
     loadHelper.log(
       "GOT allocatiom failed. " + ElfConstants.GOT_SYMBOL_NAME + " not defined");
     return null;
   }
-  if (getSymbolAddress(gotElfSymbol) != null) {
+  if (gotElfSymbol != null && getSymbolAddress(gotElfSymbol) != null) {
     throw new AssertException(ElfConstants.GOT_SYMBOL_NAME + " already allocated");
   }
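Taken together, the two new conditions change the failure mode: a missing GOT symbol is fatal only for fully linked binaries, while relocatable objects proceed and simply skip the symbol-related bookkeeping. A condensed sketch of that decision (simplified, not the Ghidra implementation):

    // Simplified sketch of the new guard, not the Ghidra implementation.
    static boolean canAllocateGot(boolean gotSymbolDefined, boolean isRelocatable) {
        // Fully linked binary without a GOT symbol: allocation fails.
        // Relocatable object: proceed even when the symbol is absent.
        return gotSymbolDefined || isRelocatable;
    }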
@@ -147,7 +150,9 @@ class X86_64_ElfRelocationContext extends ElfRelocationContext {
   if (allocatedGotLimits != null &&
     allocatedGotLimits.getMinAddress().getOffset() < Integer.MAX_VALUE) {
     // GOT must fall within first 32-bit segment
-    symbolMap.put(gotElfSymbol, allocatedGotLimits.getMinAddress());
+    if (gotElfSymbol != null) {
+      symbolMap.put(gotElfSymbol, allocatedGotLimits.getMinAddress());
+    }
     allocatedGotAddress = allocatedGotLimits.getMinAddress();
     nextAllocatedGotEntryAddress = allocatedGotAddress;
     gotMap = new HashMap<>();
@@ -156,8 +161,7 @@ class X86_64_ElfRelocationContext extends ElfRelocationContext {
     return allocatedGotAddress;
   }

-  loadHelper.log("Failed to allocate " + ElfRelocationHandler.GOT_BLOCK_NAME +
-    " block required for relocation processing");
+  loadHelper.log("Failed to allocate GOT block required for relocation processing");
   return null;
 }

@@ -213,7 +217,9 @@ class X86_64_ElfRelocationContext extends ElfRelocationContext {
   }
   if (addr == null) {
     addr = getNextAllocatedGotEntryAddress();
-    gotMap.put(symbolValue, addr);
+    if (gotMap != null) {
+      gotMap.put(symbolValue, addr);
+    }
   }
   return addr == Address.NO_ADDRESS ? null : addr;
 }