Merge remote-tracking branch 'origin/patch'

This commit is contained in:
ghidra1 2021-06-25 14:42:47 -04:00
commit c96244af02
8 changed files with 160 additions and 123 deletions

View file

@ -525,22 +525,28 @@ public class DBTrace extends DBCachedDomainObjectAdapter implements Trace, Trace
@Override
public DBTraceProgramView getFixedProgramView(long snap) {
synchronized (fixedProgramViews) {
DBTraceProgramView view = fixedProgramViews.computeIfAbsent(snap, t -> {
Msg.debug(this, "Creating fixed view at snap=" + snap);
return new DBTraceProgramView(this, snap, baseCompilerSpec);
});
return view;
// NOTE: The new viewport will need to read from the time manager during init
try (LockHold hold = lockRead()) {
synchronized (fixedProgramViews) {
DBTraceProgramView view = fixedProgramViews.computeIfAbsent(snap, t -> {
Msg.debug(this, "Creating fixed view at snap=" + snap);
return new DBTraceProgramView(this, snap, baseCompilerSpec);
});
return view;
}
}
}
@Override
public DBTraceVariableSnapProgramView createProgramView(long snap) {
synchronized (programViews) {
DBTraceVariableSnapProgramView view =
new DBTraceVariableSnapProgramView(this, snap, baseCompilerSpec);
programViews.put(view, null);
return view;
// NOTE: The new viewport will need to read from the time manager during init
try (LockHold hold = lockRead()) {
synchronized (programViews) {
DBTraceVariableSnapProgramView view =
new DBTraceVariableSnapProgramView(this, snap, baseCompilerSpec);
programViews.put(view, null);
return view;
}
}
}

View file

@ -612,7 +612,9 @@ public class GhidraScriptComponentProvider extends ComponentProviderAdapter {
void runScript(ResourceFile scriptFile, TaskListener listener) {
lastRunScript = scriptFile;
GhidraScript script = doGetScriptInstance(scriptFile);
doRunScript(script, listener);
if (script != null) {
doRunScript(script, listener);
}
}
private GhidraScript doGetScriptInstance(ResourceFile scriptFile) {

View file

@ -55,6 +55,15 @@ public class Ext4ExtentIdx implements StructConverter {
return ei_leaf_hi;
}
/**
 * Combines ei_leaf_hi (upper 16 bits) and ei_leaf_lo (lower 32 bits) into the
 * full physical block number referenced by this extent index entry.
 *
 * @return the combined 48-bit ei_leaf value, as an unsigned long
 */
public long getEi_leaf() {
	long hi = Short.toUnsignedLong(ei_leaf_hi);
	long lo = Integer.toUnsignedLong(ei_leaf_lo);
	return (hi << 32) | lo;
}
/**
 * Returns the raw ei_unused field of this extent index entry.
 *
 * @return the ei_unused value
 */
public short getEi_unused() {
return ei_unused;
}

View file

@ -40,6 +40,7 @@ public class Ext4FileSystem implements GFileSystem {
private ByteProvider provider;
private String volumeName;
private String uuid;
private Ext4SuperBlock superBlock;
public Ext4FileSystem(FSRLRoot fsrl, ByteProvider provider) {
this.fsrl = fsrl;
@ -51,17 +52,18 @@ public class Ext4FileSystem implements GFileSystem {
BinaryReader reader = new BinaryReader(provider, true);
reader.setPointerIndex(0x400);
Ext4SuperBlock superBlock = new Ext4SuperBlock(reader);
this.superBlock = new Ext4SuperBlock(reader);
this.volumeName = superBlock.getVolumeName();
this.uuid = NumericUtilities.convertBytesToString(superBlock.getS_uuid());
long blockCount = superBlock.getS_blocks_count();
int s_log_block_size = superBlock.getS_log_block_size();
blockSize = (int) Math.pow(2, (10 + s_log_block_size));
this.blockSize = (int) Math.pow(2, (10 + s_log_block_size));
int groupSize = blockSize * superBlock.getS_blocks_per_group();
if (groupSize <= 0) {
throw new IOException("Invalid groupSize: " + groupSize);
}
int numGroups = (int) (blockCount / superBlock.getS_blocks_per_group());
if (blockCount % superBlock.getS_blocks_per_group() != 0) {
numGroups++;
@ -79,7 +81,7 @@ public class Ext4FileSystem implements GFileSystem {
monitor.incrementProgress(1);
}
Ext4Inode[] inodes = getInodes(reader, superBlock, groupDescriptors, monitor);
Ext4Inode[] inodes = getInodes(reader, groupDescriptors, monitor);
int s_inodes_count = superBlock.getS_inodes_count();
for (int i = 0; i < s_inodes_count; i++) {
@ -88,17 +90,16 @@ public class Ext4FileSystem implements GFileSystem {
continue;
}
if ((inode.getI_mode() & Ext4Constants.I_MODE_MASK) == Ext4Constants.S_IFDIR) {
processDirectory(reader, superBlock, inodes, i, null, null, monitor);
processDirectory(reader, inodes, i, null, null, monitor);
}
else if ((inode.getI_mode() & Ext4Constants.I_MODE_MASK) == Ext4Constants.S_IFREG) {
processFile(reader, superBlock, inode, monitor);
// TODO: handle files if needed here
}
}
}
private void processDirectory(BinaryReader reader, Ext4SuperBlock superBlock,
Ext4Inode[] inodes, int index, String name, GFile parent, TaskMonitor monitor)
throws IOException, CancelledException {
private void processDirectory(BinaryReader reader, Ext4Inode[] inodes, int index, String name,
GFile parent, TaskMonitor monitor) throws IOException, CancelledException {
if (name != null && (name.equals(".") || name.equals(".."))) {
return;
@ -119,20 +120,23 @@ public class Ext4FileSystem implements GFileSystem {
}
boolean isDirEntry2 =
(superBlock.getS_feature_incompat() & Ext4Constants.INCOMPAT_FILETYPE) != 0;
// if uses extents
if ((inode.getI_flags() & Ext4Constants.EXT4_EXTENTS_FL) != 0) {
Ext4IBlock i_block = inode.getI_block();
processIBlock(reader, superBlock, inodes, parent, isDirEntry2, i_block, monitor);
}
else {
throw new IOException("File system fails to use extents.");
}
Ext4IBlock i_block = inode.getI_block();
processDirectoryIBlock(reader, inodes, parent, isDirEntry2, i_block, monitor);
// null out directories that have already been processed
inodes[index] = null;
}
private void processIBlock(BinaryReader reader, Ext4SuperBlock superBlock, Ext4Inode[] inodes,
GFile parent, boolean isDirEntry2, Ext4IBlock i_block, TaskMonitor monitor)
throws CancelledException, IOException {
/**
 * A {@link java.util.function.Consumer}-like operation that may throw up to two
 * checked exception types.
 *
 * @param <T> type of the value consumed
 * @param <E1> first throwable type the operation may raise
 * @param <E2> second throwable type the operation may raise
 */
@FunctionalInterface
interface Checked2Consumer<T, E1 extends Throwable, E2 extends Throwable> {
	void accept(T t) throws E1, E2;
}
/**
 * Shorthand for a {@link Checked2Consumer} over {@link Ext4Extent} that may
 * throw {@link IOException} or {@link CancelledException}.
 */
interface ExtentConsumer extends Checked2Consumer<Ext4Extent, IOException, CancelledException> {
	// no additional definitions; exists only to shorten the generic type name
}
private void forEachExtentEntry(Ext4IBlock i_block, ExtentConsumer extentConsumer,
TaskMonitor monitor) throws CancelledException, IOException {
Ext4ExtentHeader header = i_block.getHeader();
if (header.getEh_depth() == 0) {
short numEntries = header.getEh_entries();
@ -140,42 +144,41 @@ public class Ext4FileSystem implements GFileSystem {
for (int i = 0; i < numEntries; i++) {
monitor.checkCanceled();
Ext4Extent extent = entries.get(i);
long offset = extent.getExtentStartBlockNumber() * blockSize;
reader.setPointerIndex(offset);
if (isDirEntry2) {
processDirEntry2(reader, superBlock, inodes, parent, monitor, extent, offset);
}
else {
processDirEntry(reader, superBlock, inodes, parent, monitor, extent, offset);
}
extentConsumer.accept(extent);
}
}
else {
//throw new IOException( "Unhandled extent tree depth > 0 for inode " + index );
short numEntries = header.getEh_entries();
List<Ext4ExtentIdx> entries = i_block.getIndexEntries();
for (int i = 0; i < numEntries; i++) {
monitor.checkCanceled();
Ext4ExtentIdx extentIndex = entries.get(i);
long lo = extentIndex.getEi_leaf_lo();
long hi = extentIndex.getEi_leaf_hi();
long physicalBlockOfNextLevel = (hi << 16) | lo;
long offset = physicalBlockOfNextLevel * blockSize;
long offset = extentIndex.getEi_leaf() * blockSize;
// System.out.println( ""+physicalBlockOfNextLevel );
// System.out.println( "" );
reader.setPointerIndex(offset);
Ext4IBlock intermediateBlock = new Ext4IBlock(reader, true);
processIBlock(reader, superBlock, inodes, parent, isDirEntry2, intermediateBlock,
monitor);
forEachExtentEntry(Ext4IBlock.readIBlockWithExtents(provider, offset),
extentConsumer, monitor);
}
}
}
private void processDirEntry(BinaryReader reader, Ext4SuperBlock superBlock, Ext4Inode[] inodes,
GFile parent, TaskMonitor monitor, Ext4Extent extent, long offset)
/**
 * Walks every extent of a directory's i_block and processes the directory
 * entries stored in each extent.
 *
 * @param reader reader positioned over the file system image
 * @param inodes all inodes of the file system (processed entries are nulled by callees)
 * @param parent parent directory file, or null for the root
 * @param isDirEntry2 true if the file system uses the dir_entry_2 record format
 * @param i_block the directory inode's i_block (extent tree)
 * @param monitor task monitor for cancellation
 * @throws CancelledException if the user cancels
 * @throws IOException if a read error occurs
 */
private void processDirectoryIBlock(BinaryReader reader, Ext4Inode[] inodes, GFile parent,
		boolean isDirEntry2, Ext4IBlock i_block, TaskMonitor monitor)
		throws CancelledException, IOException {
	forEachExtentEntry(i_block, dirExtent -> {
		// position the reader at the extent's first block before parsing entries
		long extentOffset = dirExtent.getExtentStartBlockNumber() * blockSize;
		reader.setPointerIndex(extentOffset);
		if (!isDirEntry2) {
			processDirEntry(reader, inodes, parent, monitor, dirExtent, extentOffset);
		}
		else {
			processDirEntry2(reader, inodes, parent, monitor, dirExtent, extentOffset);
		}
	}, monitor);
}
private void processDirEntry(BinaryReader reader, Ext4Inode[] inodes, GFile parent,
TaskMonitor monitor, Ext4Extent extent, long offset)
throws CancelledException, IOException {
while ((reader.getPointerIndex() - offset) < ((long) extent.getEe_len() * blockSize)) {
@ -189,8 +192,7 @@ public class Ext4FileSystem implements GFileSystem {
if ((child.getI_mode() & Ext4Constants.I_MODE_MASK) == Ext4Constants.S_IFDIR) {
String childName = dirEnt.getName();
long readerOffset = reader.getPointerIndex();
processDirectory(reader, superBlock, inodes, childIndex, childName, parent,
monitor);
processDirectory(reader, inodes, childIndex, childName, parent, monitor);
reader.setPointerIndex(readerOffset);
}
else if ((child.getI_mode() & Ext4Constants.I_MODE_MASK) == Ext4Constants.S_IFREG ||
@ -204,8 +206,8 @@ public class Ext4FileSystem implements GFileSystem {
}
}
private void processDirEntry2(BinaryReader reader, Ext4SuperBlock superBlock,
Ext4Inode[] inodes, GFile parent, TaskMonitor monitor, Ext4Extent extent, long offset)
private void processDirEntry2(BinaryReader reader, Ext4Inode[] inodes, GFile parent,
TaskMonitor monitor, Ext4Extent extent, long offset)
throws CancelledException, IOException {
while ((reader.getPointerIndex() - offset) < ((long) extent.getEe_len() * blockSize)) {
@ -218,8 +220,7 @@ public class Ext4FileSystem implements GFileSystem {
int childInode = dirEnt2.getInode();
String childName = dirEnt2.getName();
long readerOffset = reader.getPointerIndex();
processDirectory(reader, superBlock, inodes, childInode, childName, parent,
monitor);
processDirectory(reader, inodes, childInode, childName, parent, monitor);
reader.setPointerIndex(readerOffset);
}
else if (dirEnt2.getFile_type() == Ext4Constants.FILE_TYPE_REGULAR_FILE ||
@ -254,11 +255,6 @@ public class Ext4FileSystem implements GFileSystem {
inodes[fileInodeNum] = null;
}
private void processFile(BinaryReader reader, Ext4SuperBlock superBlock, Ext4Inode inode,
TaskMonitor monitor) {
}
@Override
public int getFileCount() {
return fsih.getFileCount();
@ -368,38 +364,38 @@ public class Ext4FileSystem implements GFileSystem {
throw new IOException("Unsupported file storage: not EXT4_EXTENTS: " + file.getPath());
}
Ext4IBlock i_block = inode.getI_block();
Ext4ExtentHeader header = i_block.getHeader();
if (header.getEh_depth() != 0) {
throw new IOException("Unsupported file storage: eh_depth: " + file.getPath());
}
try {
long fileSize = inode.getSize();
ExtentsByteProvider result = new ExtentsByteProvider(provider, file.getFSRL());
long fileSize = inode.getSize();
ExtentsByteProvider ebp = new ExtentsByteProvider(provider, file.getFSRL());
for (Ext4Extent extent : i_block.getExtentEntries()) {
long startPos = extent.getStreamBlockNumber() * blockSize;
long providerOfs = extent.getExtentStartBlockNumber() * blockSize;
long extentLen = extent.getExtentBlockCount() * blockSize;
if (ebp.length() < startPos) {
ebp.addSparseExtent(startPos - ebp.length());
}
if (ebp.length() + extentLen > fileSize) {
// the last extent may have a trailing partial block
extentLen = fileSize - ebp.length();
}
Ext4IBlock i_block = inode.getI_block();
forEachExtentEntry(i_block, extent -> {
long startPos = extent.getStreamBlockNumber() * blockSize;
long providerOfs = extent.getExtentStartBlockNumber() * blockSize;
long extentLen = extent.getExtentBlockCount() * blockSize;
if (result.length() < startPos) {
result.addSparseExtent(startPos - result.length());
}
if (result.length() + extentLen > fileSize) {
// the last extent may have a trailing partial block
extentLen = fileSize - result.length();
}
ebp.addExtent(providerOfs, extentLen);
result.addExtent(providerOfs, extentLen);
}, monitor);
if (result.length() < fileSize) {
// trailing sparse. not sure if possible.
result.addSparseExtent(fileSize - result.length());
}
return result;
}
if (ebp.length() < fileSize) {
// trailing sparse. not sure if possible.
ebp.addSparseExtent(fileSize - ebp.length());
catch (CancelledException e) {
throw new IOException(e);
}
return ebp;
}
private Ext4Inode[] getInodes(BinaryReader reader, Ext4SuperBlock superBlock,
Ext4GroupDescriptor[] groupDescriptors, TaskMonitor monitor)
throws IOException, CancelledException {
private Ext4Inode[] getInodes(BinaryReader reader, Ext4GroupDescriptor[] groupDescriptors,
TaskMonitor monitor) throws IOException, CancelledException {
int inodeCount = superBlock.getS_inodes_count();
int inodesPerGroup = superBlock.getS_inodes_per_group();

View file

@ -36,6 +36,21 @@ public class Ext4IBlock implements StructConverter {
private boolean isExtentTree;
/**
 * Static factory that parses an {@link Ext4IBlock} expected to hold an extent
 * tree, starting at the given offset in the provider.
 *
 * @param provider {@link ByteProvider} to read from
 * @param offset offset of the extent header within the provider
 * @return the newly parsed Ext4IBlock
 * @throws IOException if a read error occurs
 */
public static Ext4IBlock readIBlockWithExtents(ByteProvider provider, long offset)
		throws IOException {
	BinaryReader blockReader = new BinaryReader(provider, true);
	blockReader.setPointerIndex(offset);
	return new Ext4IBlock(blockReader, true);
}
/**
 * Creates an Ext4IBlock by reading from the start of the given provider.
 * Delegates to the BinaryReader-based constructor; the {@code true} flag
 * presumably selects little-endian reads, consistent with ext4's on-disk
 * format — confirm against BinaryReader's constructor contract.
 *
 * @param provider {@link ByteProvider} to read from
 * @param isExtentTree true if this i_block is expected to contain an extent tree
 * @throws IOException if a read error occurs
 */
public Ext4IBlock(ByteProvider provider, boolean isExtentTree) throws IOException {
this( new BinaryReader( provider, true ), isExtentTree );
}
@ -48,14 +63,14 @@ public class Ext4IBlock implements StructConverter {
count++;
short numEntries = header.getEh_entries();
if( header.getEh_depth() > 0 ) {
indexEntries = new ArrayList<Ext4ExtentIdx>();
indexEntries = new ArrayList<>();
for( int i = 0; i < numEntries; i++ ) {
indexEntries.add( new Ext4ExtentIdx(reader) );
count++;
}
}
else {
extentEntries = new ArrayList<Ext4Extent>();
extentEntries = new ArrayList<>();
for( int i = 0; i < numEntries; i++ ) {
extentEntries.add( new Ext4Extent(reader) );
count++;

View file

@ -21,8 +21,8 @@ import java.util.*;
/**
* <code>ObjectCache</code> provides a fixed-size long-key-based object cache.
* Both a hard and weak cache are maintained, where the weak cache is only
* limited by available memory. This cache mechanism is useful in ensuring that
* only a single object instance for a given key exists.
* <p>
* The weak cache is keyed, while the hard cache simply maintains the presence of
@ -42,8 +42,8 @@ public class ObjectCache {
public ObjectCache(int hardCacheSize) {
this.hardCacheSize = hardCacheSize;
hashTable = new HashMap<>();
refQueue = new ReferenceQueue<Object>();
hardCache = new LinkedList<Object>();
refQueue = new ReferenceQueue<>();
hardCache = new LinkedList<>();
}
/**
@ -100,7 +100,7 @@ public class ObjectCache {
*/
public synchronized void put(long key, Object obj) {
processQueue();
KeyedSoftReference<?> ref = new KeyedSoftReference<Object>(key, obj, refQueue);
KeyedSoftReference<?> ref = new KeyedSoftReference<>(key, obj, refQueue);
hashTable.put(key, ref);
addToHardCache(obj);
}
@ -143,7 +143,7 @@ public class ObjectCache {
/**
* Provides a weak wrapper for a keyed-object
*/
private class KeyedSoftReference<T> extends WeakReference<T> {
private static class KeyedSoftReference<T> extends WeakReference<T> {
private long key;
/**

View file

@ -35,7 +35,7 @@ import ghidra.program.model.address.KeyRange;
*/
public class DBObjectCache<T extends DatabaseObject> {
private Map<Long, KeyedSoftReference> map;
private Map<Long, KeyedSoftReference<T>> map;
private ReferenceQueue<T> refQueue;
private LinkedList<T> hardCache;
private int hardCacheSize;
@ -49,9 +49,9 @@ public class DBObjectCache<T extends DatabaseObject> {
*/
public DBObjectCache(int hardCacheSize) {
this.hardCacheSize = hardCacheSize;
map = new HashMap<Long, KeyedSoftReference>();
refQueue = new ReferenceQueue<T>();
hardCache = new LinkedList<T>();
map = new HashMap<>();
refQueue = new ReferenceQueue<>();
hardCache = new LinkedList<>();
}
/**
@ -60,7 +60,7 @@ public class DBObjectCache<T extends DatabaseObject> {
* @return the cached object or null if the object with that key is not currently cached.
*/
public synchronized T get(long key) {
KeyedSoftReference ref = map.get(key);
KeyedSoftReference<T> ref = map.get(key);
if (ref != null) {
T obj = ref.get();
if (obj == null) {
@ -89,7 +89,7 @@ public class DBObjectCache<T extends DatabaseObject> {
*/
public synchronized T get(DBRecord objectRecord) {
long key = objectRecord.getKey();
KeyedSoftReference ref = map.get(key);
KeyedSoftReference<T> ref = map.get(key);
if (ref != null) {
T obj = ref.get();
if (obj == null) {
@ -133,7 +133,7 @@ public class DBObjectCache<T extends DatabaseObject> {
processQueue();
long key = data.getKey();
addToHardCache(data);
KeyedSoftReference ref = new KeyedSoftReference(key, data, refQueue);
KeyedSoftReference<T> ref = new KeyedSoftReference<>(key, data, refQueue);
map.put(key, ref);
}
@ -142,9 +142,9 @@ public class DBObjectCache<T extends DatabaseObject> {
* @return an List of all the cached objects.
*/
public synchronized List<T> getCachedObjects() {
ArrayList<T> list = new ArrayList<T>();
ArrayList<T> list = new ArrayList<>();
processQueue();
for (KeyedSoftReference ref : map.values()) {
for (KeyedSoftReference<T> ref : map.values()) {
T obj = ref.get();
if (obj != null) {
list.add(obj);
@ -180,7 +180,7 @@ public class DBObjectCache<T extends DatabaseObject> {
private void deleteSmallKeyRanges(List<KeyRange> keyRanges) {
for (KeyRange range : keyRanges) {
for (long key = range.minKey; key <= range.maxKey; key++) {
KeyedSoftReference ref = map.remove(key);
KeyedSoftReference<T> ref = map.remove(key);
if (ref != null) {
DatabaseObject obj = ref.get();
if (obj != null) {
@ -202,7 +202,7 @@ public class DBObjectCache<T extends DatabaseObject> {
map.values().removeIf(ref -> checkRef(ref, keyRanges));
}
private boolean checkRef(KeyedSoftReference ref, List<KeyRange> keyRanges) {
private boolean checkRef(KeyedSoftReference<T> ref, List<KeyRange> keyRanges) {
long key = ref.getKey();
if (keyRangesContain(keyRanges, key)) {
DatabaseObject obj = ref.get();
@ -252,7 +252,7 @@ public class DBObjectCache<T extends DatabaseObject> {
processQueue();
if (++invalidateCount <= 0) {
invalidateCount = 1;
for (KeyedSoftReference ref : map.values()) {
for (KeyedSoftReference<T> ref : map.values()) {
DatabaseObject obj = ref.get();
if (obj != null) {
obj.setInvalid();
@ -276,7 +276,7 @@ public class DBObjectCache<T extends DatabaseObject> {
*/
public synchronized void delete(long key) {
processQueue();
KeyedSoftReference ref = map.get(key);
KeyedSoftReference<T> ref = map.get(key);
if (ref != null) {
T obj = ref.get();
if (obj != null) {
@ -297,10 +297,10 @@ public class DBObjectCache<T extends DatabaseObject> {
// we know the cast is safe--we put them in there
@SuppressWarnings("unchecked")
private void processQueue() {
KeyedSoftReference ref;
while ((ref = (KeyedSoftReference) refQueue.poll()) != null) {
KeyedSoftReference<T> ref;
while ((ref = (KeyedSoftReference<T>) refQueue.poll()) != null) {
long key = ref.getKey();
KeyedSoftReference oldValue = map.remove(key);
KeyedSoftReference<T> oldValue = map.remove(key);
if (oldValue != null && oldValue != ref) {
// we have put another item in the cache with the same key. Further, we
@ -312,7 +312,7 @@ public class DBObjectCache<T extends DatabaseObject> {
}
}
private class KeyedSoftReference extends WeakReference<T> {
private static class KeyedSoftReference<T> extends WeakReference<T> {
private long key;
KeyedSoftReference(long key, T obj, ReferenceQueue<T> queue) {
@ -328,7 +328,7 @@ public class DBObjectCache<T extends DatabaseObject> {
public synchronized void keyChanged(long oldKey, long newKey) {
processQueue();
KeyedSoftReference ref = map.remove(oldKey);
KeyedSoftReference<T> ref = map.remove(oldKey);
if (ref != null) {
map.put(newKey, ref);
T t = ref.get();

View file

@ -15,11 +15,9 @@
*/
package ghidra;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.*;
import java.net.URL;
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import java.util.*;
import generic.jar.ResourceFile;
@ -93,6 +91,17 @@ public class GhidraJarApplicationLayout extends GhidraApplicationLayout {
* @return A {@link ResourceFile} from the given {@link URL}
*/
private ResourceFile fromUrl(URL url) {
return new ResourceFile(URLDecoder.decode(url.toExternalForm(), StandardCharsets.UTF_8));
String urlString = url.toExternalForm();
try {
// Decode the URL to replace things like %20 with real spaces.
// Note: can't use URLDecoder.decode(String, Charset) because Utility must be
// Java 1.8 compatible.
urlString = URLDecoder.decode(urlString, "UTF-8");
}
catch (UnsupportedEncodingException e) {
// Shouldn't happen, but failed to find UTF-8 encoding.
// Proceed without decoding, and hope for the best.
}
return new ResourceFile(urlString);
}
}