Mirror of https://github.com/NationalSecurityAgency/ghidra.git (synced 2025-10-05 02:39:44 +02:00)
Replace LongObjectHashtable with Java's HashMap
This commit is contained in:
parent c999cfc308
commit 0efc173756
1 changed file with 40 additions and 40 deletions
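
At its core the change is mechanical: the custom primitive-keyed LongObjectHashtable<KeyedSoftReference> field becomes a standard Map<Long, KeyedSoftReference> backed by HashMap, so every hashTable call site turns into the corresponding map call, with long keys auto-boxed to Long. Below is a minimal sketch of the resulting soft-reference cache pattern, assuming simplified, illustrative names (SoftValueCache, KeyedSoftRef); it is not the actual Ghidra class, which additionally keeps a small hard-reference cache and record-validity checks omitted here. The full diff follows the sketch.

import java.lang.ref.ReferenceQueue;
import java.lang.ref.SoftReference;
import java.util.HashMap;
import java.util.Map;

// Illustrative sketch of a HashMap-backed soft-reference cache; not the actual Ghidra class.
class SoftValueCache<T> {

    // Soft reference that remembers its key so cleared entries can be pruned from the map.
    private static class KeyedSoftRef<T> extends SoftReference<T> {
        private final long key;

        KeyedSoftRef(long key, T value, ReferenceQueue<T> queue) {
            super(value, queue);
            this.key = key;
        }

        long getKey() {
            return key;
        }
    }

    private final Map<Long, KeyedSoftRef<T>> map = new HashMap<>();
    private final ReferenceQueue<T> refQueue = new ReferenceQueue<>();

    synchronized T get(long key) {
        KeyedSoftRef<T> ref = map.get(key); // primitive key is auto-boxed to Long
        if (ref == null) {
            return null;
        }
        T obj = ref.get();
        if (obj == null) {
            map.remove(key); // referent was garbage collected; drop the stale entry
        }
        return obj;
    }

    synchronized void put(long key, T obj) {
        processQueue();
        map.put(key, new KeyedSoftRef<>(key, obj, refQueue));
    }

    // Remove map entries whose soft referents have already been collected.
    private void processQueue() {
        KeyedSoftRef<?> ref;
        while ((ref = (KeyedSoftRef<?>) refQueue.poll()) != null) {
            map.remove(ref.getKey());
        }
    }
}
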
@@ -24,14 +24,18 @@ import java.util.*;
 
 import db.Record;
 import ghidra.program.model.address.KeyRange;
-import ghidra.util.datastruct.LongObjectHashtable;
 
 /**
- * Generic cache implementation for objects that extend DatabaseObject.
+ * Generic cache implementation for objects that extend DatabaseObject. This is a reference based
+ * cache such that objects are only ever automatically removed from the cache when there are no
+ * references to that object. It also maintains small "hard" cache so that recently accessed objects
+ * are not prematurely removed from the cache if there are no references to them.
+ *
+ * @param <T> The type of the object stored in this cache
  */
 public class DBObjectCache<T extends DatabaseObject> {
 
-    private LongObjectHashtable<KeyedSoftReference> hashTable;
+    private Map<Long, KeyedSoftReference> map;
     private ReferenceQueue<T> refQueue;
     private LinkedList<T> hardCache;
     private int hardCacheSize;
@@ -45,7 +49,7 @@ public class DBObjectCache<T extends DatabaseObject> {
      */
     public DBObjectCache(int hardCacheSize) {
         this.hardCacheSize = hardCacheSize;
-        hashTable = new LongObjectHashtable<KeyedSoftReference>();
+        map = new HashMap<Long, KeyedSoftReference>();
         refQueue = new ReferenceQueue<T>();
         hardCache = new LinkedList<T>();
     }
@@ -56,18 +60,18 @@ public class DBObjectCache<T extends DatabaseObject> {
      * @return the cached object or null if the object with that key is not currently cached.
      */
     public synchronized T get(long key) {
-        KeyedSoftReference ref = hashTable.get(key);
+        KeyedSoftReference ref = map.get(key);
         if (ref != null) {
             T obj = ref.get();
             if (obj == null) {
-                hashTable.remove(key);
+                map.remove(key);
             }
             else {
                 if (obj.checkIsValid()) {
                     addToHardCache(obj);
                     return obj;
                 }
-                hashTable.remove(key);
+                map.remove(key);
             }
         }
         return null;
@@ -85,18 +89,18 @@ public class DBObjectCache<T extends DatabaseObject> {
      */
     public synchronized T get(Record objectRecord) {
         long key = objectRecord.getKey();
-        KeyedSoftReference ref = hashTable.get(key);
+        KeyedSoftReference ref = map.get(key);
         if (ref != null) {
             T obj = ref.get();
             if (obj == null) {
-                hashTable.remove(key);
+                map.remove(key);
             }
             else {
                 if (obj.checkIsValid(objectRecord)) {
                     addToHardCache(obj);
                     return obj;
                 }
-                hashTable.remove(key);
+                map.remove(key);
             }
         }
         return null;
@@ -104,9 +108,10 @@ public class DBObjectCache<T extends DatabaseObject> {
 
     /**
      * Returns the number of objects currently in the cache.
+     * @return the number of objects currently in the cache.
      */
     public int size() {
-        return hashTable.size();
+        return map.size();
     }
 
     /**
@@ -129,23 +134,22 @@ public class DBObjectCache<T extends DatabaseObject> {
         long key = data.getKey();
         addToHardCache(data);
         KeyedSoftReference ref = new KeyedSoftReference(key, data, refQueue);
-        hashTable.put(key, ref);
+        map.put(key, ref);
     }
 
     /**
-     * Returns an array of all the cached objects.
+     * Returns an List of all the cached objects.
+     * @return an List of all the cached objects.
      */
-    public synchronized ArrayList<T> getCachedObjects() {
+    public synchronized List<T> getCachedObjects() {
         ArrayList<T> list = new ArrayList<T>();
-        processQueue();
-        long[] keys = hashTable.getKeys();
-        for (int i = 0; i < keys.length; i++) {
-            KeyedSoftReference ref = hashTable.get(keys[i]);
+        for (KeyedSoftReference ref : map.values()) {
             T obj = ref.get();
             if (obj != null) {
                 list.add(obj);
             }
         }
+        processQueue();
         return list;
     }
 
@@ -158,7 +162,7 @@ public class DBObjectCache<T extends DatabaseObject> {
         hardCache.clear();
         processQueue();
         long rangesSize = getKeyRangesSize(keyRanges); // < 0 too many ranges
-        if (rangesSize < 0 || rangesSize > hashTable.size()) {
+        if (rangesSize < 0 || rangesSize > map.size()) {
             deleteLargeKeyRanges(keyRanges);
         }
         else {
@@ -175,7 +179,7 @@ public class DBObjectCache<T extends DatabaseObject> {
     private void deleteSmallKeyRanges(List<KeyRange> keyRanges) {
         for (KeyRange range : keyRanges) {
             for (long key = range.minKey; key <= range.maxKey; key++) {
-                KeyedSoftReference ref = hashTable.remove(key);
+                KeyedSoftReference ref = map.remove(key);
                 if (ref != null) {
                     DatabaseObject obj = ref.get();
                     if (obj != null) {
@@ -194,10 +198,9 @@ public class DBObjectCache<T extends DatabaseObject> {
      * @param keyRanges key ranges to delete
      */
     private void deleteLargeKeyRanges(List<KeyRange> keyRanges) {
-        long[] keys = hashTable.getKeys();
-        for (int i = 0; i < keys.length; i++) {
-            if (keyRangesContain(keyRanges, keys[i])) {
-                KeyedSoftReference ref = hashTable.remove(keys[i]);
+        for (Long key : map.keySet()) {
+            if (keyRangesContain(keyRanges, key)) {
+                KeyedSoftReference ref = map.remove(key);
                 DatabaseObject obj = ref.get();
                 if (obj != null) {
                     obj.setDeleted();
@@ -209,7 +212,7 @@ public class DBObjectCache<T extends DatabaseObject> {
 
     /**
      * Return total number of keys covered by list of keyRanges.
-     * @param keyRanges
+     * @param keyRanges key ranges to get the number of keys
      * @return number of keys, or -1 if more than Long.MAX_VALUE keys
      */
     private long getKeyRangesSize(List<KeyRange> keyRanges) {
@@ -243,9 +246,7 @@ public class DBObjectCache<T extends DatabaseObject> {
         processQueue();
         if (++invalidateCount <= 0) {
             invalidateCount = 1;
-            long[] keys = hashTable.getKeys();
-            for (int i = 0; i < keys.length; i++) {
-                KeyedSoftReference ref = hashTable.get(keys[i]);
+            for (KeyedSoftReference ref : map.values()) {
                 DatabaseObject obj = ref.get();
                 if (obj != null) {
                     obj.setInvalid();
@@ -269,16 +270,15 @@ public class DBObjectCache<T extends DatabaseObject> {
      * @param endKey the last key in the range to invalidate.
      */
     public synchronized void invalidate(long startKey, long endKey) {
-        if (endKey - startKey < hashTable.size()) {
+        if (endKey - startKey < map.size()) {
             for (long i = startKey; i <= endKey; i++) {
                 invalidate(i);
             }
         }
         else {
-            long[] keys = hashTable.getKeys();
-            for (int i = 0; i < keys.length; i++) {
-                if (keys[i] >= startKey && keys[i] <= endKey) {
-                    invalidate(keys[i]);
+            for (long key : map.keySet()) {
+                if (key >= startKey && key <= endKey) {
+                    invalidate(key);
                 }
             }
         }
@@ -290,14 +290,14 @@ public class DBObjectCache<T extends DatabaseObject> {
      */
     public synchronized void delete(long key) {
         processQueue();
-        KeyedSoftReference ref = hashTable.get(key);
+        KeyedSoftReference ref = map.get(key);
         if (ref != null) {
             T obj = ref.get();
             if (obj != null) {
                 obj.setDeleted();
                 ref.clear();
             }
-            hashTable.remove(key);
+            map.remove(key);
         }
     }
 
@@ -307,7 +307,7 @@ public class DBObjectCache<T extends DatabaseObject> {
      */
     public synchronized void invalidate(long key) {
         processQueue();
-        KeyedSoftReference ref = hashTable.get(key);
+        KeyedSoftReference ref = map.get(key);
         if (ref != null) {
             T obj = ref.get();
             if (obj != null) {
@@ -329,14 +329,14 @@ public class DBObjectCache<T extends DatabaseObject> {
         KeyedSoftReference ref;
         while ((ref = (KeyedSoftReference) refQueue.poll()) != null) {
             long key = ref.getKey();
-            KeyedSoftReference oldValue = hashTable.remove(key);
+            KeyedSoftReference oldValue = map.remove(key);
 
             if (oldValue != null && oldValue != ref) {
                 // we have put another item in the cache with the same key. Further, we
                 // removed the item, but the garbage collector had not put the item on the
                 // reference queue until after we added a new reference to the cache.
                 // We want to keep the last value that was added, as it has not been deleted.
-                hashTable.put(key, oldValue);
+                map.put(key, oldValue);
             }
         }
     }
@@ -357,9 +357,9 @@ public class DBObjectCache<T extends DatabaseObject> {
     public synchronized void keyChanged(long oldKey, long newKey) {
         processQueue();
 
-        KeyedSoftReference ref = hashTable.remove(oldKey);
+        KeyedSoftReference ref = map.remove(oldKey);
         if (ref != null) {
-            hashTable.put(newKey, ref);
+            map.put(newKey, ref);
             T t = ref.get();
             if (t != null) {
                 t.setInvalid();
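
Several hunks above (deleteLargeKeyRanges, the ranged invalidate) also switch from walking a long[] snapshot of the keys to iterating map.keySet() or map.values() directly while removing or invalidating entries. As a general companion illustration, and not code from this commit, the following sketch shows the iterator-based idiom for removing HashMap entries during iteration; the class, method, and variable names are illustrative.

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

class MapPruneExample {
    // Remove all entries whose key lies in [minKey, maxKey] while iterating the map.
    // Generic Java idiom shown for illustration; not code from this commit.
    static <V> void removeRange(Map<Long, V> map, long minKey, long maxKey) {
        Iterator<Map.Entry<Long, V>> it = map.entrySet().iterator();
        while (it.hasNext()) {
            long key = it.next().getKey();
            if (key >= minKey && key <= maxKey) {
                it.remove(); // safely removes the current entry during iteration
            }
        }
    }

    public static void main(String[] args) {
        Map<Long, String> map = new HashMap<>();
        map.put(1L, "a");
        map.put(5L, "b");
        map.put(9L, "c");
        removeRange(map, 4, 8);
        System.out.println(map); // prints the remaining entries, e.g. {1=a, 9=c}
    }
}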