diff --git a/Ghidra/Framework/DB/src/main/java/db/DBFieldIterator.java b/Ghidra/Framework/DB/src/main/java/db/DBFieldIterator.java index 99b5cc6828..1fc551772e 100644 --- a/Ghidra/Framework/DB/src/main/java/db/DBFieldIterator.java +++ b/Ghidra/Framework/DB/src/main/java/db/DBFieldIterator.java @@ -1,6 +1,5 @@ /* ### * IP: GHIDRA - * REVIEWED: YES * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -23,31 +22,31 @@ import java.io.IOException; * Field values within a table. */ public interface DBFieldIterator { - + /** * Return true if a Field is available in the forward direction. * @throws IOException thrown if an IO error occurs */ public boolean hasNext() throws IOException; - + /** * Return true if a Field is available in the reverse direction * @throws IOException thrown if an IO error occurs */ public boolean hasPrevious() throws IOException; - + /** - * Return the nexy Field value or null if one is not available. + * Return the next Field value or null if one is not available. * @throws IOException thrown if an IO error occurs */ public Field next() throws IOException; - + /** * Return the previous Field value or null if one is not available. * @throws IOException thrown if an IO error occurs */ public Field previous() throws IOException; - + /** * Delete the last record(s) associated with the last Field value * read via the next or previous methods. diff --git a/Ghidra/Framework/DB/src/main/java/db/FieldIndexTable.java b/Ghidra/Framework/DB/src/main/java/db/FieldIndexTable.java index 9724dc62ce..52101bd5d7 100644 --- a/Ghidra/Framework/DB/src/main/java/db/FieldIndexTable.java +++ b/Ghidra/Framework/DB/src/main/java/db/FieldIndexTable.java @@ -1,6 +1,5 @@ /* ### * IP: GHIDRA - * REVIEWED: YES * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,15 +21,13 @@ import java.util.NoSuchElementException; public class FieldIndexTable extends IndexTable { - private static final Class[] fieldClasses = { - }; - - private static final String[] fieldNames = { - }; - + private static final Class[] fieldClasses = {}; + + private static final String[] fieldNames = {}; + private final Schema indexSchema; private final int indexColumn; - + /** * Construct a new secondary index which is based upon a specific field within the * primary table specified by name. @@ -40,14 +37,10 @@ public class FieldIndexTable extends IndexTable { * @throws IOException */ FieldIndexTable(Table primaryTable, int colIndex) throws IOException { - this(primaryTable, - primaryTable.getDBHandle().getMasterTable().createTableRecord( - primaryTable.getName(), - getIndexTableSchema(primaryTable, colIndex), - colIndex) - ); + this(primaryTable, primaryTable.getDBHandle().getMasterTable().createTableRecord( + primaryTable.getName(), getIndexTableSchema(primaryTable, colIndex), colIndex)); } - + /** * Construct a new or existing secondary index. An existing index must have * its' root ID specified within the tableRecord. 
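For orientation only (not part of the patch): a minimal usage sketch of the DBFieldIterator contract touched above. The helper name is made up; it assumes a Table that already has a secondary index on some column and relies only on the public Table.indexFieldIterator(...) overload and the hasNext()/next() contract documented in this change.

    import java.io.IOException;

    import db.DBFieldIterator;
    import db.Field;
    import db.Table;

    public class IndexScanSketch {
        // Hypothetical helper: print every index field value of an assumed
        // indexed column in ascending order. Passing null min/max selects the
        // full range; 'true' positions the iterator before the minimum value.
        static void dumpIndexValues(Table table, int indexedColumn) throws IOException {
            DBFieldIterator it = table.indexFieldIterator(null, null, true, indexedColumn);
            while (it.hasNext()) {
                Field value = it.next(); // per the contract above, non-null while hasNext() is true
                System.out.println(value);
            }
        }
    }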
@@ -60,23 +53,23 @@ public class FieldIndexTable extends IndexTable { this.indexSchema = indexTable.getSchema(); this.indexColumn = indexTableRecord.getIndexedColumn(); } - + private static Schema getIndexTableSchema(Table primaryTable, int colIndex) { byte fieldType = primaryTable.getSchema().getField(colIndex).getFieldType(); IndexField indexKeyField = IndexField.getIndexField(fieldType); return new Schema(0, indexKeyField.getClass(), "IndexKey", fieldClasses, fieldNames); } - + /* * @see ghidra.framework.store.db.IndexTable#findPrimaryKeys(ghidra.framework.store.db.Field) */ @Override - long[] findPrimaryKeys(Field indexValue) throws IOException { + long[] findPrimaryKeys(Field indexValue) throws IOException { IndexField indexField = IndexField.getIndexField(indexValue, Long.MIN_VALUE); DBFieldIterator iter = indexTable.fieldKeyIterator(indexField); - ArrayList list = new ArrayList(20); + ArrayList list = new ArrayList<>(20); while (iter.hasNext()) { - IndexField f = (IndexField)iter.next(); + IndexField f = (IndexField) iter.next(); if (!f.hasSameIndex(indexField)) { break; } @@ -102,7 +95,7 @@ public class FieldIndexTable extends IndexTable { * @see ghidra.framework.store.db.IndexTable#getKeyCount(ghidra.framework.store.db.Field) */ @Override - int getKeyCount(Field indexValue) throws IOException { + int getKeyCount(Field indexValue) throws IOException { return findPrimaryKeys(indexValue).length; } @@ -110,7 +103,7 @@ public class FieldIndexTable extends IndexTable { * @see ghidra.framework.store.db.IndexTable#addEntry(ghidra.framework.store.db.Record) */ @Override - void addEntry(Record record) throws IOException { + void addEntry(Record record) throws IOException { Field indexedField = record.getField(colIndex); IndexField f = IndexField.getIndexField(indexedField, record.getKey()); Record rec = indexSchema.createRecord(f); @@ -121,7 +114,7 @@ public class FieldIndexTable extends IndexTable { * @see ghidra.framework.store.db.IndexTable#deleteEntry(ghidra.framework.store.db.Record) */ @Override - void deleteEntry(Record record) throws IOException { + void deleteEntry(Record record) throws IOException { Field indexedField = record.getField(colIndex); IndexField f = IndexField.getIndexField(indexedField, record.getKey()); indexTable.deleteRecord(f); @@ -131,7 +124,7 @@ public class FieldIndexTable extends IndexTable { * @see ghidra.framework.store.db.IndexTable#indexIterator() */ @Override - DBFieldIterator indexIterator() throws IOException { + DBFieldIterator indexIterator() throws IOException { return new IndexFieldIterator(); } @@ -139,25 +132,26 @@ public class FieldIndexTable extends IndexTable { * @see ghidra.framework.store.db.IndexTable#indexIterator(ghidra.framework.store.db.Field, ghidra.framework.store.db.Field, boolean) */ @Override - DBFieldIterator indexIterator(Field minField, Field maxField, boolean atMin) - throws IOException { - return new IndexFieldIterator(minField, maxField, atMin); + DBFieldIterator indexIterator(Field minField, Field maxField, boolean before) + throws IOException { + return new IndexFieldIterator(minField, maxField, before); } /* * @see db.IndexTable#indexIterator(db.Field, db.Field, db.Field, boolean) */ @Override - DBFieldIterator indexIterator(Field minField, Field maxField, Field startField, boolean before) throws IOException { + DBFieldIterator indexIterator(Field minField, Field maxField, Field startField, boolean before) + throws IOException { return new IndexFieldIterator(minField, maxField, startField, before); } - + /** * Iterates over 
index field values within a specified range. * NOTE: Index fields which have been truncated may be returned out of order. */ class IndexFieldIterator implements DBFieldIterator { - + private IndexField min; private IndexField max; private IndexField lastKey; @@ -165,52 +159,56 @@ public class FieldIndexTable extends IndexTable { private DBFieldIterator indexIterator; private boolean hasNext = false; private boolean hasPrev = false; - + /** * Construct an index field iterator starting with the minimum index value. */ IndexFieldIterator() throws IOException { this(null, null, true); } - + /** * Construct an index field iterator. The iterator is positioned at index * value identified by startValue. * @param minValue minimum index value or null if no minimum * @param maxValue maximum index value or null if no maximum - * @param atMin if true initial position is before minValue, else position + * @param before if true initial position is before minValue, else position * after maxValue * @throws IOException */ - IndexFieldIterator(Field minValue, Field maxValue, boolean atMin) throws IOException { - + IndexFieldIterator(Field minValue, Field maxValue, boolean before) throws IOException { + if (primaryTable.getSchema().getField(indexColumn).isVariableLength()) { - throw new UnsupportedOperationException("Due to potential truncation issues, operation not permitted on variable length fields"); + throw new UnsupportedOperationException( + "Due to potential truncation issues, operation not permitted on variable length fields"); } - - min = minValue != null ? - IndexField.getIndexField(minValue, Long.MIN_VALUE) : null; - max = maxValue != null ? - IndexField.getIndexField(maxValue, Long.MAX_VALUE) : null; - + + min = minValue != null ? IndexField.getIndexField(minValue, Long.MIN_VALUE) : null; + max = maxValue != null ? IndexField.getIndexField(maxValue, Long.MAX_VALUE) : null; + IndexField start = null; - if (atMin && minValue != null) { + if (before && minValue != null) { start = min; } - else if (!atMin && maxValue != null){ + else if (!before && maxValue != null) { start = max; } - - indexIterator = indexTable.fieldKeyIterator(min, max, start); - + + if (start != null) { + indexIterator = indexTable.fieldKeyIterator(min, max, start); + } + else { + indexIterator = indexTable.fieldKeyIterator(min, max, before); + } + if (indexIterator.hasNext()) { indexIterator.next(); - if (atMin) { + if (before) { indexIterator.previous(); } } } - + /** * @param minField * @param maxField @@ -218,32 +216,34 @@ public class FieldIndexTable extends IndexTable { * @param before * @throws IOException */ - public IndexFieldIterator(Field minValue, Field maxValue, Field startValue, boolean before) throws IOException { - + public IndexFieldIterator(Field minValue, Field maxValue, Field startValue, boolean before) + throws IOException { + if (primaryTable.getSchema().getField(indexColumn).isVariableLength()) { - throw new UnsupportedOperationException("Due to potential truncation issues, operation not permitted on variable length fields"); + throw new UnsupportedOperationException( + "Due to potential truncation issues, operation not permitted on variable length fields"); } - + if (startValue == null) { throw new IllegalArgumentException("starting index value required"); } - min = minValue != null ? - IndexField.getIndexField(minValue, Long.MIN_VALUE) : null; - max = maxValue != null ? - IndexField.getIndexField(maxValue, Long.MAX_VALUE) : null; - - IndexField start = IndexField.getIndexField(startValue, before ? 
Long.MIN_VALUE : Long.MAX_VALUE); + min = minValue != null ? IndexField.getIndexField(minValue, Long.MIN_VALUE) : null; + max = maxValue != null ? IndexField.getIndexField(maxValue, Long.MAX_VALUE) : null; + + IndexField start = + IndexField.getIndexField(startValue, before ? Long.MIN_VALUE : Long.MAX_VALUE); indexIterator = indexTable.fieldKeyIterator(min, max, start); - + if (indexIterator.hasNext()) { - IndexField f = (IndexField)indexIterator.next(); + IndexField f = (IndexField) indexIterator.next(); if (before || !f.getIndexField().equals(startValue)) { indexIterator.previous(); } } } + @Override public boolean hasNext() throws IOException { if (hasNext) return true; @@ -253,7 +253,7 @@ public class FieldIndexTable extends IndexTable { while (indexKey != null && indexKey.hasSameIndex(lastKey)) { if (++skipCnt > 10) { // Reinit iterator to skip large number of same index value - indexIterator = indexTable.fieldKeyIterator(min, max, + indexIterator = indexTable.fieldKeyIterator(min, max, IndexField.getIndexField(indexKey.getIndexField(), Long.MAX_VALUE)); skipCnt = 0; } @@ -267,6 +267,7 @@ public class FieldIndexTable extends IndexTable { return true; } + @Override public boolean hasPrevious() throws IOException { if (hasPrev) return true; @@ -276,7 +277,7 @@ public class FieldIndexTable extends IndexTable { while (indexKey != null && indexKey.hasSameIndex(lastKey)) { if (++skipCnt > 10) { // Reinit iterator to skip large number of same index value - indexIterator = indexTable.fieldKeyIterator(min, max, + indexIterator = indexTable.fieldKeyIterator(min, max, IndexField.getIndexField(indexKey.getIndexField(), Long.MIN_VALUE)); skipCnt = 0; } @@ -290,6 +291,7 @@ public class FieldIndexTable extends IndexTable { return true; } + @Override public Field next() throws IOException { if (hasNext || hasNext()) { hasNext = false; @@ -300,7 +302,8 @@ public class FieldIndexTable extends IndexTable { } return null; } - + + @Override public Field previous() throws IOException { if (hasPrev || hasPrevious()) { hasNext = true; @@ -317,6 +320,7 @@ public class FieldIndexTable extends IndexTable { * index value (lastKey). * @see db.DBFieldIterator#delete() */ + @Override public boolean delete() throws IOException { if (lastKey == null) return false; @@ -335,7 +339,7 @@ public class FieldIndexTable extends IndexTable { * @see ghidra.framework.store.db.IndexTable#hasRecord(ghidra.framework.store.db.Field) */ @Override - boolean hasRecord(Field field) throws IOException { + boolean hasRecord(Field field) throws IOException { IndexField indexField = IndexField.getIndexField(field, Long.MIN_VALUE); DBFieldIterator iter = indexTable.fieldKeyIterator(indexField); while (iter.hasNext()) { @@ -362,10 +366,10 @@ public class FieldIndexTable extends IndexTable { * @throws IOException thrown if IO error occurs */ @Override - DBLongIterator keyIterator() throws IOException { - return new PrimaryKeyIterator(); + DBLongIterator keyIterator() throws IOException { + return new PrimaryKeyIterator(); } - + /** * Iterate over all primary keys sorted based upon the associated index key. 
* The iterator is initially positioned before the first index buffer whose index key @@ -375,10 +379,10 @@ public class FieldIndexTable extends IndexTable { * @throws IOException thrown if IO error occurs */ @Override - DBLongIterator keyIteratorBefore(Field startField) throws IOException { - return new PrimaryKeyIterator(startField, false); + DBLongIterator keyIteratorBefore(Field startField) throws IOException { + return new PrimaryKeyIterator(startField, false); } - + /** * Iterate over all primary keys sorted based upon the associated index key. * The iterator is initially positioned after the index buffer whose index key @@ -389,10 +393,10 @@ public class FieldIndexTable extends IndexTable { * @throws IOException thrown if IO error occurs */ @Override - DBLongIterator keyIteratorAfter(Field startField) throws IOException { - return new PrimaryKeyIterator(startField, true); + DBLongIterator keyIteratorAfter(Field startField) throws IOException { + return new PrimaryKeyIterator(startField, true); } - + /** * Iterate over all primary keys sorted based upon the associated index key. * The iterator is initially positioned before the primaryKey within the index buffer @@ -404,10 +408,10 @@ public class FieldIndexTable extends IndexTable { * @throws IOException thrown if IO error occurs */ @Override - DBLongIterator keyIteratorBefore(Field startField, long primaryKey) throws IOException { - return new PrimaryKeyIterator(null, null, startField, primaryKey, false); + DBLongIterator keyIteratorBefore(Field startField, long primaryKey) throws IOException { + return new PrimaryKeyIterator(null, null, startField, primaryKey, false); } - + /** * Iterate over all primary keys sorted based upon the associated index key. * The iterator is initially positioned after the primaryKey within the index buffer @@ -419,10 +423,10 @@ public class FieldIndexTable extends IndexTable { * @throws IOException thrown if IO error occurs */ @Override - DBLongIterator keyIteratorAfter(Field startField, long primaryKey) throws IOException { - return new PrimaryKeyIterator(null, null, startField, primaryKey, true); + DBLongIterator keyIteratorAfter(Field startField, long primaryKey) throws IOException { + return new PrimaryKeyIterator(null, null, startField, primaryKey, true); } - + /** * Iterate over all primary keys sorted based upon the associated index key. * The iterator is limited to range of index keys of startField through endField, inclusive. @@ -438,20 +442,22 @@ public class FieldIndexTable extends IndexTable { * @throws IOException thrown if IO error occurs */ @Override - DBLongIterator keyIterator(Field startField, Field endField, boolean atStart) throws IOException { - return new PrimaryKeyIterator(startField, endField, atStart ? startField : endField, - atStart ? Long.MIN_VALUE : Long.MAX_VALUE, !atStart); + DBLongIterator keyIterator(Field startField, Field endField, boolean atStart) + throws IOException { + return new PrimaryKeyIterator(startField, endField, atStart ? startField : endField, + atStart ? Long.MIN_VALUE : Long.MAX_VALUE, !atStart); } - + /** * @see db.IndexTable#keyIterator(db.Field, db.Field, db.Field, boolean) */ @Override - DBLongIterator keyIterator(Field minField, Field maxField, Field startField, boolean before) throws IOException { - return new PrimaryKeyIterator(minField, maxField, startField, - before ? 
Long.MIN_VALUE : Long.MAX_VALUE, !before); + DBLongIterator keyIterator(Field minField, Field maxField, Field startField, boolean before) + throws IOException { + return new PrimaryKeyIterator(minField, maxField, startField, + before ? Long.MIN_VALUE : Long.MAX_VALUE, !before); } - + /** * Iterates over primary keys which correspond to index field values within a specified range. * NOTE: Primary keys corresponding to index fields which have been truncated may be returned out of order. @@ -461,19 +467,19 @@ public class FieldIndexTable extends IndexTable { private IndexField min; private IndexField max; private DBFieldIterator indexIterator; - + private boolean hasNext = false; private boolean hasPrev = false; private IndexField key; private IndexField lastKey; - + /** * Construct a key iterator starting with the minimum secondary key. */ PrimaryKeyIterator() throws IOException { indexIterator = indexTable.fieldKeyIterator(); } - + /** * Construct a key iterator. The iterator is positioned immediately before * the key associated with the first occurance of the startValue. @@ -484,7 +490,7 @@ public class FieldIndexTable extends IndexTable { PrimaryKeyIterator(Field startValue, boolean after) throws IOException { this(null, null, startValue, after ? Long.MAX_VALUE : Long.MIN_VALUE, after); } - + /** * Construct a key iterator. The iterator is positioned immediately before * or after the key associated with the specified startValue/primaryKey. @@ -497,18 +503,15 @@ public class FieldIndexTable extends IndexTable { * otherwise immediately before. * @throws IOException */ - PrimaryKeyIterator(Field minValue, Field maxValue, Field startValue, - long primaryKey, boolean after) throws IOException - { - min = minValue != null ? - IndexField.getIndexField(minValue, Long.MIN_VALUE) : null; - max = maxValue != null ? - IndexField.getIndexField(maxValue, Long.MAX_VALUE) : null; - IndexField start = startValue != null ? - IndexField.getIndexField(startValue, primaryKey) : null; - + PrimaryKeyIterator(Field minValue, Field maxValue, Field startValue, long primaryKey, + boolean after) throws IOException { + min = minValue != null ? IndexField.getIndexField(minValue, Long.MIN_VALUE) : null; + max = maxValue != null ? IndexField.getIndexField(maxValue, Long.MAX_VALUE) : null; + IndexField start = + startValue != null ? IndexField.getIndexField(startValue, primaryKey) : null; + indexIterator = indexTable.fieldKeyIterator(min, max, start); - + if (indexIterator.hasNext()) { Field f = indexIterator.next(); if (!after || !f.equals(start)) { @@ -516,7 +519,7 @@ public class FieldIndexTable extends IndexTable { } } } - + /** * If min or max index values was truncated, a comparison of the actual * indexed field value (i.e., primary table value) is done with the min and/or max values. 
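The PrimaryKeyIterator constructor above builds its bounds by pairing each index value with a Long.MIN_VALUE or Long.MAX_VALUE primary key, so the composite keys bracket every real entry that shares that index value. A self-contained sketch of that ordering idea, using plain Java stand-ins rather than the package-private db.IndexField class:

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    public class CompositeIndexKeySketch {
        // Stand-in for an index table key: (indexed value, primary key).
        record Entry(String indexValue, long primaryKey) {}

        public static void main(String[] args) {
            Comparator<Entry> order = Comparator.comparing(Entry::indexValue)
                    .thenComparingLong(Entry::primaryKey);

            List<Entry> index = new ArrayList<>(List.of(
                new Entry("foo", 7), new Entry("foo", 3), new Entry("bar", 9)));
            index.sort(order); // [bar/9, foo/3, foo/7]

            // Sentinel composites bracket every real "foo" entry, which is how a
            // range over a single index value can be expressed as two keys.
            Entry low = new Entry("foo", Long.MIN_VALUE);   // sorts before foo/3
            Entry high = new Entry("foo", Long.MAX_VALUE);  // sorts after foo/7
            System.out.println(order.compare(low, index.get(1)) < 0);  // true
            System.out.println(order.compare(high, index.get(2)) > 0); // true
        }
    }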
@@ -550,6 +553,7 @@ public class FieldIndexTable extends IndexTable { /* (non-Javadoc) * @see ghidra.framework.store.db.DBLongIterator#hasNext() */ + @Override public boolean hasNext() throws IOException { if (hasNext) { return true; @@ -568,6 +572,7 @@ public class FieldIndexTable extends IndexTable { /* (non-Javadoc) * @see ghidra.framework.store.db.DBLongIterator#hasPrevious() */ + @Override public boolean hasPrevious() throws IOException { if (hasPrev) { return true; @@ -586,6 +591,7 @@ public class FieldIndexTable extends IndexTable { /* (non-Javadoc) * @see ghidra.framework.store.db.DBLongIterator#next() */ + @Override public long next() throws IOException { if (hasNext()) { lastKey = key; @@ -598,6 +604,7 @@ public class FieldIndexTable extends IndexTable { /* (non-Javadoc) * @see ghidra.framework.store.db.DBLongIterator#previous() */ + @Override public long previous() throws IOException { if (hasPrevious()) { lastKey = key; @@ -610,6 +617,7 @@ public class FieldIndexTable extends IndexTable { /* (non-Javadoc) * @see ghidra.framework.store.db.DBLongIterator#delete() */ + @Override public boolean delete() throws IOException { if (lastKey != null) { long primaryKey = lastKey.getPrimaryKey(); diff --git a/Ghidra/Framework/DB/src/main/java/db/IndexTable.java b/Ghidra/Framework/DB/src/main/java/db/IndexTable.java index 2722efccd3..4969dc0bdc 100644 --- a/Ghidra/Framework/DB/src/main/java/db/IndexTable.java +++ b/Ghidra/Framework/DB/src/main/java/db/IndexTable.java @@ -1,6 +1,5 @@ /* ### * IP: GHIDRA - * REVIEWED: YES * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,13 +15,13 @@ */ package db; +import java.io.IOException; +import java.util.NoSuchElementException; + import ghidra.util.exception.AssertException; import ghidra.util.exception.CancelledException; import ghidra.util.task.TaskMonitor; -import java.io.IOException; -import java.util.NoSuchElementException; - /** * IndexTable maintains a secondary index within a private Table instance. * This index facilitates the indexing of non-unique secondary keys within a @@ -213,12 +212,12 @@ abstract class IndexTable { * by minField and maxField. Index values are returned in an ascending sorted order. * @param minField minimum index column value, if null absolute minimum is used * @param maxField maximum index column value, if null absolute maximum is used - * @param atStart if true initial position is before minField, else position + * @param before if true initial position is before minField, else position * is after endField * @return index field iterator. * @throws IOException */ - abstract DBFieldIterator indexIterator(Field minField, Field maxField, boolean atMin) + abstract DBFieldIterator indexIterator(Field minField, Field maxField, boolean before) throws IOException; /** @@ -307,14 +306,14 @@ abstract class IndexTable { * buffer whose index key is less than or equal to the specified maxField value. * @param minField minimum index key value * @param maxField maximum index key value - * @param atMin if true, position iterator before minField value, + * @param before if true, position iterator before minField value, * Otherwise, position iterator after maxField value. * @return primary key iterator * @throws IOException thrown if IO error occurs */ - DBLongIterator keyIterator(Field minField, Field maxField, boolean atMin) throws IOException { - return new PrimaryKeyIterator(minField, maxField, atMin ? minField : maxField, - atMin ? 
Long.MIN_VALUE : Long.MAX_VALUE, !atMin); + DBLongIterator keyIterator(Field minField, Field maxField, boolean before) throws IOException { + return new PrimaryKeyIterator(minField, maxField, before ? minField : maxField, + before ? Long.MIN_VALUE : Long.MAX_VALUE, !before); } /** @@ -324,14 +323,14 @@ abstract class IndexTable { * @param minField minimum index key value * @param maxField maximum index key value * @param startField starting indexed value position - * @param before if true positioned before startField value, else positioned after + * @param before if true positioned before startField value, else positioned after maxField value * @return primary key iterator * @throws IOException thrown if IO error occurs */ DBLongIterator keyIterator(Field minField, Field maxField, Field startField, boolean before) throws IOException { - return new PrimaryKeyIterator(minField, maxField, startField, before ? Long.MIN_VALUE - : Long.MAX_VALUE, !before); + return new PrimaryKeyIterator(minField, maxField, startField, + before ? Long.MIN_VALUE : Long.MAX_VALUE, !before); } /** @@ -407,6 +406,7 @@ abstract class IndexTable { } } + @Override public boolean hasNext() throws IOException { if (hasNext) { return true; @@ -491,6 +491,7 @@ abstract class IndexTable { } } + @Override public boolean hasPrevious() throws IOException { if (hasPrev) { return true; @@ -574,6 +575,7 @@ abstract class IndexTable { } } + @Override public long next() throws IOException { if (hasNext || hasNext()) { long key = indexBuffer.getPrimaryKey(index); @@ -585,6 +587,7 @@ abstract class IndexTable { throw new NoSuchElementException(); } + @Override public long previous() throws IOException { if (hasPrev || hasPrevious()) { long key = indexBuffer.getPrimaryKey(index); @@ -601,6 +604,7 @@ abstract class IndexTable { * after each record deletion. * @see db.DBLongIterator#delete() */ + @Override public boolean delete() throws IOException { if (lastKey == null) return false; diff --git a/Ghidra/Framework/DB/src/main/java/db/LongKeyNode.java b/Ghidra/Framework/DB/src/main/java/db/LongKeyNode.java index bc2daeb6b8..1bf69e8404 100644 --- a/Ghidra/Framework/DB/src/main/java/db/LongKeyNode.java +++ b/Ghidra/Framework/DB/src/main/java/db/LongKeyNode.java @@ -1,6 +1,5 @@ /* ### * IP: GHIDRA - * REVIEWED: YES * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -63,16 +62,12 @@ abstract class LongKeyNode implements BTreeNode { nodeMgr.addNode(this); } - /* - * @see ghidra.framework.store.db.BTreeNode#getBufferId() - */ + @Override public int getBufferId() { return buffer.getId(); } - /* - * @see ghidra.framework.store.db.BTreeNode#getBuffer() - */ + @Override public DataBuffer getBuffer() { return buffer; } @@ -88,16 +83,12 @@ abstract class LongKeyNode implements BTreeNode { return this; } - /* - * @see ghidra.framework.store.db.BTreeNode#getKeyCount() - */ + @Override public int getKeyCount() { return keyCount; } - /* - * @see ghidra.framework.store.db.BTreeNode#setKeyCount(int) - */ + @Override public void setKeyCount(int cnt) { keyCount = cnt; buffer.putInt(KEY_COUNT_OFFSET, keyCount); @@ -118,9 +109,4 @@ abstract class LongKeyNode implements BTreeNode { */ abstract LongKeyRecordNode getLeafNode(long key) throws IOException; - /* - * @see ghidra.framework.store.db.BTreeNode#delete() - */ - public abstract void delete() throws IOException; - } diff --git a/Ghidra/Framework/DB/src/main/java/db/Table.java b/Ghidra/Framework/DB/src/main/java/db/Table.java index 45e32e1e42..73c1a62097 100644 --- a/Ghidra/Framework/DB/src/main/java/db/Table.java +++ b/Ghidra/Framework/DB/src/main/java/db/Table.java @@ -43,7 +43,7 @@ public class Table { private int recordCount; private long maximumKey; - private IntObjectHashtable secondaryIndexes = new IntObjectHashtable(); + private IntObjectHashtable secondaryIndexes = new IntObjectHashtable<>(); private int[] indexedColumns = new int[0]; private boolean isIndexed = false; @@ -341,8 +341,8 @@ public class Table { tableRecord.setRecordCount(actualCount); if (!isConsistent(monitor)) { - throw new IOException("Consistency check failed after rebuilding table " + - getName()); + throw new IOException( + "Consistency check failed after rebuilding table " + getName()); } } } @@ -357,8 +357,8 @@ public class Table { return isConsistent(null, monitor); } - boolean isConsistent(String indexName, TaskMonitor monitor) throws IOException, - CancelledException { + boolean isConsistent(String indexName, TaskMonitor monitor) + throws IOException, CancelledException { synchronized (db) { if (rootBufferId < 0) @@ -396,7 +396,7 @@ public class Table { consistent &= indexTable.isConsistent(monitor); } - HashMap missingIndexRecMap = new HashMap(); + HashMap missingIndexRecMap = new HashMap<>(); int actualCount = 0; RecordIterator recIter = iterator(); while (recIter.hasNext()) { @@ -407,7 +407,8 @@ public class Table { for (int indexedColumn : indexedColumns) { IndexTable indexTable = secondaryIndexes.get(indexedColumn); boolean found = false; - for (long key : indexTable.findPrimaryKeys(rec.getField(indexTable.getColumnIndex()))) { + for (long key : indexTable.findPrimaryKeys( + rec.getField(indexTable.getColumnIndex()))) { if (key == rec.getKey()) { found = true; break; @@ -437,8 +438,8 @@ public class Table { } for (int indexCol : missingIndexRecMap.keySet()) { int missing = missingIndexRecMap.get(indexCol); - logIndexConsistencyError(schema.getFieldNames()[indexCol], "Index is missing " + - missing + " record references"); + logIndexConsistencyError(schema.getFieldNames()[indexCol], + "Index is missing " + missing + " record references"); } // Check for bad index tables (missing or invalid entries) @@ -448,7 +449,7 @@ public class Table { monitor.setMessage("Check Index " + getName() + "." 
+ schema.getFieldNames()[indexTable.getColumnIndex()]); - HashSet keySet = new HashSet(); + HashSet keySet = new HashSet<>(); int extra = 0; DBLongIterator keyIterator = indexTable.keyIterator(); while (keyIterator.hasNext()) { @@ -573,8 +574,10 @@ public class Table { } /** - * Get the maximum record key which has been assigned within this table. - * This method is only valid for those tables which employ a long key. + * Get the maximum record key which has ever been assigned within this table. + * This method is only valid for those tables which employ a long key and may + * not reflect records which have been removed (i.e., returned key may not + * correspond to an existing record). * @return maximum record key. */ public long getMaxKey() { @@ -1428,19 +1431,19 @@ public class Table { * by minField and maxField. Index values are returned in an ascending sorted order. * @param minField minimum index column value, if null absolute minimum is used * @param maxField maximum index column value, if null absolute maximum is used - * @param atStart if true initial position is before minField, else position - * is after endField + * @param before if true initial position is before minField, else position + * is after maxField * @param columnIndex identifies an indexed column. * @return index field iterator. * @throws IOException */ - public DBFieldIterator indexFieldIterator(Field minField, Field maxField, boolean atMin, + public DBFieldIterator indexFieldIterator(Field minField, Field maxField, boolean before, int columnIndex) throws IOException { synchronized (db) { IndexTable indexTable = secondaryIndexes.get(columnIndex); if (indexTable == null) throw new IOException("Index required (" + getName() + "," + columnIndex + ")"); - return indexTable.indexIterator(minField, maxField, atMin); + return indexTable.indexIterator(minField, maxField, before); } } @@ -1506,8 +1509,8 @@ public class Table { IndexTable indexTable = secondaryIndexes.get(columnIndex); if (indexTable == null) throw new IOException("Index required (" + getName() + "," + columnIndex + ")"); - return new KeyToRecordIterator(this, indexTable.keyIterator(startValue, endValue, - atStart)); + return new KeyToRecordIterator(this, + indexTable.keyIterator(startValue, endValue, atStart)); } } @@ -1544,7 +1547,8 @@ public class Table { * @throws IOException if a secondary index does not exist for the specified * column, or the wrong field type was specified, or an I/O error occurs. */ - public RecordIterator indexIteratorBefore(int columnIndex, Field startValue) throws IOException { + public RecordIterator indexIteratorBefore(int columnIndex, Field startValue) + throws IOException { synchronized (db) { IndexTable indexTable = secondaryIndexes.get(columnIndex); if (indexTable == null) @@ -1600,8 +1604,8 @@ public class Table { IndexTable indexTable = secondaryIndexes.get(columnIndex); if (indexTable == null) throw new IOException("Index required (" + getName() + "," + columnIndex + ")"); - return new KeyToRecordIterator(this, indexTable.keyIteratorBefore(startValue, - primaryKey)); + return new KeyToRecordIterator(this, + indexTable.keyIteratorBefore(startValue, primaryKey)); } } @@ -1896,9 +1900,8 @@ public class Table { /** * Iterate over the primary keys in ascending sorted order - * starting at the specified startKey. If startKey is null, the - * starting position will be before the minimum key. - * @param startKey the first primary key, may be null. + * starting at the specified startKey. + * @param startKey the first primary key. 
If null the minimum key value will be assumed. * @return Field type key iterator * @throws IOException if an I/O error occurs. */ @@ -1912,11 +1915,11 @@ public class Table { /** * Iterate over the records in ascending sorted order - * starting at the specified startKey. If startKey is null, the - * starting position will be set to minKey. - * @param minKey the minimum primary key, may be null. - * @param endKey the maximum primary key, may be null. - * @param startKey the initial iterator position, if null minKey is also start. + * starting at the specified startKey. + * @param minKey minimum key value. Null corresponds to minimum key value. + * @param maxKey maximum key value. Null corresponds to maximum key value. + * @param startKey the initial iterator position. If null minKey will be assumed, + * if still null the minimum key value will be assumed. * @return Field type key iterator * @throws IOException if an I/O error occurs. */ @@ -1929,6 +1932,25 @@ public class Table { } } + /** + * Iterate over the records in ascending sorted order + * starting at the specified startKey. + * @param minKey minimum key value. Null corresponds to minimum key value. + * @param maxKey maximum key value. Null corresponds to maximum key value. + * @param before if true initial position is before minKey, else position + * is after maxKey. + * @return Field type key iterator + * @throws IOException if an I/O error occurs. + */ + public DBFieldIterator fieldKeyIterator(Field minKey, Field maxKey, boolean before) + throws IOException { + synchronized (db) { + if (useLongKeyNodes) + throw new AssertException(); + return new VarKeyIterator(minKey, maxKey, before); + } + } + /** * A RecordIterator class for use with table data contained within LeafNode's. */ @@ -2319,23 +2341,20 @@ public class Table { recordIndex = -(recordIndex + 1); if (recordIndex == leaf.keyCount) { --recordIndex; - hasPrev = - minKey == null ? true - : (leaf.getKey(recordIndex).compareTo(minKey) >= 0); + hasPrev = minKey == null ? true + : (leaf.getKey(recordIndex).compareTo(minKey) >= 0); if (!hasPrev) { leaf = leaf.getNextLeaf(); if (leaf == null) return; recordIndex = 0; - hasNext = - maxKey == null ? true : (leaf.getKey(recordIndex).compareTo( - maxKey) <= 0); + hasNext = maxKey == null ? true + : (leaf.getKey(recordIndex).compareTo(maxKey) <= 0); } } else { - hasNext = - maxKey == null ? true - : (leaf.getKey(recordIndex).compareTo(maxKey) <= 0); + hasNext = maxKey == null ? true + : (leaf.getKey(recordIndex).compareTo(maxKey) <= 0); if (!hasNext) { // position to previous record if (recordIndex == 0) { @@ -2347,9 +2366,8 @@ public class Table { else { --recordIndex; } - hasPrev = - minKey == null ? true : (leaf.getKey(recordIndex).compareTo( - minKey) >= 0); + hasPrev = minKey == null ? true + : (leaf.getKey(recordIndex).compareTo(minKey) >= 0); } } } @@ -2463,9 +2481,8 @@ public class Table { // Load next record Record nextRecord = leaf.getRecord(schema, nextIndex); - hasNext = - maxKey == null ? true - : (nextRecord.getKeyField().compareTo(maxKey) <= 0); + hasNext = maxKey == null ? true + : (nextRecord.getKeyField().compareTo(maxKey) <= 0); if (hasNext) { bufferId = nextBufferId; recordIndex = nextIndex; @@ -2513,9 +2530,8 @@ public class Table { // Load previous record Record prevRecord = leaf.getRecord(schema, prevIndex); - hasPrev = - minKey == null ? true - : (prevRecord.getKeyField().compareTo(minKey) >= 0); + hasPrev = minKey == null ? 
true + : (prevRecord.getKeyField().compareTo(minKey) >= 0); if (hasPrev) { bufferId = prevBufferId; recordIndex = prevIndex; @@ -2874,8 +2890,8 @@ public class Table { try { if (bufferId == -1) return false; - LongKeyRecordNode leaf = - ((LongKeyRecordNode) nodeMgr.getLongKeyNode(bufferId)).getNextLeaf(); + LongKeyRecordNode leaf = ((LongKeyRecordNode) nodeMgr.getLongKeyNode( + bufferId)).getNextLeaf(); if (leaf == null || leaf.getKey(0) > maxKey) return false; getKeys(leaf); @@ -2923,8 +2939,8 @@ public class Table { try { if (bufferId == -1) return false; - LongKeyRecordNode leaf = - ((LongKeyRecordNode) nodeMgr.getLongKeyNode(bufferId)).getPreviousLeaf(); + LongKeyRecordNode leaf = ((LongKeyRecordNode) nodeMgr.getLongKeyNode( + bufferId)).getPreviousLeaf(); if (leaf == null) return false; prevIndex = leaf.keyCount - 1; @@ -3305,18 +3321,41 @@ public class Table { /** * Construct a record iterator. - * @param minKey minimum allowed primary key. - * @param maxKey maximum allowed primary key. - * @param startKey the first primary key value. + * @param minKey minimum key value. Null corresponds to minimum key value. + * @param maxKey maximum key value. Null corresponds to maximum key value. + * @param startKey the first primary key value. If null minKey will be assumed, + * if still null the minimum indexed value will be assumed. * @throws IOException */ VarKeyIterator(Field minKey, Field maxKey, Field startKey) throws IOException { keyIter = new VarKeyIterator2(minKey, maxKey, startKey); } - /* - * @see ghidra.framework.store.db.DBFieldIterator#hasNext() + /** + * Construct a record iterator. + * @param minKey minimum key value. Null corresponds to minimum key value. + * @param maxKey maximum key value. Null corresponds to maximum key value. + * @param before + * @throws IOException */ + VarKeyIterator(Field minKey, Field maxKey, boolean before) throws IOException { + + Field startKey = before ? minKey : maxKey; + + if (startKey == null && !before && rootBufferId != -1) { + try { + VarKeyNode rightmostLeaf = + nodeMgr.getVarKeyNode(rootBufferId).getRightmostLeafNode(); + startKey = rightmostLeaf.getKey(rightmostLeaf.keyCount - 1); + } + finally { + nodeMgr.releaseNodes(); + } + } + + keyIter = new VarKeyIterator2(minKey, maxKey, startKey); + } + @Override public boolean hasNext() throws IOException { synchronized (db) { @@ -3330,9 +3369,6 @@ public class Table { } } - /* - * @see ghidra.framework.store.db.DBFieldIterator#hasPrevious() - */ @Override public boolean hasPrevious() throws IOException { synchronized (db) { @@ -3346,25 +3382,16 @@ public class Table { } } - /* - * @see ghidra.framework.store.db.DBFieldIterator#next() - */ @Override public Field next() throws IOException { return keyIter.next(); } - /* - * @see ghidra.framework.store.db.DBFieldIterator#previous() - */ @Override public Field previous() throws IOException { return keyIter.previous(); } - /* - * @see ghidra.framework.store.db.DBFieldIterator#delete() - */ @Override public boolean delete() throws IOException { return keyIter.delete(); @@ -3419,7 +3446,7 @@ public class Table { /** * Initialize (or re-initialize) iterator state. * An empty or null keys array will force a complete initialization. - * Otherwise, following the deletethe keys array and keyIndex should reflect the state + * Otherwise, following the delete the keys array and keyIndex should reflect the state * following a delete. * @param targetKey the initial key. For construction this is the startKey, * following a delete this is the deleted key. 
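A hedged sketch of driving the new public Table.fieldKeyIterator(Field, Field, boolean) overload introduced above. The helper name is hypothetical, and a Field-keyed (variable-length key) table is assumed, since the overload asserts against long-key tables.

    import java.io.IOException;

    import db.DBFieldIterator;
    import db.Field;
    import db.Table;

    public class ReverseKeyScanSketch {
        // Hypothetical helper: walk all primary keys of a Field-keyed table in
        // descending order. before=false positions the iterator after the maximum
        // key (via the new right-most leaf lookup), so previous() descends.
        static void dumpKeysDescending(Table table) throws IOException {
            DBFieldIterator keys = table.fieldKeyIterator(null, null, false);
            while (keys.hasPrevious()) {
                System.out.println(keys.previous());
            }
        }
    }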
@@ -3626,8 +3653,8 @@ public class Table { // Process previous leaf if needed if (prevIndex < 0) { try { - VarKeyRecordNode leaf = - ((VarKeyRecordNode) nodeMgr.getVarKeyNode(bufferId)).getPreviousLeaf(); + VarKeyRecordNode leaf = ((VarKeyRecordNode) nodeMgr.getVarKeyNode( + bufferId)).getPreviousLeaf(); if (leaf == null) return false; prevIndex = leaf.keyCount - 1; @@ -3737,9 +3764,10 @@ public class Table { /** * Construct a record iterator. - * @param minKey minimum allowed primary key. - * @param maxKey maximum allowed primary key. - * @param startKey the first primary key value. + * @param minKey minimum key value. Null corresponds to minimum key value. + * @param maxKey maximum key value. Null corresponds to maximum key value. + * @param startKey the first primary key value. If null minKey will be assumed, + * if still null the minimum indexed value will be assumed. * @throws IOException */ VarKeyIterator2(Field minKey, Field maxKey, Field startKey) throws IOException { @@ -3754,7 +3782,7 @@ public class Table { /** * Initialize (or re-initialize) iterator state. * An empty or null keys array will force a complete initialization. - * Otherwise, following the deletethe keys array and keyIndex should reflect the state + * Otherwise, following the delete the keys array and keyIndex should reflect the state * following a delete. * @param targetKey the initial key. For construction this is the startKey, * following a delete this is the deleted key. diff --git a/Ghidra/Framework/DB/src/main/java/db/VarIndexTable.java b/Ghidra/Framework/DB/src/main/java/db/VarIndexTable.java index 86f88aa48e..8831f0aa72 100644 --- a/Ghidra/Framework/DB/src/main/java/db/VarIndexTable.java +++ b/Ghidra/Framework/DB/src/main/java/db/VarIndexTable.java @@ -1,6 +1,5 @@ /* ### * IP: GHIDRA - * REVIEWED: YES * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -26,17 +25,14 @@ import java.io.IOException; * correspond to an index value. */ class VarIndexTable extends IndexTable { - - private static final Class[] fieldClasses = { - BinaryField.class, // index data + + private static final Class[] fieldClasses = { BinaryField.class, // index data }; - - private static final String[] fieldNames = { - "IndexBuffer" - }; - + + private static final String[] fieldNames = { "IndexBuffer" }; + private Schema indexSchema; - + /** * Construct a new secondary index which is based upon a field within the * primary table specified by name. @@ -46,14 +42,13 @@ class VarIndexTable extends IndexTable { * @throws IOException */ VarIndexTable(Table primaryTable, int colIndex) throws IOException { - this(primaryTable, - primaryTable.getDBHandle().getMasterTable().createTableRecord( - primaryTable.getName(), - new Schema(0, primaryTable.getSchema().getField(colIndex).getClass(), "IndexKey", fieldClasses, fieldNames), - colIndex) - ); + this(primaryTable, + primaryTable.getDBHandle().getMasterTable().createTableRecord(primaryTable.getName(), + new Schema(0, primaryTable.getSchema().getField(colIndex).getClass(), "IndexKey", + fieldClasses, fieldNames), + colIndex)); } - + /** * Construct a new or existing secondary index. An existing index must have * its' root ID specified within the tableRecord. @@ -65,7 +60,7 @@ class VarIndexTable extends IndexTable { super(primaryTable, indexTableRecord); this.indexSchema = indexTable.getSchema(); } - + /** * Find all primary keys which correspond to the specified indexed field * value. 
@@ -73,16 +68,16 @@ class VarIndexTable extends IndexTable { * @return list of primary keys */ @Override - long[] findPrimaryKeys(Field indexValue) throws IOException { + long[] findPrimaryKeys(Field indexValue) throws IOException { if (!indexValue.getClass().equals(fieldType.getClass())) throw new IllegalArgumentException("Incorrect indexed field type"); Record indexRecord = indexTable.getRecord(indexValue); if (indexRecord == null) return emptyKeyArray; IndexBuffer indexBuffer = new IndexBuffer(indexValue, indexRecord.getBinaryData(0)); - return indexBuffer.getPrimaryKeys(); + return indexBuffer.getPrimaryKeys(); } - + /** * Get the number of primary keys which correspond to the specified indexed field * value. @@ -90,21 +85,21 @@ class VarIndexTable extends IndexTable { * @return key count */ @Override - int getKeyCount(Field indexValue) throws IOException { + int getKeyCount(Field indexValue) throws IOException { if (!indexValue.getClass().equals(fieldType.getClass())) throw new IllegalArgumentException("Incorrect indexed field type"); Record indexRecord = indexTable.getRecord(indexValue); if (indexRecord == null) return 0; IndexBuffer indexBuffer = new IndexBuffer(indexValue, indexRecord.getBinaryData(0)); - return indexBuffer.keyCount; + return indexBuffer.keyCount; } - + /* * @see ghidra.framework.store.db.IndexTable#addEntry(ghidra.framework.store.db.Record) */ @Override - void addEntry(Record record) throws IOException { + void addEntry(Record record) throws IOException { Field indexField = record.getField(colIndex); Record indexRecord = indexTable.getRecord(indexField); if (indexRecord == null) { @@ -120,7 +115,7 @@ class VarIndexTable extends IndexTable { * @see ghidra.framework.store.db.IndexTable#deleteEntry(ghidra.framework.store.db.Record) */ @Override - void deleteEntry(Record record) throws IOException { + void deleteEntry(Record record) throws IOException { Field indexField = record.getField(colIndex); Record indexRecord = indexTable.getRecord(indexField); if (indexRecord != null) { @@ -136,7 +131,7 @@ class VarIndexTable extends IndexTable { } } } - + /** * Get the index buffer associated with the specified index key * @param indexKey index key @@ -147,84 +142,90 @@ class VarIndexTable extends IndexTable { Record indexRec = indexTable.getRecord(indexKey); return indexRec != null ? 
new IndexBuffer(indexKey, indexRec.getBinaryData(0)) : null; } - + /* * @see ghidra.framework.store.db.IndexTable#indexIterator() */ @Override - DBFieldIterator indexIterator() throws IOException { - return new IndexFieldIterator(); + DBFieldIterator indexIterator() throws IOException { + return new IndexVarFieldIterator(); } - + /* * @see ghidra.framework.store.db.IndexTable#indexIterator(ghidra.framework.store.db.Field, ghidra.framework.store.db.Field, boolean) */ @Override - DBFieldIterator indexIterator(Field minField, Field maxField, boolean atMin) throws IOException { - return new IndexFieldIterator(minField, maxField, atMin); + DBFieldIterator indexIterator(Field minField, Field maxField, boolean before) + throws IOException { + return new IndexVarFieldIterator(minField, maxField, before); } - + /* * @see db.IndexTable#indexIterator(db.Field, db.Field, db.Field, boolean) */ @Override - DBFieldIterator indexIterator(Field minField, Field maxField, Field startField, boolean before) throws IOException { - return new IndexFieldIterator(minField, maxField, startField, before); + DBFieldIterator indexIterator(Field minField, Field maxField, Field startField, boolean before) + throws IOException { + return new IndexVarFieldIterator(minField, maxField, startField, before); } - + /** * Iterates over index field values within a specified range. */ - class IndexFieldIterator implements DBFieldIterator { + class IndexVarFieldIterator implements DBFieldIterator { private Field lastKey; private Field keyField; private DBFieldIterator indexIterator; private boolean hasNext = false; private boolean hasPrev = false; - + /** * Construct an index field iterator starting with the minimum index value. */ - IndexFieldIterator() throws IOException { + IndexVarFieldIterator() throws IOException { this(null, null, true); } - + /** * Construct an index field iterator. The iterator is positioned at index * value identified by startValue. - * @param minValue minimum index value or null if no minimum - * @param maxValue maximum index value or null if no maximum - * @param startValue starting index value. + * @param minValue minimum index value. Null corresponds to minimum indexed value. + * @param maxValue maximum index value. Null corresponds to maximum indexed value. + * @param before if true initial position is before minValue, else position + * is after maxValue. * @throws IOException */ - IndexFieldIterator(Field minValue, Field maxValue, boolean atMin) throws IOException { + IndexVarFieldIterator(Field minValue, Field maxValue, boolean before) throws IOException { - Field start = atMin ? minValue : maxValue; + indexIterator = indexTable.fieldKeyIterator(minValue, maxValue, before); - indexIterator = indexTable.fieldKeyIterator(minValue, maxValue, start); - if (indexIterator.hasNext()) { indexIterator.next(); - if (atMin) { + if (before) { indexIterator.previous(); } } } - + /** - * @param minField - * @param maxField - * @param startField - * @param before + * Construct an index field iterator. The iterator is positioned at index + * value identified by startValue. + * @param minValue minimum index value. Null corresponds to minimum indexed value. + * @param maxValue maximum index value. Null corresponds to maximum indexed value. + * @param startValue identify initial position by value + * @param before if true initial position is before minValue, else position + * is after maxValue. 
+ * @throws IOException */ - IndexFieldIterator(Field minValue, Field maxValue, Field startValue, boolean before) throws IOException { - + IndexVarFieldIterator(Field minValue, Field maxValue, Field startValue, boolean before) + throws IOException { + if (startValue == null) { throw new IllegalArgumentException("starting index value required"); } indexIterator = indexTable.fieldKeyIterator(minValue, maxValue, startValue); - + if (indexIterator.hasNext()) { Field f = indexIterator.next(); if (before || !f.equals(startValue)) { @@ -233,6 +234,7 @@ class VarIndexTable extends IndexTable { } } + @Override public boolean hasNext() throws IOException { if (hasNext) return true; @@ -245,6 +247,7 @@ class VarIndexTable extends IndexTable { return true; } + @Override public boolean hasPrevious() throws IOException { if (hasPrev) return true; @@ -257,6 +260,7 @@ class VarIndexTable extends IndexTable { return true; } + @Override public Field next() throws IOException { if (hasNext || hasNext()) { hasNext = false; @@ -266,7 +270,8 @@ class VarIndexTable extends IndexTable { } return null; } - + + @Override public Field previous() throws IOException { if (hasPrev || hasPrevious()) { hasNext = true; @@ -282,6 +287,7 @@ class VarIndexTable extends IndexTable { * index value (lastKey). * @see db.DBFieldIterator#delete() */ + @Override public boolean delete() throws IOException { if (lastKey == null) return false; @@ -303,5 +309,5 @@ class VarIndexTable extends IndexTable { } } } - + } diff --git a/Ghidra/Framework/DB/src/main/java/db/VarKeyInteriorNode.java b/Ghidra/Framework/DB/src/main/java/db/VarKeyInteriorNode.java index a21824a093..7f9901dcd9 100644 --- a/Ghidra/Framework/DB/src/main/java/db/VarKeyInteriorNode.java +++ b/Ghidra/Framework/DB/src/main/java/db/VarKeyInteriorNode.java @@ -1,6 +1,5 @@ /* ### * IP: GHIDRA - * REVIEWED: YES * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,15 +15,14 @@ */ package db; +import java.io.IOException; + +import db.buffers.DataBuffer; import ghidra.util.Msg; import ghidra.util.exception.AssertException; import ghidra.util.exception.CancelledException; import ghidra.util.task.TaskMonitor; -import java.io.IOException; - -import db.buffers.DataBuffer; - /** * LongKeyInteriorNode stores a BTree node for use as an interior * node when searching for Table records within the database. 
This type of node @@ -91,8 +89,8 @@ class VarKeyInteriorNode extends VarKeyNode { } @Override - public boolean isConsistent(String tableName, TaskMonitor monitor) throws IOException, - CancelledException { + public boolean isConsistent(String tableName, TaskMonitor monitor) + throws IOException, CancelledException { boolean consistent = true; Field lastMinKey = null; Field lastMaxKey = null; @@ -103,19 +101,19 @@ class VarKeyInteriorNode extends VarKeyNode { if (i != 0) { if (key.compareTo(lastMinKey) <= 0) { consistent = false; - logConsistencyError(tableName, "child[" + i + "].minKey <= child[" + (i - 1) + - "].minKey", null); - Msg.debug(this, " child[" + i + "].minKey = " + key + " bufferID=" + - getBufferId(i)); + logConsistencyError(tableName, + "child[" + i + "].minKey <= child[" + (i - 1) + "].minKey", null); + Msg.debug(this, + " child[" + i + "].minKey = " + key + " bufferID=" + getBufferId(i)); Msg.debug(this, " child[" + (i - 1) + "].minKey = " + lastMinKey + " bufferID=" + getBufferId(i - 1)); } else if (key.compareTo(lastMaxKey) <= 0) { consistent = false; - logConsistencyError(tableName, "child[" + i + "].minKey <= child[" + (i - 1) + - "].maxKey", null); - Msg.debug(this, " child[" + i + "].minKey = " + key + " bufferID=" + - getBufferId(i)); + logConsistencyError(tableName, + "child[" + i + "].minKey <= child[" + (i - 1) + "].maxKey", null); + Msg.debug(this, + " child[" + i + "].minKey = " + key + " bufferID=" + getBufferId(i)); Msg.debug(this, " child[" + (i - 1) + "].maxKey = " + lastMaxKey + " bufferID=" + getBufferId(i - 1)); } @@ -150,10 +148,10 @@ class VarKeyInteriorNode extends VarKeyNode { Field childKey0 = node.getKey(0); if (!key.equals(childKey0)) { consistent = false; - logConsistencyError(tableName, "parent key entry mismatch with child[" + i + - "].minKey", null); - Msg.debug(this, " child[" + i + "].minKey = " + childKey0 + " bufferID=" + - getBufferId(i)); + logConsistencyError(tableName, + "parent key entry mismatch with child[" + i + "].minKey", null); + Msg.debug(this, + " child[" + i + "].minKey = " + childKey0 + " bufferID=" + getBufferId(i)); Msg.debug(this, " parent key entry = " + key); } @@ -506,9 +504,8 @@ class VarKeyInteriorNode extends VarKeyNode { } // New parent node becomes root - parent = - new VarKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), newNode.getKey(0), - newNode.getBufferId()); + parent = new VarKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), newNode.getKey(0), + newNode.getBufferId()); newNode.parent = parent; } @@ -598,9 +595,8 @@ class VarKeyInteriorNode extends VarKeyNode { } // New parent node becomes root - parent = - new VarKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), rightKey, - newNode.getBufferId()); + parent = new VarKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), rightKey, + newNode.getBufferId()); newNode.parent = parent; return parent; } @@ -624,6 +620,12 @@ class VarKeyInteriorNode extends VarKeyNode { return node.getLeftmostLeafNode(); } + @Override + VarKeyRecordNode getRightmostLeafNode() throws IOException { + VarKeyNode node = nodeMgr.getVarKeyNode(getBufferId(keyCount - 1)); + return node.getRightmostLeafNode(); + } + /** * Callback method allowing child node to remove itself from parent. 
* Rebalancing of the tree is performed if the interior node falls @@ -707,7 +709,8 @@ class VarKeyInteriorNode extends VarKeyNode { Field rightKey = rightNode.getKey(0); // Can right keys fit within left node - if ((rightKeySpace + (rightKeyCount * ENTRY_SIZE)) <= (len - BASE - leftKeySpace - (leftKeyCount * ENTRY_SIZE))) { + if ((rightKeySpace + (rightKeyCount * ENTRY_SIZE)) <= (len - BASE - leftKeySpace - + (leftKeyCount * ENTRY_SIZE))) { // Right node is elliminated and all entries stored in left node moveKeysLeft(leftNode, rightNode, rightKeyCount); nodeMgr.deleteNode(rightNode); @@ -845,6 +848,7 @@ class VarKeyInteriorNode extends VarKeyNode { /* * @see ghidra.framework.store.db.BTreeNode#getBufferReferences() */ + @Override public int[] getBufferReferences() { int[] ids = new int[keyCount]; for (int i = 0; i < keyCount; i++) { diff --git a/Ghidra/Framework/DB/src/main/java/db/VarKeyNode.java b/Ghidra/Framework/DB/src/main/java/db/VarKeyNode.java index 4ed15e7cf2..e54ea6eb69 100644 --- a/Ghidra/Framework/DB/src/main/java/db/VarKeyNode.java +++ b/Ghidra/Framework/DB/src/main/java/db/VarKeyNode.java @@ -1,6 +1,5 @@ /* ### * IP: GHIDRA - * REVIEWED: YES * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -32,8 +31,8 @@ abstract class VarKeyNode implements BTreeNode { private static final int KEY_TYPE_OFFSET = NodeMgr.NODE_HEADER_SIZE; private static final int KEY_COUNT_OFFSET = KEY_TYPE_OFFSET + KEY_TYPE_SIZE; - static final int VARKEY_NODE_HEADER_SIZE = NodeMgr.NODE_HEADER_SIZE + KEY_TYPE_SIZE + - KEY_COUNT_SIZE; + static final int VARKEY_NODE_HEADER_SIZE = + NodeMgr.NODE_HEADER_SIZE + KEY_TYPE_SIZE + KEY_COUNT_SIZE; protected final Field keyType; protected final int maxKeyLength; @@ -75,17 +74,11 @@ abstract class VarKeyNode implements BTreeNode { nodeMgr.addNode(this); } - /* - * @see ghidra.framework.store.db.BTreeNode#getBufferId() - */ @Override public int getBufferId() { return buffer.getId(); } - /* - * @see ghidra.framework.store.db.BTreeNode#getBuffer() - */ @Override public DataBuffer getBuffer() { return buffer; @@ -102,17 +95,11 @@ abstract class VarKeyNode implements BTreeNode { return this; } - /* - * @see ghidra.framework.store.db.BTreeNode#getKeyCount() - */ @Override public int getKeyCount() { return keyCount; } - /* - * @see ghidra.framework.store.db.BTreeNode#setKeyCount(int) - */ @Override public void setKeyCount(int cnt) { keyCount = cnt; @@ -142,10 +129,11 @@ abstract class VarKeyNode implements BTreeNode { */ abstract VarKeyRecordNode getLeftmostLeafNode() throws IOException; - /* - * @see ghidra.framework.store.db.BTreeNode#delete() + /** + * Get the right-most leaf node within the tree. + * @return right-most leaf node. + * @throws IOException thrown if IO error occurs */ - @Override - public abstract void delete() throws IOException; + abstract VarKeyRecordNode getRightmostLeafNode() throws IOException; } diff --git a/Ghidra/Framework/DB/src/main/java/db/VarKeyRecordNode.java b/Ghidra/Framework/DB/src/main/java/db/VarKeyRecordNode.java index 4b7e35b82c..351c8f1927 100644 --- a/Ghidra/Framework/DB/src/main/java/db/VarKeyRecordNode.java +++ b/Ghidra/Framework/DB/src/main/java/db/VarKeyRecordNode.java @@ -1,6 +1,5 @@ /* ### * IP: GHIDRA - * REVIEWED: YES * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
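For intuition only: the getRightmostLeafNode() methods added in this change descend into each interior node's last child until a leaf is reached, mirroring the existing leftmost-leaf lookup. A toy, self-contained illustration of that descent (not the db node classes):

    public class RightmostLeafSketch {
        // Toy B-tree node: interior nodes hold children, leaves hold sorted keys.
        static class Node {
            Node[] children; // null for leaf nodes
            long[] keys;     // populated for leaf nodes

            boolean isLeaf() {
                return children == null;
            }

            // Follow the last child pointer down to a leaf, analogous to
            // VarKeyInteriorNode.getRightmostLeafNode() above.
            Node rightmostLeaf() {
                Node node = this;
                while (!node.isLeaf()) {
                    node = node.children[node.children.length - 1];
                }
                return node;
            }

            // The tree's maximum key is the last key of that leaf; the new
            // VarKeyIterator(minKey, maxKey, before) constructor uses this to
            // position an iterator after the maximum when no start key is given.
            long maxKey() {
                Node leaf = rightmostLeaf();
                return leaf.keys[leaf.keys.length - 1];
            }
        }
    }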
@@ -16,16 +15,15 @@ */ package db; +import java.io.IOException; + +import db.buffers.DataBuffer; import ghidra.util.Msg; import ghidra.util.datastruct.IntArrayList; import ghidra.util.exception.AssertException; import ghidra.util.exception.CancelledException; import ghidra.util.task.TaskMonitor; -import java.io.IOException; - -import db.buffers.DataBuffer; - /** * LongKeyRecordNode is an implementation of a BTree leaf node * which utilizes variable-length key values and stores variable-length records. @@ -40,19 +38,19 @@ import db.buffers.DataBuffer; * whose 4-byte integer buffer ID has been stored within this leaf at the record offset. */ class VarKeyRecordNode extends VarKeyNode { - + private static final int ID_SIZE = 4; - + private static final int PREV_LEAF_ID_OFFSET = VARKEY_NODE_HEADER_SIZE; private static final int NEXT_LEAF_ID_OFFSET = PREV_LEAF_ID_OFFSET + ID_SIZE; - - static final int HEADER_SIZE = VARKEY_NODE_HEADER_SIZE + 2*ID_SIZE; - + + static final int HEADER_SIZE = VARKEY_NODE_HEADER_SIZE + 2 * ID_SIZE; + private static final int OFFSET_SIZE = 4; private static final int INDIRECT_OPTION_SIZE = 1; private static final int ENTRY_SIZE = OFFSET_SIZE + INDIRECT_OPTION_SIZE; - + /** * Construct an existing variable-length-key record leaf node. * @param nodeMgr table node manager instance @@ -61,7 +59,7 @@ class VarKeyRecordNode extends VarKeyNode { VarKeyRecordNode(NodeMgr nodeMgr, DataBuffer buf) { super(nodeMgr, buf); } - + /** * Construct a new variable-length-key record leaf node. * @param nodeMgr table node manager. @@ -70,14 +68,15 @@ class VarKeyRecordNode extends VarKeyNode { * @param keyType key Field type * @throws IOException thrown if IO error occurs */ - VarKeyRecordNode(NodeMgr nodeMgr, int prevLeafId, int nextLeafId, Field keyType) throws IOException { + VarKeyRecordNode(NodeMgr nodeMgr, int prevLeafId, int nextLeafId, Field keyType) + throws IOException { super(nodeMgr, NodeMgr.VARKEY_REC_NODE, keyType); - + // Initialize header buffer.putInt(PREV_LEAF_ID_OFFSET, prevLeafId); buffer.putInt(NEXT_LEAF_ID_OFFSET, nextLeafId); } - + /** * Construct a new variable-length-key record leaf node with no siblings. * @param nodeMgr table node manager. 
@@ -86,12 +85,12 @@ class VarKeyRecordNode extends VarKeyNode { */ VarKeyRecordNode(NodeMgr nodeMgr, Field keyType) throws IOException { super(nodeMgr, NodeMgr.VARKEY_REC_NODE, keyType); - + // Initialize header buffer.putInt(PREV_LEAF_ID_OFFSET, -1); buffer.putInt(NEXT_LEAF_ID_OFFSET, -1); } - + void logConsistencyError(String tableName, String msg, Throwable t) throws IOException { Msg.debug(this, "Consistency Error (" + tableName + "): " + msg); Msg.debug(this, " bufferID=" + getBufferId() + " key[0]=" + getKey(0)); @@ -99,9 +98,10 @@ class VarKeyRecordNode extends VarKeyNode { Msg.error(this, "Consistency Error (" + tableName + ")", t); } } - + @Override - public boolean isConsistent(String tableName, TaskMonitor monitor) throws IOException, CancelledException { + public boolean isConsistent(String tableName, TaskMonitor monitor) + throws IOException, CancelledException { boolean consistent = true; Field prevKey = null; for (int i = 0; i < keyCount; i++) { @@ -110,14 +110,14 @@ class VarKeyRecordNode extends VarKeyNode { if (i != 0) { if (key.compareTo(prevKey) <= 0) { consistent = false; - logConsistencyError(tableName, "key[" + i + "] <= key[" + (i-1) + "]", null); + logConsistencyError(tableName, "key[" + i + "] <= key[" + (i - 1) + "]", null); Msg.debug(this, " key[" + i + "].minKey = " + key); - Msg.debug(this, " key[" + (i-1) + "].minKey = " + prevKey); + Msg.debug(this, " key[" + (i - 1) + "].minKey = " + prevKey); } } prevKey = key; } - + if ((parent == null || parent.isLeftmostKey(getKey(0))) && getPreviousLeaf() != null) { consistent = false; logConsistencyError(tableName, "previous-leaf should not exist", null); @@ -141,27 +141,33 @@ class VarKeyRecordNode extends VarKeyNode { consistent = false; logConsistencyError(tableName, "this leaf is not linked to next-leaf", null); } - + return consistent; } - + /* * @see ghidra.framework.store.db.VarKeyNode#getLeafNode(long) */ @Override - VarKeyRecordNode getLeafNode(Field key) throws IOException { + VarKeyRecordNode getLeafNode(Field key) throws IOException { return this; } - + /* * @see ghidra.framework.store.db2.VarKeyNode#getLeftmostLeafNode() */ @Override - VarKeyRecordNode getLeftmostLeafNode() throws IOException { + VarKeyRecordNode getLeftmostLeafNode() throws IOException { VarKeyRecordNode leaf = getPreviousLeaf(); return leaf != null ? leaf.getLeftmostLeafNode() : this; } - + + @Override + VarKeyRecordNode getRightmostLeafNode() throws IOException { + VarKeyRecordNode leaf = getNextLeaf(); + return leaf != null ? leaf.getRightmostLeafNode() : this; + } + /** * Get this leaf node's right sibling * @return this leaf node's right sibling or null if right sibling does not exist. @@ -173,9 +179,9 @@ class VarKeyRecordNode extends VarKeyNode { if (nextLeafId >= 0) { leaf = (VarKeyRecordNode) nodeMgr.getVarKeyNode(nextLeafId); } - return leaf; + return leaf; } - + /** * Get this leaf node's left sibling * @return this leaf node's left sibling or null if left sibling does not exist. @@ -187,9 +193,9 @@ class VarKeyRecordNode extends VarKeyNode { if (nextLeafId >= 0) { leaf = (VarKeyRecordNode) nodeMgr.getVarKeyNode(nextLeafId); } - return leaf; + return leaf; } - + /** * Perform a binary search to locate the specified key and derive an index * into the Buffer ID storage. 
@@ -198,12 +204,12 @@ class VarKeyRecordNode extends VarKeyNode { * @throws IOException thrown if an IO error occurs */ int getKeyIndex(Field key) throws IOException { - + int min = 0; int max = keyCount - 1; - + while (min <= max) { - int i = (min + max)/2; + int i = (min + max) / 2; Field k = getKey(i); int rc = k.compareTo(key); if (rc == 0) { @@ -216,9 +222,9 @@ class VarKeyRecordNode extends VarKeyNode { max = i - 1; } } - return -(min+1); + return -(min + 1); } - + /** * Split this leaf node in half and update tree. * When a split is performed, the next operation must be performed @@ -227,7 +233,7 @@ class VarKeyRecordNode extends VarKeyNode { * @throws IOException thrown if an IO error occurs */ VarKeyNode split() throws IOException { - + // Create new leaf int oldSiblingId = buffer.getInt(NEXT_LEAF_ID_OFFSET); VarKeyRecordNode newLeaf = createNewLeaf(buffer.getId(), oldSiblingId); @@ -239,19 +245,20 @@ class VarKeyRecordNode extends VarKeyNode { VarKeyRecordNode leaf = (VarKeyRecordNode) nodeMgr.getVarKeyNode(oldSiblingId); leaf.buffer.putInt(PREV_LEAF_ID_OFFSET, newBufId); } - + // Split node creating two balanced leaves splitData(newLeaf); - + if (parent != null) { // Ask parent to insert new node and return root return parent.insert(newLeaf); } - + // New parent node becomes root - return new VarKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), newLeaf.getKey(0), newBufId); + return new VarKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), newLeaf.getKey(0), + newBufId); } - + /** * Append a leaf which contains one or more keys and update tree. Leaf is inserted * as the new right sibling of this leaf. @@ -260,29 +267,29 @@ class VarKeyRecordNode extends VarKeyNode { * @throws IOException thrown if an IO error occurs */ VarKeyNode appendLeaf(VarKeyRecordNode leaf) throws IOException { - + // Create new leaf and link leaf.buffer.putInt(PREV_LEAF_ID_OFFSET, buffer.getId()); int rightLeafBufId = buffer.getInt(NEXT_LEAF_ID_OFFSET); leaf.buffer.putInt(NEXT_LEAF_ID_OFFSET, rightLeafBufId); - + // Adjust this node int newBufId = leaf.buffer.getId(); buffer.putInt(NEXT_LEAF_ID_OFFSET, newBufId); - + // Adjust old right node if present if (rightLeafBufId >= 0) { VarKeyNode rightLeaf = nodeMgr.getVarKeyNode(rightLeafBufId); rightLeaf.buffer.putInt(PREV_LEAF_ID_OFFSET, newBufId); } - + if (parent != null) { // Ask parent to insert new node and return root - leaf parent is unknown return parent.insert(leaf); } - + // New parent node becomes root - return new VarKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), leaf.getKey(0), newBufId); + return new VarKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), leaf.getKey(0), newBufId); } /** @@ -293,10 +300,10 @@ class VarKeyRecordNode extends VarKeyNode { * @throws IOException thrown if IO error occurs */ VarKeyNode putRecord(Record record, Table table) throws IOException { - + Field key = record.getKeyField(); int index = getKeyIndex(key); - + // Handle record update case if (index >= 0) { if (table != null) { @@ -305,19 +312,19 @@ class VarKeyRecordNode extends VarKeyNode { VarKeyNode newRoot = updateRecord(index, record); return newRoot; } - + // Handle new record - see if we have room in this leaf - index = -index-1; + index = -index - 1; if (insertRecord(index, record)) { if (index == 0 && parent != null) { - parent.keyChanged(getKey(1), key, this); + parent.keyChanged(getKey(1), key, this); } if (table != null) { table.insertedRecord(record); } return getRoot(); } - + // Special Case - append new leaf to right if (index == keyCount) { 
VarKeyNode newRoot = appendNewLeaf(record); @@ -331,19 +338,19 @@ class VarKeyRecordNode extends VarKeyNode { VarKeyRecordNode leaf = split().getLeafNode(key); return leaf.putRecord(record, table); } - + /** * Append a new leaf and insert the specified record. * @param record data record with long key * @return root node which may have changed. * @throws IOException thrown if IO error occurs - */ + */ VarKeyNode appendNewLeaf(Record record) throws IOException { VarKeyRecordNode newLeaf = createNewLeaf(-1, -1); newLeaf.insertRecord(0, record); return appendLeaf(newLeaf); } - + /** * Delete the record identified by the specified key. * @param key record key * @@ -358,7 +365,7 @@ class VarKeyRecordNode extends VarKeyNode { if (index < 0) { return getRoot(); } - + if (table != null) { table.deletedRecord(getRecord(table.getSchema(), index)); } @@ -371,10 +378,10 @@ class VarKeyRecordNode extends VarKeyNode { // Remove record within this node remove(index); - + // Notify parent of leftmost key change if (index == 0 && parent != null) { - parent.keyChanged(key, getKey(0), this); + parent.keyChanged(key, getKey(0), this); } return getRoot(); @@ -390,7 +397,7 @@ class VarKeyRecordNode extends VarKeyNode { Record getRecordBefore(Field key, Schema schema) throws IOException { int index = getKeyIndex(key); if (index < 0) { - index = -index-2; + index = -index - 2; } else { --index; @@ -399,9 +406,9 @@ class VarKeyRecordNode extends VarKeyNode { VarKeyRecordNode nextLeaf = getPreviousLeaf(); return nextLeaf != null ? nextLeaf.getRecord(schema, nextLeaf.keyCount - 1) : null; } - return getRecord(schema, index); + return getRecord(schema, index); } - + /** * Get the first record whose key is greater than the specified key. * @param key record key * @@ -412,7 +419,7 @@ class VarKeyRecordNode extends VarKeyNode { Record getRecordAfter(Field key, Schema schema) throws IOException { int index = getKeyIndex(key); if (index < 0) { - index = -(index+1); + index = -(index + 1); } else { ++index; @@ -423,7 +430,7 @@ class VarKeyRecordNode extends VarKeyNode { } return getRecord(schema, index); } - + /** * Get the first record whose key is less than or equal to the specified * key. @@ -435,15 +442,15 @@ class VarKeyRecordNode extends VarKeyNode { Record getRecordAtOrBefore(Field key, Schema schema) throws IOException { int index = getKeyIndex(key); if (index < 0) { - index = -index-2; + index = -index - 2; } if (index < 0) { VarKeyRecordNode nextLeaf = getPreviousLeaf(); return nextLeaf != null ? nextLeaf.getRecord(schema, nextLeaf.keyCount - 1) : null; } - return getRecord(schema, index); + return getRecord(schema, index); } - + /** * Get the first record whose key is greater than or equal to the specified * key. @@ -455,17 +462,15 @@ class VarKeyRecordNode extends VarKeyNode { Record getRecordAtOrAfter(Field key, Schema schema) throws IOException { int index = getKeyIndex(key); if (index < 0) { - index = -(index+1); + index = -(index + 1); } if (index == keyCount) { VarKeyRecordNode nextLeaf = getNextLeaf(); return nextLeaf != null ? nextLeaf.getRecord(schema, 0) : null; } - return getRecord(schema, index); + return getRecord(schema, index); } - - - + /** * Create a new leaf and add to the node manager. * The new leaf's parent is unknown.
@@ -477,17 +482,17 @@ class VarKeyRecordNode extends VarKeyNode { VarKeyRecordNode createNewLeaf(int prevLeafId, int nextLeafId) throws IOException { return new VarKeyRecordNode(nodeMgr, prevLeafId, nextLeafId, keyType); } - + /* * @see ghidra.framework.store.db.VarKeyNode#getKey(int) */ @Override - Field getKey(int index) throws IOException { + Field getKey(int index) throws IOException { Field key = keyType.newField(); key.read(buffer, buffer.getInt(HEADER_SIZE + (index * ENTRY_SIZE))); return key; } - + /** * Get the record data offset within the buffer * @param index key index @@ -497,7 +502,7 @@ class VarKeyRecordNode extends VarKeyNode { int offset = buffer.getInt(HEADER_SIZE + (index * ENTRY_SIZE)); return offset + keyType.readLength(buffer, offset); } - + /** * Get the record key offset within the buffer * @param index key index @@ -516,7 +521,7 @@ class VarKeyRecordNode extends VarKeyNode { private void putRecordKeyOffset(int index, int offset) { buffer.putInt(HEADER_SIZE + (index * ENTRY_SIZE), offset); } - + /** * Determine if a record is utilizing a chained DBBuffer for data storage * @param index key index @@ -533,15 +538,15 @@ class VarKeyRecordNode extends VarKeyNode { */ private void enableIndirectStorage(int index, boolean state) { buffer.putByte(HEADER_SIZE + OFFSET_SIZE + (index * ENTRY_SIZE), - state ? (byte)1 : (byte)0); + state ? (byte) 1 : (byte) 0); } - + /** * @return unused free space within node */ private int getFreeSpace() { - return (keyCount == 0 ? buffer.length() : getRecordKeyOffset(keyCount - 1)) - - (keyCount * ENTRY_SIZE) - HEADER_SIZE; + return (keyCount == 0 ? buffer.length() : getRecordKeyOffset(keyCount - 1)) - + (keyCount * ENTRY_SIZE) - HEADER_SIZE; } /** @@ -549,12 +554,12 @@ class VarKeyRecordNode extends VarKeyNode { * @param keyIndex key index associated with record. */ private int getFullRecordLength(int keyIndex) { - if (keyIndex == 0) { + if (keyIndex == 0) { return buffer.length() - getRecordKeyOffset(0); } return getRecordKeyOffset(keyIndex - 1) - getRecordKeyOffset(keyIndex); } - + /** * Move all records from index to the end by the specified offset. * @param index the smaller key index (0 <= index1) @@ -562,33 +567,32 @@ class VarKeyRecordNode extends VarKeyNode { * @return insertion offset immediately following moved block. */ private int moveRecords(int index, int offset) { - + int lastIndex = keyCount - 1; - + // No movement needed for appended record if (index == keyCount) { if (index == 0) { - return buffer.length() + offset; + return buffer.length() + offset; } - return getRecordKeyOffset(lastIndex) + offset; + return getRecordKeyOffset(lastIndex) + offset; } - + // Determine block to be moved int start = getRecordKeyOffset(lastIndex); int end = (index == 0) ? buffer.length() : getRecordKeyOffset(index - 1); int len = end - start; - + // Move record data buffer.move(start, start + offset, len); - + // Adjust stored offsets for (int i = index; i < keyCount; i++) { putRecordKeyOffset(i, getRecordKeyOffset(i) + offset); } return end + offset; } - - + /** * Get the record located at the specified index. 
* @param schema record data schema @@ -600,16 +604,15 @@ class VarKeyRecordNode extends VarKeyNode { Record record = schema.createRecord(key); if (hasIndirectStorage(index)) { int bufId = buffer.getInt(getRecordDataOffset(index)); - ChainedBuffer chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(), - bufId); + ChainedBuffer chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(), bufId); record.read(chainedBuffer, 0); } else { record.read(buffer, getRecordDataOffset(index)); } - return record; + return record; } - + /** * Get the record identified by the specified key. * @param key record key * @@ -623,20 +626,20 @@ class VarKeyRecordNode extends VarKeyNode { return null; return getRecord(schema, index); } - + /** * Find the index which represents the halfway point within the record data. * @return key index. */ private int getSplitIndex() { - - int halfway = ((keyCount == 0 ? buffer.length() : getRecordKeyOffset(keyCount - 1)) - + buffer.length()) / 2; + + int halfway = ((keyCount == 0 ? buffer.length() : getRecordKeyOffset(keyCount - 1)) + + buffer.length()) / 2; int min = 0; int max = keyCount - 1; - + while (min <= max) { - int i = (min + max)/2; + int i = (min + max) / 2; int offset = getRecordKeyOffset(i); if (offset == halfway) { return i; @@ -657,25 +660,26 @@ class VarKeyRecordNode extends VarKeyNode { * @param rightNode empty right sibling leaf */ private void splitData(VarKeyRecordNode rightNode) { - int splitIndex = getSplitIndex(); int count = keyCount - splitIndex; int start = getRecordKeyOffset(keyCount - 1); // start of block to be moved int end = getRecordKeyOffset(splitIndex - 1); // end of block to be moved int splitLen = end - start; // length of block to be moved int rightOffset = buffer.length() - splitLen; // data offset within new leaf node - + // Copy data to new leaf node DataBuffer newBuf = rightNode.buffer; - newBuf.copy(rightOffset, buffer, start, splitLen); - newBuf.copy(HEADER_SIZE, buffer, HEADER_SIZE + (splitIndex * ENTRY_SIZE), count * ENTRY_SIZE); - + newBuf.copy(rightOffset, buffer, start, splitLen); + newBuf.copy(HEADER_SIZE, buffer, HEADER_SIZE + (splitIndex * ENTRY_SIZE), + count * ENTRY_SIZE); + // Fix record offsets in new leaf node int offsetCorrection = buffer.length() - end; for (int i = 0; i < count; i++) { rightNode.putRecordKeyOffset(i, rightNode.getRecordKeyOffset(i) + offsetCorrection); } - + // Adjust key counts setKeyCount(keyCount - count); rightNode.setKeyCount(count); @@ -689,26 +693,26 @@ class VarKeyRecordNode extends VarKeyNode { * @throws IOException thrown if IO error occurs */ private VarKeyNode updateRecord(int index, Record record) throws IOException { - + Field key = record.getKeyField(); int keyLen = key.length(); int offset = getRecordKeyOffset(index); int oldLen = getFullRecordLength(index) - keyLen; int len = record.length(); - + // Check for use of indirect chained record node(s) int maxRecordLength = ((buffer.length() - HEADER_SIZE) >> 2) - ENTRY_SIZE - keyLen; // min 4 records per node boolean wasIndirect = hasIndirectStorage(index); boolean useIndirect = (len > maxRecordLength); - + if (useIndirect) { // Store record in chained buffers len = 4; ChainedBuffer chainedBuffer = null; if (wasIndirect) { - chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(), - buffer.getInt(offset + keyLen)); + chainedBuffer = + new ChainedBuffer(nodeMgr.getBufferMgr(), buffer.getInt(offset + keyLen)); chainedBuffer.setSize(record.length(), false); } else { @@ -722,10 +726,10 @@ class VarKeyRecordNode extends VarKeyNode {
removeChainedBuffer(buffer.getInt(offset + keyLen)); enableIndirectStorage(index, false); } - + // See if updated record will fit in current buffer if (useIndirect || len <= (getFreeSpace() + oldLen)) { - + // Overwrite record data - move other data if needed int dataShift = oldLen - len; if (dataShift != 0) { @@ -758,7 +762,7 @@ class VarKeyRecordNode extends VarKeyNode { int keyLen = key.length(); if (keyLen > maxKeyLength) throw new AssertException("Key exceeds maximum key length of " + maxKeyLength); - + // Check for use of indirect chained record node(s) int len = record.length(); int maxRecordLength = ((buffer.length() - HEADER_SIZE) >> 2) - ENTRY_SIZE - keyLen; // min 4 records per node @@ -766,26 +770,27 @@ class VarKeyRecordNode extends VarKeyNode { if (useIndirect) { len = 4; } - + if ((len + keyLen + ENTRY_SIZE) > getFreeSpace()) return false; // insufficient space for record storage // Make room for new record int offset = moveRecords(keyIndex, -(len + keyLen)); - + // Make room for new key/offset entry int start = HEADER_SIZE + (keyIndex * ENTRY_SIZE); len = (keyCount - keyIndex) * ENTRY_SIZE; buffer.move(start, start + ENTRY_SIZE, len); - + // Store new record key/offset buffer.putInt(start, offset); setKeyCount(keyCount + 1); key.write(buffer, offset); - + // Store record data if (useIndirect) { - ChainedBuffer chainedBuffer = new ChainedBuffer(record.length(), nodeMgr.getBufferMgr()); + ChainedBuffer chainedBuffer = + new ChainedBuffer(record.length(), nodeMgr.getBufferMgr()); buffer.putInt(offset + keyLen, chainedBuffer.getId()); record.write(chainedBuffer, 0); } @@ -805,37 +810,37 @@ class VarKeyRecordNode extends VarKeyNode { */ void remove(int index) throws IOException { -if (index < 0 || index >= keyCount) -throw new AssertException(); - + if (index < 0 || index >= keyCount) + throw new AssertException(); + if (hasIndirectStorage(index)) { removeChainedBuffer(buffer.getInt(getRecordDataOffset(index))); enableIndirectStorage(index, false); } - + int len = getFullRecordLength(index); moveRecords(index + 1, len); - int start = HEADER_SIZE + ((index+1) * ENTRY_SIZE); + int start = HEADER_SIZE + ((index + 1) * ENTRY_SIZE); len = (keyCount - index - 1) * ENTRY_SIZE; buffer.move(start, start - ENTRY_SIZE, len); - setKeyCount(keyCount-1); + setKeyCount(keyCount - 1); } - + /** * Remove this leaf and all associated chained buffers from the tree. * @return root node which may have changed. * @throws IOException thrown if IO error occurs */ VarKeyNode removeLeaf() throws IOException { - + // Remove all chained buffers associated with this leaf for (int index = 0; index < keyCount; ++index) { if (hasIndirectStorage(index)) { removeChainedBuffer(buffer.getInt(getRecordDataOffset(index))); } } - + Field key = getKey(0); int prevBufferId = buffer.getInt(PREV_LEAF_ID_OFFSET); int nextBufferId = buffer.getInt(NEXT_LEAF_ID_OFFSET); @@ -847,14 +852,14 @@ throw new AssertException(); VarKeyRecordNode nextNode = (VarKeyRecordNode) nodeMgr.getVarKeyNode(nextBufferId); nextNode.getBuffer().putInt(PREV_LEAF_ID_OFFSET, prevBufferId); } - + nodeMgr.deleteNode(this); if (parent == null) { return null; - } + } return parent.deleteChild(key); } - + /** * Remove a chained buffer. 
* @param bufferId chained buffer ID @@ -863,13 +868,13 @@ throw new AssertException(); ChainedBuffer chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(), bufferId); chainedBuffer.delete(); } - + /* * @see ghidra.framework.store.db.VarKeyNode#delete() */ @Override - public void delete() throws IOException { - + public void delete() throws IOException { + // Remove all chained buffers associated with this node. for (int index = 0; index < keyCount; index++) { if (hasIndirectStorage(index)) { @@ -879,14 +884,15 @@ throw new AssertException(); buffer.putInt(offset, -1); } } - + // Remove this node nodeMgr.deleteNode(this); } - + /* * @see ghidra.framework.store.db.BTreeNode#getBufferReferences() */ + @Override public int[] getBufferReferences() { IntArrayList idList = new IntArrayList(); for (int i = 0; i < keyCount; i++) { @@ -894,7 +900,8 @@ throw new AssertException(); try { int offset = getRecordDataOffset(i); idList.add(buffer.getInt(offset)); - } catch (IOException e) { + } + catch (IOException e) { } } } diff --git a/Ghidra/Framework/DB/src/test/java/db/DBIndexedTableTest.java b/Ghidra/Framework/DB/src/test/java/db/DBIndexedTableTest.java index b3a1cb26ef..93044a1528 100644 --- a/Ghidra/Framework/DB/src/test/java/db/DBIndexedTableTest.java +++ b/Ghidra/Framework/DB/src/test/java/db/DBIndexedTableTest.java @@ -763,7 +763,7 @@ public class DBIndexedTableTest extends AbstractGenericTest { Field f = fiter.next(); assertEquals(indexFields.get(ix++), f); } - assertEquals(ix, maxIx + 1); + assertEquals(maxIx + 1, ix); // Index field iterator (forward range of unique index values) minIx = indexFields.size() / 10; @@ -775,7 +775,19 @@ public class DBIndexedTableTest extends AbstractGenericTest { Field f = fiter.next(); assertEquals(indexFields.get(ix++), f); } - assertEquals(ix, maxIx + 1); + assertEquals(maxIx + 1, ix); + + // Index field iterator (forward over all indexed fields) + minIx = 0; + maxIx = indexFields.size() - 1; + fiter = table.indexFieldIterator(null, null, true, colIx); + ix = minIx; + assertTrue("Failed to position before min field", fiter.hasNext()); + while (fiter.hasNext()) { + Field f = fiter.next(); + assertEquals(indexFields.get(ix++), f); + } + assertEquals(maxIx + 1, ix); // Index field iterator (reverse range of unique index values) // minIx = indexFields.size() / 10; @@ -787,7 +799,19 @@ public class DBIndexedTableTest extends AbstractGenericTest { Field f = fiter.previous(); assertEquals(indexFields.get(ix--), f); } - assertEquals(ix, minIx - 1); + assertEquals(minIx - 1, ix); + + // Index field iterator (reverse over all indexed fields) + minIx = 0; + maxIx = indexFields.size() - 1; + fiter = table.indexFieldIterator(null, null, false, colIx); + ix = maxIx; + assertTrue("Failed to position after max field", fiter.hasPrevious()); + while (fiter.hasPrevious()) { + Field f = fiter.previous(); + assertEquals(indexFields.get(ix--), f); + } + assertEquals(-1, ix); // Index field iterator (forward range of unique index values) // minIx = indexFields.size() / 10; @@ -800,7 +824,7 @@ public class DBIndexedTableTest extends AbstractGenericTest { Field f = fiter.next(); assertEquals(indexFields.get(ix++), f); } - assertEquals(ix, maxIx + 1); + assertEquals(maxIx + 1, ix); // Index field iterator (reverse range of unique index values) // minIx = indexFields.size() / 10; @@ -813,7 +837,7 @@ public class DBIndexedTableTest extends AbstractGenericTest { Field f = fiter.previous(); assertEquals(indexFields.get(ix--), f); } - assertEquals(ix, minIx - 1); + assertEquals(minIx - 
1, ix); } }
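The getRightmostLeafNode() methods added above mirror the existing getLeftmostLeafNode() pattern: an interior node recurses into its last child, while a leaf follows its next-sibling link to the end of the leaf chain. A minimal, self-contained sketch of that traversal follows; the SimpleNode, SimpleInterior, and SimpleLeaf types are illustrative stand-ins, not the package-private Ghidra classes.

abstract class SimpleNode {
    // Locate the right-most leaf reachable from this node.
    abstract SimpleLeaf rightmostLeaf();
}

class SimpleInterior extends SimpleNode {
    private final SimpleNode[] children; // children[children.length - 1] is the right-most child

    SimpleInterior(SimpleNode... children) {
        this.children = children;
    }

    @Override
    SimpleLeaf rightmostLeaf() {
        // Analogous to VarKeyInteriorNode: descend into the child referenced by
        // the last key entry (getBufferId(keyCount - 1) in the patch).
        return children[children.length - 1].rightmostLeaf();
    }
}

class SimpleLeaf extends SimpleNode {
    SimpleLeaf nextLeaf; // right sibling, or null (analogue of NEXT_LEAF_ID_OFFSET)

    @Override
    SimpleLeaf rightmostLeaf() {
        // Analogous to VarKeyRecordNode: walk sibling links until no next leaf exists.
        return nextLeaf != null ? nextLeaf.rightmostLeaf() : this;
    }
}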
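Several of the record-lookup methods in VarKeyRecordNode (putRecord, getRecordBefore, getRecordAtOrAfter, and friends) rely on the return-value convention of getKeyIndex(): an exact match returns the key's index, while a miss returns -(insertionPoint + 1) so the caller can recover where the key would be inserted. A small stand-alone sketch of the same convention over an ordinary sorted array; the search helper is illustrative and not part of the patch.

// Returns the index of key if present, otherwise -(insertionPoint + 1),
// matching the encoding used by VarKeyRecordNode.getKeyIndex().
static int search(long[] keys, long key) {
    int min = 0;
    int max = keys.length - 1;
    while (min <= max) {
        int i = (min + max) / 2;
        if (keys[i] == key) {
            return i;                 // exact match
        }
        else if (keys[i] < key) {
            min = i + 1;              // continue in upper half
        }
        else {
            max = i - 1;              // continue in lower half
        }
    }
    return -(min + 1);                // key not found; min is the insertion point
}

// Callers recover the insertion point the same way putRecord() does:
//   int index = search(keys, key);
//   if (index < 0) {
//       index = -index - 1;          // position where the key would be inserted
//   }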
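The new test cases in DBIndexedTableTest pass null for both bounds of indexFieldIterator, which (together with the right-most leaf support above) lets callers walk every value of an indexed column in either direction. A rough usage sketch based only on the signatures visible in the test; the table and colIx variables are assumed to be set up elsewhere, and the iterator methods can throw IOException, which the caller must handle or declare.

// Forward scan over all values of the indexed column colIx.
DBFieldIterator forward = table.indexFieldIterator(null, null, true, colIx);
while (forward.hasNext()) {
    Field indexValue = forward.next();     // values arrive in ascending index order
    // ... process indexValue ...
}

// Reverse scan over the same column.
DBFieldIterator reverse = table.indexFieldIterator(null, null, false, colIx);
while (reverse.hasPrevious()) {
    Field indexValue = reverse.previous(); // values arrive in descending index order
    // ... process indexValue ...
}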