GT-3294 Added support for DB FixedField with improved indexing.

This commit is contained in:
ghidra1 2020-02-24 18:02:01 -05:00
parent 14d4c87ef4
commit fcb3151f94
224 changed files with 9574 additions and 7913 deletions

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -15,12 +14,11 @@
* limitations under the License. * limitations under the License.
*/ */
// Performs database consistency check on the current program // Performs database consistency check on the current program
import db.DBHandle;
import ghidra.app.script.GhidraScript; import ghidra.app.script.GhidraScript;
import ghidra.app.services.ProgramManager; import ghidra.app.services.ProgramManager;
import ghidra.framework.model.DomainFile; import ghidra.framework.model.DomainFile;
import ghidra.program.database.ProgramDB; import ghidra.program.database.ProgramDB;
import ghidra.program.model.listing.Program;
import db.DBHandle;
public class ConsistencyCheck extends GhidraScript { public class ConsistencyCheck extends GhidraScript {
@ -56,6 +54,8 @@ public class ConsistencyCheck extends GhidraScript {
return; return;
} }
monitor.checkCanceled();
if (!df.canSave() || !currentProgram.hasExclusiveAccess()) { if (!df.canSave() || !currentProgram.hasExclusiveAccess()) {
popup("Program database is NOT consistent!\nRebuild requires exclusive checkout."); popup("Program database is NOT consistent!\nRebuild requires exclusive checkout.");
return; return;
@ -67,19 +67,22 @@ public class ConsistencyCheck extends GhidraScript {
} }
end(false); end(false);
ProgramDB program = (ProgramDB) df.getDomainObject(this, false, false, monitor);
programMgr.closeProgram(currentProgram, true); programMgr.closeProgram(currentProgram, true);
currentProgram = (Program) df.getDomainObject(this, false, false, monitor); monitor.clearCanceled(); // compensate for Script Manager cancelling task on program close
dbh = ((ProgramDB) currentProgram).getDBHandle();
dbh = program.getDBHandle();
try { try {
boolean success = false; boolean success = false;
long txId = dbh.startTransaction(); int txId = program.startTransaction("Rebuild DB Indexes");
try { try {
success = dbh.rebuild(monitor); success = dbh.rebuild(monitor);
} }
finally { finally {
dbh.endTransaction(txId, success); program.endTransaction(txId, success);
} }
if (!success) { if (!success) {
@ -92,11 +95,12 @@ public class ConsistencyCheck extends GhidraScript {
return; return;
} }
currentProgram.save("DB Rebuild", monitor); program.save("DB Rebuild", monitor);
} }
finally { finally {
currentProgram.release(this); programMgr.openProgram(program);
currentProgram = programMgr.openProgram(df); program.release(this);
currentProgram = program;
start(); start();
} }
} }

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,9 +15,6 @@
*/ */
package ghidra.app.plugin.debug.dbtable; package ghidra.app.plugin.debug.dbtable;
import ghidra.util.Msg;
import ghidra.util.exception.AssertException;
import java.io.IOException; import java.io.IOException;
import java.util.*; import java.util.*;
@ -26,6 +22,8 @@ import javax.swing.event.TableModelListener;
import javax.swing.table.TableModel; import javax.swing.table.TableModel;
import db.*; import db.*;
import ghidra.util.Msg;
import ghidra.util.exception.AssertException;
public class DbLargeTableModel implements TableModel { public class DbLargeTableModel implements TableModel {
private ArrayList<TableModelListener> listeners = new ArrayList<TableModelListener>(); private ArrayList<TableModelListener> listeners = new ArrayList<TableModelListener>();
@ -43,7 +41,7 @@ public class DbLargeTableModel implements TableModel {
this.table = table; this.table = table;
schema = table.getSchema(); schema = table.getSchema();
try { try {
keyType = schema.getKeyFieldClass().newInstance(); keyType = schema.getKeyFieldType();
} }
catch (Exception e) { catch (Exception e) {
Msg.error(this, "Unexpected Exception: " + e.getMessage(), e); Msg.error(this, "Unexpected Exception: " + e.getMessage(), e);
@ -59,39 +57,39 @@ public class DbLargeTableModel implements TableModel {
Msg.error(this, "Unexpected Exception: " + e.getMessage(), e); Msg.error(this, "Unexpected Exception: " + e.getMessage(), e);
} }
columns.add(getColumn(schema.getKeyFieldClass())); columns.add(getColumn(schema.getKeyFieldType()));
Class<?>[] classes = schema.getFieldClasses(); Field[] fields = schema.getFields();
int fieldCount = schema.getFieldCount(); for (Field field : fields) {
for (int i = 0; i < fieldCount; i++) { columns.add(getColumn(field));
columns.add(getColumn(classes[i]));
} }
} }
private AbstractColumnAdapter getColumn(Class<?> c) { private AbstractColumnAdapter getColumn(Field field) {
if (c == ByteField.class) { if (field instanceof ByteField) {
return new ByteColumnAdapter(); return new ByteColumnAdapter();
} }
else if (c == BooleanField.class) { else if (field instanceof BooleanField) {
return new BooleanColumnAdapter(); return new BooleanColumnAdapter();
} }
else if (c == ShortField.class) { else if (field instanceof ShortField) {
return new ShortColumnAdapter(); return new ShortColumnAdapter();
} }
else if (c == IntField.class) { else if (field instanceof IntField) {
return new IntegerColumnAdapter(); return new IntegerColumnAdapter();
} }
else if (c == LongField.class) { else if (field instanceof LongField) {
return new LongColumnAdapter(); return new LongColumnAdapter();
} }
else if (c == StringField.class) { else if (field instanceof StringField) {
return new StringColumnAdapter(); return new StringColumnAdapter();
} }
else if (c == BinaryField.class) { else if (field instanceof BinaryField) {
return new BinaryColumnAdapter(); return new BinaryColumnAdapter();
} }
throw new AssertException("New, unexpected DB column class type: " + c); throw new AssertException(
"New, unexpected DB column type: " + field.getClass().getSimpleName());
} }
private void findMinKey() throws IOException { private void findMinKey() throws IOException {

View file

@ -36,12 +36,11 @@ public class DbSmallTableModel extends AbstractSortedTableModel<Record> {
records = new ArrayList<>(table.getRecordCount()); records = new ArrayList<>(table.getRecordCount());
columns.add(getColumn(schema.getKeyFieldClass())); columns.add(getColumn(schema.getKeyFieldType()));
Class<?>[] classes = schema.getFieldClasses(); Field[] fields = schema.getFields();
int fieldCount = schema.getFieldCount(); for (Field field : fields) {
for (int i = 0; i < fieldCount; i++) { columns.add(getColumn(field));
columns.add(getColumn(classes[i]));
} }
try { try {
@ -55,29 +54,30 @@ public class DbSmallTableModel extends AbstractSortedTableModel<Record> {
} }
} }
private AbstractColumnAdapter getColumn(Class<?> c) { private AbstractColumnAdapter getColumn(Field field) {
if (c == ByteField.class) { if (field instanceof ByteField) {
return new ByteColumnAdapter(); return new ByteColumnAdapter();
} }
else if (c == BooleanField.class) { else if (field instanceof BooleanField) {
return new BooleanColumnAdapter(); return new BooleanColumnAdapter();
} }
else if (c == ShortField.class) { else if (field instanceof ShortField) {
return new ShortColumnAdapter(); return new ShortColumnAdapter();
} }
else if (c == IntField.class) { else if (field instanceof IntField) {
return new IntegerColumnAdapter(); return new IntegerColumnAdapter();
} }
else if (c == LongField.class) { else if (field instanceof LongField) {
return new LongColumnAdapter(); return new LongColumnAdapter();
} }
else if (c == StringField.class) { else if (field instanceof StringField) {
return new StringColumnAdapter(); return new StringColumnAdapter();
} }
else if (c == BinaryField.class) { else if (field instanceof BinaryField) {
return new BinaryColumnAdapter(); return new BinaryColumnAdapter();
} }
throw new AssertException("New, unexpected DB column class type: " + c); throw new AssertException(
"New, unexpected DB column type: " + field.getClass().getSimpleName());
} }
@Override @Override

View file

@ -66,7 +66,7 @@ public class AddressIndexPrimaryKeyIteratorTest extends AbstractGhidraHeadedInte
// Create table with indexed address column // Create table with indexed address column
Schema schema = Schema schema =
new Schema(0, "id", new Class[] { LongField.class }, new String[] { "addr" }); new Schema(0, "id", new Field[] { LongField.INSTANCE }, new String[] { "addr" });
DBHandle handle = program.getDBHandle(); DBHandle handle = program.getDBHandle();
myTable = handle.createTable("MyTable", schema, new int[] { 0 }); myTable = handle.createTable("MyTable", schema, new int[] { 0 });

View file

@ -32,7 +32,7 @@ import ghidra.util.datastruct.LongArray;
public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest { public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest {
private static Schema SCHEMA = private static Schema SCHEMA =
new Schema(0, "addr", new Class[] { StringField.class }, new String[] { "str" }); new Schema(0, "addr", new Field[] { StringField.INSTANCE }, new String[] { "str" });
private ProgramDB program; private ProgramDB program;
private AddressSpace space; private AddressSpace space;

View file

@ -35,7 +35,8 @@ import ghidra.util.Lock;
import ghidra.util.exception.CancelledException; import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitorAdapter; import ghidra.util.task.TaskMonitorAdapter;
public class AddressRangeMapDBTest extends AbstractGhidraHeadedIntegrationTest implements ErrorHandler { public class AddressRangeMapDBTest extends AbstractGhidraHeadedIntegrationTest
implements ErrorHandler {
private TestEnv env; // needed to discover languages private TestEnv env; // needed to discover languages
private ProgramDB program; private ProgramDB program;
@ -83,7 +84,7 @@ public class AddressRangeMapDBTest extends AbstractGhidraHeadedIntegrationTest i
public void testTransaction() { public void testTransaction() {
AddressRangeMapDB map = new AddressRangeMapDB(program.getDBHandle(), addrMap, AddressRangeMapDB map = new AddressRangeMapDB(program.getDBHandle(), addrMap,
new Lock("Test"), "TEST", this, LongField.class, true); new Lock("Test"), "TEST", this, LongField.INSTANCE, true);
try { try {
map.paintRange(addr(0), addr(0x1000), ONE); map.paintRange(addr(0), addr(0x1000), ONE);
@ -114,7 +115,7 @@ public class AddressRangeMapDBTest extends AbstractGhidraHeadedIntegrationTest i
public void testPaint() { public void testPaint() {
AddressRangeMapDB map = new AddressRangeMapDB(program.getDBHandle(), addrMap, AddressRangeMapDB map = new AddressRangeMapDB(program.getDBHandle(), addrMap,
new Lock("Test"), "TEST", this, LongField.class, true); new Lock("Test"), "TEST", this, LongField.INSTANCE, true);
int id = program.startTransaction("TEST"); int id = program.startTransaction("TEST");
try { try {
@ -152,7 +153,7 @@ public class AddressRangeMapDBTest extends AbstractGhidraHeadedIntegrationTest i
public void testClear() { public void testClear() {
AddressRangeMapDB map = new AddressRangeMapDB(program.getDBHandle(), addrMap, AddressRangeMapDB map = new AddressRangeMapDB(program.getDBHandle(), addrMap,
new Lock("Test"), "TEST", this, LongField.class, true); new Lock("Test"), "TEST", this, LongField.INSTANCE, true);
int id = program.startTransaction("TEST"); int id = program.startTransaction("TEST");
try { try {
@ -186,7 +187,7 @@ public class AddressRangeMapDBTest extends AbstractGhidraHeadedIntegrationTest i
public void testAddressRangeIterator() { public void testAddressRangeIterator() {
AddressRangeMapDB map = new AddressRangeMapDB(program.getDBHandle(), addrMap, AddressRangeMapDB map = new AddressRangeMapDB(program.getDBHandle(), addrMap,
new Lock("Test"), "TEST", this, LongField.class, true); new Lock("Test"), "TEST", this, LongField.INSTANCE, true);
int id = program.startTransaction("TEST"); int id = program.startTransaction("TEST");
try { try {
@ -248,7 +249,7 @@ public class AddressRangeMapDBTest extends AbstractGhidraHeadedIntegrationTest i
public void testMove() { public void testMove() {
AddressRangeMapDB map = new AddressRangeMapDB(program.getDBHandle(), addrMap, AddressRangeMapDB map = new AddressRangeMapDB(program.getDBHandle(), addrMap,
new Lock("Test"), "TEST", this, LongField.class, true); new Lock("Test"), "TEST", this, LongField.INSTANCE, true);
int id = program.startTransaction("TEST"); int id = program.startTransaction("TEST");
try { try {

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -21,8 +20,6 @@ import java.util.Random;
import db.*; import db.*;
public class DatabaseBenchMarks { public class DatabaseBenchMarks {
static int BUFFER_SIZE = 16 * 1024; static int BUFFER_SIZE = 16 * 1024;
static int CACHE_SIZE = 32 * 1024 * 1024; static int CACHE_SIZE = 32 * 1024 * 1024;
@ -54,14 +51,17 @@ public class DatabaseBenchMarks {
testRandomAccess(timer); testRandomAccess(timer);
} }
private static void testOrderedIntInsertions(TestTimer timer, int numInsertions) { private static void testOrderedIntInsertions(TestTimer timer, int numInsertions) {
try { try {
DBHandle dbh = new DBHandle(BUFFER_SIZE, CACHE_SIZE); DBHandle dbh = new DBHandle(BUFFER_SIZE, CACHE_SIZE);
long transactionID = dbh.startTransaction(); long transactionID = dbh.startTransaction();
Schema schema = new Schema(1, "Key", new Class[]{IntField.class}, new String[]{"Value"}); Schema schema =
new Schema(1, "Key", new Field[] { IntField.INSTANCE }, new String[] { "Value" });
Table table = dbh.createTable("Test", schema); Table table = dbh.createTable("Test", schema);
Record record = schema.createRecord(0); Record record = schema.createRecord(0);
timer.start("Inserting "+numInsertions+" sorted records with long keys and integer values"); timer.start(
"Inserting " + numInsertions + " sorted records with long keys and integer values");
for (int i = 0; i < numInsertions; i++) { for (int i = 0; i < numInsertions; i++) {
record.setKey(i); record.setKey(i);
record.setIntValue(0, i); record.setIntValue(0, i);
@ -70,7 +70,8 @@ public class DatabaseBenchMarks {
timer.end(); timer.end();
dbh.endTransaction(transactionID, true); dbh.endTransaction(transactionID, true);
dbh.close(); dbh.close();
}catch(IOException e) { }
catch (IOException e) {
e.printStackTrace(); e.printStackTrace();
} }
} }
@ -79,10 +80,12 @@ public class DatabaseBenchMarks {
try { try {
DBHandle dbh = new DBHandle(BUFFER_SIZE, CACHE_SIZE); DBHandle dbh = new DBHandle(BUFFER_SIZE, CACHE_SIZE);
long transactionID = dbh.startTransaction(); long transactionID = dbh.startTransaction();
Schema schema = new Schema(1, "Key", new Class[]{StringField.class}, new String[]{"Value"}); Schema schema = new Schema(1, "Key", new Field[] { StringField.INSTANCE },
new String[] { "Value" });
Table table = dbh.createTable("Test", schema); Table table = dbh.createTable("Test", schema);
Record record = schema.createRecord(0); Record record = schema.createRecord(0);
timer.start("Inserting "+numInsertions+" sorted records with long keys and String (length = 8) values"); timer.start("Inserting " + numInsertions +
" sorted records with long keys and String (length = 8) values");
for (int i = 0; i < numInsertions; i++) { for (int i = 0; i < numInsertions; i++) {
record.setKey(i); record.setKey(i);
record.setString(0, "abcdefgh"); record.setString(0, "abcdefgh");
@ -91,7 +94,8 @@ public class DatabaseBenchMarks {
timer.end(); timer.end();
dbh.endTransaction(transactionID, true); dbh.endTransaction(transactionID, true);
dbh.close(); dbh.close();
}catch(IOException e) { }
catch (IOException e) {
e.printStackTrace(); e.printStackTrace();
} }
} }
@ -101,10 +105,12 @@ public class DatabaseBenchMarks {
Random random = new Random(); Random random = new Random();
DBHandle dbh = new DBHandle(BUFFER_SIZE, CACHE_SIZE); DBHandle dbh = new DBHandle(BUFFER_SIZE, CACHE_SIZE);
long transactionID = dbh.startTransaction(); long transactionID = dbh.startTransaction();
Schema schema = new Schema(1, "Key", new Class[]{IntField.class}, new String[]{"Value"}); Schema schema =
new Schema(1, "Key", new Field[] { IntField.INSTANCE }, new String[] { "Value" });
Table table = dbh.createTable("Test", schema); Table table = dbh.createTable("Test", schema);
Record record = schema.createRecord(0); Record record = schema.createRecord(0);
timer.start("Inserting "+numInsertions+" random records with long keys and integer values"); timer.start(
"Inserting " + numInsertions + " random records with long keys and integer values");
for (int i = 0; i < numInsertions; i++) { for (int i = 0; i < numInsertions; i++) {
record.setKey(random.nextLong()); record.setKey(random.nextLong());
record.setIntValue(0, i); record.setIntValue(0, i);
@ -113,7 +119,8 @@ public class DatabaseBenchMarks {
timer.end(); timer.end();
dbh.endTransaction(transactionID, true); dbh.endTransaction(transactionID, true);
dbh.close(); dbh.close();
}catch(IOException e) { }
catch (IOException e) {
e.printStackTrace(); e.printStackTrace();
} }
} }
@ -122,7 +129,8 @@ public class DatabaseBenchMarks {
try { try {
DBHandle dbh = new DBHandle(BUFFER_SIZE, CACHE_SIZE); DBHandle dbh = new DBHandle(BUFFER_SIZE, CACHE_SIZE);
long transactionID = dbh.startTransaction(); long transactionID = dbh.startTransaction();
Schema schema = new Schema(1, "Key", new Class[]{IntField.class}, new String[]{"Value"}); Schema schema =
new Schema(1, "Key", new Field[] { IntField.INSTANCE }, new String[] { "Value" });
Table table = dbh.createTable("Test", schema); Table table = dbh.createTable("Test", schema);
Record record = schema.createRecord(0); Record record = schema.createRecord(0);
System.out.print("building database..."); System.out.print("building database...");
@ -140,7 +148,8 @@ public class DatabaseBenchMarks {
dbh.endTransaction(transactionID, true); dbh.endTransaction(transactionID, true);
dbh.close(); dbh.close();
}catch(IOException e) { }
catch (IOException e) {
e.printStackTrace(); e.printStackTrace();
} }
} }
@ -149,7 +158,8 @@ public class DatabaseBenchMarks {
try { try {
DBHandle dbh = new DBHandle(BUFFER_SIZE, CACHE_SIZE); DBHandle dbh = new DBHandle(BUFFER_SIZE, CACHE_SIZE);
long transactionID = dbh.startTransaction(); long transactionID = dbh.startTransaction();
Schema schema = new Schema(1, "Key", new Class[]{IntField.class}, new String[]{"Value"}); Schema schema =
new Schema(1, "Key", new Field[] { IntField.INSTANCE }, new String[] { "Value" });
Table table = dbh.createTable("Test", schema); Table table = dbh.createTable("Test", schema);
Record record = schema.createRecord(0); Record record = schema.createRecord(0);
System.out.print("building database..."); System.out.print("building database...");
@ -167,13 +177,12 @@ public class DatabaseBenchMarks {
dbh.endTransaction(transactionID, true); dbh.endTransaction(transactionID, true);
dbh.close(); dbh.close();
}catch(IOException e) { }
catch (IOException e) {
e.printStackTrace(); e.printStackTrace();
} }
} }
} }
class TestTimer { class TestTimer {
@ -183,6 +192,7 @@ class TestTimer {
System.out.print(testMsg + "... "); System.out.print(testMsg + "... ");
start = System.currentTimeMillis(); start = System.currentTimeMillis();
} }
void end() { void end() {
long end = System.currentTimeMillis(); long end = System.currentTimeMillis();
System.out.println("" + (end - start) / 1000.0 + " seconds"); System.out.println("" + (end - start) / 1000.0 + " seconds");

View file

@ -23,7 +23,6 @@ import java.io.IOException;
import java.util.*; import java.util.*;
import javax.swing.*; import javax.swing.*;
import javax.swing.event.TableModelListener;
import javax.swing.table.TableModel; import javax.swing.table.TableModel;
import db.buffers.LocalBufferFile; import db.buffers.LocalBufferFile;
@ -33,6 +32,8 @@ import docking.widgets.combobox.GComboBox;
import docking.widgets.filechooser.GhidraFileChooser; import docking.widgets.filechooser.GhidraFileChooser;
import docking.widgets.label.GDLabel; import docking.widgets.label.GDLabel;
import docking.widgets.label.GLabel; import docking.widgets.label.GLabel;
import ghidra.app.plugin.debug.dbtable.DbLargeTableModel;
import ghidra.app.plugin.debug.dbtable.DbSmallTableModel;
import ghidra.framework.Application; import ghidra.framework.Application;
import ghidra.framework.store.db.PackedDatabase; import ghidra.framework.store.db.PackedDatabase;
import ghidra.util.Msg; import ghidra.util.Msg;
@ -292,525 +293,3 @@ public class DbViewer extends JFrame {
} }
} }
class ColumnAdapter {
static final int BYTE = 0;
static final int BOOLEAN = 1;
static final int SHORT = 2;
static final int INT = 3;
static final int LONG = 4;
static final int STRING = 5;
static final int BINARY = 6;
int type;
Class<?> valueClass;
ColumnAdapter(Class<?> c) {
if (c == ByteField.class) {
type = BYTE;
valueClass = Byte.class;
}
else if (c == BooleanField.class) {
type = BOOLEAN;
valueClass = Boolean.class;
}
else if (c == ShortField.class) {
type = SHORT;
valueClass = Short.class;
}
else if (c == IntField.class) {
type = INT;
valueClass = Integer.class;
}
else if (c == LongField.class) {
type = LONG;
//valueClass = Long.class;
valueClass = String.class;
}
else if (c == StringField.class) {
type = STRING;
valueClass = String.class;
}
else if (c == BinaryField.class) {
type = BINARY;
valueClass = String.class;
}
}
Class<?> getValueClass() {
return valueClass;
}
Object getKeyValue(Record rec) {
switch (type) {
case BYTE:
return new Byte(((ByteField) rec.getKeyField()).getByteValue());
case BOOLEAN:
return new Boolean(((BooleanField) rec.getKeyField()).getBooleanValue());
case SHORT:
return new Short(((ShortField) rec.getKeyField()).getShortValue());
case INT:
return new Integer(((IntField) rec.getKeyField()).getIntValue());
case LONG:
return "0x" + Long.toHexString(rec.getKey());
//return new Long(rec.getKey());
case STRING:
return ((StringField) rec.getKeyField()).getString();
case BINARY:
byte[] bytes = ((BinaryField) rec.getKeyField()).getBinaryData();
StringBuffer buf = new StringBuffer(" byte[" + bytes.length + "] = ");
if (bytes.length > 0) {
int len = Math.min(bytes.length, 20);
buf.append(bytes[0]);
for (int i = 1; i < len; i++) {
buf.append(",");
buf.append(bytes[i]);
}
if (bytes.length > 20) {
buf.append("...");
}
}
return buf.toString();
}
return "";
}
Object getValue(Record rec, int col) {
switch (type) {
case BYTE:
return new Byte(rec.getByteValue(col));
case BOOLEAN:
return Boolean.valueOf(rec.getBooleanValue(col));
case SHORT:
return new Short(rec.getShortValue(col));
case INT:
return new Integer(rec.getIntValue(col));
case LONG:
return "0x" + Long.toHexString(rec.getLongValue(col));
//return new Long(rec.getLongValue(col));
case STRING:
return " " + rec.getString(col);
case BINARY:
byte[] bytes = rec.getBinaryData(col);
StringBuffer buf = new StringBuffer(" byte[" + bytes.length + "] = ");
if (bytes.length > 0) {
int len = Math.min(bytes.length, 20);
String str = getByteString(bytes[0]);
buf.append(str);
for (int i = 1; i < len; i++) {
buf.append(",");
buf.append(getByteString(bytes[i]));
}
if (bytes.length > 20) {
buf.append("...");
}
}
return buf.toString();
}
return "";
}
private String getByteString(byte b) {
String str = Integer.toHexString(b);
if (str.length() > 2) {
str = str.substring(str.length() - 2);
}
return "0x" + str;
}
// private String format(long l, int size) {
// String hex = Long.toHexString(l);
// if (hex.length() > size) {
// hex = hex.substring(hex.length()-size);
// }
// else if (hex.length() < size) {
// StringBuffer b = new StringBuffer(20);
// for(int i=hex.length();i<size;i++) {
// b.append("");
// }
// b.append(hex);
// hex = b.toString();
// }
//
// return hex;
// }
}
class DbSmallTableModel implements TableModel {
ArrayList<TableModelListener> listeners = new ArrayList<>();
Table table;
Schema schema;
ColumnAdapter[] colAdapters;
ColumnAdapter keyAdapter;
Record[] records;
DbSmallTableModel(Table table) {
this.table = table;
schema = table.getSchema();
records = new Record[table.getRecordCount()];
keyAdapter = new ColumnAdapter(schema.getKeyFieldClass());
colAdapters = new ColumnAdapter[schema.getFieldCount()];
Class<?>[] classes = schema.getFieldClasses();
for (int i = 0; i < colAdapters.length; i++) {
colAdapters[i] = new ColumnAdapter(classes[i]);
}
try {
RecordIterator it = table.iterator();
for (int i = 0; i < records.length; i++) {
records[i] = it.next();
}
}
catch (IOException e) {
Msg.error(this, "Unexpected Exception: " + e.getMessage(), e);
}
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#addTableModelListener(javax.swing.event.TableModelListener)
*/
@Override
public void addTableModelListener(TableModelListener l) {
listeners.add(l);
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#getColumnClass(int)
*/
@Override
public Class<?> getColumnClass(int columnIndex) {
if (columnIndex == 0) {
return keyAdapter.getValueClass();
}
return colAdapters[columnIndex - 1].getValueClass();
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#getColumnCount()
*/
@Override
public int getColumnCount() {
return schema.getFieldCount() + 1;
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#getColumnName(int)
*/
@Override
public String getColumnName(int columnIndex) {
if (columnIndex == 0) {
return schema.getKeyName();
}
--columnIndex;
int[] indexCols = table.getIndexedColumns();
boolean isIndexed = false;
for (int indexCol : indexCols) {
if (indexCol == columnIndex) {
isIndexed = true;
break;
}
}
return schema.getFieldNames()[columnIndex] + (isIndexed ? "*" : "");
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#getRowCount()
*/
@Override
public int getRowCount() {
return table.getRecordCount();
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#getValueAt(int, int)
*/
@Override
public Object getValueAt(int rowIndex, int columnIndex) {
Record rec = records[rowIndex];
if (columnIndex == 0) {
return keyAdapter.getKeyValue(rec);
}
return colAdapters[columnIndex - 1].getValue(rec, columnIndex - 1);
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#isCellEditable(int, int)
*/
@Override
public boolean isCellEditable(int rowIndex, int columnIndex) {
return false;
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#removeTableModelListener(javax.swing.event.TableModelListener)
*/
@Override
public void removeTableModelListener(TableModelListener l) {
listeners.remove(l);
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#setValueAt(java.lang.Object, int, int)
*/
@Override
public void setValueAt(Object aValue, int rowIndex, int columnIndex) {
}
}
class DbLargeTableModel implements TableModel {
ArrayList<TableModelListener> listeners = new ArrayList<>();
Table table;
Schema schema;
ColumnAdapter keyAdapter;
ColumnAdapter[] colAdapters;
RecordIterator recIt;
Record lastRecord;
int lastIndex;
Field minKey;
Field maxKey;
Field keyType;
DbLargeTableModel(Table table) {
this.table = table;
schema = table.getSchema();
keyAdapter = new ColumnAdapter(schema.getKeyFieldClass());
try {
keyType = schema.getKeyFieldClass().newInstance();
}
catch (Exception e) {
Msg.error(this, "Unexpected Exception: " + e.getMessage(), e);
}
try {
recIt = table.iterator();
lastRecord = recIt.next();
lastIndex = 0;
findMaxKey();
findMinKey();
}
catch (IOException e) {
Msg.error(this, "Unexpected Exception: " + e.getMessage(), e);
}
colAdapters = new ColumnAdapter[schema.getFieldCount()];
Class<?>[] classes = schema.getFieldClasses();
for (int i = 0; i < colAdapters.length; i++) {
colAdapters[i] = new ColumnAdapter(classes[i]);
}
}
private void findMinKey() throws IOException {
RecordIterator iter = table.iterator();
Record rec = iter.next();
minKey = rec.getKeyField();
}
private void findMaxKey() throws IOException {
Field max = keyType.newField();
if (table.useLongKeys()) {
max.setLongValue(Long.MAX_VALUE);
}
else {
byte[] maxBytes = new byte[128];
Arrays.fill(maxBytes, 0, 128, (byte) 0x7f);
max.setBinaryData(maxBytes);
}
RecordIterator iter = table.iterator(max);
Record rec = iter.previous();
maxKey = rec.getKeyField();
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#addTableModelListener(javax.swing.event.TableModelListener)
*/
@Override
public void addTableModelListener(TableModelListener l) {
listeners.add(l);
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#getColumnClass(int)
*/
@Override
public Class<?> getColumnClass(int columnIndex) {
if (columnIndex == 0) {
return keyAdapter.getValueClass();
}
return colAdapters[columnIndex - 1].getValueClass();
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#getColumnCount()
*/
@Override
public int getColumnCount() {
return schema.getFieldCount() + 1;
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#getColumnName(int)
*/
@Override
public String getColumnName(int columnIndex) {
if (columnIndex == 0) {
return schema.getKeyName();
}
--columnIndex;
int[] indexCols = table.getIndexedColumns();
boolean isIndexed = false;
for (int indexCol : indexCols) {
if (indexCol == columnIndex) {
isIndexed = true;
break;
}
}
return schema.getFieldNames()[columnIndex] + (isIndexed ? "*" : "");
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#getRowCount()
*/
@Override
public int getRowCount() {
return table.getRecordCount();
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#getValueAt(int, int)
*/
@Override
public Object getValueAt(int rowIndex, int columnIndex) {
Record rec = getRecord(rowIndex);
if (rec == null) {
return null;
}
if (columnIndex == 0) {
return keyAdapter.getKeyValue(rec);
}
return colAdapters[columnIndex - 1].getValue(rec, columnIndex - 1);
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#isCellEditable(int, int)
*/
@Override
public boolean isCellEditable(int rowIndex, int columnIndex) {
return false;
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#removeTableModelListener(javax.swing.event.TableModelListener)
*/
@Override
public void removeTableModelListener(TableModelListener l) {
listeners.remove(l);
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#setValueAt(java.lang.Object, int, int)
*/
@Override
public void setValueAt(Object aValue, int rowIndex, int columnIndex) {
}
private Record getRecord(int index) {
try {
if (index == lastIndex + 1) {
if (!recIt.hasNext()) {
// do something
}
lastRecord = recIt.next();
lastIndex = index;
}
else if (index != lastIndex) {
if (index < lastIndex && (lastIndex - index) < 200) {
int backup = lastIndex - index + 1;
for (int i = 0; i < backup; i++) {
if (recIt.hasPrevious()) {
recIt.previous();
}
}
lastRecord = recIt.next();
lastIndex = index;
}
else {
findRecord(index);
lastRecord = recIt.next();
lastIndex = index;
}
}
}
catch (IOException e) {
// XXX Auto-generated catch block
Msg.error(this, "Unexpected Exception: " + e.getMessage(), e);
}
return lastRecord;
}
private void findRecord(int index) throws IOException {
if (index < 1000) {
recIt = table.iterator();
for (int i = 0; i < index; i++) {
recIt.next();
}
}
else if (index > table.getRecordCount() - 1000) {
recIt = table.iterator(maxKey);
if (recIt.hasNext()) {
recIt.next();
}
for (int i = 0; i < table.getRecordCount() - index; i++) {
recIt.previous();
}
}
else {
recIt = table.iterator(approxKey(index));
}
}
private Field approxKey(int index) {
Field key = keyType.newField();
if (table.useLongKeys()) {
long min = minKey.getLongValue();
long max = maxKey.getLongValue();
long k = min + ((max - min) * index / table.getRecordCount());
key.setLongValue(k);
}
else {
long min = getLong(minKey.getBinaryData());
long max = getLong(maxKey.getBinaryData());
long k = min + ((max - min) * index / table.getRecordCount());
byte[] bytes = new byte[8];
for (int i = 7; i >= 0; i--) {
bytes[i] = (byte) k;
k >>= 8;
}
key.setBinaryData(bytes);
}
return key;
}
private long getLong(byte[] bytes) {
if (bytes == null || bytes.length == 0) {
return 0;
}
long value = 0;
for (int i = 0; i < 8; i++) {
value <<= 8;
if (i < bytes.length) {
value += bytes[i] & 0xff;
}
}
return value;
}
}

View file

@ -44,11 +44,11 @@ public class FunctionsTable {
static final int CACHE_SIZE = 10000; static final int CACHE_SIZE = 10000;
// @formatter:off // @formatter:off
static final Schema SCHEMA = new Schema(LibrariesTable.VERSION, "Function ID", new Class[] { static final Schema SCHEMA = new Schema(LibrariesTable.VERSION, "Function ID", new Field[] {
ShortField.class, LongField.class, ShortField.INSTANCE, LongField.INSTANCE,
ByteField.class, LongField.class, LongField.class, ByteField.INSTANCE, LongField.INSTANCE, LongField.INSTANCE,
LongField.class, LongField.class, LongField.class, LongField.INSTANCE, LongField.INSTANCE, LongField.INSTANCE,
ByteField.class ByteField.INSTANCE
}, new String[] { }, new String[] {
"Code Unit Size", "Full Hash", "Code Unit Size", "Full Hash",
"Specific Hash Additional Size", "Specific Hash", "Library ID", "Specific Hash Additional Size", "Specific Hash", "Library ID",
@ -133,14 +133,15 @@ public class FunctionsTable {
*/ */
public List<FunctionRecord> getFunctionRecordsByFullHash(long hash) throws IOException { public List<FunctionRecord> getFunctionRecordsByFullHash(long hash) throws IOException {
LongField hashField = new LongField(hash); LongField hashField = new LongField(hash);
DBLongIterator iterator = table.indexKeyIterator(FULL_HASH_COL, hashField, hashField, true); DBFieldIterator iterator =
table.indexKeyIterator(FULL_HASH_COL, hashField, hashField, true);
if (!iterator.hasNext()) { if (!iterator.hasNext()) {
return Collections.emptyList(); return Collections.emptyList();
} }
List<FunctionRecord> list = new ArrayList<>(); List<FunctionRecord> list = new ArrayList<>();
while (iterator.hasNext()) { while (iterator.hasNext()) {
long key = iterator.next(); Field key = iterator.next();
FunctionRecord functionRecord = functionCache.get(key); FunctionRecord functionRecord = functionCache.get(key.getLongValue());
if (functionRecord == null) { if (functionRecord == null) {
Record record = table.getRecord(key); Record record = table.getRecord(key);
functionRecord = new FunctionRecord(fidDb, functionCache, record); functionRecord = new FunctionRecord(fidDb, functionCache, record);
@ -216,15 +217,15 @@ public class FunctionsTable {
*/ */
public List<FunctionRecord> getFunctionRecordsByNameSubstring(String nameSearch) public List<FunctionRecord> getFunctionRecordsByNameSubstring(String nameSearch)
throws IOException { throws IOException {
DBLongIterator iterator = table.indexKeyIterator(NAME_ID_COL); DBFieldIterator iterator = table.indexKeyIterator(NAME_ID_COL);
if (!iterator.hasNext()) { if (!iterator.hasNext()) {
return Collections.emptyList(); return Collections.emptyList();
} }
List<FunctionRecord> list = new ArrayList<>(); List<FunctionRecord> list = new ArrayList<>();
while (iterator.hasNext()) { while (iterator.hasNext()) {
long key = iterator.next(); Field key = iterator.next();
FunctionRecord functionRecord = functionCache.get(key); FunctionRecord functionRecord = functionCache.get(key.getLongValue());
if (functionRecord == null) { if (functionRecord == null) {
Record record = table.getRecord(key); Record record = table.getRecord(key);
long nameID = record.getLongValue(NAME_ID_COL); long nameID = record.getLongValue(NAME_ID_COL);
@ -255,15 +256,15 @@ public class FunctionsTable {
*/ */
public List<FunctionRecord> getFunctionRecordsByNameRegex(String regex) throws IOException { public List<FunctionRecord> getFunctionRecordsByNameRegex(String regex) throws IOException {
Matcher matcher = Pattern.compile(regex).matcher(""); Matcher matcher = Pattern.compile(regex).matcher("");
DBLongIterator iterator = table.indexKeyIterator(NAME_ID_COL); DBFieldIterator iterator = table.indexKeyIterator(NAME_ID_COL);
if (!iterator.hasNext()) { if (!iterator.hasNext()) {
return Collections.emptyList(); return Collections.emptyList();
} }
List<FunctionRecord> list = new ArrayList<>(); List<FunctionRecord> list = new ArrayList<>();
while (iterator.hasNext()) { while (iterator.hasNext()) {
long key = iterator.next(); Field key = iterator.next();
FunctionRecord functionRecord = functionCache.get(key); FunctionRecord functionRecord = functionCache.get(key.getLongValue());
if (functionRecord == null) { if (functionRecord == null) {
Record record = table.getRecord(key); Record record = table.getRecord(key);
long nameID = record.getLongValue(NAME_ID_COL); long nameID = record.getLongValue(NAME_ID_COL);
@ -347,15 +348,15 @@ public class FunctionsTable {
return Collections.emptyList(); return Collections.emptyList();
} }
LongField field = new LongField(stringID); LongField field = new LongField(stringID);
DBLongIterator iterator = table.indexKeyIterator(NAME_ID_COL, field, field, true); DBFieldIterator iterator = table.indexKeyIterator(NAME_ID_COL, field, field, true);
if (!iterator.hasNext()) { if (!iterator.hasNext()) {
return Collections.emptyList(); return Collections.emptyList();
} }
final long libraryKey = library.getLibraryID(); final long libraryKey = library.getLibraryID();
List<FunctionRecord> list = new ArrayList<>(); List<FunctionRecord> list = new ArrayList<>();
while (iterator.hasNext()) { while (iterator.hasNext()) {
long key = iterator.next(); Field key = iterator.next();
FunctionRecord functionRecord = functionCache.get(key); FunctionRecord functionRecord = functionCache.get(key.getLongValue());
if (functionRecord == null) { if (functionRecord == null) {
Record record = table.getRecord(key); Record record = table.getRecord(key);
if (record.getLongValue(LIBRARY_ID_COL) == libraryKey) { if (record.getLongValue(LIBRARY_ID_COL) == libraryKey) {

View file

@ -49,10 +49,10 @@ public class LibrariesTable {
static final int GHIDRA_COMPILER_SPEC_ID_COL = 7; static final int GHIDRA_COMPILER_SPEC_ID_COL = 7;
// @formatter:off // @formatter:off
static final Schema SCHEMA = new Schema(VERSION, "Library ID", new Class[] { static final Schema SCHEMA = new Schema(VERSION, "Library ID", new Field[] {
StringField.class, StringField.class, StringField.class, StringField.INSTANCE, StringField.INSTANCE, StringField.INSTANCE,
StringField.class, StringField.class, IntField.class, IntField.class, StringField.INSTANCE, StringField.INSTANCE, IntField.INSTANCE, IntField.INSTANCE,
StringField.class StringField.INSTANCE
}, new String[] { }, new String[] {
"Library Family Name", "Library Version", "Library Variant", "Library Family Name", "Library Version", "Library Variant",
"Ghidra Version", "Ghidra Language ID", "Ghidra Language Version", "Ghidra Language Minor Version", "Ghidra Version", "Ghidra Language ID", "Ghidra Language Version", "Ghidra Language Minor Version",
@ -90,8 +90,9 @@ public class LibrariesTable {
if (libraryVersion != VERSION) { if (libraryVersion != VERSION) {
String msg = "Expected version " + VERSION + " for table " + LIBRARIES_TABLE + String msg = "Expected version " + VERSION + " for table " + LIBRARIES_TABLE +
" but got " + table.getSchema().getVersion(); " but got " + table.getSchema().getVersion();
throw new VersionException(msg, libraryVersion < VERSION throw new VersionException(msg,
? VersionException.OLDER_VERSION : VersionException.NEWER_VERSION, libraryVersion < VERSION ? VersionException.OLDER_VERSION
: VersionException.NEWER_VERSION,
false); false);
} }
} }
@ -155,14 +156,14 @@ public class LibrariesTable {
public List<LibraryRecord> getLibrariesByName(String name, String version, String variant) public List<LibraryRecord> getLibrariesByName(String name, String version, String variant)
throws IOException { throws IOException {
StringField hashField = new StringField(name); StringField hashField = new StringField(name);
DBLongIterator iterator = DBFieldIterator iterator =
table.indexKeyIterator(LIBRARY_FAMILY_NAME_COL, hashField, hashField, true); table.indexKeyIterator(LIBRARY_FAMILY_NAME_COL, hashField, hashField, true);
if (!iterator.hasNext()) { if (!iterator.hasNext()) {
return Collections.emptyList(); return Collections.emptyList();
} }
List<LibraryRecord> list = new ArrayList<LibraryRecord>(); List<LibraryRecord> list = new ArrayList<LibraryRecord>();
while (iterator.hasNext()) { while (iterator.hasNext()) {
long key = iterator.next(); Field key = iterator.next();
Record record = table.getRecord(key); Record record = table.getRecord(key);
LibraryRecord libraryRecord = new LibraryRecord(record); LibraryRecord libraryRecord = new LibraryRecord(record);
if (version != null) { if (version != null) {

View file

@ -27,8 +27,8 @@ public class RelationsTable {
// static final int CACHE_SIZE = 10000; // static final int CACHE_SIZE = 10000;
// @formatter:off // @formatter:off
static final Schema SCHEMA = new Schema(LibrariesTable.VERSION, "Relation Smash", new Class[] { static final Schema SCHEMA = new Schema(LibrariesTable.VERSION, "Relation Smash",
}, new String[] { new Field[] { }, new String[] {
}); });
// @formatter:on // @formatter:on

View file

@ -35,11 +35,9 @@ public class StringsTable {
static final int CACHE_SIZE = 10000; static final int CACHE_SIZE = 10000;
// @formatter:off // @formatter:off
static final Schema SCHEMA = new Schema(LibrariesTable.VERSION, "String ID", new Class[] { static final Schema SCHEMA = new Schema(LibrariesTable.VERSION, "String ID",
StringField.class new Field[] { StringField.INSTANCE },
}, new String[] { new String[] { "String Value" });
"String Value"
});
// @formatter:on // @formatter:on
static int[] INDEXED_COLUMNS = new int[] { STRING_VALUE_COL }; static int[] INDEXED_COLUMNS = new int[] { STRING_VALUE_COL };
@ -69,29 +67,30 @@ public class StringsTable {
* @throws IOException if the database has a problem * @throws IOException if the database has a problem
*/ */
long obtainStringID(String value) throws IOException { long obtainStringID(String value) throws IOException {
long[] records = table.findRecords(new StringField(value), STRING_VALUE_COL); Field[] records = table.findRecords(new StringField(value), STRING_VALUE_COL);
if (records == null || records.length == 0) { if (records == null || records.length == 0) {
// create // create
Record record = SCHEMA.createRecord(UniversalIdGenerator.nextID().getValue()); long key = UniversalIdGenerator.nextID().getValue();
Record record = SCHEMA.createRecord(key);
record.setString(STRING_VALUE_COL, value); record.setString(STRING_VALUE_COL, value);
table.putRecord(record); table.putRecord(record);
return record.getKey(); return key;
} }
return records[0]; return records[0].getLongValue();
} }
/** /**
* Lookup existing ID or return null for String value. * Lookup existing ID or return null for String value.
* @param value the string value * @param value the string value
* @return the existing interned string primary key, or null if nonexistent * @return the existing interned string primary key as LongField, or null if nonexistent
* @throws IOException if the database has a problem * @throws IOException if the database has a problem
*/ */
Long lookupStringID(String value) throws IOException { Long lookupStringID(String value) throws IOException {
long[] records = table.findRecords(new StringField(value), STRING_VALUE_COL); Field[] records = table.findRecords(new StringField(value), STRING_VALUE_COL);
if (records == null || records.length == 0) { if (records == null || records.length == 0) {
return null; return null;
} }
return records[0]; return records[0].getLongValue();
} }
/** /**

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -14,24 +13,24 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package db.util; package ghidra.feature.vt.api.db;
import db.Field; import db.Field;
public class TableColumn { public class TableColumn {
private final Class<? extends Field> columnClass; private final Field columnField;
private boolean indexed; private boolean indexed;
private int ordinal; private int ordinal;
private String name; private String name;
public TableColumn( Class<? extends Field> columnClass ) { public TableColumn(Field columnField) {
this( columnClass, false ); this(columnField, false);
} }
public TableColumn( Class<? extends Field> columnClass, boolean isIndexed ) { public TableColumn(Field columnField, boolean isIndexed) {
this.columnClass = columnClass; this.columnField = columnField;
indexed = isIndexed; indexed = isIndexed;
} }
@ -47,8 +46,8 @@ public class TableColumn {
return indexed; return indexed;
} }
public Class<? extends Field> getColumnClass() { public Field getColumnField() {
return columnClass; return columnField;
} }
public String name() { public String name() {

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -14,13 +13,12 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package db.util; package ghidra.feature.vt.api.db;
import ghidra.util.Msg;
import java.util.*; import java.util.*;
import db.Field; import db.Field;
import ghidra.util.Msg;
public class TableDescriptor { public class TableDescriptor {
@ -32,7 +30,6 @@ public class TableDescriptor {
private TableColumn[] discoverTableColumns() { private TableColumn[] discoverTableColumns() {
Class<? extends TableDescriptor> clazz = getClass(); Class<? extends TableDescriptor> clazz = getClass();
java.lang.reflect.Field[] fields = clazz.getFields(); java.lang.reflect.Field[] fields = clazz.getFields();
List<TableColumn> list = new ArrayList<TableColumn>(fields.length); List<TableColumn> list = new ArrayList<TableColumn>(fields.length);
@ -52,8 +49,8 @@ public class TableDescriptor {
// shouldn't happen // shouldn't happen
} }
catch (IllegalAccessException e) { catch (IllegalAccessException e) {
Msg.showError( this, null, "Class Usage Error", "You must provide public " + Msg.showError(this, null, "Class Usage Error",
"static members for your TableColumns" ); "You must provide public " + "static members for your TableColumns");
} }
} }
@ -86,13 +83,12 @@ public class TableDescriptor {
return list.toArray(new String[columns.length]); return list.toArray(new String[columns.length]);
} }
@SuppressWarnings("unchecked") // we know our class types are safe public Field[] getColumnFields() {
public Class<? extends Field>[] getColumnClasses() { Field[] fields = new Field[columns.length];
List<Class<? extends Field>> list = new LinkedList<Class<? extends Field>>(); for (int i = 0; i < fields.length; i++) {
for ( TableColumn column : columns ) { fields[i] = columns[i].getColumnField().newField();
list.add( column.getColumnClass() );
} }
return list.toArray( new Class[ columns.length ] ); return fields;
} }
} }

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -17,31 +16,32 @@
package ghidra.feature.vt.api.db; package ghidra.feature.vt.api.db;
import static ghidra.feature.vt.api.db.VTAddressCorrelatorAdapter.AddressCorrelationTableDescriptor.*; import static ghidra.feature.vt.api.db.VTAddressCorrelatorAdapter.AddressCorrelationTableDescriptor.*;
import ghidra.util.exception.CancelledException;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.util.List; import java.util.List;
import db.*; import db.*;
import db.util.TableColumn; import ghidra.util.exception.CancelledException;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
public abstract class VTAddressCorrelatorAdapter { public abstract class VTAddressCorrelatorAdapter {
public static class AddressCorrelationTableDescriptor extends db.util.TableDescriptor { public static class AddressCorrelationTableDescriptor
extends ghidra.feature.vt.api.db.TableDescriptor {
public static TableColumn SOURCE_ENTRY_COL = new TableColumn(LongField.class, true); public static TableColumn SOURCE_ENTRY_COL = new TableColumn(LongField.INSTANCE, true);
public static TableColumn SOURCE_ADDRESS_COL = new TableColumn(LongField.class); public static TableColumn SOURCE_ADDRESS_COL = new TableColumn(LongField.INSTANCE);
public static TableColumn DESTINATION_ADDRESS_COL = new TableColumn(LongField.class); public static TableColumn DESTINATION_ADDRESS_COL = new TableColumn(LongField.INSTANCE);
public static AddressCorrelationTableDescriptor INSTANCE = new AddressCorrelationTableDescriptor(); public static AddressCorrelationTableDescriptor INSTANCE =
new AddressCorrelationTableDescriptor();
} }
static String TABLE_NAME = "AddressCorrelationTable"; static String TABLE_NAME = "AddressCorrelationTable";
static Schema TABLE_SCHEMA = new Schema(0, "Key", static Schema TABLE_SCHEMA =
INSTANCE.getColumnClasses(), INSTANCE.getColumnNames()); new Schema(0, "Key", INSTANCE.getColumnFields(), INSTANCE.getColumnNames());
static int[] TABLE_INDEXES = INSTANCE.getIndexedColumns(); static int[] TABLE_INDEXES = INSTANCE.getIndexedColumns();
private DBHandle dbHandle; private DBHandle dbHandle;
@ -58,7 +58,8 @@ public abstract class VTAddressCorrelatorAdapter {
return new VTAddressCorrelationAdapterV0(dbHandle, monitor); return new VTAddressCorrelationAdapterV0(dbHandle, monitor);
} }
abstract void createAddressRecord(long sourceEntryLong, long sourceLong, long destinationLong) throws IOException; abstract void createAddressRecord(long sourceEntryLong, long sourceLong, long destinationLong)
throws IOException;
abstract List<Record> getAddressRecords(long sourceEntryLong) throws IOException; abstract List<Record> getAddressRecords(long sourceEntryLong) throws IOException;
@ -69,6 +70,7 @@ public abstract class VTAddressCorrelatorAdapter {
void save(TaskMonitor monitor) throws CancelledException, IOException { void save(TaskMonitor monitor) throws CancelledException, IOException {
dbHandle.save("", null, monitor); dbHandle.save("", null, monitor);
} }
void saveAs(File file, TaskMonitor monitor) throws CancelledException, IOException { void saveAs(File file, TaskMonitor monitor) throws CancelledException, IOException {
dbHandle.saveAs(file, true, monitor); dbHandle.saveAs(file, true, monitor);
} }

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,35 +15,36 @@
*/ */
package ghidra.feature.vt.api.db; package ghidra.feature.vt.api.db;
import static ghidra.feature.vt.api.db.VTAssociationTableDBAdapter.AssociationTableDescriptor.INSTANCE; import static ghidra.feature.vt.api.db.VTAssociationTableDBAdapter.AssociationTableDescriptor.*;
import ghidra.feature.vt.api.main.VTAssociationStatus;
import ghidra.feature.vt.api.main.VTAssociationType;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
import java.io.IOException; import java.io.IOException;
import java.util.Set; import java.util.Set;
import db.*; import db.*;
import db.util.TableColumn; import ghidra.feature.vt.api.main.VTAssociationStatus;
import ghidra.feature.vt.api.main.VTAssociationType;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
public abstract class VTAssociationTableDBAdapter { public abstract class VTAssociationTableDBAdapter {
public static class AssociationTableDescriptor extends db.util.TableDescriptor { public static class AssociationTableDescriptor
extends ghidra.feature.vt.api.db.TableDescriptor {
public static TableColumn SOURCE_ADDRESS_COL = new TableColumn(LongField.class, true); public static TableColumn SOURCE_ADDRESS_COL = new TableColumn(LongField.INSTANCE, true);
public static TableColumn DESTINATION_ADDRESS_COL = new TableColumn(LongField.class, true); public static TableColumn DESTINATION_ADDRESS_COL =
public static TableColumn TYPE_COL = new TableColumn(ByteField.class); new TableColumn(LongField.INSTANCE, true);
public static TableColumn STATUS_COL = new TableColumn(ByteField.class); public static TableColumn TYPE_COL = new TableColumn(ByteField.INSTANCE);
public static TableColumn APPLIED_STATUS_COL = new TableColumn(ByteField.class); public static TableColumn STATUS_COL = new TableColumn(ByteField.INSTANCE);
public static TableColumn VOTE_COUNT_COL = new TableColumn(IntField.class); public static TableColumn APPLIED_STATUS_COL = new TableColumn(ByteField.INSTANCE);
public static TableColumn VOTE_COUNT_COL = new TableColumn(IntField.INSTANCE);
public static AssociationTableDescriptor INSTANCE = new AssociationTableDescriptor(); public static AssociationTableDescriptor INSTANCE = new AssociationTableDescriptor();
} }
static String TABLE_NAME = "AssociationTable"; static String TABLE_NAME = "AssociationTable";
static Schema TABLE_SCHEMA = static Schema TABLE_SCHEMA =
new Schema(0, "Key", INSTANCE.getColumnClasses(), INSTANCE.getColumnNames()); new Schema(0, "Key", INSTANCE.getColumnFields(), INSTANCE.getColumnNames());
static int[] TABLE_INDEXES = INSTANCE.getIndexedColumns(); static int[] TABLE_INDEXES = INSTANCE.getIndexedColumns();
public static VTAssociationTableDBAdapter createAdapter(DBHandle dbHandle) throws IOException { public static VTAssociationTableDBAdapter createAdapter(DBHandle dbHandle) throws IOException {

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,36 +15,35 @@
*/ */
package ghidra.feature.vt.api.db; package ghidra.feature.vt.api.db;
import static ghidra.feature.vt.api.db.VTMatchMarkupItemTableDBAdapter.MarkupTableDescriptor.INSTANCE; import static ghidra.feature.vt.api.db.VTMatchMarkupItemTableDBAdapter.MarkupTableDescriptor.*;
import ghidra.feature.vt.api.impl.MarkupItemStorage;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
import java.io.IOException; import java.io.IOException;
import db.*; import db.*;
import db.util.TableColumn; import ghidra.feature.vt.api.impl.MarkupItemStorage;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
public abstract class VTMatchMarkupItemTableDBAdapter { public abstract class VTMatchMarkupItemTableDBAdapter {
public static class MarkupTableDescriptor extends db.util.TableDescriptor { public static class MarkupTableDescriptor extends ghidra.feature.vt.api.db.TableDescriptor {
public static TableColumn ASSOCIATION_KEY_COL = new TableColumn(LongField.class, true); public static TableColumn ASSOCIATION_KEY_COL = new TableColumn(LongField.INSTANCE, true);
public static TableColumn ADDRESS_SOURCE_COL = new TableColumn(StringField.class); public static TableColumn ADDRESS_SOURCE_COL = new TableColumn(StringField.INSTANCE);
public static TableColumn DESTINATION_ADDRESS_COL = new TableColumn(LongField.class); public static TableColumn DESTINATION_ADDRESS_COL = new TableColumn(LongField.INSTANCE);
public static TableColumn MARKUP_TYPE_COL = new TableColumn(ShortField.class); public static TableColumn MARKUP_TYPE_COL = new TableColumn(ShortField.INSTANCE);
public static TableColumn SOURCE_ADDRESS_COL = new TableColumn(LongField.class); public static TableColumn SOURCE_ADDRESS_COL = new TableColumn(LongField.INSTANCE);
public static TableColumn SOURCE_VALUE_COL = new TableColumn(StringField.class); public static TableColumn SOURCE_VALUE_COL = new TableColumn(StringField.INSTANCE);
public static TableColumn ORIGINAL_DESTINATION_VALUE_COL = public static TableColumn ORIGINAL_DESTINATION_VALUE_COL =
new TableColumn(StringField.class); new TableColumn(StringField.INSTANCE);
public static TableColumn STATUS_COL = new TableColumn(ByteField.class); public static TableColumn STATUS_COL = new TableColumn(ByteField.INSTANCE);
public static TableColumn STATUS_DESCRIPTION_COL = new TableColumn(StringField.class); public static TableColumn STATUS_DESCRIPTION_COL = new TableColumn(StringField.INSTANCE);
public static MarkupTableDescriptor INSTANCE = new MarkupTableDescriptor(); public static MarkupTableDescriptor INSTANCE = new MarkupTableDescriptor();
} }
protected static String TABLE_NAME = "MatchMarkupItemTable"; protected static String TABLE_NAME = "MatchMarkupItemTable";
static Schema TABLE_SCHEMA = static Schema TABLE_SCHEMA =
new Schema(0, "Key", INSTANCE.getColumnClasses(), INSTANCE.getColumnNames()); new Schema(0, "Key", INSTANCE.getColumnFields(), INSTANCE.getColumnNames());
protected static int[] INDEXED_COLUMNS = INSTANCE.getIndexedColumns(); protected static int[] INDEXED_COLUMNS = INSTANCE.getIndexedColumns();
@ -71,6 +69,5 @@ public abstract class VTMatchMarkupItemTableDBAdapter {
public abstract int getRecordCount(); public abstract int getRecordCount();
public abstract Record createMarkupItemRecord(MarkupItemStorage markupItem) public abstract Record createMarkupItemRecord(MarkupItemStorage markupItem) throws IOException;
throws IOException;
} }

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,34 +15,33 @@
*/ */
package ghidra.feature.vt.api.db; package ghidra.feature.vt.api.db;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import db.*;
import ghidra.feature.vt.api.main.VTProgramCorrelator; import ghidra.feature.vt.api.main.VTProgramCorrelator;
import ghidra.program.database.map.AddressMap; import ghidra.program.database.map.AddressMap;
import ghidra.program.model.address.AddressSet; import ghidra.program.model.address.AddressSet;
import ghidra.util.exception.VersionException; import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor; import ghidra.util.task.TaskMonitor;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import db.*;
public abstract class VTMatchSetTableDBAdapter { public abstract class VTMatchSetTableDBAdapter {
public enum ColumnDescription { public enum ColumnDescription {
CORRELATOR_CLASS_COL(StringField.class), CORRELATOR_CLASS_COL(StringField.INSTANCE),
CORRELATOR_NAME_COL(StringField.class), CORRELATOR_NAME_COL(StringField.INSTANCE),
OPTIONS_COL(StringField.class); OPTIONS_COL(StringField.INSTANCE);
private final Class<? extends Field> columnClass; private final Field columnField;
private ColumnDescription(Class<? extends Field> columnClass) { private ColumnDescription(Field columnField) {
this.columnClass = columnClass; this.columnField = columnField;
} }
public Class<? extends Field> getColumnClass() { public Field getColumnField() {
return columnClass; return columnField;
} }
public int column() { public int column() {
@ -59,20 +57,18 @@ public abstract class VTMatchSetTableDBAdapter {
return list.toArray(new String[columns.length]); return list.toArray(new String[columns.length]);
} }
@SuppressWarnings("unchecked") private static Field[] getColumnFields() {
// we know our class types are safe
private static Class<? extends Field>[] getColumnClasses() {
ColumnDescription[] columns = ColumnDescription.values(); ColumnDescription[] columns = ColumnDescription.values();
List<Class<? extends Field>> list = new LinkedList<Class<? extends Field>>(); Field[] fields = new Field[columns.length];
for (ColumnDescription column : columns) { for (int i = 0; i < fields.length; i++) {
list.add(column.getColumnClass()); fields[i] = columns[i].getColumnField();
} }
return list.toArray(new Class[columns.length]); return fields;
} }
} }
static String TABLE_NAME = "MatchSetTable"; static String TABLE_NAME = "MatchSetTable";
static Schema TABLE_SCHEMA = new Schema(0, "Key", ColumnDescription.getColumnClasses(), static Schema TABLE_SCHEMA = new Schema(0, "Key", ColumnDescription.getColumnFields(),
ColumnDescription.getColumnNames()); ColumnDescription.getColumnNames());
static VTMatchSetTableDBAdapter createAdapter(DBHandle dbHandle) throws IOException { static VTMatchSetTableDBAdapter createAdapter(DBHandle dbHandle) throws IOException {

View file

@ -16,13 +16,6 @@
package ghidra.feature.vt.api.db; package ghidra.feature.vt.api.db;
import static ghidra.feature.vt.api.db.VTMatchSetTableDBAdapter.ColumnDescription.*; import static ghidra.feature.vt.api.db.VTMatchSetTableDBAdapter.ColumnDescription.*;
import ghidra.feature.vt.api.main.VTProgramCorrelator;
import ghidra.framework.options.ToolOptions;
import ghidra.program.database.map.AddressMap;
import ghidra.program.model.address.*;
import ghidra.program.model.listing.Program;
import ghidra.util.exception.VersionException;
import ghidra.util.xml.GenericXMLOutputter;
import java.io.IOException; import java.io.IOException;
import java.io.StringWriter; import java.io.StringWriter;
@ -31,13 +24,20 @@ import org.jdom.Element;
import org.jdom.output.XMLOutputter; import org.jdom.output.XMLOutputter;
import db.*; import db.*;
import ghidra.feature.vt.api.main.VTProgramCorrelator;
import ghidra.framework.options.ToolOptions;
import ghidra.program.database.map.AddressMap;
import ghidra.program.model.address.*;
import ghidra.program.model.listing.Program;
import ghidra.util.exception.VersionException;
import ghidra.util.xml.GenericXMLOutputter;
public class VTMatchSetTableDBAdapterV0 extends VTMatchSetTableDBAdapter { public class VTMatchSetTableDBAdapterV0 extends VTMatchSetTableDBAdapter {
private Table table; private Table table;
private static final Schema STORED_ADDRESS_RANGE_SCHEMA = new Schema(0, "Key", new Class[] { private static final Schema STORED_ADDRESS_RANGE_SCHEMA = new Schema(0, "Key",
LongField.class, LongField.class }, new String[] { "addr1", "addr2" }); new Field[] { LongField.INSTANCE, LongField.INSTANCE }, new String[] { "addr1", "addr2" });
private final DBHandle dbHandle; private final DBHandle dbHandle;
@ -46,7 +46,8 @@ public class VTMatchSetTableDBAdapterV0 extends VTMatchSetTableDBAdapter {
table = dbHandle.createTable(TABLE_NAME, TABLE_SCHEMA); table = dbHandle.createTable(TABLE_NAME, TABLE_SCHEMA);
} }
public VTMatchSetTableDBAdapterV0(DBHandle dbHandle, OpenMode openMode) throws VersionException { public VTMatchSetTableDBAdapterV0(DBHandle dbHandle, OpenMode openMode)
throws VersionException {
this.dbHandle = dbHandle; this.dbHandle = dbHandle;
table = dbHandle.getTable(TABLE_NAME); table = dbHandle.getTable(TABLE_NAME);
if (table == null) { if (table == null) {
@ -59,7 +60,8 @@ public class VTMatchSetTableDBAdapterV0 extends VTMatchSetTableDBAdapter {
} }
@Override @Override
public Record createMatchSetRecord(long key, VTProgramCorrelator correlator) throws IOException { public Record createMatchSetRecord(long key, VTProgramCorrelator correlator)
throws IOException {
Record record = TABLE_SCHEMA.createRecord(key); Record record = TABLE_SCHEMA.createRecord(key);
record.setString(CORRELATOR_CLASS_COL.column(), correlator.getClass().getName()); record.setString(CORRELATOR_CLASS_COL.column(), correlator.getClass().getName());

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,36 +15,35 @@
*/ */
package ghidra.feature.vt.api.db; package ghidra.feature.vt.api.db;
import ghidra.feature.vt.api.main.VTMatchInfo;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
import java.io.IOException; import java.io.IOException;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
import db.*; import db.*;
import ghidra.feature.vt.api.main.VTMatchInfo;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
public abstract class VTMatchTableDBAdapter { public abstract class VTMatchTableDBAdapter {
public enum ColumnDescription { public enum ColumnDescription {
TAG_KEY_COL(LongField.class), TAG_KEY_COL(LongField.INSTANCE),
MATCH_SET_COL(LongField.class), MATCH_SET_COL(LongField.INSTANCE),
SIMILARITY_SCORE_COL(StringField.class), SIMILARITY_SCORE_COL(StringField.INSTANCE),
CONFIDENCE_SCORE_COL(StringField.class), CONFIDENCE_SCORE_COL(StringField.INSTANCE),
LENGTH_TYPE(StringField.class), LENGTH_TYPE(StringField.INSTANCE),
SOURCE_LENGTH_COL(IntField.class), SOURCE_LENGTH_COL(IntField.INSTANCE),
DESTINATION_LENGTH_COL(IntField.class), DESTINATION_LENGTH_COL(IntField.INSTANCE),
ASSOCIATION_COL(LongField.class); ASSOCIATION_COL(LongField.INSTANCE);
private final Class<? extends Field> columnClass; private final Field columnField;
private ColumnDescription(Class<? extends Field> columnClass) { private ColumnDescription(Field columnField) {
this.columnClass = columnClass; this.columnField = columnField;
} }
public Class<? extends Field> getColumnClass() { public Field getColumnField() {
return columnClass; return columnField;
} }
public int column() { public int column() {
@ -61,21 +59,18 @@ public abstract class VTMatchTableDBAdapter {
return list.toArray(new String[columns.length]); return list.toArray(new String[columns.length]);
} }
@SuppressWarnings("unchecked") private static Field[] getColumnFields() {
// we know our class types are safe
private static Class<? extends Field>[] getColumnClasses() {
ColumnDescription[] columns = ColumnDescription.values(); ColumnDescription[] columns = ColumnDescription.values();
List<Class<? extends Field>> list = new LinkedList<Class<? extends Field>>(); Field[] fields = new Field[columns.length];
for (ColumnDescription column : columns) { for (int i = 0; i < fields.length; i++) {
list.add(column.getColumnClass()); fields[i] = columns[i].getColumnField();
} }
return list.toArray(new Class[columns.length]); return fields;
} }
} }
static String TABLE_NAME = "MatchTable"; static String TABLE_NAME = "MatchTable";
static Schema TABLE_SCHEMA = static Schema TABLE_SCHEMA = new Schema(0, "Key", ColumnDescription.getColumnFields(),
new Schema(0, "Key", ColumnDescription.getColumnClasses(),
ColumnDescription.getColumnNames()); ColumnDescription.getColumnNames());
static VTMatchTableDBAdapter createAdapter(DBHandle dbHandle, long tableID) throws IOException { static VTMatchTableDBAdapter createAdapter(DBHandle dbHandle, long tableID) throws IOException {

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,14 +15,13 @@
*/ */
package ghidra.feature.vt.api.db; package ghidra.feature.vt.api.db;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
import java.io.IOException; import java.io.IOException;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
import db.*; import db.*;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
/** /**
* Abstract adapter for the database table that holds tags for version tracking matches. * Abstract adapter for the database table that holds tags for version tracking matches.
@ -31,16 +29,16 @@ import db.*;
public abstract class VTMatchTagDBAdapter { public abstract class VTMatchTagDBAdapter {
public enum ColumnDescription { public enum ColumnDescription {
TAG_NAME_COL(StringField.class); TAG_NAME_COL(StringField.INSTANCE);
private final Class<? extends Field> columnClass; private final Field columnField;
private ColumnDescription(Class<? extends Field> columnClass) { private ColumnDescription(Field columnField) {
this.columnClass = columnClass; this.columnField = columnField;
} }
public Class<? extends Field> getColumnClass() { public Field getColumnField() {
return columnClass; return columnField;
} }
public int column() { public int column() {
@ -56,21 +54,18 @@ public abstract class VTMatchTagDBAdapter {
return list.toArray(new String[columns.length]); return list.toArray(new String[columns.length]);
} }
@SuppressWarnings("unchecked") private static Field[] getColumnFields() {
// we know our class types are safe
private static Class<? extends Field>[] getColumnClasses() {
ColumnDescription[] columns = ColumnDescription.values(); ColumnDescription[] columns = ColumnDescription.values();
List<Class<? extends Field>> list = new LinkedList<Class<? extends Field>>(); Field[] fields = new Field[columns.length];
for (ColumnDescription column : columns) { for (int i = 0; i < fields.length; i++) {
list.add(column.getColumnClass()); fields[i] = columns[i].getColumnField();
} }
return list.toArray(new Class[columns.length]); return fields;
} }
} }
static String TABLE_NAME = "MatchTagTable"; static String TABLE_NAME = "MatchTagTable";
static Schema TABLE_SCHEMA = static Schema TABLE_SCHEMA = new Schema(0, "Key", ColumnDescription.getColumnFields(),
new Schema(0, LongField.class, "Key", ColumnDescription.getColumnClasses(),
ColumnDescription.getColumnNames()); ColumnDescription.getColumnNames());
static VTMatchTagDBAdapter createAdapter(DBHandle dbHandle) throws IOException { static VTMatchTagDBAdapter createAdapter(DBHandle dbHandle) throws IOException {

View file

@ -39,10 +39,10 @@ import ghidra.util.exception.*;
import ghidra.util.task.*; import ghidra.util.task.*;
public class VTSessionDB extends DomainObjectAdapterDB implements VTSession, VTChangeManager { public class VTSessionDB extends DomainObjectAdapterDB implements VTSession, VTChangeManager {
private final static Class<?>[] COL_CLASS = new Class[] { StringField.class }; private final static Field[] COL_FIELDS = new Field[] { StringField.INSTANCE };
private final static String[] COL_TYPES = new String[] { "Value" }; private final static String[] COL_TYPES = new String[] { "Value" };
private final static Schema SCHEMA = private final static Schema SCHEMA =
new Schema(0, StringField.class, "Key", COL_CLASS, COL_TYPES); new Schema(0, StringField.INSTANCE, "Key", COL_FIELDS, COL_TYPES);
private static final String PROGRAM_ID_PROPERTYLIST_NAME = "ProgramIDs"; private static final String PROGRAM_ID_PROPERTYLIST_NAME = "ProgramIDs";
private static final String SOURCE_PROGRAM_ID_PROPERTY_KEY = "SourceProgramID"; private static final String SOURCE_PROGRAM_ID_PROPERTY_KEY = "SourceProgramID";
@ -55,7 +55,24 @@ public class VTSessionDB extends DomainObjectAdapterDB implements VTSession, VTC
private static final long IMPLIED_MATCH_SET_ID = -1; private static final long IMPLIED_MATCH_SET_ID = -1;
private static final String PROPERTY_TABLE_NAME = "PropertyTable"; private static final String PROPERTY_TABLE_NAME = "PropertyTable";
private static final String DB_VERSION_PROPERTY_NAME = "DB_VERSION"; private static final String DB_VERSION_PROPERTY_NAME = "DB_VERSION";
private static final int DB_VERSION = 1;
/**
* DB_VERSION should be incremented any time a change is made to the overall
* database schema associated with any of the adapters.
* 14-Nov-2019 - version 2 - Corrected fixed length indexing implementation causing
* change in index table low-level storage for newly
* created tables.
*/
private static final int DB_VERSION = 2;
/**
* UPGRADE_REQUIRED_BFORE_VERSION should be changed to DB_VERSION any time the
* latest version requires a forced upgrade (i.e., Read-only mode not supported
* until upgrade is performed). It is assumed that read-only mode is supported
* if the data's version is >= UPGRADE_REQUIRED_BEFORE_VERSION and <= DB_VERSION.
*/
// NOTE: Schema upgrades are not currently supported
private static final int UPGRADE_REQUIRED_BEFORE_VERSION = 1;
private VTMatchSetTableDBAdapter matchSetTableAdapter; private VTMatchSetTableDBAdapter matchSetTableAdapter;
private AssociationDatabaseManager associationManager; private AssociationDatabaseManager associationManager;
@ -78,12 +95,11 @@ public class VTSessionDB extends DomainObjectAdapterDB implements VTSession, VTC
int ID = session.startTransaction("Constructing New Version Tracking Match Set"); int ID = session.startTransaction("Constructing New Version Tracking Match Set");
try { try {
session.propertyTable = createPropertyTable(session.getDBHandle()); session.propertyTable = session.dbh.createTable(PROPERTY_TABLE_NAME, SCHEMA);
session.matchSetTableAdapter = session.matchSetTableAdapter = VTMatchSetTableDBAdapter.createAdapter(session.dbh);
VTMatchSetTableDBAdapter.createAdapter(session.getDBHandle());
session.associationManager = session.associationManager =
AssociationDatabaseManager.createAssociationManager(session.getDBHandle(), session); AssociationDatabaseManager.createAssociationManager(session.dbh, session);
session.matchTagAdapter = VTMatchTagDBAdapter.createAdapter(session.getDBHandle()); session.matchTagAdapter = VTMatchTagDBAdapter.createAdapter(session.dbh);
session.initializePrograms(sourceProgram, destinationProgram); session.initializePrograms(sourceProgram, destinationProgram);
session.createMatchSet( session.createMatchSet(
new ManualMatchProgramCorrelator(sourceProgram, destinationProgram), new ManualMatchProgramCorrelator(sourceProgram, destinationProgram),
@ -91,6 +107,7 @@ public class VTSessionDB extends DomainObjectAdapterDB implements VTSession, VTC
session.createMatchSet( session.createMatchSet(
new ImpliedMatchProgramCorrelator(sourceProgram, destinationProgram), new ImpliedMatchProgramCorrelator(sourceProgram, destinationProgram),
IMPLIED_MATCH_SET_ID); IMPLIED_MATCH_SET_ID);
session.updateVersion();
} }
finally { finally {
session.endTransaction(ID, true); session.endTransaction(ID, true);
@ -105,21 +122,29 @@ public class VTSessionDB extends DomainObjectAdapterDB implements VTSession, VTC
return session; return session;
} }
private static Table createPropertyTable(DBHandle dbh) throws IOException { private void updateVersion() throws IOException {
Table table = dbh.createTable(PROPERTY_TABLE_NAME, SCHEMA);
Record record = SCHEMA.createRecord(new StringField(DB_VERSION_PROPERTY_NAME)); Record record = SCHEMA.createRecord(new StringField(DB_VERSION_PROPERTY_NAME));
record.setString(0, Integer.toString(DB_VERSION)); record.setString(0, Integer.toString(DB_VERSION));
table.putRecord(record); propertyTable.putRecord(record);
return table;
} }
public static VTSessionDB getVTSession(DBHandle dbHandle, OpenMode openMode, Object consumer, public static VTSessionDB getVTSession(DBHandle dbHandle, OpenMode openMode, Object consumer,
TaskMonitor monitor) throws VersionException, IOException { TaskMonitor monitor) throws VersionException, IOException {
VTSessionDB session = new VTSessionDB(dbHandle, consumer); VTSessionDB session = new VTSessionDB(dbHandle, consumer);
if (session.getVersion() < DB_VERSION) { int storedVersion = session.getVersion();
throw new VersionException("Version Tracking Sessions do not support upgrades.");
if (storedVersion > DB_VERSION) {
throw new VersionException(VersionException.NEWER_VERSION, false);
} }
// The following version logic holds true for DB_VERSION=2 which assumes no additional
// DB index tables will be added when open for update/upgrade. This will not hold
// true for future revisions associated with table schema changes in which case the
// UPGRADE_REQUIRED_BEFORE_VERSION value should equal DB_VERSION.
if (storedVersion < UPGRADE_REQUIRED_BEFORE_VERSION) {
throw new VersionException("Version Tracking Sessions do not support schema upgrades.");
}
session.matchSetTableAdapter = session.matchSetTableAdapter =
VTMatchSetTableDBAdapter.getAdapter(session.getDBHandle(), openMode, monitor); VTMatchSetTableDBAdapter.getAdapter(session.getDBHandle(), openMode, monitor);
session.associationManager = session.associationManager =

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,12 +15,11 @@
*/ */
package db; package db;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
import java.io.IOException; import java.io.IOException;
import db.buffers.DataBuffer; import db.buffers.DataBuffer;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
/** /**
* <code>BTreeNode</code> defines a common interface for all types * <code>BTreeNode</code> defines a common interface for all types
@ -30,17 +28,22 @@ import db.buffers.DataBuffer;
interface BTreeNode { interface BTreeNode {
/** /**
* Return the data buffer ID associated with this node. * @return the parent node or null if this is the root
*/
public InteriorNode getParent();
/**
* @return the data buffer ID associated with this node.
*/ */
public int getBufferId(); public int getBufferId();
/** /**
* Return the data buffer associated with this node. * @return the data buffer associated with this node.
*/ */
public DataBuffer getBuffer(); public DataBuffer getBuffer();
/** /**
* Return the number of keys contained within this node. * @return the number of keys contained within this node.
*/ */
public int getKeyCount(); public int getKeyCount();
@ -50,6 +53,26 @@ interface BTreeNode {
*/ */
public void setKeyCount(int cnt); public void setKeyCount(int cnt);
/**
* Get the key value at a specific index.
* @param index key index
* @return key value
* @throws IOException thrown if an IO error occurs
*/
public Field getKeyField(int index) throws IOException;
/**
* Perform a binary search to locate the specified key and derive an index
* into the Buffer ID storage. This method is intended to find the insertion
* index or exact match for a child key. A negative value will be returned
* when an exact match is not found and may be transformed into an
* insertion index (insetIndex = -returnedIndex-1).
* @param key key to search for
* @return int buffer ID index.
* @throws IOException thrown if an IO error occurs
*/
public int getKeyIndex(Field key) throws IOException;
/** /**
* Delete this node and all child nodes. * Delete this node and all child nodes.
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
@ -67,11 +90,12 @@ interface BTreeNode {
* Check the consistency of this node and all of its children. * Check the consistency of this node and all of its children.
* @return true if consistency check passed, else false * @return true if consistency check passed, else false
* @param tableName name of table containing this node * @param tableName name of table containing this node
* @param monitor * @param monitor task monitor
* @throws IOException * @throws IOException if IO error occured
* @throws CancelledException if task cancelled
* @{@link ThrowsTag} CancelledException * @{@link ThrowsTag} CancelledException
*/ */
public boolean isConsistent(String tableName, TaskMonitor monitor) throws IOException, public boolean isConsistent(String tableName, TaskMonitor monitor)
CancelledException; throws IOException, CancelledException;
} }

View file

@ -15,11 +15,11 @@
*/ */
package db; package db;
import ghidra.util.exception.AssertException;
import java.io.UnsupportedEncodingException; import java.io.UnsupportedEncodingException;
import java.util.ArrayList; import java.util.ArrayList;
import ghidra.util.exception.AssertException;
/** /**
* Allows various non-database supported data types to be * Allows various non-database supported data types to be
* encoded within a BinaryField which may be stored within the * encoded within a BinaryField which may be stored within the
@ -261,13 +261,15 @@ public class BinaryCodedField extends BinaryField {
for (int i = 0; i < strings.length; i++) { for (int i = 0; i < strings.length; i++) {
if (strings[i] == null) { if (strings[i] == null) {
offset = buffer.putInt(offset, -1); offset = buffer.putInt(offset, -1);
} else { }
else {
byte[] bytes = strings[i].getBytes(STRING_ENCODING); byte[] bytes = strings[i].getBytes(STRING_ENCODING);
offset = buffer.putInt(offset, bytes.length); offset = buffer.putInt(offset, bytes.length);
offset = buffer.put(offset, bytes); offset = buffer.put(offset, bytes);
} }
} }
} catch (UnsupportedEncodingException e) { }
catch (UnsupportedEncodingException e) {
throw new AssertException(); throw new AssertException();
} }
} }
@ -463,11 +465,13 @@ public class BinaryCodedField extends BinaryField {
byte[] bytes = buffer.get(offset, len); byte[] bytes = buffer.get(offset, len);
strList.add(new String(bytes, STRING_ENCODING)); strList.add(new String(bytes, STRING_ENCODING));
offset += len; offset += len;
} else { }
else {
strList.add(null); strList.add(null);
} }
} }
} catch (UnsupportedEncodingException e) { }
catch (UnsupportedEncodingException e) {
throw new AssertException(); throw new AssertException();
} }
String[] strings = new String[strList.size()]; String[] strings = new String[strList.size()];

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -19,13 +18,21 @@ package db;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import db.buffers.DataBuffer;
/** /**
* <code>BinaryField</code> provides a wrapper for variable length binary data which is read or * <code>BinaryField</code> provides a wrapper for variable length binary data which is read or
* written to a Record. * written to a Record.
*/ */
public class BinaryField extends Field { public class BinaryField extends Field {
/**
* Instance intended for defining a {@link Table} {@link Schema}
*/
public static final BinaryField INSTANCE = new BinaryField(null, true);
protected byte[] data; protected byte[] data;
private Integer hashcode;
/** /**
* Construct a binary data field with an initial value of null. * Construct a binary data field with an initial value of null.
@ -38,36 +45,41 @@ public class BinaryField extends Field {
* @param data initial value * @param data initial value
*/ */
public BinaryField(byte[] data) { public BinaryField(byte[] data) {
this(data, false);
}
/**
* Construct a binary data field with an initial value of data.
* @param data initial value
* @param immutable true if field value is immutable
*/
BinaryField(byte[] data, boolean immutable) {
super(immutable);
this.data = data; this.data = data;
} }
/* @Override
* @see ghidra.framework.store.db.Field#getBinaryData() void checkImmutable() {
*/ super.checkImmutable();
hashcode = null;
}
@Override @Override
public byte[] getBinaryData() { public byte[] getBinaryData() {
return data; return data;
} }
/*
* @see ghidra.framework.store.db.Field#setBinaryData(byte[])
*/
@Override @Override
public void setBinaryData(byte[] data) { public void setBinaryData(byte[] data) {
checkImmutable();
this.data = data; this.data = data;
} }
/*
* @see ghidra.framework.store.db.Field#length()
*/
@Override @Override
int length() { int length() {
return (data == null) ? 4 : (data.length + 4); return (data == null) ? 4 : (data.length + 4);
} }
/*
* @see ghidra.framework.store.db.Field#write(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int write(Buffer buf, int offset) throws IOException { int write(Buffer buf, int offset) throws IOException {
if (data == null) { if (data == null) {
@ -77,11 +89,9 @@ public class BinaryField extends Field {
return buf.put(offset, data); return buf.put(offset, data);
} }
/*
* @see ghidra.framework.store.db.Field#read(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int read(Buffer buf, int offset) throws IOException { int read(Buffer buf, int offset) throws IOException {
checkImmutable();
int len = buf.getInt(offset); int len = buf.getInt(offset);
offset += 4; offset += 4;
if (len < 0) { if (len < 0) {
@ -94,97 +104,25 @@ public class BinaryField extends Field {
return offset; return offset;
} }
/*
* @see ghidra.framework.store.db.Field#readLength(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int readLength(Buffer buf, int offset) throws IOException { int readLength(Buffer buf, int offset) throws IOException {
int len = buf.getInt(offset); int len = buf.getInt(offset);
return (len < 0 ? 0 : len) + 4; return (len < 0 ? 0 : len) + 4;
} }
/*
* @see ghidra.framework.store.db.Field#isVariableLength()
*/
@Override @Override
public boolean isVariableLength() { public boolean isVariableLength() {
return true; return true;
} }
/*
* @see ghidra.framework.store.db.Field#getFieldType()
*/
@Override @Override
protected byte getFieldType() { byte getFieldType() {
return BINARY_OBJ_TYPE; return BINARY_OBJ_TYPE;
} }
/*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
if (data == null) {
return "BinaryField: null";
}
return "BinaryField[" + data.length + "] = " + getValueAsString();
}
@Override
public String getValueAsString() {
StringBuffer buf = new StringBuffer();
int i = 0;
for (; i < 24 && i < data.length; i++) {
String b = Integer.toHexString(data[i] & 0xff);
if (b.length() == 1) {
buf.append('0');
}
buf.append(b);
buf.append(' ');
}
if (i < data.length) {
buf.append("...");
}
return buf.toString();
}
/*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (obj == null || !(obj instanceof BinaryField))
return false;
BinaryField f = (BinaryField) obj;
return Arrays.equals(f.data, data);
}
// /**
// * Get first 8 bytes of data as long value.
// * First data byte corresponds to most significant byte
// * of long value so that proper sign is preserved.
// * If data is null, Long.MIN_VALUE is returned.
// * @see ghidra.framework.store.db.Field#getLongValue()
// */
// public long getLongValue() {
// long value = 0;
// if (data == null) {
// return Long.MIN_VALUE;
// }
// for (int i = 0; i < 8 && i < data.length; i++) {
// value = (value << 8) | ((long)data[i] & 0x000000ff);
// }
// if (data.length < 8) {
// value = value << (8 * (8 - data.length));
// }
// return value;
// }
/*
* @see ghidra.framework.store.db.Field#truncate(int)
*/
@Override @Override
void truncate(int length) { void truncate(int length) {
checkImmutable();
int maxLen = length - 4; int maxLen = length - 4;
if (data != null && data.length > maxLen) { if (data != null && data.length > maxLen) {
byte[] newData = new byte[maxLen]; byte[] newData = new byte[maxLen];
@ -193,9 +131,6 @@ public class BinaryField extends Field {
} }
} }
/*
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override @Override
public int compareTo(Field o) { public int compareTo(Field o) {
BinaryField f = (BinaryField) o; BinaryField f = (BinaryField) o;
@ -224,28 +159,105 @@ public class BinaryField extends Field {
return len1 - len2; return len1 - len2;
} }
/*
* @see ghidra.framework.store.db.Field#newField(ghidra.framework.store.db.Field)
*/
@Override @Override
public Field newField(Field fieldValue) { int compareTo(DataBuffer buffer, int offset) {
return new BinaryField(fieldValue.getBinaryData()); int len = buffer.getInt(offset);
if (data == null) {
if (len < 0) {
return 0;
}
return -1;
}
else if (len < 0) {
return 1;
}
return -buffer.unsignedCompareTo(data, offset + 4, len);
} }
/*
* @see ghidra.framework.store.db.Field#newField()
*/
@Override @Override
public Field newField() { public BinaryField copyField() {
return new BinaryField(getBinaryData().clone());
}
@Override
public BinaryField newField() {
return new BinaryField(); return new BinaryField();
} }
/* @Override
* @see java.lang.Object#hashCode() BinaryField getMinValue() {
*/ throw new UnsupportedOperationException();
}
@Override
BinaryField getMaxValue() {
throw new UnsupportedOperationException();
}
@Override
public boolean equals(Object obj) {
if (obj == null || obj.getClass() != getClass())
return false;
BinaryField f = (BinaryField) obj;
return Arrays.equals(f.data, data);
}
@Override @Override
public int hashCode() { public int hashCode() {
return data.hashCode(); if (hashcode == null) {
int h = 0;
if (data != null) {
for (byte b : data) {
h = 31 * h + (b & 0xff);
}
}
hashcode = h;
}
return hashcode;
}
/// Methods below should not use data field directly
@Override
public String toString() {
String classname = getClass().getSimpleName();
byte[] d = getBinaryData();
if (d == null) {
return classname + ": null";
}
return classname = "[" + d.length + "] = 0x" + getValueAsString(d);
}
@Override
public String getValueAsString() {
byte[] d = getBinaryData();
if (d == null) {
return "null";
}
return "{" + getValueAsString(d) + "}";
}
/**
* Get format value string for byte array
* @param data byte array
* @return formatted value string
*/
public static String getValueAsString(byte[] data) {
StringBuffer buf = new StringBuffer();
int i = 0;
for (; i < 24 && i < data.length; i++) {
String b = Integer.toHexString(data[i] & 0xff);
if (b.length() == 1) {
buf.append('0');
}
buf.append(b);
buf.append(' ');
}
if (i < data.length) {
buf.append("...");
}
return buf.toString();
} }
} }

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,15 +15,30 @@
*/ */
package db; package db;
import ghidra.util.exception.AssertException;
import java.io.IOException; import java.io.IOException;
import db.buffers.DataBuffer;
/** /**
* <code>BooleanField</code> provides a wrapper for boolean data which is read or * <code>BooleanField</code> provides a wrapper for boolean data which is read or
* written to a Record. * written to a Record.
*/ */
public class BooleanField extends Field { public final class BooleanField extends Field {
/**
* Minimum boolean field value (FALSE)
*/
public static final BooleanField MIN_VALUE = new BooleanField(false, true);
/**
* Maximum boolean field value (TRUE)
*/
public static final BooleanField MAX_VALUE = new BooleanField(true, true);
/**
* Instance intended for defining a {@link Table} {@link Schema}
*/
public static final BooleanField INSTANCE = MIN_VALUE;
private byte value; private byte value;
@ -39,70 +53,57 @@ public class BooleanField extends Field {
* @param b initial value * @param b initial value
*/ */
public BooleanField(boolean b) { public BooleanField(boolean b) {
this(b, false);
}
/**
* Construct a boolean data field with an initial value of b.
* @param b initial value
* @param immutable true if field value is immutable
*/
BooleanField(boolean b, boolean immutable) {
super(immutable);
value = b ? (byte) 1 : (byte) 0; value = b ? (byte) 1 : (byte) 0;
} }
/*
* @see ghidra.framework.store.db.Field#getBooleanValue()
*/
@Override @Override
public boolean getBooleanValue() { public boolean getBooleanValue() {
return (value == 0) ? false : true; return (value == 0) ? false : true;
} }
/*
* @see ghidra.framework.store.db.Field#setBooleanValue(boolean)
*/
@Override @Override
public void setBooleanValue(boolean b) { public void setBooleanValue(boolean b) {
checkImmutable();
this.value = b ? (byte) 1 : (byte) 0; this.value = b ? (byte) 1 : (byte) 0;
} }
/*
* @see ghidra.framework.store.db.Field#length()
*/
@Override @Override
int length() { int length() {
return 1; return 1;
} }
/*
* @see ghidra.framework.store.db.Field#write(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int write(Buffer buf, int offset) throws IOException { int write(Buffer buf, int offset) throws IOException {
return buf.putByte(offset, value); return buf.putByte(offset, value);
} }
/*
* @see ghidra.framework.store.db.Field#read(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int read(Buffer buf, int offset) throws IOException { int read(Buffer buf, int offset) throws IOException {
checkImmutable();
value = buf.getByte(offset); value = buf.getByte(offset);
return offset + 1; return offset + 1;
} }
/*
* @see ghidra.framework.store.db.Field#readLength(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int readLength(Buffer buf, int offset) throws IOException { int readLength(Buffer buf, int offset) throws IOException {
return 1; return 1;
} }
/*
* @see ghidra.framework.store.db.Field#getFieldType()
*/
@Override @Override
protected byte getFieldType() { byte getFieldType() {
return BOOLEAN_TYPE; return BOOLEAN_TYPE;
} }
/*
* @see java.lang.Object#toString()
*/
@Override @Override
public String toString() { public String toString() {
return "BooleanField: " + Boolean.toString(getBooleanValue()); return "BooleanField: " + Boolean.toString(getBooleanValue());
@ -113,9 +114,6 @@ public class BooleanField extends Field {
return Boolean.toString(getBooleanValue()); return Boolean.toString(getBooleanValue());
} }
/*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override @Override
public boolean equals(Object obj) { public boolean equals(Object obj) {
if (obj == null || !(obj instanceof BooleanField)) if (obj == null || !(obj instanceof BooleanField))
@ -124,9 +122,6 @@ public class BooleanField extends Field {
return otherField.value == value; return otherField.value == value;
} }
/*
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override @Override
public int compareTo(Field o) { public int compareTo(Field o) {
BooleanField f = (BooleanField) o; BooleanField f = (BooleanField) o;
@ -137,44 +132,58 @@ public class BooleanField extends Field {
return 1; return 1;
} }
/*
* @see ghidra.framework.store.db.Field#newField(ghidra.framework.store.db.Field)
*/
@Override @Override
public Field newField(Field fieldValue) { int compareTo(DataBuffer buffer, int offset) {
if (fieldValue.isVariableLength()) byte otherValue = buffer.getByte(offset);
throw new AssertException(); if (value == otherValue)
return new BooleanField(fieldValue.getLongValue() != 0); return 0;
else if (value < otherValue)
return -1;
return 1;
} }
/*
* @see ghidra.framework.store.db.Field#newField()
*/
@Override @Override
public Field newField() { public BooleanField copyField() {
return new BooleanField(getLongValue() != 0);
}
@Override
public BooleanField newField() {
return new BooleanField(); return new BooleanField();
} }
/*
* @see ghidra.framework.store.db.Field#getLongValue()
*/
@Override @Override
public long getLongValue() { public long getLongValue() {
return value; return value;
} }
/*
* @see ghidra.framework.store.db.Field#getBinaryData()
*/
@Override @Override
public byte[] getBinaryData() { public byte[] getBinaryData() {
return new byte[] { value }; return new byte[] { value };
} }
@Override
public void setBinaryData(byte[] bytes) {
checkImmutable();
if (bytes.length != 1) {
throw new IllegalFieldAccessException();
}
value = bytes[0];
}
@Override @Override
public int hashCode() { public int hashCode() {
// TODO Auto-generated method stub
return value; return value;
} }
@Override
BooleanField getMinValue() {
return MIN_VALUE;
}
@Override
BooleanField getMaxValue() {
return MAX_VALUE;
}
} }

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -66,6 +65,7 @@ public interface Buffer {
/** /**
* Get the byte data located at the specified offset. * Get the byte data located at the specified offset.
* @param offset byte offset from start of buffer. * @param offset byte offset from start of buffer.
* @param length number of bytes to be read and returned
* @return the byte array. * @return the byte array.
* @throws ArrayIndexOutOfBoundsException is thrown if an invalid offset is * @throws ArrayIndexOutOfBoundsException is thrown if an invalid offset is
* specified or the end of the buffer was encountered while reading the * specified or the end of the buffer was encountered while reading the

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,15 +15,30 @@
*/ */
package db; package db;
import ghidra.util.exception.AssertException;
import java.io.IOException; import java.io.IOException;
import db.buffers.DataBuffer;
/** /**
* <code>ByteField</code> provides a wrapper for single signed byte data * <code>ByteField</code> provides a wrapper for single signed byte data
* which is read or written to a Record. * which is read or written to a Record.
*/ */
public class ByteField extends Field { public final class ByteField extends Field {
/**
* Minimum byte field value
*/
public static final ByteField MIN_VALUE = new ByteField(Byte.MIN_VALUE, true);
/**
* Maximum byte field value
*/
public static final ByteField MAX_VALUE = new ByteField(Byte.MAX_VALUE, true);
/**
* Instance intended for defining a {@link Table} {@link Schema}
*/
public static final ByteField INSTANCE = MIN_VALUE;
private byte value; private byte value;
@ -39,69 +53,57 @@ public class ByteField extends Field {
* @param b initial value * @param b initial value
*/ */
public ByteField(byte b) { public ByteField(byte b) {
this(b, false);
}
/**
* Construct a byte field with an initial value of b.
* @param b initial value
* @param immutable true if field value is immutable
*/
ByteField(byte b, boolean immutable) {
super(immutable);
value = b; value = b;
} }
/*
* @see ghidra.framework.store.db.Field#getByteValue()
*/
@Override @Override
public byte getByteValue() { public byte getByteValue() {
return value; return value;
} }
/*
* @see ghidra.framework.store.db.Field#setByteValue(byte)
*/
@Override @Override
public void setByteValue(byte value) { public void setByteValue(byte value) {
checkImmutable();
this.value = value; this.value = value;
} }
/*
* @see ghidra.framework.store.db.Field#length()
*/
@Override @Override
int length() { int length() {
return 1; return 1;
} }
/*
* @see ghidra.framework.store.db.Field#write(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int write(Buffer buf, int offset) throws IOException { int write(Buffer buf, int offset) throws IOException {
return buf.putByte(offset, value); return buf.putByte(offset, value);
} }
/*
* @see ghidra.framework.store.db.Field#read(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int read(Buffer buf, int offset) throws IOException { int read(Buffer buf, int offset) throws IOException {
checkImmutable();
value = buf.getByte(offset); value = buf.getByte(offset);
return offset + 1; return offset + 1;
} }
/*
* @see ghidra.framework.store.db.Field#readLength(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int readLength(Buffer buf, int offset) throws IOException { int readLength(Buffer buf, int offset) throws IOException {
return 1; return 1;
} }
/*
* @see ghidra.framework.store.db.Field#getFieldType()
*/
@Override @Override
protected byte getFieldType() { byte getFieldType() {
return BYTE_TYPE; return BYTE_TYPE;
} }
/*
* @see java.lang.Object#toString()
*/
@Override @Override
public String toString() { public String toString() {
return "Byte: " + Byte.toString(value); return "Byte: " + Byte.toString(value);
@ -109,12 +111,9 @@ public class ByteField extends Field {
@Override @Override
public String getValueAsString() { public String getValueAsString() {
return Integer.toHexString(value); return "0x" + Integer.toHexString(value);
} }
/*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override @Override
public boolean equals(Object obj) { public boolean equals(Object obj) {
if (obj == null || !(obj instanceof ByteField)) if (obj == null || !(obj instanceof ByteField))
@ -122,9 +121,6 @@ public class ByteField extends Field {
return ((ByteField) obj).value == value; return ((ByteField) obj).value == value;
} }
/*
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override @Override
public int compareTo(Field o) { public int compareTo(Field o) {
ByteField f = (ByteField) o; ByteField f = (ByteField) o;
@ -135,54 +131,63 @@ public class ByteField extends Field {
return 1; return 1;
} }
/*
* @see ghidra.framework.store.db.Field#newField(ghidra.framework.store.db.Field)
*/
@Override @Override
public Field newField(Field fieldValue) { int compareTo(DataBuffer buffer, int offset) {
if (fieldValue.isVariableLength()) byte otherValue = buffer.getByte(offset);
throw new AssertException(); if (value == otherValue)
return new ByteField((byte) fieldValue.getLongValue()); return 0;
else if (value < otherValue)
return -1;
return 1;
} }
/*
* @see ghidra.framework.store.db.Field#newField()
*/
@Override @Override
public Field newField() { public ByteField copyField() {
return new ByteField((byte) getLongValue());
}
@Override
public ByteField newField() {
return new ByteField(); return new ByteField();
} }
/*
* @see ghidra.framework.store.db.Field#getLongValue()
*/
@Override @Override
public long getLongValue() { public long getLongValue() {
return value; return value;
} }
/*
* @see ghidra.framework.store.db.Field#setLongValue(long)
*/
@Override @Override
public void setLongValue(long value) { public void setLongValue(long value) {
this.value = (byte) value; setByteValue((byte) value);
} }
/*
* @see ghidra.framework.store.db.Field#getBinaryData()
*/
@Override @Override
public byte[] getBinaryData() { public byte[] getBinaryData() {
return new byte[] { value }; return new byte[] { value };
} }
/* @Override
* @see java.lang.Object#hashCode() public void setBinaryData(byte[] bytes) {
*/ checkImmutable();
if (bytes.length != 1) {
throw new IllegalFieldAccessException();
}
value = bytes[0];
}
@Override @Override
public int hashCode() { public int hashCode() {
return value; return value;
} }
@Override
ByteField getMinValue() {
return MIN_VALUE;
}
@Override
ByteField getMaxValue() {
return MAX_VALUE;
}
} }

View file

@ -123,7 +123,7 @@ public class ChainedBuffer implements Buffer {
* @param unintializedDataSourceOffset uninitialized data source offset which corresponds to * @param unintializedDataSourceOffset uninitialized data source offset which corresponds to
* this buffers contents. * this buffers contents.
* @param bufferMgr database buffer manager * @param bufferMgr database buffer manager
* @throws IOException * @throws IOException thrown if an IO error occurs
*/ */
public ChainedBuffer(int size, boolean enableObfuscation, Buffer uninitializedDataSource, public ChainedBuffer(int size, boolean enableObfuscation, Buffer uninitializedDataSource,
int unintializedDataSourceOffset, BufferMgr bufferMgr) throws IOException { int unintializedDataSourceOffset, BufferMgr bufferMgr) throws IOException {
@ -171,7 +171,7 @@ public class ChainedBuffer implements Buffer {
* @param size {@literal buffer size (0 < size <= 0x7fffffff)} * @param size {@literal buffer size (0 < size <= 0x7fffffff)}
* @param enableObfuscation true to enable xor-ing of stored data to facilitate data obfuscation. * @param enableObfuscation true to enable xor-ing of stored data to facilitate data obfuscation.
* @param bufferMgr database buffer manager * @param bufferMgr database buffer manager
* @throws IOException * @throws IOException thrown if an IO error occurs
*/ */
public ChainedBuffer(int size, boolean enableObfuscation, BufferMgr bufferMgr) public ChainedBuffer(int size, boolean enableObfuscation, BufferMgr bufferMgr)
throws IOException { throws IOException {
@ -183,7 +183,7 @@ public class ChainedBuffer implements Buffer {
* This method may only be invoked while a database transaction is in progress. * This method may only be invoked while a database transaction is in progress.
* @param size {@literal buffer size (0 < size <= 0x7fffffff)} * @param size {@literal buffer size (0 < size <= 0x7fffffff)}
* @param bufferMgr database buffer manager * @param bufferMgr database buffer manager
* @throws IOException * @throws IOException thrown if an IO error occurs
*/ */
public ChainedBuffer(int size, BufferMgr bufferMgr) throws IOException { public ChainedBuffer(int size, BufferMgr bufferMgr) throws IOException {
this(size, false, null, 0, bufferMgr); this(size, false, null, 0, bufferMgr);
@ -198,7 +198,7 @@ public class ChainedBuffer implements Buffer {
* This should not be specified if buffer will be completely filled/initialized. * This should not be specified if buffer will be completely filled/initialized.
* @param unintializedDataSourceOffset uninitialized data source offset which corresponds to * @param unintializedDataSourceOffset uninitialized data source offset which corresponds to
* this buffers contents. * this buffers contents.
* @throws IOException * @throws IOException thrown if an IO error occurs
*/ */
public ChainedBuffer(BufferMgr bufferMgr, int bufferId, Buffer uninitializedDataSource, public ChainedBuffer(BufferMgr bufferMgr, int bufferId, Buffer uninitializedDataSource,
int unintializedDataSourceOffset) throws IOException { int unintializedDataSourceOffset) throws IOException {
@ -238,6 +238,7 @@ public class ChainedBuffer implements Buffer {
* Construct an existing chained buffer. * Construct an existing chained buffer.
* @param bufferMgr database buffer manager * @param bufferMgr database buffer manager
* @param bufferId database buffer ID which corresponds to a stored ChainedBuffer * @param bufferId database buffer ID which corresponds to a stored ChainedBuffer
* @throws IOException thrown if an IO error occurs
*/ */
public ChainedBuffer(BufferMgr bufferMgr, int bufferId) throws IOException { public ChainedBuffer(BufferMgr bufferMgr, int bufferId) throws IOException {
this(bufferMgr, bufferId, null, 0); this(bufferMgr, bufferId, null, 0);
@ -249,12 +250,12 @@ public class ChainedBuffer implements Buffer {
} }
/** /**
* Generate the XOR value for the specified byteValue which is located at the * Generate the XOR'd value for the specified byteValue which is located at the
* specified bufferOffset. * specified bufferOffset.
* @param bufferOffset offset within a single chained buffer, valid values are in the * @param bufferOffset offset within a single chained buffer, valid values are in the
* range 0 to (dataSpace-1). * range 0 to (dataSpace-1). This value is used to determine the appropriate XOR mask.
* @param byteValue * @param byteValue value to be XOR'd against appropriate mask value
* @return * @return XOR'd value
*/ */
private byte xorMaskByte(int bufferOffset, byte byteValue) { private byte xorMaskByte(int bufferOffset, byte byteValue) {
byte maskByte = XOR_MASK_BYTES[bufferOffset % XOR_MASK_BYTES.length]; byte maskByte = XOR_MASK_BYTES[bufferOffset % XOR_MASK_BYTES.length];
@ -267,7 +268,7 @@ public class ChainedBuffer implements Buffer {
* @param bufferOffset offset within a single chained buffer, valid values are in the * @param bufferOffset offset within a single chained buffer, valid values are in the
* range 0 to (dataSpace-1). The value (bufferOffset+len-1) must be less than dataSpace. * range 0 to (dataSpace-1). The value (bufferOffset+len-1) must be less than dataSpace.
* @param len mask length (2, 4, or 8) * @param len mask length (2, 4, or 8)
* @return * @return XOR mask of specified length which corresponds to specified bufferOffset.
*/ */
private long getXorMask(int bufferOffset, int len) { private long getXorMask(int bufferOffset, int len) {
long mask = 0; long mask = 0;
@ -284,8 +285,9 @@ public class ChainedBuffer implements Buffer {
* The same uninitialized read-only dataSource used for a chained buffer should be re-applied * The same uninitialized read-only dataSource used for a chained buffer should be re-applied
* anytime this chained buffer is re-instantiated. * anytime this chained buffer is re-instantiated.
* *
* @param dataSource * @param dataSource data source for unitilized bytes
* @param dataSourceOffset * @param dataSourceOffset offset within dataSource which corresponds to first byte of
* this chained buffer.
*/ */
private void setUnintializedDataSource(Buffer dataSource, int dataSourceOffset) { private void setUnintializedDataSource(Buffer dataSource, int dataSourceOffset) {
@ -321,6 +323,7 @@ public class ChainedBuffer implements Buffer {
/** /**
* Return the maximum number of buffers consumed by the storage of this DBBuffer object. * Return the maximum number of buffers consumed by the storage of this DBBuffer object.
* The actual number may be less if data has not been written to the entire buffer. * The actual number may be less if data has not been written to the entire buffer.
* @return total number of buffers consumed by this ChaninedBuffer.
*/ */
int getBufferCount() { int getBufferCount() {
return dataBufferIdTable.length + return dataBufferIdTable.length +
@ -734,7 +737,7 @@ public class ChainedBuffer implements Buffer {
* The index buffer provided is always released. * The index buffer provided is always released.
* @param indexBuffer the last index buffer. * @param indexBuffer the last index buffer.
* @return DataBuffer * @return DataBuffer
* @throws IOException * @throws IOException thrown if an IO error occurs
*/ */
private DataBuffer appendIndexBuffer(DataBuffer indexBuffer) throws IOException { private DataBuffer appendIndexBuffer(DataBuffer indexBuffer) throws IOException {
try { try {
@ -856,6 +859,7 @@ public class ChainedBuffer implements Buffer {
/** /**
* Delete and release all underlying DataBuffers. * Delete and release all underlying DataBuffers.
* @throws IOException thrown if an IO error occurs
*/ */
public synchronized void delete() throws IOException { public synchronized void delete() throws IOException {
if (readOnly) { if (readOnly) {
@ -1115,6 +1119,7 @@ public class ChainedBuffer implements Buffer {
* @param startOffset starting offset, inclusive * @param startOffset starting offset, inclusive
* @param endOffset ending offset, exclusive * @param endOffset ending offset, exclusive
* @param fillByte byte value * @param fillByte byte value
* @throws IOException thrown if an IO error occurs
*/ */
public synchronized void fill(int startOffset, int endOffset, byte fillByte) public synchronized void fill(int startOffset, int endOffset, byte fillByte)
throws IOException { throws IOException {
@ -1160,7 +1165,7 @@ public class ChainedBuffer implements Buffer {
* @return int actual number of bytes written. * @return int actual number of bytes written.
* This could be smaller than length if the end of buffer is * This could be smaller than length if the end of buffer is
* encountered while writing data. * encountered while writing data.
* @throws IOException * @throws IOException thrown if an IO error occurs
*/ */
private int putBytes(int index, int bufferDataOffset, byte[] data, int dataOffset, int length) private int putBytes(int index, int bufferDataOffset, byte[] data, int dataOffset, int length)
throws IOException { throws IOException {
@ -1370,9 +1375,6 @@ public class ChainedBuffer implements Buffer {
return offset + 8; return offset + 8;
} }
/*
* @see ghidra.framework.store.Buffer#putShort(int, short)
*/
@Override @Override
public synchronized int putShort(int offset, short v) throws IOException { public synchronized int putShort(int offset, short v) throws IOException {
if (readOnly) { if (readOnly) {
@ -1406,7 +1408,7 @@ public class ChainedBuffer implements Buffer {
* Get a data buffer. * Get a data buffer.
* @param index index of within buffer chain * @param index index of within buffer chain
* @return requested data buffer. * @return requested data buffer.
* @throws IOException * @throws IOException thrown if an IO error occurs
*/ */
private DataBuffer getBuffer(int index) throws IOException { private DataBuffer getBuffer(int index) throws IOException {
// if databufferIdTable is null, index must be null. let it throw null pointer in this case. // if databufferIdTable is null, index must be null. let it throw null pointer in this case.
@ -1425,7 +1427,7 @@ public class ChainedBuffer implements Buffer {
* Initialize specified DataBuffer which corresponds to the chain index. * Initialize specified DataBuffer which corresponds to the chain index.
* @param chainBufferIndex chain buffer index * @param chainBufferIndex chain buffer index
* @param buf newly allocated database buffer * @param buf newly allocated database buffer
* @throws IOException * @throws IOException thrown if an IO error occurs
*/ */
private void initializeAllocatedBuffer(int chainBufferIndex, DataBuffer buf) private void initializeAllocatedBuffer(int chainBufferIndex, DataBuffer buf)
throws IOException { throws IOException {
@ -1455,7 +1457,7 @@ public class ChainedBuffer implements Buffer {
* Add a new data buffer as an indexed buffer. * Add a new data buffer as an indexed buffer.
* @param index buffer index. * @param index buffer index.
* @param buf new data buffer. * @param buf new data buffer.
* @throws IOException * @throws IOException thrown if an IO error occurs
*/ */
private void addBuffer(int index, DataBuffer buf) throws IOException { private void addBuffer(int index, DataBuffer buf) throws IOException {
buf.putByte(NODE_TYPE_OFFSET, NodeMgr.CHAINED_BUFFER_DATA_NODE); buf.putByte(NODE_TYPE_OFFSET, NodeMgr.CHAINED_BUFFER_DATA_NODE);

View file

@ -1,239 +0,0 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import ghidra.util.LongIterator;
import java.io.IOException;
import java.util.NoSuchElementException;
/**
* <code>DBFieldMap</code> provides a database-backed map of non-unique Field values to long values.
*/
public class DBFieldMap {
private static final Class<?>[] fieldClasses = {
};
private static final String[] fieldNames = {
};
private static final int BUFFER_SIZE = 16 * 1024;
private DBHandle dbh;
private Schema schema;
private Table indexTable;
private Class<? extends Field> fieldClass;
/**
* Construct a new map.
* A temporary database is used to provide storage for the map.
* @param fieldClass specifies class of Field values to be stored in this map.
* @param cacheSizeMB size of data cache in MBytes.
*/
public DBFieldMap(Class<? extends Field> fieldClass, int cacheSizeMB) {
if (!Field.class.isAssignableFrom(fieldClass)) {
throw new IllegalArgumentException("Field class expected");
}
this.fieldClass = fieldClass;
int indexFieldType;
try {
indexFieldType = Field.INDEX_TYPE_FLAG |
fieldClass.newInstance().getFieldType();
} catch (Exception e) {
throw new IllegalArgumentException("Bad Field class: " + e.getMessage());
}
Field indexKeyField = IndexField.getIndexField((byte)indexFieldType);
schema = new Schema(0, indexKeyField.getClass(), "MapKey", fieldClasses, fieldNames);
boolean success = false;
try {
dbh = new DBHandle(BUFFER_SIZE, cacheSizeMB * 1024 * 1024);
long txId = dbh.startTransaction();
indexTable = dbh.createTable("DBFieldMap", schema);
dbh.endTransaction(txId, true);
success = true;
}
catch (IOException e) {
throw new RuntimeException(e);
}
finally {
if (!success && dbh != null) {
dbh.close();
dbh = null;
}
}
}
/**
* Dispose all resources associated with this map.
* This method should be invoked when the map is no longer needed.
*/
public void dispose() {
if (dbh != null) {
dbh.close();
dbh = null;
}
}
/*
* @see java.lang.Object#finalize()
*/
@Override
protected void finalize() throws Throwable {
dispose();
}
/**
* Add the specified value pair to this map.
* If the entry already exists, this method has no affect.
* @param fieldValue
* @param longValue
*/
public void addEntry(Field fieldValue, long longValue) {
if (!fieldClass.isInstance(fieldValue)) {
throw new IllegalArgumentException("Instance of " + fieldClass.getName() + " expected");
}
IndexField indexField = IndexField.getIndexField(fieldValue, longValue);
Record rec = schema.createRecord(indexField);
try {
long txId = dbh.startTransaction();
indexTable.putRecord(rec);
dbh.endTransaction(txId, true);
} catch (IOException e) {
throw new RuntimeException(e);
} finally {
}
}
/**
* Delete the specified value pair from this map.
* @param fieldValue
* @param longValue
* @return true if entry exists and was deleted
*/
public boolean deleteEntry(Field fieldValue, long longValue) {
if (!fieldClass.isInstance(fieldValue)) {
throw new IllegalArgumentException("Instance of " + fieldClass.getName() + " expected");
}
IndexField indexField = IndexField.getIndexField(fieldValue, longValue);
try {
long txId = dbh.startTransaction();
boolean success = indexTable.deleteRecord(indexField);
dbh.endTransaction(txId, true);
return success;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/**
* Determine if the specified value pair exists within this map.
* (This method provided for test purposes).
* @param fieldValue
* @param longValue
* @return
*/
boolean hasEntry(Field fieldValue, long longValue) {
if (!fieldClass.isInstance(fieldValue)) {
throw new IllegalArgumentException("Instance of " + fieldClass.getName() + " expected");
}
IndexField indexField = IndexField.getIndexField(fieldValue, longValue);
try {
return indexTable.hasRecord(indexField);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public LongIterator iterator() {
return new MapLongIterator();
}
private class MapLongIterator implements LongIterator {
DBFieldIterator indexIterator;
MapLongIterator() {
try {
indexIterator = indexTable.fieldKeyIterator();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/*
* @see ghidra.util.LongIterator#hasNext()
*/
public boolean hasNext() {
try {
return indexIterator.hasNext();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/*
* @see ghidra.util.LongIterator#next()
*/
public long next() {
try {
IndexField indexField = (IndexField) indexIterator.next();
if (indexField == null) {
throw new NoSuchElementException();
}
return indexField.getPrimaryKey();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/*
* @see ghidra.util.LongIterator#hasPrevious()
*/
public boolean hasPrevious() {
try {
return indexIterator.hasPrevious();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/*
* @see ghidra.util.LongIterator#previous()
*/
public long previous() {
try {
IndexField indexField = (IndexField) indexIterator.previous();
if (indexField == null) {
throw new NoSuchElementException();
}
return indexField.getPrimaryKey();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
}

View file

@ -17,11 +17,24 @@ package db;
import java.io.IOException; import java.io.IOException;
import db.buffers.DataBuffer;
/** /**
* <code>Field</code> is an abstract data wrapper for use with Records. * <code>Field</code> is an abstract data wrapper for use with Records.
* Note that when comparing two Field instances both must be of the same
* class.
*/ */
public abstract class Field implements Comparable<Field> { public abstract class Field implements Comparable<Field> {
public static final Field[] EMPTY_ARRAY = new Field[0];
/**
* 8-bit Field Type Encoding (PPPPFFFF)
* where:
* FFFF - normal/indexed field type
* PPPP - indexed table primary key type (1000b indicates LegacyIndexField)
*/
/** /**
* Field type for ByteField * Field type for ByteField
* @see db.ByteField * @see db.ByteField
@ -65,19 +78,56 @@ public abstract class Field implements Comparable<Field> {
static final byte BOOLEAN_TYPE = 6; static final byte BOOLEAN_TYPE = 6;
/** /**
* Field type flag mask used to isolate flag bits * Field type for 10-byte binary FixedField(10)
* @see db.FixedField
*/ */
static final byte TYPE_FLAG_MASK = (byte) 0xC0; static final byte FIXED_10_TYPE = 7;
/** /**
* Field base type mask used to isolate base type * Legacy Index Primary Key Field type for LongField
* which was previously a boolean indicator for an index
* field with assumed long primary key.
* (see {@link LegacyIndexField})
*/ */
static final byte BASE_TYPE_MASK = (byte) 0x3F; static final byte LEGACY_INDEX_LONG_TYPE = 8;
/** /**
* Field type flag bit shared by all Index type fields * Field base type mask
*/ */
static final byte INDEX_TYPE_FLAG = (byte) 0x80; static final byte FIELD_TYPE_MASK = (byte) 0x0F;
/**
* Field index primary key type mask
*/
static final byte INDEX_PRIMARY_KEY_TYPE_MASK = (byte) ~FIELD_TYPE_MASK;
/**
* Index Primary Key Field Type Shift
*/
static final int INDEX_FIELD_TYPE_SHIFT = 4;
private final boolean immutable;
/**
* Abstract Field Constructor for a mutable instance
*/
Field() {
immutable = false;
}
/**
* Abstract Field Constructor
* @param immutable true if field value is immutable
*/
Field(boolean immutable) {
this.immutable = immutable;
}
void checkImmutable() {
if (immutable) {
throw new IllegalFieldAccessException("immutable field instance");
}
}
/** /**
* Get field as a long value. * Get field as a long value.
@ -191,10 +241,11 @@ public abstract class Field implements Comparable<Field> {
* Set data from binary byte array. * Set data from binary byte array.
* All variable-length fields must implement this method. * All variable-length fields must implement this method.
* @param bytes field data * @param bytes field data
* @throws IllegalFieldAccessException if error occurs while reading bytes
* into field which will generally be caused by the incorrect number of
* bytes provided to a fixed-length field.
*/ */
public void setBinaryData(byte[] bytes) { abstract public void setBinaryData(byte[] bytes);
throw new IllegalFieldAccessException();
}
/** /**
* Get field as a String value. * Get field as a String value.
@ -219,10 +270,10 @@ public abstract class Field implements Comparable<Field> {
/** /**
* Truncate a variable length field to the specified length. * Truncate a variable length field to the specified length.
* If current length is shorterm, this method has no affect. * If current length is shorterm, this method has no affect.
* @param length * @param length truncated length
*/ */
void truncate(int length) { void truncate(int length) {
throw new IllegalFieldAccessException(); throw new UnsupportedOperationException("Field may not be truncated");
} }
/** /**
@ -233,22 +284,31 @@ public abstract class Field implements Comparable<Field> {
} }
/** /**
* Create new instance of this field type. * Determine if specified field is same type as this field
* @param fieldValue initial field value. * @param field a Field instance
* @return long * @return true if field is same type as this field
*/ */
public abstract Field newField(Field fieldValue); public boolean isSameType(Field field) {
return field != null && field.getClass() == getClass();
}
/**
* Create new instance of this field with the same value.
* @return new field instance with same value
*/
public abstract Field copyField();
/** /**
* Create new instance of this field type. * Create new instance of this field type.
* @return long * @return new field instance with undefined initial value
*/ */
public abstract Field newField(); public abstract Field newField();
/** /**
* Return Field instance type as an integer value * Return Field instance type as an integer value.
* @return encoded field type
*/ */
protected abstract byte getFieldType(); abstract byte getFieldType();
/** /**
* Write the field to buf at the specified offset. When writing variable length * Write the field to buf at the specified offset. When writing variable length
@ -292,40 +352,73 @@ public abstract class Field implements Comparable<Field> {
*/ */
abstract int length(); abstract int length();
/*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override @Override
public abstract boolean equals(Object obj); public abstract boolean equals(Object obj);
@Override @Override
public abstract int hashCode(); public abstract int hashCode();
/**
* Get field value as a formatted string
* @return field value string
*/
public abstract String getValueAsString(); public abstract String getValueAsString();
/**
* Get minimum field value.
*
* Supported for fixed-length fields only.
* @return minimum value
* @throws UnsupportedOperationException if field is not fixed-length
*/
abstract Field getMinValue();
/**
* Get maximum field value.
*
* Supported for fixed-length fields only.
* @return maximum value
* @throws UnsupportedOperationException if field is not fixed-length
*/
abstract Field getMaxValue();
/**
* Performs a fast in-place comparison of this field value with another
* field value stored within the specified buffer at the the specified offset.
* @param buffer data buffer
* @param offset field value offset within buffer
* @return comparison value, zero if equal, -1 if this field has a value
* less than the stored field, or +1 if this field has a value greater than
* the stored field located at keyIndex.
*/
abstract int compareTo(DataBuffer buffer, int offset);
/** /**
* Get the field associated with the specified type value. * Get the field associated with the specified type value.
* @param fieldType encoded Field type * @param fieldType field type index
* @return Field * @return Field field instance which corresponds to the specified fieldType
* @throws UnsupportedFieldException if unsupported fieldType specified * @throws UnsupportedFieldException if unsupported fieldType specified
*/ */
static Field getField(byte fieldType) throws UnsupportedFieldException { static Field getField(byte fieldType) throws UnsupportedFieldException {
if ((fieldType & INDEX_TYPE_FLAG) == 0) {
switch (fieldType & BASE_TYPE_MASK) { if ((fieldType & INDEX_PRIMARY_KEY_TYPE_MASK) == 0) {
switch (fieldType & FIELD_TYPE_MASK) {
case LONG_TYPE: case LONG_TYPE:
return new LongField(); return LongField.INSTANCE;
case INT_TYPE: case INT_TYPE:
return new IntField(); return IntField.INSTANCE;
case STRING_TYPE: case STRING_TYPE:
return new StringField(); return StringField.INSTANCE;
case SHORT_TYPE: case SHORT_TYPE:
return new ShortField(); return ShortField.INSTANCE;
case BYTE_TYPE: case BYTE_TYPE:
return new ByteField(); return ByteField.INSTANCE;
case BOOLEAN_TYPE: case BOOLEAN_TYPE:
return new BooleanField(); return BooleanField.INSTANCE;
case BINARY_OBJ_TYPE: case BINARY_OBJ_TYPE:
return new BinaryField(); return BinaryField.INSTANCE;
case FIXED_10_TYPE:
return FixedField10.INSTANCE;
} }
} }
else { else {
@ -340,4 +433,53 @@ public abstract class Field implements Comparable<Field> {
} }
} }
/**
* Get the type index value of the FixedField type which corresponds
* to the specified fixed-length;
* @param fixedLength fixed length
* @return FixedLength field type index
*/
static byte getFixedType(int fixedLength) {
if (fixedLength == 10) {
return FIXED_10_TYPE;
}
throw new IllegalArgumentException(
"Unsupported fixed-length binary type size: " + fixedLength);
}
/**
* Get a fixed-length field of the specified size
* @param size fixed-field length (supported sizes: 1, 4, 8, 10)
* @return fixed field instance
* @throws IllegalArgumentException if unsupported fixed field length
*/
static Field getFixedField(int size) {
switch (size) {
case 1:
return new ByteField();
case 4:
return new IntField();
case 8:
return new LongField();
case 10:
return new FixedField10();
}
throw new IllegalArgumentException("Unsupported fixed-field length: " + size);
}
/**
* Determine if a specified field instance may be indexed
* @param field field to be checked
* @return true if field can be indexed
*/
public static boolean canIndex(Field field) {
if (field == null) {
return false;
}
if (field instanceof IndexField) {
return false;
}
return !field.isSameType(BooleanField.INSTANCE) && !field.isSameType(ByteField.INSTANCE);
}
} }

View file

@ -19,21 +19,27 @@ import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.NoSuchElementException; import java.util.NoSuchElementException;
/**
* <code>FieldIndexTable</code> provides a simplified index table whoose key is
* a fixed or variable length {@link IndexField} which consists of a concatenation of
* the index field value and associated primary table key.
*/
public class FieldIndexTable extends IndexTable { public class FieldIndexTable extends IndexTable {
private static final Class<?>[] fieldClasses = {}; private static final Field[] fields = {};
private static final String[] fieldNames = {}; private static final String[] fieldNames = {};
private final Schema indexSchema;
private final int indexColumn; private final int indexColumn;
private final IndexField indexKeyType;
/** /**
* Construct a new secondary index which is based upon a specific field within the * Construct a new secondary index which is based upon a specific field column within the
* primary table specified by name. * primary table.
* @param primaryTable primary table. * @param primaryTable primary table
* @param colIndex identifies the indexed column within the primary table. * @param colIndex field column index
* @throws IOException thrown if an IO error occurs * @throws IOException thrown if IO error occurs
*/ */
FieldIndexTable(Table primaryTable, int colIndex) throws IOException { FieldIndexTable(Table primaryTable, int colIndex) throws IOException {
this(primaryTable, primaryTable.getDBHandle().getMasterTable().createTableRecord( this(primaryTable, primaryTable.getDBHandle().getMasterTable().createTableRecord(
@ -49,27 +55,35 @@ public class FieldIndexTable extends IndexTable {
*/ */
FieldIndexTable(Table primaryTable, TableRecord indexTableRecord) throws IOException { FieldIndexTable(Table primaryTable, TableRecord indexTableRecord) throws IOException {
super(primaryTable, indexTableRecord); super(primaryTable, indexTableRecord);
this.indexSchema = indexTable.getSchema();
this.indexColumn = indexTableRecord.getIndexedColumn(); this.indexColumn = indexTableRecord.getIndexedColumn();
indexKeyType = (IndexField) indexTable.getSchema().getKeyFieldType();
} }
private static Schema getIndexTableSchema(Table primaryTable, int colIndex) { /**
byte fieldType = primaryTable.getSchema().getField(colIndex).getFieldType(); * Generate index table schema for specified primaryTable and index column
IndexField indexKeyField = IndexField.getIndexField(fieldType); * @param primaryTable primary table
return new Schema(0, indexKeyField.getClass(), "IndexKey", fieldClasses, fieldNames); * @param colIndex index column
} * @return index table schema
/*
* @see ghidra.framework.store.db.IndexTable#findPrimaryKeys(ghidra.framework.store.db.Field)
*/ */
private static Schema getIndexTableSchema(Table primaryTable, int colIndex) {
Schema primarySchema = primaryTable.getSchema();
Field indexedField = primarySchema.getField(colIndex);
Field primaryKeyType = primarySchema.getKeyFieldType();
IndexField indexKeyField = new IndexField(indexedField, primaryKeyType);
return new Schema(0, indexKeyField, "IndexKey", fields, fieldNames);
}
@Override @Override
long[] findPrimaryKeys(Field indexValue) throws IOException { Field[] findPrimaryKeys(Field indexValue) throws IOException {
IndexField indexField = IndexField.getIndexField(indexValue, Long.MIN_VALUE);
IndexField indexField =
indexKeyType.newIndexField(indexValue, getPrimaryTableKeyType().getMinValue());
DBFieldIterator iter = indexTable.fieldKeyIterator(indexField); DBFieldIterator iter = indexTable.fieldKeyIterator(indexField);
ArrayList<IndexField> list = new ArrayList<>(20); ArrayList<IndexField> list = new ArrayList<>(20);
while (iter.hasNext()) { while (iter.hasNext()) {
IndexField f = (IndexField) iter.next(); IndexField f = (IndexField) iter.next();
if (!f.hasSameIndex(indexField)) { if (!f.hasSameIndexValue(indexField)) {
break; break;
} }
if (indexField.usesTruncatedFieldValue()) { if (indexField.usesTruncatedFieldValue()) {
@ -82,7 +96,7 @@ public class FieldIndexTable extends IndexTable {
} }
list.add(f); list.add(f);
} }
long[] keys = new long[list.size()]; Field[] keys = new Field[list.size()];
for (int i = 0; i < keys.length; i++) { for (int i = 0; i < keys.length; i++) {
IndexField f = list.get(i); IndexField f = list.get(i);
keys[i] = f.getPrimaryKey(); keys[i] = f.getPrimaryKey();
@ -90,55 +104,37 @@ public class FieldIndexTable extends IndexTable {
return keys; return keys;
} }
/*
* @see ghidra.framework.store.db.IndexTable#getKeyCount(ghidra.framework.store.db.Field)
*/
@Override @Override
int getKeyCount(Field indexValue) throws IOException { int getKeyCount(Field indexValue) throws IOException {
return findPrimaryKeys(indexValue).length; return findPrimaryKeys(indexValue).length;
} }
/*
* @see ghidra.framework.store.db.IndexTable#addEntry(ghidra.framework.store.db.Record)
*/
@Override @Override
void addEntry(Record record) throws IOException { void addEntry(Record record) throws IOException {
Field indexedField = record.getField(colIndex); Field indexedField = record.getField(colIndex);
IndexField f = IndexField.getIndexField(indexedField, record.getKey()); IndexField f = indexKeyType.newIndexField(indexedField, record.getKeyField());
Record rec = indexSchema.createRecord(f); Record rec = indexTable.getSchema().createRecord(f);
indexTable.putRecord(rec); indexTable.putRecord(rec);
} }
/*
* @see ghidra.framework.store.db.IndexTable#deleteEntry(ghidra.framework.store.db.Record)
*/
@Override @Override
void deleteEntry(Record record) throws IOException { void deleteEntry(Record record) throws IOException {
Field indexedField = record.getField(colIndex); Field indexedField = record.getField(colIndex);
IndexField f = IndexField.getIndexField(indexedField, record.getKey()); IndexField f = indexKeyType.newIndexField(indexedField, record.getKeyField());
indexTable.deleteRecord(f); indexTable.deleteRecord(f);
} }
/*
* @see ghidra.framework.store.db.IndexTable#indexIterator()
*/
@Override @Override
DBFieldIterator indexIterator() throws IOException { DBFieldIterator indexIterator() throws IOException {
return new IndexFieldIterator(); return new IndexFieldIterator();
} }
/*
* @see ghidra.framework.store.db.IndexTable#indexIterator(ghidra.framework.store.db.Field, ghidra.framework.store.db.Field, boolean)
*/
@Override @Override
DBFieldIterator indexIterator(Field minField, Field maxField, boolean before) DBFieldIterator indexIterator(Field minField, Field maxField, boolean before)
throws IOException { throws IOException {
return new IndexFieldIterator(minField, maxField, before); return new IndexFieldIterator(minField, maxField, before);
} }
/*
* @see db.IndexTable#indexIterator(db.Field, db.Field, db.Field, boolean)
*/
@Override @Override
DBFieldIterator indexIterator(Field minField, Field maxField, Field startField, boolean before) DBFieldIterator indexIterator(Field minField, Field maxField, Field startField, boolean before)
throws IOException { throws IOException {
@ -161,19 +157,19 @@ public class FieldIndexTable extends IndexTable {
/** /**
* Construct an index field iterator starting with the minimum index value. * Construct an index field iterator starting with the minimum index value.
* @throws IOException an IO error occurred
*/ */
IndexFieldIterator() throws IOException { IndexFieldIterator() throws IOException {
this(null, null, true); this(null, null, true);
} }
/** /**
* Construct an index field iterator. The iterator is positioned at index * Construct an index field iterator.
* value identified by startValue.
* @param minValue minimum index value or null if no minimum * @param minValue minimum index value or null if no minimum
* @param maxValue maximum index value or null if no maximum * @param maxValue maximum index value or null if no maximum
* @param before if true initial position is before minValue, else position * @param before if true initial position is before minValue, else position
* after maxValue * after maxValue
* @throws IOException * @throws IOException an IO error occurred
*/ */
IndexFieldIterator(Field minValue, Field maxValue, boolean before) throws IOException { IndexFieldIterator(Field minValue, Field maxValue, boolean before) throws IOException {
@ -182,8 +178,13 @@ public class FieldIndexTable extends IndexTable {
"Due to potential truncation issues, operation not permitted on variable length fields"); "Due to potential truncation issues, operation not permitted on variable length fields");
} }
min = minValue != null ? IndexField.getIndexField(minValue, Long.MIN_VALUE) : null; Field primaryKeyType = getPrimaryTableKeyType();
max = maxValue != null ? IndexField.getIndexField(maxValue, Long.MAX_VALUE) : null; min = minValue != null
? indexKeyType.newIndexField(minValue, primaryKeyType.getMinValue())
: null;
max = maxValue != null
? indexKeyType.newIndexField(maxValue, primaryKeyType.getMaxValue())
: null;
IndexField start = null; IndexField start = null;
if (before && minValue != null) { if (before && minValue != null) {
@ -209,13 +210,16 @@ public class FieldIndexTable extends IndexTable {
} }
/** /**
* @param minField * Construct an index field iterator. The iterator is positioned at index
* @param maxField * value identified by startValue.
* @param startField * @param minValue minimum index value or null if no minimum
* @param before * @param maxValue maximum index value or null if no maximum
* @throws IOException * @param startValue initial index value position
* @param before if true initial position is before minValue, else position
* after maxValue
* @throws IOException an IO error occurred
*/ */
public IndexFieldIterator(Field minValue, Field maxValue, Field startValue, boolean before) IndexFieldIterator(Field minValue, Field maxValue, Field startValue, boolean before)
throws IOException { throws IOException {
if (primaryTable.getSchema().getField(indexColumn).isVariableLength()) { if (primaryTable.getSchema().getField(indexColumn).isVariableLength()) {
@ -226,17 +230,23 @@ public class FieldIndexTable extends IndexTable {
if (startValue == null) { if (startValue == null) {
throw new IllegalArgumentException("starting index value required"); throw new IllegalArgumentException("starting index value required");
} }
min = minValue != null ? IndexField.getIndexField(minValue, Long.MIN_VALUE) : null;
max = maxValue != null ? IndexField.getIndexField(maxValue, Long.MAX_VALUE) : null;
IndexField start = Field primaryKeyType = getPrimaryTableKeyType();
IndexField.getIndexField(startValue, before ? Long.MIN_VALUE : Long.MAX_VALUE); min = minValue != null
? indexKeyType.newIndexField(minValue, primaryKeyType.getMinValue())
: null;
max = maxValue != null
? indexKeyType.newIndexField(maxValue, primaryKeyType.getMaxValue())
: null;
IndexField start = indexKeyType.newIndexField(startValue,
before ? primaryKeyType.getMinValue() : primaryKeyType.getMaxValue());
indexIterator = indexTable.fieldKeyIterator(min, max, start); indexIterator = indexTable.fieldKeyIterator(min, max, start);
if (indexIterator.hasNext()) { if (indexIterator.hasNext()) {
IndexField f = (IndexField) indexIterator.next(); IndexField f = (IndexField) indexIterator.next();
if (before || !f.getIndexField().equals(startValue)) { if (before || !f.getIndexedField().equals(startValue)) {
indexIterator.previous(); indexIterator.previous();
} }
} }
@ -250,11 +260,12 @@ public class FieldIndexTable extends IndexTable {
hasPrev = false; // TODO ??? hasPrev = false; // TODO ???
indexKey = (IndexField) indexIterator.next(); indexKey = (IndexField) indexIterator.next();
int skipCnt = 0; int skipCnt = 0;
while (indexKey != null && indexKey.hasSameIndex(lastKey)) { while (indexKey != null && indexKey.hasSameIndexValue(lastKey)) {
if (++skipCnt > 10) { if (++skipCnt > 10) {
// Reinit iterator to skip large number of same index value // Reinit iterator to skip large number of same index value
indexIterator = indexTable.fieldKeyIterator(min, max, indexIterator =
IndexField.getIndexField(indexKey.getIndexField(), Long.MAX_VALUE)); indexTable.fieldKeyIterator(min, max, indexKeyType.newIndexField(
indexKey.getIndexedField(), getPrimaryTableKeyType().getMaxValue()));
skipCnt = 0; skipCnt = 0;
} }
indexKey = (IndexField) indexIterator.next(); indexKey = (IndexField) indexIterator.next();
@ -276,11 +287,12 @@ public class FieldIndexTable extends IndexTable {
hasNext = false; // TODO ??? hasNext = false; // TODO ???
indexKey = (IndexField) indexIterator.previous(); indexKey = (IndexField) indexIterator.previous();
int skipCnt = 0; int skipCnt = 0;
while (indexKey != null && indexKey.hasSameIndex(lastKey)) { while (indexKey != null && indexKey.hasSameIndexValue(lastKey)) {
if (++skipCnt > 10) { if (++skipCnt > 10) {
// Reinit iterator to skip large number of same index value // Reinit iterator to skip large number of same index value
indexIterator = indexTable.fieldKeyIterator(min, max, indexIterator =
IndexField.getIndexField(indexKey.getIndexField(), Long.MIN_VALUE)); indexTable.fieldKeyIterator(min, max, indexKeyType.newIndexField(
indexKey.getIndexedField(), getPrimaryTableKeyType().getMinValue()));
skipCnt = 0; skipCnt = 0;
} }
indexKey = (IndexField) indexIterator.previous(); indexKey = (IndexField) indexIterator.previous();
@ -300,8 +312,7 @@ public class FieldIndexTable extends IndexTable {
hasNext = false; hasNext = false;
hasPrev = true; hasPrev = true;
lastKey = indexKey; lastKey = indexKey;
Field f = indexKey.getIndexField(); return indexKey.getIndexedField();
return f.newField(f);
} }
return null; return null;
} }
@ -312,8 +323,7 @@ public class FieldIndexTable extends IndexTable {
hasNext = true; hasNext = true;
hasPrev = false; hasPrev = false;
lastKey = indexKey; lastKey = indexKey;
Field f = indexKey.getIndexField(); return indexKey.getIndexedField();
return f.newField(f);
} }
return null; return null;
} }
@ -329,8 +339,8 @@ public class FieldIndexTable extends IndexTable {
return false; return false;
} }
synchronized (db) { synchronized (db) {
long[] keys = findPrimaryKeys(lastKey.getIndexField()); Field[] keys = findPrimaryKeys(lastKey.getIndexedField());
for (long key : keys) { for (Field key : keys) {
primaryTable.deleteRecord(key); primaryTable.deleteRecord(key);
} }
lastKey = null; lastKey = null;
@ -339,16 +349,14 @@ public class FieldIndexTable extends IndexTable {
} }
} }
/* (non-Javadoc)
* @see ghidra.framework.store.db.IndexTable#hasRecord(ghidra.framework.store.db.Field)
*/
@Override @Override
boolean hasRecord(Field field) throws IOException { boolean hasRecord(Field field) throws IOException {
IndexField indexField = IndexField.getIndexField(field, Long.MIN_VALUE); IndexField indexField =
indexKeyType.newIndexField(field, getPrimaryTableKeyType().getMinValue());
DBFieldIterator iter = indexTable.fieldKeyIterator(indexField); DBFieldIterator iter = indexTable.fieldKeyIterator(indexField);
while (iter.hasNext()) { while (iter.hasNext()) {
IndexField f = (IndexField) iter.next(); IndexField f = (IndexField) iter.next();
if (!f.hasSameIndex(indexField)) { if (!f.hasSameIndexValue(indexField)) {
return false; return false;
} }
if (indexField.usesTruncatedFieldValue()) { if (indexField.usesTruncatedFieldValue()) {
@ -364,109 +372,54 @@ public class FieldIndexTable extends IndexTable {
return false; return false;
} }
/**
* Iterate over all primary keys sorted based upon the associated index key.
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
@Override @Override
DBLongIterator keyIterator() throws IOException { DBFieldIterator keyIterator() throws IOException {
return new PrimaryKeyIterator(); return new PrimaryKeyIterator();
} }
/**
* Iterate over all primary keys sorted based upon the associated index key.
* The iterator is initially positioned before the first index buffer whose index key
* is greater than or equal to the specified startField value.
* @param startField index key value which determines initial position of iterator
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
@Override @Override
DBLongIterator keyIteratorBefore(Field startField) throws IOException { DBFieldIterator keyIteratorBefore(Field startField) throws IOException {
return new PrimaryKeyIterator(startField, false); return new PrimaryKeyIterator(startField, false);
} }
/**
* Iterate over all primary keys sorted based upon the associated index key.
* The iterator is initially positioned after the index buffer whose index key
* is equal to the specified startField value or immediately before the first
* index buffer whose index key is greater than the specified startField value.
* @param startField index key value which determines initial position of iterator
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
@Override @Override
DBLongIterator keyIteratorAfter(Field startField) throws IOException { DBFieldIterator keyIteratorAfter(Field startField) throws IOException {
return new PrimaryKeyIterator(startField, true); return new PrimaryKeyIterator(startField, true);
} }
/**
* Iterate over all primary keys sorted based upon the associated index key.
* The iterator is initially positioned before the primaryKey within the index buffer
* whose index key is equal to the specified startField value or immediately before the first
* index buffer whose index key is greater than the specified startField value.
* @param startField index key value which determines initial position of iterator
* @param primaryKey initial position within index buffer if index key matches startField value.
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
@Override @Override
DBLongIterator keyIteratorBefore(Field startField, long primaryKey) throws IOException { DBFieldIterator keyIteratorBefore(Field startField, Field primaryKey) throws IOException {
return new PrimaryKeyIterator(null, null, startField, primaryKey, false); return new PrimaryKeyIterator(null, null, startField, primaryKey, false);
} }
/**
* Iterate over all primary keys sorted based upon the associated index key.
* The iterator is initially positioned after the primaryKey within the index buffer
* whose index key is equal to the specified startField value or immediately before the first
* index buffer whose index key is greater than the specified startField value.
* @param startField index key value which determines initial position of iterator
* @param primaryKey initial position within index buffer if index key matches startField value.
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
@Override @Override
DBLongIterator keyIteratorAfter(Field startField, long primaryKey) throws IOException { DBFieldIterator keyIteratorAfter(Field startField, Field primaryKey) throws IOException {
return new PrimaryKeyIterator(null, null, startField, primaryKey, true); return new PrimaryKeyIterator(null, null, startField, primaryKey, true);
} }
/**
* Iterate over all primary keys sorted based upon the associated index key.
* The iterator is limited to range of index keys of startField through endField, inclusive.
* If atStart is true, the iterator is initially positioned before the first index
* buffer whose index key is greater than or equal to the specified startField value.
* If atStart is false, the iterator is initially positioned after the first index
* buffer whose index key is less than or equal to the specified endField value.
* @param startField minimum index key value
* @param endField maximum index key value
* @param atStart if true, position iterator before start value.
* Otherwise, position iterator after end value.
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
@Override @Override
DBLongIterator keyIterator(Field startField, Field endField, boolean atStart) DBFieldIterator keyIterator(Field startField, Field endField, boolean atStart)
throws IOException { throws IOException {
return new PrimaryKeyIterator(startField, endField, atStart ? startField : endField, return new PrimaryKeyIterator(startField, endField, atStart ? startField : endField,
atStart ? Long.MIN_VALUE : Long.MAX_VALUE, !atStart); atStart ? getPrimaryTableKeyType().getMinValue()
: getPrimaryTableKeyType().getMaxValue(),
!atStart);
} }
/**
* @see db.IndexTable#keyIterator(db.Field, db.Field, db.Field, boolean)
*/
@Override @Override
DBLongIterator keyIterator(Field minField, Field maxField, Field startField, boolean before) DBFieldIterator keyIterator(Field minField, Field maxField, Field startField, boolean before)
throws IOException { throws IOException {
return new PrimaryKeyIterator(minField, maxField, startField, return new PrimaryKeyIterator(minField, maxField, startField,
before ? Long.MIN_VALUE : Long.MAX_VALUE, !before); before ? getPrimaryTableKeyType().getMinValue()
: getPrimaryTableKeyType().getMaxValue(),
!before);
} }
/** /**
* Iterates over primary keys which correspond to index field values within a specified range. * Iterates over primary keys which correspond to index field values within a specified range.
* NOTE: Primary keys corresponding to index fields which have been truncated may be returned out of order. * NOTE: Primary keys corresponding to index fields which have been truncated may be returned out of order.
*/ */
private class PrimaryKeyIterator implements DBLongIterator { private class PrimaryKeyIterator implements DBFieldIterator {
private IndexField min; private IndexField min;
private IndexField max; private IndexField max;
@ -479,6 +432,7 @@ public class FieldIndexTable extends IndexTable {
/** /**
* Construct a key iterator starting with the minimum secondary key. * Construct a key iterator starting with the minimum secondary key.
* @throws IOException thrown if IO error occurs
*/ */
PrimaryKeyIterator() throws IOException { PrimaryKeyIterator() throws IOException {
indexIterator = indexTable.fieldKeyIterator(); indexIterator = indexTable.fieldKeyIterator();
@ -490,9 +444,12 @@ public class FieldIndexTable extends IndexTable {
* @param startValue indexed field value. * @param startValue indexed field value.
* @param after if true the iterator is positioned immediately after * @param after if true the iterator is positioned immediately after
* the last occurance of the specified startValue position. * the last occurance of the specified startValue position.
* @throws IOException thrown if IO error occurs
*/ */
PrimaryKeyIterator(Field startValue, boolean after) throws IOException { PrimaryKeyIterator(Field startValue, boolean after) throws IOException {
this(null, null, startValue, after ? Long.MAX_VALUE : Long.MIN_VALUE, after); this(null, null, startValue, after ? getPrimaryTableKeyType().getMaxValue()
: getPrimaryTableKeyType().getMinValue(),
after);
} }
/** /**
@ -505,13 +462,18 @@ public class FieldIndexTable extends IndexTable {
* @param after if true iterator is positioned immediately after * @param after if true iterator is positioned immediately after
* the startValue/primaryKey, * the startValue/primaryKey,
* otherwise immediately before. * otherwise immediately before.
* @throws IOException * @throws IOException thrown if IO error occurs
*/ */
PrimaryKeyIterator(Field minValue, Field maxValue, Field startValue, long primaryKey, PrimaryKeyIterator(Field minValue, Field maxValue, Field startValue, Field primaryKey,
boolean after) throws IOException { boolean after) throws IOException {
min = minValue != null ? IndexField.getIndexField(minValue, Long.MIN_VALUE) : null; Field primaryKeyType = getPrimaryTableKeyType();
max = maxValue != null ? IndexField.getIndexField(maxValue, Long.MAX_VALUE) : null; min = minValue != null
? indexKeyType.newIndexField(minValue, primaryKeyType.getMinValue())
: null;
max = maxValue != null
? indexKeyType.newIndexField(maxValue, primaryKeyType.getMaxValue())
: null;
IndexField start = null; IndexField start = null;
if (after && startValue == null && maxValue == null) { if (after && startValue == null && maxValue == null) {
@ -522,7 +484,7 @@ public class FieldIndexTable extends IndexTable {
} }
else { else {
start = start =
startValue != null ? IndexField.getIndexField(startValue, primaryKey) : null; startValue != null ? indexKeyType.newIndexField(startValue, primaryKey) : null;
indexIterator = indexTable.fieldKeyIterator(min, max, start); indexIterator = indexTable.fieldKeyIterator(min, max, start);
if (indexIterator.hasNext()) { if (indexIterator.hasNext()) {
Field f = indexIterator.next(); Field f = indexIterator.next();
@ -540,18 +502,18 @@ public class FieldIndexTable extends IndexTable {
* @return true if field value corresponding to f is outside the min/max range. * @return true if field value corresponding to f is outside the min/max range.
* It is assumed that the underlying table iterator will not return index values * It is assumed that the underlying table iterator will not return index values
* out of range which do not have the same truncated index value. * out of range which do not have the same truncated index value.
* @throws IOException * @throws IOException thrown if IO error occurs
*/ */
private boolean indexValueOutOfRange(IndexField f) throws IOException { private boolean indexValueOutOfRange(IndexField f) throws IOException {
Field val = null; Field val = null;
if (min != null && min.usesTruncatedFieldValue() && min.hasSameIndex(f)) { if (min != null && min.usesTruncatedFieldValue() && min.hasSameIndexValue(f)) {
Record rec = primaryTable.getRecord(f.getPrimaryKey()); Record rec = primaryTable.getRecord(f.getPrimaryKey());
val = rec.getField(indexColumn); val = rec.getField(indexColumn);
if (val.compareTo(min.getNonTruncatedIndexField()) < 0) { if (val.compareTo(min.getNonTruncatedIndexField()) < 0) {
return true; return true;
} }
} }
if (max != null && max.usesTruncatedFieldValue() && max.hasSameIndex(f)) { if (max != null && max.usesTruncatedFieldValue() && max.hasSameIndexValue(f)) {
if (val == null) { if (val == null) {
Record rec = primaryTable.getRecord(f.getPrimaryKey()); Record rec = primaryTable.getRecord(f.getPrimaryKey());
val = rec.getField(indexColumn); val = rec.getField(indexColumn);
@ -563,9 +525,6 @@ public class FieldIndexTable extends IndexTable {
return false; return false;
} }
/* (non-Javadoc)
* @see ghidra.framework.store.db.DBLongIterator#hasNext()
*/
@Override @Override
public boolean hasNext() throws IOException { public boolean hasNext() throws IOException {
if (hasNext) { if (hasNext) {
@ -582,9 +541,6 @@ public class FieldIndexTable extends IndexTable {
return hasNext; return hasNext;
} }
/* (non-Javadoc)
* @see ghidra.framework.store.db.DBLongIterator#hasPrevious()
*/
@Override @Override
public boolean hasPrevious() throws IOException { public boolean hasPrevious() throws IOException {
if (hasPrev) { if (hasPrev) {
@ -601,11 +557,8 @@ public class FieldIndexTable extends IndexTable {
return hasPrev; return hasPrev;
} }
/* (non-Javadoc)
* @see ghidra.framework.store.db.DBLongIterator#next()
*/
@Override @Override
public long next() throws IOException { public Field next() throws IOException {
if (hasNext()) { if (hasNext()) {
lastKey = key; lastKey = key;
hasNext = false; hasNext = false;
@ -614,11 +567,8 @@ public class FieldIndexTable extends IndexTable {
throw new NoSuchElementException(); throw new NoSuchElementException();
} }
/* (non-Javadoc)
* @see ghidra.framework.store.db.DBLongIterator#previous()
*/
@Override @Override
public long previous() throws IOException { public Field previous() throws IOException {
if (hasPrevious()) { if (hasPrevious()) {
lastKey = key; lastKey = key;
hasPrev = false; hasPrev = false;
@ -627,13 +577,10 @@ public class FieldIndexTable extends IndexTable {
throw new NoSuchElementException(); throw new NoSuchElementException();
} }
/* (non-Javadoc)
* @see ghidra.framework.store.db.DBLongIterator#delete()
*/
@Override @Override
public boolean delete() throws IOException { public boolean delete() throws IOException {
if (lastKey != null) { if (lastKey != null) {
long primaryKey = lastKey.getPrimaryKey(); Field primaryKey = lastKey.getPrimaryKey();
lastKey = null; lastKey = null;
return primaryTable.deleteRecord(primaryKey); return primaryTable.deleteRecord(primaryKey);
} }

View file

@ -0,0 +1,35 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
/**
* <code>FieldKeyInteriorNode</code> defines a common interface for {@link FieldKeyNode}
* implementations which are also an {@link InteriorNode}.
*/
public interface FieldKeyInteriorNode extends InteriorNode, FieldKeyNode {
/**
* Callback method for when a child node's leftmost key changes.
* @param oldKey previous leftmost key.
* @param newKey new leftmost key.
* @param childNode child node containing oldKey (null if not a VarKeyNode)
* @throws IOException if IO error occurs
*/
void keyChanged(Field oldKey, Field newKey, FieldKeyNode childNode) throws IOException;
}

View file

@ -0,0 +1,65 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
/**
 * <code>FieldKeyNode</code> defines a common interface for {@link BTreeNode} 
 * implementations which utilize a {@link Field} key.
 */
interface FieldKeyNode extends BTreeNode {

	// NOTE: interface methods are implicitly public and abstract (JLS 9.4);
	// redundant modifiers have been removed for consistency.

	/**
	 * @return the parent node or null if this is the root
	 */
	@Override
	FieldKeyInteriorNode getParent();

	/**
	 * Get the leaf node which contains the specified key.
	 * @param key key value
	 * @return leaf node
	 * @throws IOException thrown if an IO error occurs
	 */
	FieldKeyRecordNode getLeafNode(Field key) throws IOException;

	/**
	 * Get the left-most leaf node within the tree.
	 * @return left-most leaf node.
	 * @throws IOException thrown if IO error occurs
	 */
	FieldKeyRecordNode getLeftmostLeafNode() throws IOException;

	/**
	 * Get the right-most leaf node within the tree.
	 * @return right-most leaf node.
	 * @throws IOException thrown if IO error occurs
	 */
	FieldKeyRecordNode getRightmostLeafNode() throws IOException;

	/**
	 * Performs a fast in-place key comparison of the specified key
	 * value with a key stored within this node at the specified keyIndex.
	 * @param k key value to be compared
	 * @param keyIndex key index to another key within this node's buffer
	 * @return comparison value, zero if equal, -1 if k has a value less than
	 * the stored key, or +1 if k has a value greater than the stored key located
	 * at keyIndex.
	 */
	int compareKeyField(Field k, int keyIndex);
}

View file

@ -0,0 +1,145 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
/**
 * <code>FieldKeyRecordNode</code> defines a common interface for {@link FieldKeyNode} 
 * implementations which are also a {@link RecordNode} (i.e., leaf node).
 */
interface FieldKeyRecordNode extends RecordNode, FieldKeyNode {

	/**
	 * Get the record located at the specified index.
	 * @param schema record data schema
	 * @param index key index
	 * @return Record
	 * @throws IOException thrown if IO error occurs
	 */
	Record getRecord(Schema schema, int index) throws IOException;

	/**
	 * Insert or Update a record.
	 * @param record data record with {@link Field} key
	 * @param table table which will be notified when record is inserted or updated.
	 * @return root node which may have changed.
	 * @throws IOException thrown if IO error occurs
	 */
	FieldKeyNode putRecord(Record record, Table table) throws IOException;

	/**
	 * Remove the record identified by index.
	 * This will never be the last record within the node.
	 * @param index record index
	 * @throws IOException thrown if IO error occurs
	 */
	void remove(int index) throws IOException;

	/**
	 * Determine if this record node has a right sibling.
	 * @return true if right sibling exists
	 * @throws IOException if IO error occurs
	 */
	boolean hasNextLeaf() throws IOException;

	/**
	 * Get this leaf node's right sibling
	 * @return this leaf node's right sibling or null if right sibling does not exist.
	 * @throws IOException if an IO error occurs
	 */
	FieldKeyRecordNode getNextLeaf() throws IOException;

	/**
	 * Determine if this record node has a left sibling.
	 * @return true if left sibling exists
	 * @throws IOException if IO error occurs
	 */
	boolean hasPreviousLeaf() throws IOException;

	/**
	 * Get this leaf node's left sibling
	 * @return this leaf node's left sibling or null if left sibling does not exist.
	 * @throws IOException if an IO error occurs
	 */
	FieldKeyRecordNode getPreviousLeaf() throws IOException;

	/**
	 * Remove this leaf from the tree.
	 * @return root node which may have changed.
	 * @throws IOException thrown if IO error occurs
	 */
	FieldKeyNode removeLeaf() throws IOException;

	/**
	 * Delete the record identified by the specified key.
	 * @param key record key
	 * @param table table which will be notified when record is deleted.
	 * @return root node which may have changed.
	 * @throws IOException thrown if IO error occurs
	 */
	FieldKeyNode deleteRecord(Field key, Table table) throws IOException;

	/**
	 * Get the record with the minimum key value which is greater than or equal 
	 * to the specified key.
	 * @param key search key
	 * @param schema record data schema
	 * @return Record requested or null if record not found.
	 * @throws IOException thrown if IO error occurs
	 */
	Record getRecordAtOrAfter(Field key, Schema schema) throws IOException;

	/**
	 * Get the record with the maximum key value which is less than or equal 
	 * to the specified key.
	 * @param key search key
	 * @param schema record data schema
	 * @return Record requested or null if record not found.
	 * @throws IOException thrown if IO error occurs
	 */
	Record getRecordAtOrBefore(Field key, Schema schema) throws IOException;

	/**
	 * Get the record with the minimum key value which is greater than 
	 * the specified key.
	 * @param key search key
	 * @param schema record data schema
	 * @return Record requested or null if record not found.
	 * @throws IOException thrown if IO error occurs
	 */
	Record getRecordAfter(Field key, Schema schema) throws IOException;

	/**
	 * Get the record with the maximum key value which is less than 
	 * the specified key.
	 * @param key search key
	 * @param schema record data schema
	 * @return Record requested or null if record not found.
	 * @throws IOException thrown if IO error occurs
	 */
	Record getRecordBefore(Field key, Schema schema) throws IOException;

	/**
	 * Get the record identified by the specified key.
	 * @param key search key
	 * @param schema record data schema
	 * @return Record requested or null if record not found.
	 * @throws IOException thrown if IO error occurs
	 */
	Record getRecord(Field key, Schema schema) throws IOException;
}

View file

@ -0,0 +1,55 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
/**
 * <code>FixedField</code> provides an abstract implementation of a fixed-length
 * binary field.
 */
public abstract class FixedField extends BinaryField {

	/**
	 * Construct a fixed-length field
	 * @param data initial value
	 * @param immutable true if field value is immutable
	 */
	FixedField(byte[] data, boolean immutable) {
		super(data, immutable);
	}

	/**
	 * A fixed-length field never has a variable length.
	 * @return false always
	 */
	@Override
	public final boolean isVariableLength() {
		return false;
	}

	/**
	 * Truncation is not permitted since the field length may never change.
	 * @param length truncated length (ignored)
	 * @throws UnsupportedOperationException always
	 */
	@Override
	void truncate(int length) {
		throw new UnsupportedOperationException("Field may not be truncated");
	}

	/**
	 * @return a copy of this field instance (narrowed return type)
	 */
	@Override
	public abstract FixedField copyField();

	/**
	 * @return a new, empty instance of this field type (narrowed return type)
	 */
	@Override
	public abstract FixedField newField();

	/**
	 * @return the minimum value for this fixed-length field type
	 */
	@Override
	abstract FixedField getMinValue();

	/**
	 * @return the maximum value for this fixed-length field type
	 */
	@Override
	abstract FixedField getMaxValue();
}

View file

@ -0,0 +1,215 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
import db.buffers.DataBuffer;
import generic.util.UnsignedDataUtils;
import ghidra.util.BigEndianDataConverter;
/**
 * <code>FixedField10</code> is a 10-byte fixed-length binary field.
 */
public class FixedField10 extends FixedField {

	/**
	 * Minimum 10-byte fixed-length field value (all zero bytes)
	 */
	public static final FixedField10 MIN_VALUE = new FixedField10(0L, (short) 0, true);

	/**
	 * Maximum 10-byte fixed-length field value (all 0xff bytes)
	 */
	public static final FixedField10 MAX_VALUE = new FixedField10(-1L, (short) -1, true);

	/**
	 * Instance intended for defining a {@link Table} {@link Schema}
	 */
	@SuppressWarnings("hiding")
	public static final FixedField10 INSTANCE = MIN_VALUE;

	// This implementation uses both a data byte array and short+long variables
	// for data storage.  While the short+long is always available, the data
	// byte array is only set when needed or supplied during construction.
	// The use of the short+long is done to speed-up comparison with other
	// FixedField10 instances or directly from a DataBuffer.
	private short lo2; // low-order 2 bytes (big-endian, offset 8..9)
	private long hi8; // high-order 8 bytes (big-endian, offset 0..7)

	/**
	 * Construct a 10-byte fixed-length field with an initial value of 0.
	 */
	public FixedField10() {
		super(null, false);
	}

	/**
	 * Construct a 10-byte fixed-length field with an initial value of data.
	 * @param data initial 10-byte binary value
	 * @throws IllegalArgumentException thrown if data is not 10-bytes in length
	 */
	public FixedField10(byte[] data) {
		this(data, false);
	}

	/**
	 * Construct a 10-byte fixed-length binary field with an initial value of data.
	 * @param data initial 10-byte binary value
	 * @param immutable true if field value is immutable
	 * @throws IllegalArgumentException thrown if data is not 10-bytes in length
	 */
	public FixedField10(byte[] data, boolean immutable) {
		super(null, immutable);
		setBinaryData(data);
	}

	/**
	 * Internal constructor from the decomposed short+long representation.
	 * The data byte array is materialized lazily (see {@link #getBinaryData()}).
	 * @param hi8 high-order 8 bytes
	 * @param lo2 low-order 2 bytes
	 * @param immutable true if field value is immutable
	 */
	FixedField10(long hi8, short lo2, boolean immutable) {
		super(null, immutable);
		this.hi8 = hi8;
		this.lo2 = lo2;
	}

	@Override
	public int compareTo(Field o) {
		if (!(o instanceof FixedField10)) {
			throw new UnsupportedOperationException("may only compare similar Field types");
		}
		FixedField10 f = (FixedField10) o;
		// unsigned comparison, high-order bytes first
		if (hi8 != f.hi8) {
			return UnsignedDataUtils.unsignedLessThan(hi8, f.hi8) ? -1 : 1;
		}
		if (lo2 != f.lo2) {
			return UnsignedDataUtils.unsignedLessThan(lo2, f.lo2) ? -1 : 1;
		}
		return 0;
	}

	@Override
	int compareTo(DataBuffer buffer, int offset) {
		// fast-path comparison directly against serialized form in buffer
		long otherHi8 = buffer.getLong(offset);
		if (hi8 != otherHi8) {
			return UnsignedDataUtils.unsignedLessThan(hi8, otherHi8) ? -1 : 1;
		}
		short otherLo2 = buffer.getShort(offset + 8);
		if (lo2 != otherLo2) {
			return UnsignedDataUtils.unsignedLessThan(lo2, otherLo2) ? -1 : 1;
		}
		return 0;
	}

	@Override
	public FixedField copyField() {
		return new FixedField10(hi8, lo2, false);
	}

	@Override
	public FixedField newField() {
		return new FixedField10();
	}

	@Override
	FixedField getMinValue() {
		return MIN_VALUE;
	}

	@Override
	FixedField getMaxValue() {
		return MAX_VALUE;
	}

	@Override
	public byte[] getBinaryData() {
		// lazily materialize the byte array from hi8/lo2
		// NOTE(review): unsynchronized lazy init — presumably guarded by
		// higher-level db locking; confirm.
		if (data != null) {
			return data;
		}
		data = new byte[10];
		BigEndianDataConverter.INSTANCE.putLong(data, 0, hi8);
		BigEndianDataConverter.INSTANCE.putShort(data, 8, lo2);
		return data;
	}

	@Override
	public void setBinaryData(byte[] data) {
		if (data.length != 10) {
			throw new IllegalArgumentException("Invalid FixedField10 length: " + data.length);
		}
		this.data = data;
		hi8 = BigEndianDataConverter.INSTANCE.getLong(data, 0);
		lo2 = BigEndianDataConverter.INSTANCE.getShort(data, 8);
	}

	@Override
	byte getFieldType() {
		return FIXED_10_TYPE;
	}

	@Override
	int write(Buffer buf, int offset) throws IOException {
		if (data != null) {
			return buf.put(offset, data);
		}
		offset = buf.putLong(offset, hi8);
		return buf.putShort(offset, lo2);
	}

	@Override
	int read(Buffer buf, int offset) throws IOException {
		checkImmutable();
		data = null; // be lazy
		hi8 = buf.getLong(offset);
		lo2 = buf.getShort(offset + 8);
		return offset + 10;
	}

	@Override
	int readLength(Buffer buf, int offset) throws IOException {
		return 10;
	}

	@Override
	int length() {
		return 10;
	}

	@Override
	public int hashCode() {
		final int prime = 31;
		int result = (int) (hi8 ^ (hi8 >>> 32));
		result = prime * result + lo2;
		return result;
	}

	@Override
	public boolean equals(Object obj) {
		if (this == obj) {
			return true;
		}
		// null check required by the Object.equals contract; the original
		// implementation threw NPE for equals(null)
		if (obj == null || getClass() != obj.getClass()) {
			return false;
		}
		FixedField10 other = (FixedField10) obj;
		return hi8 == other.hi8 && lo2 == other.lo2;
	}

	@Override
	public String getValueAsString() {
		return "{" + BinaryField.getValueAsString(getBinaryData()) + "}";
	}
}

View file

@ -1,328 +0,0 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
import java.util.NoSuchElementException;
/**
 * The <code>FixedIndexTable</code> provides a secondary index on a fixed-length table column
 * (e.g., IntField, LongField, etc.).  For each unique secondary index value, an IndexBuffer is 
 * stored within an underlying index Table record.  The secondary index value is used as the long 
 * key to access this record.  Within a single IndexBuffer is stored all primary keys which 
 * correspond to an index value.
 */
class FixedIndexTable extends IndexTable {

	// Index table schema: one binary column per index record which holds the
	// serialized IndexBuffer (set of primary keys) for a secondary index value.
	private static final Class<?>[] fieldClasses = { BinaryField.class, // index data
	};

	private static final String[] fieldNames = { "IndexBuffer" };

	private static Schema indexSchema = new Schema(0, "IndexKey", fieldClasses, fieldNames);

	/**
	 * Construct a new secondary index which is based upon a field within the
	 * primary table specified by name.
	 * @param primaryTable primary table.
	 * @param colIndex identifies the indexed column within the primary table.
	 * @throws IOException thrown if an IO error occurs
	 */
	FixedIndexTable(Table primaryTable, int colIndex) throws IOException {
		this(primaryTable, primaryTable.getDBHandle().getMasterTable().createTableRecord(
			primaryTable.getName(), indexSchema, colIndex));
	}

	/**
	 * Construct a new or existing secondary index. An existing index must have
	 * its root ID specified within the tableRecord.
	 * @param primaryTable primary table.
	 * @param indexTableRecord specifies the index parameters.
	 * @throws IOException thrown if an IO error occurs
	 */
	FixedIndexTable(Table primaryTable, TableRecord indexTableRecord) throws IOException {
		super(primaryTable, indexTableRecord);
	}

	/**
	 * Find all primary keys which correspond to the specified indexed field
	 * value.
	 * @param indexValue the field value to search for.
	 * @return list of primary keys
	 * @throws IOException thrown if an IO error occurs
	 */
	@Override
	long[] findPrimaryKeys(Field indexValue) throws IOException {
		if (!indexValue.getClass().equals(fieldType.getClass())) {
			throw new IllegalArgumentException("Incorrect indexed field type");
		}
		Record indexRecord = indexTable.getRecord(indexValue.getLongValue());
		if (indexRecord == null) {
			return emptyKeyArray;
		}
		IndexBuffer indexBuffer = new IndexBuffer(indexValue, indexRecord.getBinaryData(0));
		return indexBuffer.getPrimaryKeys();
	}

	/**
	 * Get the number of primary keys which correspond to the specified indexed field
	 * value.
	 * @param indexValue the field value to search for.
	 * @return key count
	 * @throws IOException thrown if an IO error occurs
	 */
	@Override
	int getKeyCount(Field indexValue) throws IOException {
		if (!indexValue.getClass().equals(fieldType.getClass())) {
			throw new IllegalArgumentException("Incorrect indexed field type");
		}
		Record indexRecord = indexTable.getRecord(indexValue.getLongValue());
		if (indexRecord == null) {
			return 0;
		}
		IndexBuffer indexBuffer = new IndexBuffer(indexValue, indexRecord.getBinaryData(0));
		return indexBuffer.keyCount;
	}

	/**
	 * Add an index entry for the specified primary table record.
	 * @see db.IndexTable#addEntry(db.Record)
	 */
	@Override
	void addEntry(Record record) throws IOException {
		Field indexField = record.getField(colIndex);
		long secondaryKey = indexField.getLongValue();
		Record indexRecord = indexTable.getRecord(secondaryKey);
		if (indexRecord == null) {
			indexRecord = indexSchema.createRecord(secondaryKey);
		}
		IndexBuffer indexBuffer = new IndexBuffer(indexField, indexRecord.getBinaryData(0));
		indexBuffer.addEntry(record.getKey());
		indexRecord.setBinaryData(0, indexBuffer.getData());
		indexTable.putRecord(indexRecord);
	}

	/**
	 * Delete the index entry for the specified primary table record; the
	 * index record itself is removed once its IndexBuffer becomes empty.
	 * @see db.IndexTable#deleteEntry(db.Record)
	 */
	@Override
	void deleteEntry(Record record) throws IOException {
		Field indexField = record.getField(colIndex);
		long secondaryKey = indexField.getLongValue();
		Record indexRecord = indexTable.getRecord(secondaryKey);
		if (indexRecord != null) {
			IndexBuffer indexBuffer = new IndexBuffer(indexField, indexRecord.getBinaryData(0));
			indexBuffer.deleteEntry(record.getKey());
			byte[] data = indexBuffer.getData();
			if (data == null) {
				indexTable.deleteRecord(secondaryKey);
			}
			else {
				indexRecord.setBinaryData(0, data);
				indexTable.putRecord(indexRecord);
			}
		}
	}

	/**
	 * Get the index buffer associated with the specified index key
	 * @param indexKey index key
	 * @return index buffer or null if not found
	 * @throws IOException thrown if IO error occurs
	 */
	private IndexBuffer getIndexBuffer(Field indexKey) throws IOException {
		Record indexRec = indexTable.getRecord(indexKey.getLongValue());
		return indexRec != null ? new IndexBuffer(indexKey, indexRec.getBinaryData(0)) : null;
	}

	/**
	 * Iterate over all index field values in ascending order.
	 * @see db.IndexTable#indexIterator()
	 */
	@Override
	DBFieldIterator indexIterator() throws IOException {
		return new IndexLongIterator();
	}

	/**
	 * Iterate over index field values within the specified range.
	 * @see db.IndexTable#indexIterator(db.Field, db.Field, boolean)
	 */
	@Override
	DBFieldIterator indexIterator(Field minField, Field maxField, boolean atMin)
			throws IOException {
		long min = minField != null ? minField.getLongValue() : Long.MIN_VALUE;
		long max = maxField != null ? maxField.getLongValue() : Long.MAX_VALUE;
		return new IndexLongIterator(min, max, atMin);
	}

	/**
	 * Iterate over index field values within the specified range, positioned
	 * relative to the specified start value.
	 * @see db.IndexTable#indexIterator(db.Field, db.Field, db.Field, boolean)
	 */
	@Override
	DBFieldIterator indexIterator(Field minField, Field maxField, Field startField, boolean before)
			throws IOException {
		if (startField == null) {
			throw new IllegalArgumentException("starting index value required");
		}
		long min = minField != null ? minField.getLongValue() : Long.MIN_VALUE;
		long max = maxField != null ? maxField.getLongValue() : Long.MAX_VALUE;
		return new IndexLongIterator(min, max, startField.getLongValue(), before);
	}

	/**
	 * Iterates over index field values within a specified range.
	 */
	class IndexLongIterator implements DBFieldIterator {

		private Field lastKey; // last key returned by next()/previous(); target of delete()
		private Field keyField; // key staged by hasNext()/hasPrevious()
		private DBLongIterator indexIterator;
		private boolean hasNext = false;
		private boolean hasPrev = false;

		/**
		 * Construct an index field iterator starting with the minimum index value.
		 * @throws IOException if an IO error occurs
		 */
		IndexLongIterator() throws IOException {
			indexIterator = indexTable.longKeyIterator();
		}

		/**
		 * Construct an index field iterator over the range [minValue, maxValue].
		 * @param minValue minimum index value
		 * @param maxValue maximum index value
		 * @param atMin if true initial position is before minValue, else position
		 * is after maxValue
		 * @throws IOException if an IO error occurs
		 */
		IndexLongIterator(long minValue, long maxValue, boolean atMin) throws IOException {
			long start = atMin ? minValue : maxValue;
			indexIterator = indexTable.longKeyIterator(minValue, maxValue, start);
			// position underlying iterator just outside the requested bound
			if (indexIterator.hasNext()) {
				indexIterator.next();
				if (atMin) {
					indexIterator.previous();
				}
			}
		}

		/**
		 * Construct an index field iterator over the range [minValue, maxValue],
		 * positioned relative to the specified start value.
		 * @param minValue minimum index value
		 * @param maxValue maximum index value
		 * @param start initial index position
		 * @param before if true initial position is before start, else after start
		 * @throws IOException if an IO error occurs
		 */
		public IndexLongIterator(long minValue, long maxValue, long start, boolean before)
				throws IOException {
			indexIterator = indexTable.longKeyIterator(minValue, maxValue, start);
			if (indexIterator.hasNext()) {
				long val = indexIterator.next();
				if (before || val != start) {
					indexIterator.previous();
				}
			}
		}

		@Override
		public boolean hasNext() throws IOException {
			if (hasNext) {
				return true;
			}
			// underlying long iterator signals exhaustion via NoSuchElementException
			try {
				long key = indexIterator.next();
				keyField = fieldType.newField();
				keyField.setLongValue(key);
				hasNext = true;
				hasPrev = false;
			}
			catch (NoSuchElementException e) {
				return false;
			}
			return true;
		}

		@Override
		public boolean hasPrevious() throws IOException {
			if (hasPrev) {
				return true;
			}
			try {
				long key = indexIterator.previous();
				keyField = fieldType.newField();
				keyField.setLongValue(key);
				hasNext = false;
				hasPrev = true;
			}
			catch (NoSuchElementException e) {
				return false;
			}
			return true;
		}

		@Override
		public Field next() throws IOException {
			if (hasNext || hasNext()) {
				hasNext = false;
				hasPrev = true;
				lastKey = keyField;
				return keyField;
			}
			return null;
		}

		@Override
		public Field previous() throws IOException {
			if (hasPrev || hasPrevious()) {
				hasNext = true;
				hasPrev = false;
				lastKey = keyField;
				return keyField;
			}
			return null;
		}

		/**
		 * Delete all primary records which have the current
		 * index value (lastKey).
		 * @see db.DBFieldIterator#delete()
		 */
		@Override
		public boolean delete() throws IOException {
			if (lastKey == null) {
				return false;
			}
			synchronized (db) {
				IndexBuffer indexBuf = getIndexBuffer(lastKey);
				if (indexBuf != null) {
					long[] keys = indexBuf.getPrimaryKeys();
					for (long key : keys) {
						primaryTable.deleteRecord(key);
					}
					// The following does not actually delete the index record since it
					// should already have been removed with the removal of all associated 
					// primary records.  Invoking this method allows the iterator to 
					// recover from the index table change.
					indexIterator.delete();
				}
				lastKey = null;
				return true;
			}
		}
	}
}

View file

@ -0,0 +1,199 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
import db.buffers.DataBuffer;
import ghidra.util.exception.AssertException;
/**
 * <code>FixedKeyFixedRecNode</code> is an implementation of a BTree leaf node
 * which utilizes fixed-length key values and stores fixed-length records.
 * <p>
 * This type of node has the following layout within a single DataBuffer 
 * (field size in bytes, where 'L' is the fixed length of the fixed-length 
 * key as specified by key type in associated Schema):
 * <pre>
 *   | NodeType(1) | KeyCount(4) | PrevLeafId(4) | NextLeafId(4) | Key0(L) | Rec0 | ...
 *
 *   | KeyN(L) | RecN |
 * </pre>
 */
class FixedKeyFixedRecNode extends FixedKeyRecordNode {

	private static final int HEADER_SIZE = RECORD_LEAF_HEADER_SIZE;

	private static final int ENTRY_BASE_OFFSET = HEADER_SIZE;

	private static final int[] EMPTY_ID_LIST = new int[0];

	private int entrySize; // keySize + recordLength
	private int recordLength; // fixed record length per associated Schema

	/**
	 * Construct an existing fixed-length key fixed-length record leaf node.
	 * @param nodeMgr table node manager instance
	 * @param buf node buffer
	 * @throws IOException if IO error occurs
	 */
	FixedKeyFixedRecNode(NodeMgr nodeMgr, DataBuffer buf) throws IOException {
		super(nodeMgr, buf);
		this.recordLength = nodeMgr.getTableSchema().getFixedLength();
		entrySize = keySize + recordLength;
	}

	/**
	 * Construct a new fixed-length key fixed-length record leaf node.
	 * @param nodeMgr table node manager instance
	 * @param prevLeafId node buffer id for previous leaf ( &lt; 0: no leaf)
	 * @param nextLeafId node buffer id for next leaf ( &lt; 0 : no leaf)
	 * @throws IOException if IO error occurs
	 */
	FixedKeyFixedRecNode(NodeMgr nodeMgr, int prevLeafId, int nextLeafId) throws IOException {
		super(nodeMgr, NodeMgr.FIXEDKEY_FIXED_REC_NODE, prevLeafId, nextLeafId);
		this.recordLength = nodeMgr.getTableSchema().getFixedLength();
		entrySize = keySize + recordLength;
	}

	@Override
	FixedKeyRecordNode createNewLeaf(int prevLeafId, int nextLeafId) throws IOException {
		return new FixedKeyFixedRecNode(nodeMgr, prevLeafId, nextLeafId);
	}

	@Override
	public int getKeyOffset(int index) {
		// key is stored at the start of each fixed-size entry
		return ENTRY_BASE_OFFSET + (index * entrySize);
	}

	/**
	 * Get the record offset within the buffer.  The entry begins with the key;
	 * callers add keySize to reach the record data.
	 * @param index key index
	 * @return record offset
	 */
	@Override
	public int getRecordOffset(int index) {
		return ENTRY_BASE_OFFSET + (index * entrySize);
	}

	/**
	 * Shift all entries (key and record) by one, starting with the entry at
	 * index through the last entry within the node.
	 * @param index the first entry index to be moved (0 &lt;= index &lt;= keyCount)
	 * @param rightShift shift right by one record if true, else shift left by
	 * one record.
	 */
	private void shiftRecords(int index, boolean rightShift) {

		// No movement needed for appended record
		if (index == keyCount) {
			return;
		}

		// Determine block to be moved
		int start = getRecordOffset(index);
		int end = getRecordOffset(keyCount);
		int len = end - start;

		// Move record data
		int offset = start + (rightShift ? entrySize : -entrySize);
		buffer.move(start, offset, len);
	}

	@Override
	public void remove(int index) {
		if (index < 0 || index >= keyCount) {
			throw new AssertException();
		}
		shiftRecords(index + 1, false);
		setKeyCount(keyCount - 1);
	}

	@Override
	boolean insertRecord(int index, Record record) throws IOException {

		// Verify that the buffer can accommodate another fixed-size entry
		if (keyCount == ((buffer.length() - HEADER_SIZE) / entrySize)) {
			return false; // insufficient space for record storage
		}

		// Make room for new record
		shiftRecords(index, true);

		// Store new record with its key at the start of the entry
		int offset = getRecordOffset(index);
		record.getKeyField().write(buffer, offset);
		record.write(buffer, offset + keySize);
		setKeyCount(keyCount + 1);

		return true;
	}

	@Override
	FixedKeyNode updateRecord(int index, Record record) throws IOException {
		// in-place overwrite - entry size never changes for fixed-length records
		int offset = getRecordOffset(index) + keySize;
		record.write(buffer, offset);
		return getRoot();
	}

	@Override
	public Record getRecord(Field key, Schema schema) throws IOException {
		int index = getKeyIndex(key);
		if (index < 0) {
			return null;
		}
		Record record = schema.createRecord(key);
		record.read(buffer, getRecordOffset(index) + keySize);
		return record;
	}

	@Override
	public Record getRecord(Schema schema, int index) throws IOException {
		Field key = getKeyField(index);
		Record record = schema.createRecord(key);
		record.read(buffer, getRecordOffset(index) + keySize);
		return record;
	}

	@Override
	void splitData(FixedKeyRecordNode newRightLeaf) {

		FixedKeyFixedRecNode rightNode = (FixedKeyFixedRecNode) newRightLeaf;

		// Upper half of entries moves to the new right sibling
		int splitIndex = keyCount / 2;
		int count = keyCount - splitIndex;
		int start = getRecordOffset(splitIndex);	// start of block to be moved
		int end = getRecordOffset(keyCount);	// end of block to be moved
		int splitLen = end - start;				// length of block to be moved

		// Copy data to new leaf node
		rightNode.buffer.copy(ENTRY_BASE_OFFSET, buffer, start, splitLen);

		// Adjust key counts
		setKeyCount(keyCount - count);
		rightNode.setKeyCount(count);
	}

	@Override
	public void delete() throws IOException {
		nodeMgr.deleteNode(this);
	}

	@Override
	public int[] getBufferReferences() {
		return EMPTY_ID_LIST;
	}
}

View file

@ -0,0 +1,610 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
import db.buffers.DataBuffer;
import ghidra.util.Msg;
import ghidra.util.exception.AssertException;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
/**
* <code>FixedKeyInteriorNode</code> stores a BTree node for use as an interior
* node when searching for Table records within the database. This type of node
* has the following layout within a single DataBuffer (field size in bytes,
* where 'L' is the fixed length of the fixed-length key as specified by
* key type in associated Schema):
* <pre>
* | NodeType(1) | KeyCount(4) | Key0(L) | ID0(4) | ... | KeyN(L) | IDN(4) |
* </pre>
*/
class FixedKeyInteriorNode extends FixedKeyNode implements FieldKeyInteriorNode {
private static final int BASE = FIXEDKEY_NODE_HEADER_SIZE;
private static final int ID_SIZE = 4; // int
private final int maxKeyCount;
private final int entrySize;
/**
* Construct an existing fixed-length key interior node.
* @param nodeMgr table node manager instance
* @param buf node buffer
* @throws IOException thrown if IO error occurs
*/
FixedKeyInteriorNode(NodeMgr nodeMgr, DataBuffer buf) throws IOException {
super(nodeMgr, buf);
entrySize = keySize + ID_SIZE;
maxKeyCount = (buffer.length() - BASE) / entrySize;
}
/**
* Construct a new fixed-length key interior node with two child nodes.
* @param nodeMgr table node manager.
* @param keyType key Field type
* @param key1 left child node left-most key
* @param id1 left child node buffer ID
* @param key2 right child node left-most key
* @param id2 right child node buffer ID
* @throws IOException thrown if IO error occurs
*/
FixedKeyInteriorNode(NodeMgr nodeMgr, Field keyType, byte[] key1, int id1, byte[] key2, int id2)
throws IOException {
super(nodeMgr, NodeMgr.FIXEDKEY_INTERIOR_NODE);
if (keySize != key1.length || keySize != key2.length) {
throw new IllegalArgumentException("mismatched fixed-length key sizes");
}
entrySize = keySize + ID_SIZE;
maxKeyCount = (buffer.length() - BASE) / entrySize;
setKeyCount(2);
// Store key and node ids
putEntry(0, key1, id1);
putEntry(1, key2, id2);
}
/**
* Construct a new empty fixed-length key interior node.
* Node must be initialized with a minimum of two keys.
* @param nodeMgr table node manager.
* @param keyType key Field type
* @throws IOException thrown if IO error occurs
*/
private FixedKeyInteriorNode(NodeMgr nodeMgr, Field keyType) throws IOException {
super(nodeMgr, NodeMgr.FIXEDKEY_INTERIOR_NODE);
entrySize = keySize + ID_SIZE;
maxKeyCount = (buffer.length() - BASE) / entrySize;
}
void logConsistencyError(String tableName, String msg, Throwable t) {
Msg.debug(this, "Consistency Error (" + tableName + "): " + msg);
Msg.debug(this, " parent.key[0]=" + BinaryField.getValueAsString(getKey(0)) +
" bufferID=" + getBufferId());
if (t != null) {
Msg.error(this, "Consistency Error (" + tableName + ")", t);
}
}
/**
 * Perform a consistency check of this interior node and, recursively, all of its
 * children. Problems are logged (not thrown) so that all inconsistencies are
 * reported in a single pass.
 * @param tableName table name used within log messages
 * @param monitor task monitor (checked for cancellation after each child)
 * @return true if this node and all of its children are consistent
 * @throws IOException if an IO error occurs while traversing children
 * @throws CancelledException if the monitor is cancelled
 */
@Override
public boolean isConsistent(String tableName, TaskMonitor monitor)
        throws IOException, CancelledException {
    boolean consistent = true;
    Field lastMinKey = null;
    Field lastMaxKey = null;
    for (int i = 0; i < keyCount; i++) {

        // Compare each key entry with the previous entries key-range
        Field key = getKeyField(i);
        if (lastMinKey != null && key.compareTo(lastMinKey) <= 0) {
            consistent = false;
            logConsistencyError(tableName,
                "child[" + i + "].minKey <= child[" + (i - 1) + "].minKey", null);
            Msg.debug(this, " child[" + i + "].minKey = " + key.getValueAsString() +
                " bufferID=" + getBufferId(i));
            Msg.debug(this, " child[" + (i - 1) + "].minKey = " +
                lastMinKey.getValueAsString() + " bufferID=" + getBufferId(i - 1));
        }
        else if (lastMaxKey != null && key.compareTo(lastMaxKey) <= 0) {
            consistent = false;
            logConsistencyError(tableName,
                "child[" + i + "].minKey <= child[" + (i - 1) + "].maxKey", null);
            Msg.debug(this, " child[" + i + "].minKey = " + key.getValueAsString() +
                " bufferID=" + getBufferId(i));
            Msg.debug(this, " child[" + (i - 1) + "].maxKey = " +
                lastMaxKey.getValueAsString() + " bufferID=" + getBufferId(i - 1));
        }

        lastMinKey = key;

        FixedKeyNode node = null;
        try {
            try {
                node = nodeMgr.getFixedKeyNode(getBufferId(i));
                node.parent = this;
            }
            catch (IOException e) {
                logConsistencyError(tableName, "failed to fetch child node: " + e.getMessage(),
                    e);
            }
            catch (RuntimeException e) {
                logConsistencyError(tableName, "failed to fetch child node: " + e.getMessage(),
                    e);
            }

            if (node == null) {
                consistent = false;
                lastMaxKey = key; // for lack of a better solution
                continue; // skip child
            }

            lastMaxKey = node.getKeyField(node.getKeyCount() - 1);

            // Verify key matchup between parent and child
            Field childKey0 = node.getKeyField(0);
            if (!key.equals(childKey0)) {
                consistent = false;
                logConsistencyError(tableName,
                    "parent key entry mismatch with child[" + i + "].minKey", null);
                // FIX: report the buffer ID of child[i] (previously printed
                // getBufferId(i - 1), which identified the wrong child node)
                Msg.debug(this, " child[" + i + "].minKey = " + childKey0.getValueAsString() +
                    " bufferID=" + getBufferId(i));
                Msg.debug(this, " parent key entry = " + key.getValueAsString());
            }

            consistent &= node.isConsistent(tableName, monitor);
            monitor.checkCanceled();
        }
        finally {
            if (node != null) {
                // Release nodes as we go - this is not the norm!
                nodeMgr.releaseReadOnlyNode(node.getBufferId());
            }
        }
    }
    monitor.checkCanceled();
    return consistent;
}
/**
 * Locate, via binary search, the child node whose key range contains the
 * specified key and return its buffer-ID index. Index 0 corresponds to the
 * implied left-most child, so the search covers stored keys 1..keyCount-1.
 * Each stored key identifies the minimum key contained within the
 * corresponding child node (interior or leaf).
 * @param key key to search for
 * @return int buffer ID index of child node. An existing positive index
 * value will always be returned.
 */
int getIdIndex(Field key) {
    int lo = 1;
    int hi = keyCount - 1;
    while (lo <= hi) {
        int mid = (lo + hi) >>> 1;
        int rc = compareKeyField(key, mid);
        if (rc == 0) {
            return mid;
        }
        if (rc > 0) {
            lo = mid + 1;
        }
        else {
            hi = mid - 1;
        }
    }
    // not matched exactly - hi now sits on the last key <= the search key
    return hi;
}
/**
 * Binary search for an exact key match.
 * @param key key to locate
 * @return key index if found; otherwise -(insertion point + 1)
 */
@Override
public int getKeyIndex(Field key) {
    int lo = 0;
    int hi = keyCount - 1;
    while (lo <= hi) {
        int mid = (lo + hi) >>> 1;
        int rc = compareKeyField(key, mid);
        if (rc == 0) {
            return mid;
        }
        if (rc > 0) {
            lo = mid + 1;
        }
        else {
            hi = mid - 1;
        }
    }
    return -(lo + 1);
}
/**
 * Read the fixed-length key stored at the specified entry index.
 * @param index key index
 * @return key bytes (copy)
 */
@Override
byte[] getKey(int index) {
    byte[] keyData = new byte[keySize];
    int keyOffset = BASE + (index * entrySize);
    buffer.get(keyOffset, keyData);
    return keyData;
}
/**
 * Compare the supplied key with the stored key at the specified index.
 * @param k key to compare
 * @param keyIndex stored key index
 * @return comparison result per {@link Field#compareTo}
 */
@Override
public int compareKeyField(Field k, int keyIndex) {
    int keyOffset = BASE + (keyIndex * entrySize);
    return k.compareTo(buffer, keyOffset);
}
/**
 * Overwrite the key stored at the specified index (buffer ID is untouched).
 * @param index key index
 * @param key key value
 */
private void putKey(int index, byte[] key) {
    int keyOffset = BASE + (index * entrySize);
    buffer.put(keyOffset, key);
}
/**
 * Get the child node buffer ID associated with the specified key index.
 * The ID is stored immediately after the fixed-length key within the entry.
 * @param index child key index
 * @return child node buffer ID
 */
private int getBufferId(int index) {
    int idOffset = BASE + (index * entrySize) + keySize;
    return buffer.getInt(idOffset);
}
/**
 * Store the child node entry (key and buffer ID) at the specified key index.
 * The entry at index is overwritten. Since each entry is a fixed length,
 * movement of existing entries is not necessary.
 * @param index child key index
 * @param key child node key
 * @param bufferId child node buffer ID
 */
private void putEntry(int index, byte[] key, int bufferId) {
    int entryOffset = BASE + (index * entrySize);
    buffer.put(entryOffset, key);
    buffer.putInt(entryOffset + keySize, bufferId);
}
/**
 * Insert a child node entry (key and buffer ID) at the specified key index.
 * All entries at and after index are shifted right to make room, and the
 * node key count is incremented.
 * @param index child key index
 * @param key child node key
 * @param bufferId child node buffer ID
 */
private void insertEntry(int index, byte[] key, int bufferId) {
    int insertOffset = BASE + (index * entrySize);
    int usedEnd = BASE + (keyCount * entrySize);
    // shift trailing entries right by one entry width
    buffer.move(insertOffset, insertOffset + entrySize, usedEnd - insertOffset);
    buffer.put(insertOffset, key);
    buffer.putInt(insertOffset + keySize, bufferId);
    setKeyCount(keyCount + 1);
}
/**
 * Delete the child node entry (key and buffer ID) at the specified key index.
 * All entries after index are shifted left and the node key count is
 * decremented. An interior node must retain at least two children, hence the
 * keyCount &lt; 3 guard.
 * @param index child key index
 */
private void deleteEntry(int index) {
    if (keyCount < 3 || index >= keyCount) {
        throw new AssertException();
    }
    int next = index + 1;
    if (next < keyCount) {
        // shift trailing entries left by one entry width
        int srcOffset = BASE + (next * entrySize);
        int usedEnd = BASE + (keyCount * entrySize);
        buffer.move(srcOffset, srcOffset - entrySize, usedEnd - srcOffset);
    }
    setKeyCount(keyCount - 1);
}
/**
 * Callback method for when a child node's leftmost key changes.
 * The corresponding entry is updated and, if it is this node's leftmost
 * entry, the change is propagated to the parent as well.
 * @param oldKey previous leftmost key.
 * @param newKeyData new leftmost key.
 */
void keyChanged(Field oldKey, byte[] newKeyData) {
    int keyIndex = getKeyIndex(oldKey);
    if (keyIndex < 0) {
        throw new AssertException();
    }
    putKey(keyIndex, newKeyData);
    if (keyIndex == 0 && parent != null) {
        parent.keyChanged(oldKey, newKeyData);
    }
}
/**
 * Field-based key change callback; delegates to the binary-data variant.
 * @param oldKey previous leftmost key
 * @param newKey new leftmost key
 * @param childNode affected child node (unused)
 * @throws IOException thrown if IO error occurs
 */
@Override
public void keyChanged(Field oldKey, Field newKey, FieldKeyNode childNode) throws IOException {
    byte[] newKeyData = newKey.getBinaryData();
    keyChanged(oldKey, newKeyData);
}
/**
 * Insert a new child node entry into this interior node.
 * @param id buffer ID of new child node (must be non-zero)
 * @param key leftmost key associated with new node.
 * @return root node which may have changed as a result of a split.
 * @throws IOException thrown if an IO error occurs
 */
FixedKeyNode insert(int id, Field key) throws IOException {

    // Split this node if full
    if (keyCount == maxKeyCount) {
        return split(key, id);
    }

    // Insert key into this node at its sorted position; key must not already exist
    int index = -(getKeyIndex(key) + 1);
    if (index < 0 || id == 0)
        throw new AssertException();
    byte[] keyData = key.getBinaryData();
    insertEntry(index, keyData, id);

    // If the new entry became the leftmost key, the previous leftmost key
    // (now at index 1) must be replaced by the new key within the parent
    if (index == 0 && parent != null) {
        parent.keyChanged(getKeyField(1), keyData);
    }
    return getRoot();
}
/**
 * Split this interior node and insert new child entry (key and buffer ID).
 * Assumes 3 or more child keys exist in this node.
 * @param newKey new child key
 * @param newId new child node's buffer ID
 * @return root node.
 * @throws IOException thrown if IO error occurs
 */
private FixedKeyNode split(Field newKey, int newId) throws IOException {

    // Create new interior node and move the upper half of the entries into it
    FixedKeyInteriorNode newNode = new FixedKeyInteriorNode(nodeMgr, keyType);
    moveKeysRight(this, newNode, keyCount / 2);

    // Insert new key/id into whichever half covers its key range
    Field rightKey = newNode.getKeyField(0);
    if (newKey.compareTo(rightKey) < 0) {
        insert(newId, newKey);
    }
    else {
        newNode.insert(newId, newKey);
    }

    if (parent != null) {
        // Ask parent to insert new node and return root
        return parent.insert(newNode.getBufferId(), rightKey);
    }

    // New parent node becomes root
    return new FixedKeyInteriorNode(nodeMgr, keyType, getKey(0), buffer.getId(),
        rightKey.getBinaryData(), newNode.getBufferId());
}
/**
 * Descend to the leaf node whose key range contains the specified key,
 * setting parent links along the way.
 * @param key search key
 * @return leaf record node containing the key's range
 * @throws IOException thrown if IO error occurs
 */
@Override
public FixedKeyRecordNode getLeafNode(Field key) throws IOException {
    int childId = getBufferId(getIdIndex(key));
    FixedKeyNode child = nodeMgr.getFixedKeyNode(childId);
    child.parent = this;
    return (FixedKeyRecordNode) child.getLeafNode(key);
}
/**
 * Descend through the first child to the leftmost leaf of this subtree.
 * @return leftmost leaf record node
 * @throws IOException thrown if IO error occurs
 */
@Override
public FieldKeyRecordNode getLeftmostLeafNode() throws IOException {
    return nodeMgr.getFixedKeyNode(getBufferId(0)).getLeftmostLeafNode();
}
/**
 * Descend through the last child to the rightmost leaf of this subtree.
 * @return rightmost leaf record node
 * @throws IOException thrown if IO error occurs
 */
@Override
public FieldKeyRecordNode getRightmostLeafNode() throws IOException {
    int lastIndex = keyCount - 1;
    return nodeMgr.getFixedKeyNode(getBufferId(lastIndex)).getRightmostLeafNode();
}
/**
 * Callback method allowing child node to remove itself from parent.
 * Rebalancing of the tree is performed if the interior node falls
 * below the half-full point.
 * @param key child node key
 * @return root node
 * @throws IOException thrown if IO error occurs
 */
FixedKeyNode deleteChild(Field key) throws IOException {

    int index = getKeyIndex(key);
    if (index < 0)
        throw new AssertException();

    // Handle elimination of this node: with only two children remaining,
    // the surviving sibling becomes the new root (only valid at the root)
    if (keyCount == 2) {
        if (parent != null)
            throw new AssertException();
        FixedKeyNode rootNode = nodeMgr.getFixedKeyNode(getBufferId(1 - index));
        rootNode.parent = null;
        nodeMgr.deleteNode(this);
        return rootNode;
    }

    // Delete child entry
    deleteEntry(index);
    if (index == 0 && parent != null) {
        // leftmost key changed - update parent's entry for this node
        parent.keyChanged(key, getKey(0));
    }

    // Rebalance this node via parent if one exists
    return (parent != null) ? parent.balanceChild(this) : this;
}
/**
 * Callback method allowing a child interior node to request balancing of its
 * content with its sibling nodes. Balancing is only done if the specified node
 * is half-full or less.
 * @param node child interior node
 * @return root node
 * @throws IOException thrown if IO error occurs
 */
private FixedKeyNode balanceChild(FixedKeyInteriorNode node) throws IOException {

    // Do nothing if node more than half full
    if (node.keyCount > maxKeyCount / 2) {
        return getRoot();
    }

    // balance with right sibling except if node corresponds to the right-most
    // key within this interior node - in that case balance with left sibling.
    int index = getIdIndex(node.getKeyField(0));
    if (index == (keyCount - 1)) {
        return balanceChild(
            (FixedKeyInteriorNode) nodeMgr.getFixedKeyNode(getBufferId(index - 1)), node);
    }
    return balanceChild(node,
        (FixedKeyInteriorNode) nodeMgr.getFixedKeyNode(getBufferId(index + 1)));
}
/**
 * Balance the entries contained within two adjacent child interior nodes.
 * One of the two nodes must be half-full or less.
 * This could result in the removal of a child node if entries will fit within
 * one node.
 * @param leftNode left child interior node
 * @param rightNode right child interior node
 * @return new root
 * @throws IOException thrown if an IO error occurs
 */
private FixedKeyNode balanceChild(FixedKeyInteriorNode leftNode, FixedKeyInteriorNode rightNode)
        throws IOException {

    Field rightKey = rightNode.getKeyField(0);
    int leftKeyCount = leftNode.keyCount;
    int rightKeyCount = rightNode.keyCount;
    int newLeftKeyCount = leftKeyCount + rightKeyCount;

    // Can right keys fit within left node
    if (newLeftKeyCount <= maxKeyCount) {
        // Right node is eliminated and all entries stored in left node
        moveKeysLeft(leftNode, rightNode, rightKeyCount);
        nodeMgr.deleteNode(rightNode);
        return deleteChild(rightKey);
    }

    // Otherwise redistribute entries so each node holds roughly half
    newLeftKeyCount = newLeftKeyCount / 2;
    if (newLeftKeyCount < leftKeyCount) {
        moveKeysRight(leftNode, rightNode, leftKeyCount - newLeftKeyCount);
    }
    else if (newLeftKeyCount > leftKeyCount) {
        moveKeysLeft(leftNode, rightNode, newLeftKeyCount - leftKeyCount);
    }

    // Right node's leftmost key changed - update this node's entry for it
    this.keyChanged(rightKey, rightNode.getKey(0));

    return getRoot();
}
/**
 * Move some (not all) of the entries from the left node into the right node.
 * @param leftNode entry source node (entries removed from its tail)
 * @param rightNode entry destination node (existing entries shifted right)
 * @param count number of entries to move
 */
private static void moveKeysRight(FixedKeyInteriorNode leftNode, FixedKeyInteriorNode rightNode,
        int count) {
    if (leftNode.keySize != rightNode.keySize) {
        throw new IllegalArgumentException("mismatched fixed key sizes");
    }
    int leftKeyCount = leftNode.keyCount;
    int rightKeyCount = rightNode.keyCount;
    int leftOffset = BASE + ((leftKeyCount - count) * leftNode.entrySize);
    int len = count * leftNode.entrySize;
    // make room at the head of the right node, then copy the tail of the left node into it
    rightNode.buffer.move(BASE, BASE + len, rightKeyCount * leftNode.entrySize);
    rightNode.buffer.copy(BASE, leftNode.buffer, leftOffset, len);
    leftNode.setKeyCount(leftKeyCount - count);
    rightNode.setKeyCount(rightKeyCount + count);
}
/**
 * Move some or all of the entries from the right node into the left node.
 * If all keys are moved, the caller is responsible for deleting the right
 * node.
 * @param leftNode entry destination node (entries appended at its tail)
 * @param rightNode entry source node
 * @param count number of entries to move
 */
private static void moveKeysLeft(FixedKeyInteriorNode leftNode, FixedKeyInteriorNode rightNode,
        int count) {
    if (leftNode.keySize != rightNode.keySize) {
        throw new IllegalArgumentException("mismatched fixed key sizes");
    }
    int leftKeyCount = leftNode.keyCount;
    int rightKeyCount = rightNode.keyCount;
    int leftOffset = BASE + (leftKeyCount * leftNode.entrySize);
    int len = count * leftNode.entrySize;
    leftNode.buffer.copy(leftOffset, rightNode.buffer, BASE, len);
    leftNode.setKeyCount(leftKeyCount + count);
    if (count < rightKeyCount) {
        // Only need to update right node if partial move - shift remaining entries to head
        rightKeyCount -= count;
        rightNode.buffer.move(BASE + len, BASE, rightKeyCount * leftNode.entrySize);
        rightNode.setKeyCount(rightKeyCount);
    }
}
/**
 * Recursively delete all child nodes of this subtree, then this node itself.
 * @throws IOException thrown if IO error occurs
 */
@Override
public void delete() throws IOException {
    for (int i = 0; i < keyCount; i++) {
        nodeMgr.getFixedKeyNode(getBufferId(i)).delete();
    }
    nodeMgr.deleteNode(this);
}
/**
 * Get the buffer IDs of all child nodes referenced by this node.
 * @return array of child node buffer IDs, in key order
 */
@Override
public int[] getBufferReferences() {
    int[] childIds = new int[keyCount];
    for (int i = 0; i < childIds.length; i++) {
        childIds[i] = getBufferId(i);
    }
    return childIds;
}
/**
 * Determine if the specified key falls within the range of the leftmost
 * child for every interior node up to the root.
 * @param key key to check
 * @return true if key maps to the leftmost child at every level above
 */
boolean isLeftmostKey(Field key) {
    if (getIdIndex(key) != 0) {
        return false;
    }
    return parent == null || parent.isLeftmostKey(key);
}
/**
 * Determine if the specified key falls within the range of the rightmost
 * child for every interior node up to the root.
 * @param key key to check
 * @return true if key maps to the rightmost child at every level above
 */
boolean isRightmostKey(Field key) {
    if (getIdIndex(key) != (keyCount - 1)) {
        return false;
    }
    // ask parent using this node's leftmost key, which identifies this node within it
    return parent == null || parent.isRightmostKey(getKeyField(0));
}
}

View file

@ -0,0 +1,136 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
import db.buffers.DataBuffer;
import ghidra.util.exception.AssertException;
/**
 * <code>FixedKeyNode</code> is an abstract implementation of a BTree node
 * which utilizes fixed-length key values.
 * <pre>
 * | NodeType(1) | KeyCount(4) | ...
 * </pre>
 */
abstract class FixedKeyNode implements FieldKeyNode {

    private static final int KEY_COUNT_SIZE = 4;
    private static final int KEY_COUNT_OFFSET = NodeMgr.NODE_HEADER_SIZE;

    // header size common to all fixed-key node types
    static final int FIXEDKEY_NODE_HEADER_SIZE = NodeMgr.NODE_HEADER_SIZE + KEY_COUNT_SIZE;

    protected final Field keyType; // key field prototype obtained from the table schema
    protected final int keySize; // fixed key length in bytes

    protected NodeMgr nodeMgr;
    protected DataBuffer buffer;
    protected FixedKeyInteriorNode parent; // null when this node is (assumed to be) the root
    protected int keyCount; // cached copy of the key count stored in the buffer

    /**
     * Construct an existing fixed-length key node.
     * @param nodeMgr table node manager instance
     * @param buf node buffer
     * @throws IOException thrown if IO error occurs
     */
    FixedKeyNode(NodeMgr nodeMgr, DataBuffer buf) throws IOException {
        this.nodeMgr = nodeMgr;
        buffer = buf;
        Schema schema = nodeMgr.getTableSchema();
        if (!schema.useFixedKeyNodes()) {
            throw new AssertException("unsupported schema");
        }
        keyType = schema.getKeyFieldType();
        keySize = keyType.length();
        keyCount = buffer.getInt(KEY_COUNT_OFFSET);
        // register with node manager last, after node state is established
        nodeMgr.addNode(this);
    }

    /**
     * Construct a new fixed-length key node backed by a newly allocated buffer.
     * @param nodeMgr table node manager.
     * @param nodeType node type
     * @throws IOException thrown if IO error occurs
     */
    FixedKeyNode(NodeMgr nodeMgr, byte nodeType) throws IOException {
        this.nodeMgr = nodeMgr;
        buffer = nodeMgr.getBufferMgr().createBuffer();
        NodeMgr.setNodeType(buffer, nodeType);
        Schema schema = nodeMgr.getTableSchema();
        if (!schema.useFixedKeyNodes()) {
            throw new AssertException("unsupported schema");
        }
        keyType = schema.getKeyFieldType();
        keySize = keyType.length();
        setKeyCount(0);
        // register with node manager last, after node state is established
        nodeMgr.addNode(this);
    }

    @Override
    public FixedKeyInteriorNode getParent() {
        return parent;
    }

    @Override
    public int getBufferId() {
        return buffer.getId();
    }

    @Override
    public DataBuffer getBuffer() {
        return buffer;
    }

    /**
     * Get the root for this node. If setParent has not been invoked, this node
     * is assumed to be the root.
     * @return root node
     */
    FixedKeyNode getRoot() {
        if (parent != null) {
            return parent.getRoot();
        }
        return this;
    }

    @Override
    public int getKeyCount() {
        return keyCount;
    }

    @Override
    public void setKeyCount(int cnt) {
        // keep cached count and buffer-stored count in sync
        keyCount = cnt;
        buffer.putInt(KEY_COUNT_OFFSET, keyCount);
    }

    /**
     * Get the key value at a specific index.
     * @param index key index
     * @return key value
     */
    abstract byte[] getKey(int index);

    @Override
    public final Field getKeyField(int index) {
        // wrap the raw key bytes in a Field of the schema's key type
        Field key = keyType.newField();
        key.setBinaryData(getKey(index));
        return key;
    }
}

View file

@ -0,0 +1,508 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
import db.buffers.DataBuffer;
import ghidra.util.Msg;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
/**
 * <code>FixedKeyRecordNode</code> is an abstract implementation of a BTree leaf node
 * which utilizes fixed-length binary key values and stores records.
 * <p>
 * This type of node has the following partial layout within a single DataBuffer
 * (field size in bytes):
 * <pre>
 * | NodeType(1) | KeyCount(4) | PrevLeafId(4) | NextLeafId(4) ...
 * </pre>
 */
abstract class FixedKeyRecordNode extends FixedKeyNode implements FieldKeyRecordNode {

    private static final int ID_SIZE = 4; // 4-byte buffer ID

    // sibling leaf buffer IDs stored immediately after the common fixed-key header
    private static final int PREV_LEAF_ID_OFFSET = FIXEDKEY_NODE_HEADER_SIZE;
    private static final int NEXT_LEAF_ID_OFFSET = PREV_LEAF_ID_OFFSET + ID_SIZE;

    static final int RECORD_LEAF_HEADER_SIZE = FIXEDKEY_NODE_HEADER_SIZE + (2 * ID_SIZE);

    /**
     * Construct an existing fixed-length key record leaf node.
     * @param nodeMgr table node manager instance
     * @param buf node buffer
     * @throws IOException thrown if an IO error occurs
     */
    FixedKeyRecordNode(NodeMgr nodeMgr, DataBuffer buf) throws IOException {
        super(nodeMgr, buf);
    }

    /**
     * Construct a new fixed-length key record leaf node.
     * @param nodeMgr table node manager instance
     * @param nodeType node type
     * @param prevLeafId node buffer id for previous leaf - left sibling ( &lt; 0: no leaf)
     * @param nextLeafId node buffer id for next leaf - right sibling ( &lt; 0 : no leaf)
     * @throws IOException thrown if an IO error occurs
     */
    FixedKeyRecordNode(NodeMgr nodeMgr, byte nodeType, int prevLeafId, int nextLeafId)
            throws IOException {
        super(nodeMgr, nodeType);

        // Initialize header
        buffer.putInt(PREV_LEAF_ID_OFFSET, prevLeafId);
        buffer.putInt(NEXT_LEAF_ID_OFFSET, nextLeafId);
    }

    /**
     * Log a consistency error for this leaf along with identifying context
     * (buffer ID and leftmost key).
     * @param tableName name of table being checked
     * @param msg error description
     * @param t optional cause; logged with stack trace when non-null
     */
    void logConsistencyError(String tableName, String msg, Throwable t) {
        Msg.debug(this, "Consistency Error (" + tableName + "): " + msg);
        Msg.debug(this,
            " bufferID=" + getBufferId() + " key[0]=" + BinaryField.getValueAsString(getKey(0)));
        if (t != null) {
            Msg.error(this, "Consistency Error (" + tableName + ")", t);
        }
    }

    @Override
    public boolean isConsistent(String tableName, TaskMonitor monitor)
            throws IOException, CancelledException {
        boolean consistent = true;
        Field prevKey = null;
        for (int i = 0; i < keyCount; i++) {
            // Compare each key entry with the previous key
            Field key = getKeyField(i);
            if (prevKey != null && key.compareTo(prevKey) <= 0) {
                consistent = false;
                logConsistencyError(tableName, "key[" + i + "] <= key[" + (i - 1) + "]", null);
                Msg.debug(this, " key[" + i + "].minKey = " + key.getValueAsString());
                Msg.debug(this, " key[" + (i - 1) + "].minKey = " + prevKey.getValueAsString());
            }
            prevKey = key;
        }

        // Verify sibling leaf links agree with this leaf's position within the tree
        Field key0 = getKeyField(0);
        if ((parent == null || parent.isLeftmostKey(key0)) && getPreviousLeaf() != null) {
            consistent = false;
            logConsistencyError(tableName, "previous-leaf should not exist", null);
        }

        FixedKeyRecordNode node = getNextLeaf();
        if (node != null) {
            if (parent == null || parent.isRightmostKey(key0)) {
                consistent = false;
                logConsistencyError(tableName, "next-leaf should not exist", null);
            }
            else {
                // next leaf must link back to this leaf
                FixedKeyRecordNode me = node.getPreviousLeaf();
                if (me != this) {
                    consistent = false;
                    logConsistencyError(tableName, "next-leaf is not linked to this leaf", null);
                }
            }
        }
        else if (parent != null && !parent.isRightmostKey(key0)) {
            consistent = false;
            logConsistencyError(tableName, "this leaf is not linked to next-leaf", null);
        }

        return consistent;
    }

    @Override
    byte[] getKey(int index) {
        byte[] key = new byte[keySize];
        buffer.get(getKeyOffset(index), key);
        return key;
    }

    @Override
    public int compareKeyField(Field k, int keyIndex) {
        return k.compareTo(buffer, getKeyOffset(keyIndex));
    }

    /**
     * Get the key offset within the node's data buffer
     * @param index key/record index
     * @return positive record offset within buffer
     */
    @Override
    public abstract int getKeyOffset(int index);

    @Override
    public FixedKeyRecordNode getLeafNode(Field key) throws IOException {
        // already a leaf - search terminates here
        return this;
    }

    @Override
    public FixedKeyRecordNode getLeftmostLeafNode() throws IOException {
        FixedKeyRecordNode leaf = getPreviousLeaf();
        return leaf != null ? leaf.getLeftmostLeafNode() : this;
    }

    @Override
    public FixedKeyRecordNode getRightmostLeafNode() throws IOException {
        FixedKeyRecordNode leaf = getNextLeaf();
        return leaf != null ? leaf.getRightmostLeafNode() : this;
    }

    @Override
    public boolean hasNextLeaf() throws IOException {
        int nextLeafId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
        return (nextLeafId >= 0);
    }

    @Override
    public FixedKeyRecordNode getNextLeaf() throws IOException {
        FixedKeyRecordNode leaf = null;
        int nextLeafId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
        if (nextLeafId >= 0) {
            leaf = (FixedKeyRecordNode) nodeMgr.getFixedKeyNode(nextLeafId);
        }
        return leaf;
    }

    @Override
    public boolean hasPreviousLeaf() throws IOException {
        int prevLeafId = buffer.getInt(PREV_LEAF_ID_OFFSET);
        return (prevLeafId >= 0);
    }

    @Override
    public FixedKeyRecordNode getPreviousLeaf() throws IOException {
        FixedKeyRecordNode leaf = null;
        int prevLeafId = buffer.getInt(PREV_LEAF_ID_OFFSET);
        if (prevLeafId >= 0) {
            leaf = (FixedKeyRecordNode) nodeMgr.getFixedKeyNode(prevLeafId);
        }
        return leaf;
    }

    @Override
    public int getKeyIndex(Field key) {
        // binary search; when not found return -(insertion point + 1)
        int min = 0;
        int max = keyCount - 1;

        while (min <= max) {
            int i = (min + max) / 2;
            int rc = compareKeyField(key, i);
            if (rc == 0) {
                return i;
            }
            else if (rc > 0) {
                min = i + 1;
            }
            else {
                max = i - 1;
            }
        }
        return -(min + 1);
    }

    /**
     * Split this leaf node in half and update tree.
     * When a split is performed, the next operation must be performed
     * from the root node since the tree may have been restructured.
     * @return root node which may have changed.
     * @throws IOException thrown if an IO error occurs
     */
    FixedKeyNode split() throws IOException {

        // Create new leaf and insert it into the sibling chain after this leaf
        int oldSiblingId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
        FixedKeyRecordNode newLeaf = createNewLeaf(buffer.getId(), oldSiblingId);
        DataBuffer newBuf = newLeaf.buffer;
        int newBufId = newBuf.getId();
        buffer.putInt(NEXT_LEAF_ID_OFFSET, newBufId);
        if (oldSiblingId >= 0) {
            // old right sibling must link back to the new leaf
            FixedKeyRecordNode leaf = (FixedKeyRecordNode) nodeMgr.getFixedKeyNode(oldSiblingId);
            leaf.buffer.putInt(PREV_LEAF_ID_OFFSET, newBufId);
        }

        // Split node creating two balanced leaves
        splitData(newLeaf);

        if (parent != null) {
            // Ask parent to insert new node and return root
            return parent.insert(newBufId, newLeaf.getKeyField(0));
        }

        // New parent node becomes root
        return new FixedKeyInteriorNode(nodeMgr, keyType, getKey(0), buffer.getId(),
            newLeaf.getKey(0), newBufId);
    }

    /**
     * Append a leaf which contains one or more keys and update tree. Leaf is inserted
     * as the new right sibling of this leaf.
     * @param leaf new right sibling leaf (must be same node type as this leaf)
     * @return root node which may have changed.
     * @throws IOException thrown if an IO error occurs
     */
    FixedKeyNode appendLeaf(FixedKeyRecordNode leaf) throws IOException {

        // Create new leaf and link
        leaf.buffer.putInt(PREV_LEAF_ID_OFFSET, buffer.getId());
        int rightLeafBufId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
        leaf.buffer.putInt(NEXT_LEAF_ID_OFFSET, rightLeafBufId);

        // Adjust this node
        int newBufId = leaf.buffer.getId();
        buffer.putInt(NEXT_LEAF_ID_OFFSET, newBufId);

        // Adjust old right node if present
        if (rightLeafBufId >= 0) {
            FixedKeyNode rightLeaf = nodeMgr.getFixedKeyNode(rightLeafBufId);
            rightLeaf.buffer.putInt(PREV_LEAF_ID_OFFSET, newBufId);
        }

        if (parent != null) {
            // Ask parent to insert new node and return root - leaf parent is unknown
            return parent.insert(newBufId, leaf.getKeyField(0));
        }

        // New parent node becomes root
        return new FixedKeyInteriorNode(nodeMgr, keyType, getKey(0), buffer.getId(), leaf.getKey(0),
            newBufId);
    }

    /**
     * Remove this leaf from the tree.
     * @return root node which may have changed.
     * @throws IOException thrown if IO error occurs
     */
    @Override
    public FixedKeyNode removeLeaf() throws IOException {

        Field key = getKeyField(0);

        // unlink this leaf from its siblings
        int prevBufferId = buffer.getInt(PREV_LEAF_ID_OFFSET);
        int nextBufferId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
        if (prevBufferId >= 0) {
            FixedKeyRecordNode prevNode =
                (FixedKeyRecordNode) nodeMgr.getFixedKeyNode(prevBufferId);
            prevNode.getBuffer().putInt(NEXT_LEAF_ID_OFFSET, nextBufferId);
        }
        if (nextBufferId >= 0) {
            FixedKeyRecordNode nextNode =
                (FixedKeyRecordNode) nodeMgr.getFixedKeyNode(nextBufferId);
            nextNode.getBuffer().putInt(PREV_LEAF_ID_OFFSET, prevBufferId);
        }

        nodeMgr.deleteNode(this);
        if (parent == null) {
            return null;
        }
        return parent.deleteChild(key);
    }

    /**
     * Split the contents of this leaf node; placing the right half of the records into the
     * empty leaf node provided.
     * @param newRightLeaf empty right sibling leaf
     */
    abstract void splitData(FixedKeyRecordNode newRightLeaf);

    /**
     * Create a new leaf and add to the node manager.
     * The new leaf's parent is unknown.
     * @param prevNodeId node buffer id for previous leaf - left sibling ( &lt; 0: no leaf)
     * @param nextNodeId node buffer id for next leaf - right sibling ( &lt; 0 : no leaf)
     * @return new leaf node.
     * @throws IOException thrown if IO error occurs
     */
    abstract FixedKeyRecordNode createNewLeaf(int prevNodeId, int nextNodeId) throws IOException;

    /**
     * Insert or update the specified record within this leaf.
     * @param record record to be stored
     * @param table table to be notified of insert/update (may be null)
     * @return root node which may have changed.
     * @throws IOException thrown if IO error occurs
     */
    @Override
    public FixedKeyNode putRecord(Record record, Table table) throws IOException {

        Field key = record.getKeyField();
        int index = getKeyIndex(key);

        // Handle record update case
        if (index >= 0) {
            if (table != null) {
                table.updatedRecord(getRecord(table.getSchema(), index), record);
            }
            FixedKeyNode newRoot = updateRecord(index, record);
            return newRoot;
        }

        // Handle new record - see if we have room in this leaf
        index = -index - 1; // convert encoded result to insertion point
        if (insertRecord(index, record)) {
            if (index == 0 && parent != null) {
                // new leftmost key - previous leftmost key is now at index 1
                parent.keyChanged(getKeyField(1), key, null);
            }
            if (table != null) {
                table.insertedRecord(record);
            }
            return getRoot();
        }

        // Special Case - append new leaf to right
        if (index == keyCount) {
            FixedKeyNode newRoot = appendNewLeaf(record);
            if (table != null) {
                table.insertedRecord(record);
            }
            return newRoot;
        }

        // Split leaf and complete insertion
        FixedKeyRecordNode leaf = (FixedKeyRecordNode) split().getLeafNode(key);
        return leaf.putRecord(record, table);
    }

    /**
     * Append a new leaf and insert the specified record.
     * @param record data record with long key
     * @return root node which may have changed.
     * @throws IOException thrown if IO error occurs
     */
    FixedKeyNode appendNewLeaf(Record record) throws IOException {
        FixedKeyRecordNode newLeaf = createNewLeaf(-1, -1);
        newLeaf.insertRecord(0, record);
        return appendLeaf(newLeaf);
    }

    /**
     * Delete the record identified by the specified key.
     * @param key record key
     * @param table table to be notified of delete (may be null)
     * @return root node which may have changed.
     * @throws IOException thrown if IO error occurs
     */
    @Override
    public FieldKeyNode deleteRecord(Field key, Table table) throws IOException {

        // Handle non-existent key - do nothing
        int index = getKeyIndex(key);
        if (index < 0) {
            return getRoot();
        }

        if (table != null) {
            table.deletedRecord(getRecord(table.getSchema(), index));
        }

        // Handle removal of last record in node
        if (keyCount == 1) {
            FixedKeyNode newRoot = removeLeaf();
            return newRoot;
        }

        // Remove record within this node
        remove(index);

        // Notify parent of leftmost key change
        if (index == 0 && parent != null) {
            parent.keyChanged(key, getKey(0));
        }

        return getRoot();
    }

    /**
     * Inserts the record at the given index if there is sufficient space in
     * the buffer.
     * @param index insertion index
     * @param record record to be inserted
     * @return true if the record was successfully inserted.
     * @throws IOException thrown if IO error occurs
     */
    abstract boolean insertRecord(int index, Record record) throws IOException;

    /**
     * Updates the record at the given index.
     * @param index record index
     * @param record new record
     * @return root node which may have changed.
     * @throws IOException thrown if IO error occurs
     */
    abstract FixedKeyNode updateRecord(int index, Record record) throws IOException;

    @Override
    public db.Record getRecordBefore(Field key, Schema schema) throws IOException {
        int index = getKeyIndex(key);
        if (index < 0) {
            // key not present - step to the entry before the insertion point
            index = -index - 2;
        }
        else {
            --index;
        }
        if (index < 0) {
            // walked off the front of this leaf - use last record of previous leaf
            FixedKeyRecordNode nextLeaf = getPreviousLeaf();
            return nextLeaf != null ? nextLeaf.getRecord(schema, nextLeaf.keyCount - 1) : null;
        }
        return getRecord(schema, index);
    }

    @Override
    public db.Record getRecordAfter(Field key, Schema schema) throws IOException {
        int index = getKeyIndex(key);
        if (index < 0) {
            // key not present - insertion point is already the next entry
            index = -(index + 1);
        }
        else {
            ++index;
        }
        if (index == keyCount) {
            // walked off the end of this leaf - use first record of next leaf
            FixedKeyRecordNode nextLeaf = getNextLeaf();
            return nextLeaf != null ? nextLeaf.getRecord(schema, 0) : null;
        }
        return getRecord(schema, index);
    }

    @Override
    public Record getRecordAtOrBefore(Field key, Schema schema) throws IOException {
        int index = getKeyIndex(key);
        if (index < 0) {
            // key not present - use the entry before the insertion point
            index = -index - 2;
        }
        if (index < 0) {
            FixedKeyRecordNode nextLeaf = getPreviousLeaf();
            return nextLeaf != null ? nextLeaf.getRecord(schema, nextLeaf.keyCount - 1) : null;
        }
        return getRecord(schema, index);
    }

    @Override
    public Record getRecordAtOrAfter(Field key, Schema schema) throws IOException {
        int index = getKeyIndex(key);
        if (index < 0) {
            // key not present - use the entry at the insertion point
            index = -(index + 1);
        }
        if (index == keyCount) {
            FixedKeyRecordNode nextLeaf = getNextLeaf();
            return nextLeaf != null ? nextLeaf.getRecord(schema, 0) : null;
        }
        return getRecord(schema, index);
    }

    /**
     * Create a new record node with no siblings attached.
     * @param nodeMgr table node manager instance
     * @return new record leaf node
     * @throws IOException thrown if IO error occurs
     */
    static FixedKeyRecordNode createRecordNode(NodeMgr nodeMgr) throws IOException {
        Schema schema = nodeMgr.getTableSchema();
        // leaf implementation depends upon whether records are variable length
        FixedKeyRecordNode node = null;
        if (schema.isVariableLength()) {
            node = new FixedKeyVarRecNode(nodeMgr, -1, -1);
        }
        else {
            node = new FixedKeyFixedRecNode(nodeMgr, -1, -1);
        }
        return node;
    }
}

View file

@ -0,0 +1,454 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
import db.buffers.DataBuffer;
import ghidra.util.datastruct.IntArrayList;
import ghidra.util.exception.AssertException;
/**
 * <code>FixedKeyVarRecNode</code> is an implementation of a BTree leaf node
 * which utilizes fixed-length key values and stores variable-length records.
 * <p>
 * This type of node has the following layout within a single DataBuffer
 * (field size in bytes, where 'L' is the fixed length of the fixed-length
 * key as specified by key type in associated Schema):
 * <pre>
 * | NodeType(1) | KeyCount(4) | PrevLeafId(4) | NextLeafId(4) | Key0(L) | RecOffset0(4) | IndFlag0(1) |...
 *
 * | KeyN(L) | RecOffsetN(4) | IndFlagN(1) |...&lt;FreeSpace&gt;... | RecN |... | Rec1 |
 * </pre>
 * Key entries grow upward from the header while record data grows downward from
 * the end of the buffer (i.e., record offsets decrease as the key index increases).
 * <p>
 * IndFlag - if not zero the record has been stored within a chained DBBuffer
 * whose 4-byte integer buffer ID has been stored within this leaf at the record offset.
 */
class FixedKeyVarRecNode extends FixedKeyRecordNode {

	private static final int HEADER_SIZE = RECORD_LEAF_HEADER_SIZE;

	// size of the per-entry 4-byte record data offset
	private static final int OFFSET_SIZE = 4;

	// size of the per-entry 1-byte indirect storage flag
	private static final int INDIRECT_OPTION_SIZE = 1;

	// buffer offset of the first key entry
	private static final int KEY_BASE_OFFSET = HEADER_SIZE;

	// total size of one key entry: key + record offset + indirect flag
	private final int entrySize;

	// buffer offset of the record-data-offset field within entry 0
	private final int dataOffsetBaseOffset;

	// buffer offset of the indirect-flag field within entry 0
	private final int indirectOptionBaseOffset;

	/**
	 * Construct an existing fixed-length key variable-length record leaf node.
	 * @param nodeMgr table node manager instance
	 * @param buf node buffer
	 * @throws IOException if IO error occurs
	 */
	FixedKeyVarRecNode(NodeMgr nodeMgr, DataBuffer buf) throws IOException {
		super(nodeMgr, buf);
		entrySize = keySize + OFFSET_SIZE + INDIRECT_OPTION_SIZE;
		dataOffsetBaseOffset = KEY_BASE_OFFSET + keySize;
		indirectOptionBaseOffset = dataOffsetBaseOffset + OFFSET_SIZE;
	}

	/**
	 * Construct a new fixed-length key variable-length record leaf node.
	 * @param nodeMgr table node manager instance
	 * @param prevLeafId node buffer id for previous leaf ( &lt; 0: no leaf)
	 * @param nextLeafId node buffer id for next leaf ( &lt; 0 : no leaf)
	 * @throws IOException if IO error occurs
	 */
	FixedKeyVarRecNode(NodeMgr nodeMgr, int prevLeafId, int nextLeafId) throws IOException {
		super(nodeMgr, NodeMgr.FIXEDKEY_VAR_REC_NODE, prevLeafId, nextLeafId);
		entrySize = keySize + OFFSET_SIZE + INDIRECT_OPTION_SIZE;
		dataOffsetBaseOffset = KEY_BASE_OFFSET + keySize;
		indirectOptionBaseOffset = dataOffsetBaseOffset + OFFSET_SIZE;
	}

	@Override
	FixedKeyRecordNode createNewLeaf(int prevLeafId, int nextLeafId) throws IOException {
		return new FixedKeyVarRecNode(nodeMgr, prevLeafId, nextLeafId);
	}

	@Override
	public int getKeyOffset(int index) {
		return KEY_BASE_OFFSET + (index * entrySize);
	}

	/**
	 * Get the record offset within the buffer
	 * @param index key index
	 * @return record offset
	 */
	public int getRecordDataOffset(int index) {
		return buffer.getInt(dataOffsetBaseOffset + (index * entrySize));
	}

	/**
	 * Store the record offset within the buffer for the specified key index
	 * @param index key index
	 * @param offset record offset
	 */
	private void putRecordDataOffset(int index, int offset) {
		buffer.putInt(dataOffsetBaseOffset + (index * entrySize), offset);
	}

	/**
	 * Determine if a record is utilizing a chained DBBuffer for data storage
	 * @param index key index
	 * @return true if indirect storage is used for record, else false
	 */
	private boolean hasIndirectStorage(int index) {
		return buffer.getByte(indirectOptionBaseOffset + (index * entrySize)) != 0;
	}

	/**
	 * Set the indirect storage flag associated with a record
	 * @param index key index
	 * @param state indirect storage used (true) or not used (false)
	 */
	private void enableIndirectStorage(int index, boolean state) {
		buffer.putByte(indirectOptionBaseOffset + (index * entrySize), state ? (byte) 1 : (byte) 0);
	}

	/**
	 * @return unused free space within node (bytes between the end of the key
	 * entries and the start of the lowest record data)
	 */
	private int getFreeSpace() {
		return (keyCount == 0 ? buffer.length() : getRecordDataOffset(keyCount - 1)) -
			(keyCount * entrySize) - RECORD_LEAF_HEADER_SIZE;
	}

	/**
	 * Get the length of a stored record.  Record data is stored back-to-front,
	 * so a record's length is the distance to the previous entry's offset
	 * (or to the buffer end for index 0).
	 * @param index index associated with record.
	 * @return stored record length in bytes
	 */
	private int getRecordLength(int index) {
		if (index == 0) {
			return buffer.length() - getRecordDataOffset(0);
		}
		return getRecordDataOffset(index - 1) - getRecordDataOffset(index);
	}

	/**
	 * Get the length of a stored record.  Optimized if record offset
	 * already known.
	 * @param index index associated with record.
	 * @param offset record offset
	 * @return stored record length in bytes
	 */
	private int getRecordLength(int index, int offset) {
		if (index == 0) {
			return buffer.length() - offset;
		}
		return getRecordDataOffset(index - 1) - offset;
	}

	/**
	 * Move all record data, starting with index, by the specified offset amount.
	 * If the node contains 5 records, an index of 3 would shift the record data
	 * for indexes 3 and 4 left by the specified offset amount.  This is used to
	 * make space for a new or updated record (a negative offset grows the
	 * record data region toward the key entries).
	 * @param index the smallest key/record index affected (0 &lt;= index &lt;= keyCount)
	 * @param offset movement offset in bytes
	 * @return insertion offset immediately following moved block.
	 */
	private int moveRecords(int index, int offset) {
		int lastIndex = keyCount - 1;
		// No movement needed for appended record
		if (index == keyCount) {
			if (index == 0) {
				return buffer.length() + offset;
			}
			return getRecordDataOffset(lastIndex) + offset;
		}
		// Determine block to be moved
		int start = getRecordDataOffset(lastIndex);
		int end = (index == 0) ? buffer.length() : getRecordDataOffset(index - 1);
		int len = end - start;
		// Move record data
		buffer.move(start, start + offset, len);
		// Adjust stored offsets
		for (int i = index; i < keyCount; i++) {
			putRecordDataOffset(i, getRecordDataOffset(i) + offset);
		}
		return end + offset;
	}

	@Override
	public Record getRecord(Schema schema, int index) throws IOException {
		Field key = getKeyField(index);
		Record record = schema.createRecord(key);
		if (hasIndirectStorage(index)) {
			// record data resides in a chained buffer; the leaf stores its buffer ID
			int bufId = buffer.getInt(getRecordDataOffset(index));
			ChainedBuffer chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(), bufId);
			record.read(chainedBuffer, 0);
		}
		else {
			record.read(buffer, getRecordDataOffset(index));
		}
		return record;
	}

	@Override
	public int getRecordOffset(int index) throws IOException {
		if (hasIndirectStorage(index)) {
			// negative value signals indirect storage; magnitude is the chained buffer ID
			return -buffer.getInt(getRecordDataOffset(index));
		}
		return getRecordDataOffset(index);
	}

	@Override
	public Record getRecord(Field key, Schema schema) throws IOException {
		int index = getKeyIndex(key);
		if (index < 0)
			return null;
		return getRecord(schema, index);
	}

	/**
	 * Find the index which represents the halfway point within the record data.
	 * Binary search over record offsets (which decrease as index increases).
	 * @return key index.
	 */
	private int getSplitIndex() {
		int halfway = ((keyCount == 0 ? buffer.length() : getRecordDataOffset(keyCount - 1)) +
			buffer.length()) / 2;
		int min = 1;
		int max = keyCount - 1;
		while (min < max) {
			int i = (min + max) / 2;
			int offset = getRecordDataOffset(i);
			if (offset == halfway) {
				return i;
			}
			else if (offset < halfway) {
				max = i - 1;
			}
			else {
				min = i + 1;
			}
		}
		return min;
	}

	@Override
	void splitData(FixedKeyRecordNode newRightLeaf) {
		FixedKeyVarRecNode rightNode = (FixedKeyVarRecNode) newRightLeaf;
		int splitIndex = getSplitIndex();
		int count = keyCount - splitIndex;
		int start = getRecordDataOffset(keyCount - 1); // start of block to be moved
		int end = getRecordDataOffset(splitIndex - 1); // end of block to be moved
		int splitLen = end - start; // length of block to be moved
		int rightOffset = buffer.length() - splitLen; // data offset within new leaf node
		// Copy data to new leaf node
		DataBuffer newBuf = rightNode.buffer;
		newBuf.copy(rightOffset, buffer, start, splitLen);
		newBuf.copy(KEY_BASE_OFFSET, buffer, KEY_BASE_OFFSET + (splitIndex * entrySize),
			count * entrySize);
		// Fix record offsets in new leaf node (data was re-anchored at buffer end)
		int offsetCorrection = buffer.length() - end;
		for (int i = 0; i < count; i++) {
			rightNode.putRecordDataOffset(i, rightNode.getRecordDataOffset(i) + offsetCorrection);
		}
		// Adjust key counts
		setKeyCount(keyCount - count);
		rightNode.setKeyCount(count);
	}

	/**
	 * Update the record at the specified index, switching between direct and
	 * indirect (chained buffer) storage as dictated by the updated record length.
	 * @param index key index of record to be updated
	 * @param record updated record
	 * @return root node (may change if a remove/re-add was required)
	 * @throws IOException thrown if IO error occurs
	 */
	@Override
	FixedKeyNode updateRecord(int index, Record record) throws IOException {
		int offset = getRecordDataOffset(index);
		int oldLen = getRecordLength(index, offset);
		int len = record.length();
		// Check for use of indirect chained record node(s)
		int maxRecordLength = ((buffer.length() - HEADER_SIZE) >> 2) - entrySize; // min 4 records per node
		boolean wasIndirect = hasIndirectStorage(index);
		boolean useIndirect = (len > maxRecordLength);
		if (useIndirect) {
			// Store record in chained buffers; leaf only keeps the 4-byte buffer ID
			len = 4;
			ChainedBuffer chainedBuffer = null;
			if (wasIndirect) {
				chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(), buffer.getInt(offset));
				chainedBuffer.setSize(record.length(), false);
			}
			else {
				chainedBuffer = new ChainedBuffer(record.length(), nodeMgr.getBufferMgr());
				buffer.putInt(offset + oldLen - 4, chainedBuffer.getId()); // assumes old len is always > 4
				enableIndirectStorage(index, true);
			}
			record.write(chainedBuffer, 0);
		}
		else if (wasIndirect) {
			// record shrank back to direct storage - discard chained buffer
			removeChainedBuffer(buffer.getInt(offset));
			enableIndirectStorage(index, false);
		}
		// See if updated record will fit in current buffer
		if (useIndirect || len <= (getFreeSpace() + oldLen)) {
			// Overwrite record data - move other data if needed
			int dataShift = oldLen - len;
			if (dataShift != 0) {
				offset = moveRecords(index + 1, dataShift);
				putRecordDataOffset(index, offset);
			}
			if (!useIndirect) {
				record.write(buffer, offset);
			}
			return getRoot();
		}
		// Insufficient room for updated record - remove and re-add
		Field key = record.getKeyField();
		FixedKeyRecordNode leaf = (FixedKeyRecordNode) deleteRecord(key, null).getLeafNode(key);
		return leaf.putRecord(record, null);
	}

	/**
	 * Insert the specified record at the specified key index.
	 * Existing data may be shifted within the buffer to make room for
	 * the new record.  Parent must be notified if this changes the leftmost
	 * key.
	 * @param index insertion index for stored key
	 * @param record record to be inserted
	 * @return true if inserted, false if the node lacks sufficient free space
	 * @throws IOException thrown if an IO error occurs
	 */
	@Override
	boolean insertRecord(int index, Record record) throws IOException {
		// Check for use of indirect chained record node(s)
		int len = record.length();
		int maxRecordLength = ((buffer.length() - HEADER_SIZE) >> 2) - entrySize; // min 4 records per node
		boolean useIndirect = (len > maxRecordLength);
		if (useIndirect) {
			// only the 4-byte chained buffer ID is stored within this leaf
			len = 4;
		}
		if ((len + entrySize) > getFreeSpace())
			return false; // insufficient space for record storage
		// Make room for new record
		int offset = moveRecords(index, -len);
		// Make room for new key/offset entry
		int start = KEY_BASE_OFFSET + (index * entrySize);
		len = (keyCount - index) * entrySize;
		buffer.move(start, start + entrySize, len);
		// Store new record key/offset
		buffer.put(start, record.getKeyField().getBinaryData());
		buffer.putInt(start + keySize, offset);
		setKeyCount(keyCount + 1);
		// Store record data
		if (useIndirect) {
			ChainedBuffer chainedBuffer =
				new ChainedBuffer(record.length(), nodeMgr.getBufferMgr());
			buffer.putInt(offset, chainedBuffer.getId());
			record.write(chainedBuffer, 0);
		}
		else {
			record.write(buffer, offset);
		}
		enableIndirectStorage(index, useIndirect);
		return true;
	}

	@Override
	public void remove(int index) throws IOException {
		if (index < 0 || index >= keyCount)
			throw new AssertException();
		if (hasIndirectStorage(index)) {
			// release chained buffer before discarding the entry
			removeChainedBuffer(buffer.getInt(getRecordDataOffset(index)));
			enableIndirectStorage(index, false);
		}
		// close the record-data gap, then close the key-entry gap
		int len = getRecordLength(index);
		moveRecords(index + 1, len);
		int start = KEY_BASE_OFFSET + ((index + 1) * entrySize);
		len = (keyCount - index - 1) * entrySize;
		buffer.move(start, start - entrySize, len);
		setKeyCount(keyCount - 1);
	}

	@Override
	public FixedKeyNode removeLeaf() throws IOException {
		// Remove all chained buffers associated with this leaf
		for (int index = 0; index < keyCount; ++index) {
			if (hasIndirectStorage(index)) {
				removeChainedBuffer(buffer.getInt(getRecordDataOffset(index)));
			}
		}
		return super.removeLeaf();
	}

	/**
	 * Remove a chained buffer.
	 * @param bufferId chained buffer ID
	 * @throws IOException thrown if IO error occurs
	 */
	private void removeChainedBuffer(int bufferId) throws IOException {
		ChainedBuffer chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(), bufferId);
		chainedBuffer.delete();
	}

	@Override
	public void delete() throws IOException {
		// Remove all chained buffers associated with this node.
		for (int index = 0; index < keyCount; index++) {
			if (hasIndirectStorage(index)) {
				int offset = getRecordDataOffset(index);
				int bufferId = buffer.getInt(offset);
				removeChainedBuffer(bufferId);
				buffer.putInt(offset, -1);
			}
		}
		// Remove this node
		nodeMgr.deleteNode(this);
	}

	@Override
	public int[] getBufferReferences() {
		// report IDs of all chained buffers referenced by this leaf
		IntArrayList idList = new IntArrayList();
		for (int i = 0; i < keyCount; i++) {
			if (hasIndirectStorage(i)) {
				int offset = getRecordDataOffset(i);
				idList.add(buffer.getInt(offset));
			}
		}
		return idList.toArray();
	}

}

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,11 +15,10 @@
*/ */
package db; package db;
import ghidra.util.exception.AssertException;
import java.io.IOException; import java.io.IOException;
import db.buffers.DataBuffer; import db.buffers.DataBuffer;
import ghidra.util.exception.AssertException;
/** /**
* <code>FixedRecNode</code> is an implementation of a BTree leaf node * <code>FixedRecNode</code> is an implementation of a BTree leaf node
@ -51,6 +49,7 @@ class FixedRecNode extends LongKeyRecordNode {
* Construct an existing long-key fixed-length record leaf node. * Construct an existing long-key fixed-length record leaf node.
* @param nodeMgr table node manager instance * @param nodeMgr table node manager instance
* @param buf node buffer * @param buf node buffer
* @param recordLength fixed record length
*/ */
FixedRecNode(NodeMgr nodeMgr, DataBuffer buf, int recordLength) { FixedRecNode(NodeMgr nodeMgr, DataBuffer buf, int recordLength) {
super(nodeMgr, buf); super(nodeMgr, buf);
@ -64,45 +63,37 @@ class FixedRecNode extends LongKeyRecordNode {
* @param recordLength fixed record length * @param recordLength fixed record length
* @param prevLeafId node buffer id for previous leaf ( &lt; 0: no leaf) * @param prevLeafId node buffer id for previous leaf ( &lt; 0: no leaf)
* @param nextLeafId node buffer id for next leaf ( &lt; 0 : no leaf) * @param nextLeafId node buffer id for next leaf ( &lt; 0 : no leaf)
* @throws IOException * @throws IOException thrown if IO error occurs
*/ */
FixedRecNode(NodeMgr nodeMgr, int recordLength, int prevLeafId, int nextLeafId) throws IOException { FixedRecNode(NodeMgr nodeMgr, int recordLength, int prevLeafId, int nextLeafId)
throws IOException {
super(nodeMgr, NodeMgr.LONGKEY_FIXED_REC_NODE, prevLeafId, nextLeafId); super(nodeMgr, NodeMgr.LONGKEY_FIXED_REC_NODE, prevLeafId, nextLeafId);
this.recordLength = recordLength; this.recordLength = recordLength;
entrySize = KEY_SIZE + recordLength; entrySize = KEY_SIZE + recordLength;
} }
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#createNewLeaf(int, int)
*/
@Override @Override
LongKeyRecordNode createNewLeaf(int prevLeafId, int nextLeafId) throws IOException { LongKeyRecordNode createNewLeaf(int prevLeafId, int nextLeafId) throws IOException {
return new FixedRecNode(nodeMgr, recordLength, prevLeafId, nextLeafId); return new FixedRecNode(nodeMgr, recordLength, prevLeafId, nextLeafId);
} }
/*
* @see ghidra.framework.store.db.LongKeyNode#getKey(int)
*/
@Override @Override
long getKey(int index) { long getKey(int index) {
return buffer.getLong(ENTRY_BASE_OFFSET + (index * entrySize)); return buffer.getLong(getKeyOffset(index));
} }
// /** @Override
// * Store a key at the specified index public int getKeyOffset(int index) {
// * @param index key index return ENTRY_BASE_OFFSET + (index * entrySize);
// * @param key key value }
// */
// private void putKey(int index, long key) {
// buffer.putLong(ENTRY_BASE_OFFSET + (index * entrySize), key);
// }
/** /**
* Get the record offset within the buffer * Get the record offset within the buffer
* @param index key index * @param index key index
* @return record offset * @return record offset
*/ */
private int getRecordOffset(int index) { @Override
public int getRecordOffset(int index) {
return ENTRY_BASE_OFFSET + (index * entrySize); return ENTRY_BASE_OFFSET + (index * entrySize);
} }
@ -128,11 +119,8 @@ class FixedRecNode extends LongKeyRecordNode {
buffer.move(start, offset, len); buffer.move(start, offset, len);
} }
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#remove(int)
*/
@Override @Override
void remove(int index) { public void remove(int index) {
if (index < 0 || index >= keyCount) if (index < 0 || index >= keyCount)
throw new AssertException(); throw new AssertException();
@ -141,17 +129,12 @@ throw new AssertException();
setKeyCount(keyCount - 1); setKeyCount(keyCount - 1);
} }
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#insertRecord(int, ghidra.framework.store.db.Record)
*/
@Override @Override
boolean insertRecord(int index, Record record) throws IOException { boolean insertRecord(int index, Record record) throws IOException {
// Check for use of indirect chained record node(s) if (keyCount == ((buffer.length() - HEADER_SIZE) / entrySize)) {
// int len = record.length();
if (keyCount == ((buffer.length() - HEADER_SIZE) / entrySize))
return false; // insufficient space for record storage return false; // insufficient space for record storage
}
// Make room for new record // Make room for new record
shiftRecords(index, true); shiftRecords(index, true);
@ -165,9 +148,6 @@ throw new AssertException();
return true; return true;
} }
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#updateRecord(int, ghidra.framework.store.db.Record)
*/
@Override @Override
LongKeyNode updateRecord(int index, Record record) throws IOException { LongKeyNode updateRecord(int index, Record record) throws IOException {
int offset = getRecordOffset(index) + KEY_SIZE; int offset = getRecordOffset(index) + KEY_SIZE;
@ -175,9 +155,6 @@ throw new AssertException();
return getRoot(); return getRoot();
} }
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#getRecord(long, ghidra.framework.store.db.Schema)
*/
@Override @Override
Record getRecord(long key, Schema schema) throws IOException { Record getRecord(long key, Schema schema) throws IOException {
int index = getKeyIndex(key); int index = getKeyIndex(key);
@ -188,20 +165,14 @@ throw new AssertException();
return record; return record;
} }
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#getRecord(ghidra.framework.store.db.Schema, int)
*/
@Override @Override
Record getRecord(Schema schema, int index) throws IOException { public Record getRecord(Schema schema, int index) throws IOException {
long key = getKey(index); long key = getKey(index);
Record record = schema.createRecord(key); Record record = schema.createRecord(key);
record.read(buffer, getRecordOffset(index) + KEY_SIZE); record.read(buffer, getRecordOffset(index) + KEY_SIZE);
return record; return record;
} }
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#splitData(ghidra.framework.store.db.LongKeyRecordNode)
*/
@Override @Override
void splitData(LongKeyRecordNode newRightLeaf) { void splitData(LongKeyRecordNode newRightLeaf) {
@ -221,17 +192,12 @@ throw new AssertException();
rightNode.setKeyCount(count); rightNode.setKeyCount(count);
} }
/*
* @see ghidra.framework.store.db.LongKeyNode#delete()
*/
@Override @Override
public void delete() throws IOException { public void delete() throws IOException {
nodeMgr.deleteNode(this); nodeMgr.deleteNode(this);
} }
/* @Override
* @see ghidra.framework.store.db.BTreeNode#getBufferReferences()
*/
public int[] getBufferReferences() { public int[] getBufferReferences() {
return EMPTY_ID_LIST; return EMPTY_ID_LIST;
} }

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -27,4 +26,13 @@ public class IllegalFieldAccessException extends RuntimeException {
IllegalFieldAccessException() { IllegalFieldAccessException() {
super("Illegal field access"); super("Illegal field access");
} }
/**
* Construct an illegal field access exception
* with a specific message
*/
IllegalFieldAccessException(String msg) {
super(msg);
}
} }

View file

@ -1,237 +0,0 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
import db.buffers.DataBuffer;
/**
 * <code>IndexBuffer</code> stores index data for a common index key
 * within a data buffer.  The index data has the following layout (field size in
 * bytes):
 * <pre>
 * | FieldType(1) | KeyCount(4) | PrimeKey1(8) | ... | PrimeKeyN(8) |
 * </pre>
 * This type of index buffer is used to store primary keys associated with a
 * single secondary key.  The association to a specific secondary key
 * is handled by the <code>IndexTable</code>.  The primary keys are maintained
 * within the buffer in an ascending sorted order.
 */
class IndexBuffer {

	private static final int FIELD_TYPE_SIZE = 1;
	private static final int KEY_COUNT_SIZE = 4;

	private static final int FIELD_TYPE_OFFSET = 0;
	private static final int KEY_COUNT_OFFSET = FIELD_TYPE_OFFSET + FIELD_TYPE_SIZE;

	static final int INDEX_HEADER_SIZE = FIELD_TYPE_SIZE + KEY_COUNT_SIZE;
	static final int PRIMARY_KEY_SIZE = 8;

	Field indexKey; // secondary key common to all primary keys stored here
	int keyCount; // number of primary keys currently stored
	IndexDataBuffer dataBuffer; // backing storage: header followed by sorted keys

	/**
	 * Construct a new index buffer.
	 * @param indexKey associated index key
	 * @param data existing index buffer data from storage or null for an
	 * empty index buffer.
	 * @throws IOException thrown if IO error occurs
	 */
	IndexBuffer(Field indexKey, byte[] data) throws IOException {
		this.indexKey = indexKey;
		if (data == null) {
			// initialize an empty buffer: field type byte + zero key count
			dataBuffer = new IndexDataBuffer(INDEX_HEADER_SIZE);
			dataBuffer.putByte(FIELD_TYPE_OFFSET, indexKey.getFieldType());
			dataBuffer.putInt(KEY_COUNT_OFFSET, 0);
		}
		else {
			// stored field type must match the supplied index key's type
			if (data[FIELD_TYPE_OFFSET] != indexKey.getFieldType())
				throw new IOException("Invalid index data");
			dataBuffer = new IndexDataBuffer(data);
		}
		keyCount = dataBuffer.getInt(KEY_COUNT_OFFSET);
	}

	/**
	 * Get the associated index key
	 * @return index key
	 */
	Field getIndexKey() {
		return indexKey;
	}

	/**
	 * Set the stored primary key count
	 * @param cnt primary key count
	 */
	private void setKeyCount(int cnt) {
		keyCount = cnt;
		dataBuffer.putInt(KEY_COUNT_OFFSET, keyCount);
	}

	/**
	 * Provides data buffer manipulation for the index data
	 */
	class IndexDataBuffer extends DataBuffer {

		/**
		 * Construct an index data buffer backed by the given byte array.
		 * @see db.buffers.DataBuffer#DataBuffer(byte[])
		 */
		IndexDataBuffer(byte[] data) {
			super(data);
		}

		/**
		 * Construct an index data buffer of the given size.
		 * @see db.buffers.DataBuffer#DataBuffer(int)
		 */
		IndexDataBuffer(int size) {
			super(size);
		}

		/**
		 * Get the storage array associated with this buffer.
		 * @return byte storage array.
		 */
		@Override
		protected byte[] getData() {
			return data;
		}

		/**
		 * Set the storage array associated with this buffer.
		 */
		@Override
		protected void setData(byte[] data) {
			this.data = data;
		}
	}

	/**
	 * Get the index buffer data.
	 * @return index data or null if index data is empty.
	 */
	byte[] getData() {
		byte[] data = dataBuffer.getData();
		if (data.length <= INDEX_HEADER_SIZE)
			return null;
		return data;
	}

	/**
	 * Get the primary key associated with the specified entry index.
	 * This method does not perform any bounds checking on the index value.
	 * @param index index entry index.
	 * @return primary key associated with entry.
	 */
	long getPrimaryKey(int index) {
		return dataBuffer.getLong(INDEX_HEADER_SIZE + (index * PRIMARY_KEY_SIZE));
	}

	/**
	 * Get the index of the specified primary key within this buffer.
	 * @param primaryKey primary key
	 * @return key index if found, else -(key index + 1) indicates insertion
	 * point.
	 */
	int getIndex(long primaryKey) {
		return getKeyIndex(primaryKey);
	}

	/**
	 * Perform a binary search to locate the specified primary key.
	 * @param primaryKey primary key
	 * @return key index if found, else -(key index + 1) indicates insertion
	 * point.
	 */
	private int getKeyIndex(long primaryKey) {
		int min = 0;
		int max = keyCount - 1;
		while (min <= max) {
			int i = (min + max)/2;
			long k = getPrimaryKey(i);
			if (k == primaryKey) {
				return i;
			}
			else if (k < primaryKey) {
				min = i + 1;
			}
			else {
				max = i - 1;
			}
		}
		return -(min+1);
	}

	/**
	 * Add a new primary key to this index buffer.  A duplicate key is
	 * silently ignored.  The backing buffer is reallocated to accommodate
	 * the new entry at its sorted position.
	 * @param primaryKey primary key
	 */
	void addEntry(long primaryKey) {
		int index = getKeyIndex(primaryKey);
		if (index < 0) {
			index = -index-1;
			IndexDataBuffer newDataBuffer = new IndexDataBuffer(dataBuffer.length() + PRIMARY_KEY_SIZE);
			int len = INDEX_HEADER_SIZE + (index * PRIMARY_KEY_SIZE);
			newDataBuffer.copy(0, dataBuffer, 0, len);
			newDataBuffer.copy(len + PRIMARY_KEY_SIZE, dataBuffer, len, dataBuffer.length() - len);
			newDataBuffer.putLong(len, primaryKey);
			dataBuffer = newDataBuffer;
			setKeyCount(keyCount + 1);
		}
	}

	/**
	 * Delete the specified index entry from this index buffer.  A missing
	 * key is silently ignored.  The backing buffer is reallocated without
	 * the removed entry.
	 * @param primaryKey primary key
	 */
	void deleteEntry(long primaryKey) {
		int index = getKeyIndex(primaryKey);
		if (index >= 0) {
			IndexDataBuffer newDataBuffer = new IndexDataBuffer(dataBuffer.length() - PRIMARY_KEY_SIZE);
			int len = INDEX_HEADER_SIZE + (index * PRIMARY_KEY_SIZE);
			newDataBuffer.copy(0, dataBuffer, 0, len);
			newDataBuffer.copy(len, dataBuffer, len + PRIMARY_KEY_SIZE, dataBuffer.length() - len - PRIMARY_KEY_SIZE);
			dataBuffer = newDataBuffer;
			setKeyCount(keyCount - 1);
		}
	}

	/**
	 * Get the list of primary keys contained within this index buffer.
	 * @return long[] list of primary keys
	 */
	long[] getPrimaryKeys() {
		long[] keys = new long[keyCount];
		for (int i = 0; i < keyCount; i++) {
			keys[i] = getPrimaryKey(i);
		}
		return keys;
	}

}

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,486 +15,276 @@
*/ */
package db; package db;
import ghidra.util.exception.AssertException;
import java.io.IOException; import java.io.IOException;
abstract class IndexField extends Field { import db.buffers.DataBuffer;
import ghidra.util.exception.AssertException;
private static final int MAX_INDEX_FIELD_LENGTH = 64; /**
* <code>IndexField</code> provides an index table primary key {@link Field}
* implementation which wraps both the index field value (fixed or variable length)
* and its corresponding primary key (fixed or variable length).
*/
class IndexField extends Field {
private long primaryKey; static final int MAX_INDEX_FIELD_LENGTH = 64;
private Field nonTruncatedIndexField;
private Field indexField; private Field primaryKey;
private Field nonTruncatedIndexedField;
private Field indexedField;
private boolean isTruncated = false; private boolean isTruncated = false;
/** /**
* Construct a new index field with an initial value of null. * Construct an index field with an initial value.
* @param indexedField indexed field value
* @param primaryKey primary key value
*/ */
IndexField(Field newIndexField) { IndexField(Field indexedField, Field primaryKey) {
indexField = newIndexField; if (primaryKey.isVariableLength()) {
nonTruncatedIndexField = newIndexField; throw new IllegalArgumentException("variable length primaryKey not supported");
}
this.primaryKey = primaryKey.copyField();
this.nonTruncatedIndexedField = indexedField;
this.indexedField = indexedField;
if (indexedField.isVariableLength() && indexedField.length() >= MAX_INDEX_FIELD_LENGTH) {
// Ensure that we do not exceed the maximum allowed index key length
// and conserves space when indexing very long values
this.indexedField = indexedField.copyField();
this.indexedField.truncate(MAX_INDEX_FIELD_LENGTH);
isTruncated = true;
}
} }
/** /**
* Construct an index field with an initial value. * Get the indexed field value. If the original value exceeded
* {@link #MAX_INDEX_FIELD_LENGTH} in length the returned value will
* be truncated.
* @return indexed field value
*/ */
IndexField(Field value, long primaryKey) { Field getIndexedField() {
this.nonTruncatedIndexField = value; return indexedField;
indexField = value.newField(value);
if (indexField.isVariableLength() && indexField.length() >= MAX_INDEX_FIELD_LENGTH) {
// Ensure that we do not exceed the maximum allowed index key length
// and conserves space when indexing very long values
indexField.truncate(MAX_INDEX_FIELD_LENGTH);
isTruncated = true;
}
this.primaryKey = primaryKey;
}
Field getIndexField() {
return indexField;
} }
/**
* Get the non-truncated index field value.
* @return non-truncated index field value.
* @deprecated this method serves no real purpose since the non-truncated
* indexed field value is not retained within the index table.
*/
@Deprecated
Field getNonTruncatedIndexField() { Field getNonTruncatedIndexField() {
return nonTruncatedIndexField; return nonTruncatedIndexedField;
} }
/**
* Determine if the index field value has been truncated from its original
* value.
* @return true if truncated else false
* @deprecated this method serves no real purpose since the truncation
* status is not retained within the index table.
*/
@Deprecated
boolean usesTruncatedFieldValue() { boolean usesTruncatedFieldValue() {
return isTruncated; return isTruncated;
} }
long getPrimaryKey() { Field getPrimaryKey() {
return primaryKey; return primaryKey;
} }
/*
* @see ghidra.framework.store.db.Field#length()
*/
@Override @Override
int length() { int length() {
return indexField.length() + 8; return indexedField.length() + primaryKey.length();
} }
/*
* @see ghidra.framework.store.db.Field#write(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int write(Buffer buf, int offset) throws IOException { int write(Buffer buf, int offset) throws IOException {
offset = indexField.write(buf, offset); offset = indexedField.write(buf, offset);
return buf.putLong(offset, primaryKey); return primaryKey.write(buf, offset);
} }
/*
* @see ghidra.framework.store.db.Field#read(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int read(Buffer buf, int offset) throws IOException { int read(Buffer buf, int offset) throws IOException {
offset = indexField.read(buf, offset); offset = indexedField.read(buf, offset);
primaryKey = buf.getLong(offset); return primaryKey.read(buf, offset);
return offset + 8;
} }
/*
* @see ghidra.framework.store.db.Field#readLength(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int readLength(Buffer buf, int offset) throws IOException { int readLength(Buffer buf, int offset) throws IOException {
return indexField.readLength(buf, offset) + 8; return indexedField.readLength(buf, offset) + primaryKey.length();
} }
/*
* @see ghidra.framework.store.db.Field#isVariableLength()
*/
@Override @Override
public boolean isVariableLength() { public boolean isVariableLength() {
return true; return indexedField.isVariableLength();
} }
/*
* @see ghidra.framework.store.db.Field#getFieldType()
*/
@Override @Override
protected abstract byte getFieldType(); public IndexField copyField() {
return new IndexField(indexedField.copyField(), primaryKey.copyField());
}
abstract String getFieldTypeString(); @Override
public IndexField newField() {
return new IndexField(indexedField.newField(), primaryKey.newField());
}
/* /**
* @see java.lang.Object#toString() * Construct a new {@link IndexField} instance for the given indexValue and
* associated primary key. These fields are verified against this instance to
* ensure that they are of the correct type.
* @param indexValue column field value to be indexed
* @param key primary key associated with indexValue
* @return new IndexField instance
*/ */
IndexField newIndexField(Field indexValue, Field key) {
if (!indexValue.isSameType(indexedField) || !primaryKey.isSameType(getPrimaryKey())) {
throw new IllegalArgumentException("incorrect index value or key type");
}
return new IndexField(indexValue, key);
}
@Override
final IndexField getMinValue() {
throw new UnsupportedOperationException();
}
@Override
final IndexField getMaxValue() {
throw new UnsupportedOperationException();
}
@Override
byte getFieldType() {
return getIndexFieldType(indexedField, primaryKey);
}
@Override @Override
public String toString() { public String toString() {
return getFieldTypeString() + ": " + indexField; return indexedField + "/" + primaryKey;
} }
@Override @Override
public String getValueAsString() { public String getValueAsString() {
return indexField.getValueAsString() + " + " + Long.toHexString(primaryKey); return indexedField.getValueAsString() + " / " + primaryKey.getValueAsString();
} }
boolean hasSameIndex(IndexField field) { boolean hasSameIndexValue(IndexField field) {
if (field == null) { if (field == null) {
return false; return false;
} }
if (indexField == null) { if (indexedField == null) {
return field.indexField == null; return field.indexedField == null;
} }
return indexField.equals(field.indexField); return indexedField.equals(field.indexedField);
} }
/*
* @see ghidra.framework.store.db.Field#getBinaryData()
*/
@Override @Override
public byte[] getBinaryData() { public byte[] getBinaryData() {
byte[] indexBytes = indexField.getBinaryData(); byte[] indexBytes = indexedField.getBinaryData();
int len = indexBytes.length; byte[] primaryKeyBytes = primaryKey.getBinaryData();
byte[] bytes = new byte[len + 8]; int len = indexBytes.length + primaryKeyBytes.length;
System.arraycopy(indexBytes, 0, bytes, 0, len); byte[] bytes = new byte[len];
System.arraycopy(indexBytes, 0, bytes, 0, indexBytes.length);
bytes[len] = (byte) (primaryKey >> 56); System.arraycopy(primaryKeyBytes, 0, bytes, indexBytes.length, primaryKeyBytes.length);
bytes[++len] = (byte) (primaryKey >> 48);
bytes[++len] = (byte) (primaryKey >> 40);
bytes[++len] = (byte) (primaryKey >> 32);
bytes[++len] = (byte) (primaryKey >> 24);
bytes[++len] = (byte) (primaryKey >> 16);
bytes[++len] = (byte) (primaryKey >> 8);
bytes[++len] = (byte) primaryKey;
return bytes; return bytes;
} }
/* @Override
* @see java.lang.Comparable#compareTo(java.lang.Object) public void setBinaryData(byte[] bytes) {
*/ if (isVariableLength()) {
throw new IllegalFieldAccessException("Unsupported for variable length IndexField");
}
if (bytes.length != length()) {
throw new IllegalFieldAccessException();
}
BinaryDataBuffer buffer = new BinaryDataBuffer(bytes);
try {
read(buffer, 0);
}
catch (IOException e) {
throw new IllegalFieldAccessException();
}
}
@Override @Override
public int compareTo(Field o) { public int compareTo(Field o) {
IndexField f = (IndexField) o; IndexField f = (IndexField) o;
int result = indexField.compareTo(f.indexField); int result = indexedField.compareTo(f.indexedField);
if (result != 0) { if (result != 0) {
return result; return result;
} }
if (primaryKey == f.primaryKey) { return primaryKey.compareTo(f.primaryKey);
return 0; }
}
else if (primaryKey < f.primaryKey) { @Override
return -1; int compareTo(DataBuffer buffer, int offset) {
} int result = indexedField.compareTo(buffer, offset);
return 1; if (result != 0) {
return result;
}
try {
int indexedFieldLen = indexedField.readLength(buffer, offset);
return primaryKey.compareTo(buffer, offset + indexedFieldLen);
}
catch (IOException e) {
throw new AssertException(e); // DataBuffer does not throw IOException
}
}
@Override
public boolean isSameType(Field field) {
if (!(field instanceof IndexField)) {
return false;
}
IndexField otherField = (IndexField) field;
return indexedField.isSameType(otherField.indexedField) &&
primaryKey.isSameType(otherField.primaryKey);
} }
/*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override @Override
public boolean equals(Object obj) { public boolean equals(Object obj) {
if (!getClass().isInstance(obj)) if (obj == null || obj.getClass() != getClass()) {
return false; return false;
}
IndexField f = (IndexField) obj; IndexField f = (IndexField) obj;
return primaryKey == f.primaryKey && indexField.equals(f.indexField); return primaryKey.equals(f.primaryKey) && indexedField.equals(f.indexedField);
} }
/*
* @see java.lang.Object#hashCode()
*/
@Override @Override
public int hashCode() { public int hashCode() {
return (int) primaryKey; return (indexedField.hashCode() * 31) + primaryKey.hashCode();
}
static byte getIndexFieldType(Field indexedFieldType, Field primaryKeyFieldType) {
if (primaryKeyFieldType instanceof IndexField) {
throw new IllegalArgumentException();
}
if (indexedFieldType instanceof IndexField) {
throw new IllegalArgumentException();
}
return (byte) ((primaryKeyFieldType.getFieldType() << INDEX_FIELD_TYPE_SHIFT) |
indexedFieldType.getFieldType());
} }
/** /**
* Get the field associated with the specified type value. * Get the index field associated with the specified encoded field type.
* @param fieldType * @param fieldType field type
* @return Field * @return IndexField
* @throws UnsupportedFieldException if unsupported fieldType specified
*/ */
static IndexField getIndexField(byte fieldType) { static IndexField getIndexField(byte fieldType) throws UnsupportedFieldException {
switch (fieldType & BASE_TYPE_MASK) { Field indexedField = Field.getField((byte) (fieldType & FIELD_TYPE_MASK));
case LONG_TYPE:
return new LongIndexField(); byte primaryKeyFeldType = (byte) (fieldType >> INDEX_FIELD_TYPE_SHIFT & FIELD_TYPE_MASK);
case INT_TYPE: if (primaryKeyFeldType == LEGACY_INDEX_LONG_TYPE) {
return new IntIndexField(); return new LegacyIndexField(indexedField);
case STRING_TYPE:
return new StringIndexField();
case SHORT_TYPE:
return new ShortIndexField();
case BYTE_TYPE:
return new ByteIndexField();
case BOOLEAN_TYPE:
return new BooleanIndexField();
case BINARY_OBJ_TYPE:
return new BinaryIndexField();
}
throw new AssertException();
} }
static IndexField getIndexField(Field indexedField, long primaryKey) { Field primaryKeyType = Field.getField(primaryKeyFeldType);
switch (indexedField.getFieldType()) { return new IndexField(indexedField, primaryKeyType);
case LONG_TYPE:
return new LongIndexField((LongField) indexedField, primaryKey);
case INT_TYPE:
return new IntIndexField((IntField) indexedField, primaryKey);
case STRING_TYPE:
return new StringIndexField((StringField) indexedField, primaryKey);
case SHORT_TYPE:
return new ShortIndexField((ShortField) indexedField, primaryKey);
case BYTE_TYPE:
return new ByteIndexField((ByteField) indexedField, primaryKey);
case BOOLEAN_TYPE:
return new BooleanIndexField((BooleanField) indexedField, primaryKey);
case BINARY_OBJ_TYPE:
return new BinaryIndexField((BinaryField) indexedField, primaryKey);
}
throw new AssertException();
}
private static class LongIndexField extends IndexField {
LongIndexField() {
super(new LongField());
}
LongIndexField(LongField indexedField, long primaryKey) {
super(indexedField, primaryKey);
}
@Override
protected byte getFieldType() {
return INDEX_TYPE_FLAG | LONG_TYPE;
}
@Override
String getFieldTypeString() {
return "LongIndexField";
}
@Override
public Field newField(Field fieldValue) {
if (!(fieldValue instanceof LongIndexField)) {
throw new AssertException();
}
LongIndexField f = (LongIndexField) fieldValue;
return new LongIndexField((LongField) f.getIndexField(), f.getPrimaryKey());
}
@Override
public Field newField() {
return new LongIndexField();
} }
} }
private static class IntIndexField extends IndexField {
IntIndexField() {
super(new IntField());
}
IntIndexField(IntField indexedField, long primaryKey) {
super(indexedField, primaryKey);
}
@Override
protected byte getFieldType() {
return INDEX_TYPE_FLAG | INT_TYPE;
}
@Override
String getFieldTypeString() {
return "IntIndexField";
}
@Override
public Field newField(Field fieldValue) {
if (!(fieldValue instanceof IntIndexField)) {
throw new AssertException();
}
IntIndexField f = (IntIndexField) fieldValue;
return new IntIndexField((IntField) f.getIndexField(), f.getPrimaryKey());
}
@Override
public Field newField() {
return new IntIndexField();
}
}
private static class StringIndexField extends IndexField {
StringIndexField() {
super(new StringField());
}
StringIndexField(StringField indexedField, long primaryKey) {
super(indexedField, primaryKey);
}
@Override
protected byte getFieldType() {
return INDEX_TYPE_FLAG | STRING_TYPE;
}
@Override
String getFieldTypeString() {
return "StringIndexField";
}
@Override
public Field newField(Field fieldValue) {
if (!(fieldValue instanceof StringIndexField)) {
throw new AssertException();
}
StringIndexField f = (StringIndexField) fieldValue;
return new StringIndexField((StringField) f.getIndexField(), f.getPrimaryKey());
}
@Override
public Field newField() {
return new StringIndexField();
}
}
private static class ShortIndexField extends IndexField {
ShortIndexField() {
super(new ShortField());
}
ShortIndexField(ShortField indexedField, long primaryKey) {
super(indexedField, primaryKey);
}
@Override
protected byte getFieldType() {
return INDEX_TYPE_FLAG | SHORT_TYPE;
}
@Override
String getFieldTypeString() {
return "ShortIndexField";
}
@Override
public Field newField(Field fieldValue) {
if (!(fieldValue instanceof ShortIndexField)) {
throw new AssertException();
}
ShortIndexField f = (ShortIndexField) fieldValue;
return new ShortIndexField((ShortField) f.getIndexField(), f.getPrimaryKey());
}
@Override
public Field newField() {
return new ShortIndexField();
}
}
private static class ByteIndexField extends IndexField {
ByteIndexField() {
super(new ByteField());
}
ByteIndexField(ByteField indexedField, long primaryKey) {
super(indexedField, primaryKey);
}
@Override
protected byte getFieldType() {
return INDEX_TYPE_FLAG | BYTE_TYPE;
}
@Override
String getFieldTypeString() {
return "ByteIndexField";
}
@Override
public Field newField(Field fieldValue) {
if (!(fieldValue instanceof ByteIndexField)) {
throw new AssertException();
}
ByteIndexField f = (ByteIndexField) fieldValue;
return new ByteIndexField((ByteField) f.getIndexField(), f.getPrimaryKey());
}
@Override
public Field newField() {
return new ByteIndexField();
}
}
private static class BooleanIndexField extends IndexField {
BooleanIndexField() {
super(new BooleanField());
}
BooleanIndexField(BooleanField indexedField, long primaryKey) {
super(indexedField, primaryKey);
}
@Override
protected byte getFieldType() {
return INDEX_TYPE_FLAG | BOOLEAN_TYPE;
}
@Override
String getFieldTypeString() {
return "BooleanIndexField";
}
@Override
public Field newField(Field fieldValue) {
if (!(fieldValue instanceof BooleanIndexField)) {
throw new AssertException();
}
BooleanIndexField f = (BooleanIndexField) fieldValue;
return new BooleanIndexField((BooleanField) f.getIndexField(), f.getPrimaryKey());
}
@Override
public Field newField() {
return new BooleanIndexField();
}
}
private static class BinaryIndexField extends IndexField {
BinaryIndexField() {
super(new BinaryField());
}
BinaryIndexField(BinaryField indexedField, long primaryKey) {
super(indexedField, primaryKey);
}
@Override
protected byte getFieldType() {
return INDEX_TYPE_FLAG | BINARY_OBJ_TYPE;
}
@Override
String getFieldTypeString() {
return "BinaryIndexField";
}
@Override
public Field newField(Field fieldValue) {
if (!(fieldValue instanceof BinaryIndexField)) {
throw new AssertException();
}
BinaryIndexField f = (BinaryIndexField) fieldValue;
return new BinaryIndexField((BinaryField) f.getIndexField(), f.getPrimaryKey());
}
@Override
public Field newField() {
return new BinaryIndexField();
}
}
}

View file

@ -16,7 +16,6 @@
package db; package db;
import java.io.IOException; import java.io.IOException;
import java.util.NoSuchElementException;
import ghidra.util.exception.AssertException; import ghidra.util.exception.AssertException;
import ghidra.util.exception.CancelledException; import ghidra.util.exception.CancelledException;
@ -29,7 +28,7 @@ import ghidra.util.task.TaskMonitor;
*/ */
abstract class IndexTable { abstract class IndexTable {
protected static final long[] emptyKeyArray = new long[0]; protected static final Field[] emptyKeyArray = Field.EMPTY_ARRAY;
/** /**
* Database Handle * Database Handle
@ -51,11 +50,6 @@ abstract class IndexTable {
*/ */
protected Table indexTable; protected Table indexTable;
/**
* Field type associated with indexed column.
*/
protected final Field fieldType;
/** /**
* Indexed column within primary table schema. * Indexed column within primary table schema.
*/ */
@ -69,15 +63,14 @@ abstract class IndexTable {
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
*/ */
IndexTable(Table primaryTable, TableRecord indexTableRecord) throws IOException { IndexTable(Table primaryTable, TableRecord indexTableRecord) throws IOException {
if (!primaryTable.useLongKeys()) { if (!primaryTable.useLongKeys() && !primaryTable.useFixedKeys()) {
throw new AssertException("Only long-key tables may be indexed"); throw new AssertException("Only fixed-length key tables may be indexed");
} }
this.db = primaryTable.getDBHandle(); this.db = primaryTable.getDBHandle();
this.primaryTable = primaryTable; this.primaryTable = primaryTable;
this.indexTableRecord = indexTableRecord; this.indexTableRecord = indexTableRecord;
this.indexTable = new Table(primaryTable.getDBHandle(), indexTableRecord); this.indexTable = new Table(primaryTable.getDBHandle(), indexTableRecord);
this.colIndex = indexTableRecord.getIndexedColumn(); this.colIndex = indexTableRecord.getIndexedColumn();
fieldType = primaryTable.getSchema().getField(indexTableRecord.getIndexedColumn());
primaryTable.addIndex(this); primaryTable.addIndex(this);
} }
@ -95,14 +88,12 @@ abstract class IndexTable {
throw new AssertException("Table not found: " + name); throw new AssertException("Table not found: " + name);
} }
if (indexTableRecord.getSchema().getKeyFieldType() instanceof IndexField) { Field keyFieldType = indexTableRecord.getSchema().getKeyFieldType();
if (keyFieldType instanceof IndexField) {
return new FieldIndexTable(primaryTable, indexTableRecord); return new FieldIndexTable(primaryTable, indexTableRecord);
} }
Field fieldType = primaryTable.getSchema().getField(indexTableRecord.getIndexedColumn()); throw new AssertException(
if (fieldType.isVariableLength()) { "Unexpected index field type: " + keyFieldType.getClass().getName());
return new VarIndexTable(primaryTable, indexTableRecord);
}
return new FixedIndexTable(primaryTable, indexTableRecord);
} }
/** /**
@ -121,14 +112,23 @@ abstract class IndexTable {
/** /**
* Check the consistency of this index table. * Check the consistency of this index table.
* @param monitor task monitor
* @return true if consistency check passed, else false * @return true if consistency check passed, else false
* @throws IOException * @throws IOException if IO error occurs
* @throws CancelledException * @throws CancelledException if task cancelled
*/ */
boolean isConsistent(TaskMonitor monitor) throws IOException, CancelledException { boolean isConsistent(TaskMonitor monitor) throws IOException, CancelledException {
return indexTable.isConsistent(primaryTable.getSchema().getFieldNames()[colIndex], monitor); return indexTable.isConsistent(primaryTable.getSchema().getFieldNames()[colIndex], monitor);
} }
/**
* Get the primary table key type
* @return primary table key type
*/
Field getPrimaryTableKeyType() {
return primaryTable.getSchema().getKeyFieldType();
}
/** /**
* Get the table number associated with the underlying index table. * Get the table number associated with the underlying index table.
* @return table number * @return table number
@ -160,6 +160,7 @@ abstract class IndexTable {
* Determine if there is an occurance of the specified index key value. * Determine if there is an occurance of the specified index key value.
* @param field index key value * @param field index key value
* @return true if an index key value equal to field exists. * @return true if an index key value equal to field exists.
* @throws IOException if IO error occurs
*/ */
boolean hasRecord(Field field) throws IOException { boolean hasRecord(Field field) throws IOException {
return indexTable.hasRecord(field); return indexTable.hasRecord(field);
@ -168,16 +169,18 @@ abstract class IndexTable {
/** /**
* Find all primary keys which correspond to the specified indexed field * Find all primary keys which correspond to the specified indexed field
* value. * value.
* @param field the field value to search for. * @param indexValue the field value to search for.
* @return list of primary keys * @return list of primary keys
* @throws IOException if IO error occurs
*/ */
abstract long[] findPrimaryKeys(Field indexValue) throws IOException; abstract Field[] findPrimaryKeys(Field indexValue) throws IOException;
/** /**
* Get the number of primary keys which correspond to the specified indexed field * Get the number of primary keys which correspond to the specified indexed field
* value. * value.
* @param field the field value to search for. * @param indexValue the field value to search for.
* @return key count * @return key count
* @throws IOException if IO error occurs
*/ */
abstract int getKeyCount(Field indexValue) throws IOException; abstract int getKeyCount(Field indexValue) throws IOException;
@ -185,19 +188,20 @@ abstract class IndexTable {
* Add an entry to this index. Caller is responsible for ensuring that this * Add an entry to this index. Caller is responsible for ensuring that this
* is not a duplicate entry. * is not a duplicate entry.
* @param record new record * @param record new record
* @throws IOException * @throws IOException if IO error occurs
*/ */
abstract void addEntry(Record record) throws IOException; abstract void addEntry(Record record) throws IOException;
/** /**
* Delete an entry from this index. * Delete an entry from this index.
* @param record deleted record * @param record deleted record
* @throws IOException * @throws IOException if IO error occurs
*/ */
abstract void deleteEntry(Record record) throws IOException; abstract void deleteEntry(Record record) throws IOException;
/** /**
* Delete all records within this index table. * Delete all records within this index table.
* @throws IOException if IO error occurs
*/ */
void deleteAll() throws IOException { void deleteAll() throws IOException {
indexTable.deleteAll(); indexTable.deleteAll();
@ -218,7 +222,7 @@ abstract class IndexTable {
* @param before if true initial position is before minField, else position * @param before if true initial position is before minField, else position
* is after endField * is after endField
* @return index field iterator. * @return index field iterator.
* @throws IOException * @throws IOException if IO error occurs
*/ */
abstract DBFieldIterator indexIterator(Field minField, Field maxField, boolean before) abstract DBFieldIterator indexIterator(Field minField, Field maxField, boolean before)
throws IOException; throws IOException;
@ -233,7 +237,7 @@ abstract class IndexTable {
* @param before if true initial position is before startField value, else position * @param before if true initial position is before startField value, else position
* is after startField value * is after startField value
* @return index field iterator. * @return index field iterator.
* @throws IOException * @throws IOException if IO error occurs
*/ */
abstract DBFieldIterator indexIterator(Field minField, Field maxField, Field startField, abstract DBFieldIterator indexIterator(Field minField, Field maxField, Field startField,
boolean before) throws IOException; boolean before) throws IOException;
@ -243,9 +247,7 @@ abstract class IndexTable {
* @return primary key iterator * @return primary key iterator
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
*/ */
DBLongIterator keyIterator() throws IOException { abstract DBFieldIterator keyIterator() throws IOException;
return new PrimaryKeyIterator();
}
/** /**
* Iterate over all primary keys sorted based upon the associated index key. * Iterate over all primary keys sorted based upon the associated index key.
@ -255,9 +257,7 @@ abstract class IndexTable {
* @return primary key iterator * @return primary key iterator
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
*/ */
DBLongIterator keyIteratorBefore(Field startField) throws IOException { abstract DBFieldIterator keyIteratorBefore(Field startField) throws IOException;
return new PrimaryKeyIterator(startField, false);
}
/** /**
* Iterate over all primary keys sorted based upon the associated index key. * Iterate over all primary keys sorted based upon the associated index key.
@ -268,9 +268,7 @@ abstract class IndexTable {
* @return primary key iterator * @return primary key iterator
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
*/ */
DBLongIterator keyIteratorAfter(Field startField) throws IOException { abstract DBFieldIterator keyIteratorAfter(Field startField) throws IOException;
return new PrimaryKeyIterator(startField, true);
}
/** /**
* Iterate over all primary keys sorted based upon the associated index key. * Iterate over all primary keys sorted based upon the associated index key.
@ -282,9 +280,8 @@ abstract class IndexTable {
* @return primary key iterator * @return primary key iterator
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
*/ */
DBLongIterator keyIteratorBefore(Field startField, long primaryKey) throws IOException { abstract DBFieldIterator keyIteratorBefore(Field startField, Field primaryKey)
return new PrimaryKeyIterator(null, null, startField, primaryKey, false); throws IOException;
}
/** /**
* Iterate over all primary keys sorted based upon the associated index key. * Iterate over all primary keys sorted based upon the associated index key.
@ -296,9 +293,8 @@ abstract class IndexTable {
* @return primary key iterator * @return primary key iterator
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
*/ */
DBLongIterator keyIteratorAfter(Field startField, long primaryKey) throws IOException { abstract DBFieldIterator keyIteratorAfter(Field startField, Field primaryKey)
return new PrimaryKeyIterator(null, null, startField, primaryKey, true); throws IOException;
}
/** /**
* Iterate over all primary keys sorted based upon the associated index key. * Iterate over all primary keys sorted based upon the associated index key.
@ -314,17 +310,8 @@ abstract class IndexTable {
* @return primary key iterator * @return primary key iterator
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
*/ */
DBLongIterator keyIterator(Field minField, Field maxField, boolean before) throws IOException { abstract DBFieldIterator keyIterator(Field minField, Field maxField, boolean before)
throws IOException;
Field startField = before ? minField : maxField;
if (startField == null && !before) {
}
return new PrimaryKeyIterator(minField, maxField, before ? minField : maxField,
before ? Long.MIN_VALUE : Long.MAX_VALUE, !before);
}
/** /**
* Iterate over all primary keys sorted based upon the associated index key. * Iterate over all primary keys sorted based upon the associated index key.
@ -337,295 +324,7 @@ abstract class IndexTable {
* @return primary key iterator * @return primary key iterator
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
*/ */
DBLongIterator keyIterator(Field minField, Field maxField, Field startField, boolean before) abstract DBFieldIterator keyIterator(Field minField, Field maxField, Field startField,
throws IOException { boolean before) throws IOException;
return new PrimaryKeyIterator(minField, maxField, startField,
before ? Long.MIN_VALUE : Long.MAX_VALUE, !before);
}
/**
* Iterates over primary keys for a range of index field values.
*/
private class PrimaryKeyIterator implements DBLongIterator {
private RecordIterator indexIterator;
private int expectedModCount;
private int index;
private long indexPrimaryKey;
private IndexBuffer indexBuffer;
private boolean forward = true;
private boolean reverse = true;
private Field lastKey;
private boolean hasPrev = false;
private boolean hasNext = false;
/**
* Construct a key iterator starting with the minimum secondary key.
*/
PrimaryKeyIterator() throws IOException {
expectedModCount = indexTable.modCount;
indexIterator = indexTable.iterator();
}
/**
* Construct a key iterator. The iterator is positioned immediately before
* the key associated with the first occurance of the startValue.
* @param startValue indexed field value.
* @param after if true the iterator is positioned immediately after
* the last occurance of the specified startValue position.
*/
PrimaryKeyIterator(Field startValue, boolean after) throws IOException {
this(null, null, startValue, after ? Long.MAX_VALUE : Long.MIN_VALUE, after);
}
/**
* Construct a key iterator. The iterator is positioned immediately before
* or after the key associated with the specified startValue/primaryKey.
* @param minValue minimum index value or null if no minimum
* @param maxValue maximum index value or null if no maximum
* @param startValue starting index value.
* @param primaryKey starting primary key value.
* @param after if true iterator is positioned immediately after
* the startValue/primaryKey,
* otherwise immediately before.
* @throws IOException
*/
PrimaryKeyIterator(Field minValue, Field maxValue, Field startValue, long primaryKey,
boolean after) throws IOException {
expectedModCount = indexTable.modCount;
indexIterator = indexTable.iterator(minValue, maxValue, startValue);
if (hasNext()) {
if (startValue.equals(indexBuffer.getIndexKey())) {
index = indexBuffer.getIndex(primaryKey);
if (index < 0) {
index = -index - 1;
}
else if (after) {
index++;
}
if (index == indexBuffer.keyCount) {
--index;
indexPrimaryKey = indexBuffer.getPrimaryKey(index);
hasNext = false;
hasPrev = true;
}
}
}
}
@Override
public boolean hasNext() throws IOException {
if (hasNext) {
return true;
}
synchronized (db) {
// Handle concurrent modification if necessary
// This is a slightly lazy approach which could miss keys added to the end of an index buffer
if (indexBuffer != null && index < (indexBuffer.keyCount - 1) &&
indexTable.modCount != expectedModCount) {
// refetch index buffer which may have changed
Field indexKey = indexBuffer.getIndexKey();
Record indexRecord;
if (indexKey.isVariableLength()) {
indexRecord = indexTable.getRecord(indexKey);
}
else {
indexRecord = indexTable.getRecord(indexKey.getLongValue());
}
if (indexRecord != null) {
// recover position within index buffer
indexBuffer = new IndexBuffer(indexKey, indexRecord.getBinaryData(0));
index = indexBuffer.getIndex(indexPrimaryKey + 1);
if (index < 0) {
index = -index - 1;
if (index == indexBuffer.keyCount) {
// next must be found in next index buffer below
indexBuffer = null;
}
else {
indexPrimaryKey = indexBuffer.getPrimaryKey(index);
hasNext = true;
}
}
else {
indexPrimaryKey = indexBuffer.getPrimaryKey(index);
hasNext = true;
}
}
else {
// index buffer no longer exists - will need to get next buffer below
indexBuffer = null;
}
hasPrev = false;
}
if (!hasNext) {
// Goto next index buffer
if ((indexBuffer == null || index >= (indexBuffer.keyCount) - 1)) {
// get next index buffer
Record indexRecord = indexIterator.next();
if (indexRecord != null) {
if (!forward) {
indexRecord = indexIterator.next();
forward = true;
}
reverse = false;
if (indexRecord != null) {
indexBuffer =
new IndexBuffer(fieldType.newField(indexRecord.getKeyField()),
indexRecord.getBinaryData(0));
index = 0;
indexPrimaryKey = indexBuffer.getPrimaryKey(index);
hasNext = true;
hasPrev = false;
}
}
}
// Step within current index buffer
else {
++index;
indexPrimaryKey = indexBuffer.getPrimaryKey(index);
hasNext = true;
hasPrev = false;
}
}
expectedModCount = indexTable.modCount;
return hasNext;
}
}
@Override
public boolean hasPrevious() throws IOException {
if (hasPrev) {
return true;
}
synchronized (db) {
// Handle concurrent modification if necessary
// This is a slightly lazy approach which could miss keys added to the beginning of an index buffer
if (indexBuffer != null && index > 0 && indexTable.modCount != expectedModCount) {
// refetch index buffer which may have changed
Field indexKey = indexBuffer.getIndexKey();
Record indexRecord; // refetch index buffer which may have changed
if (indexKey.isVariableLength()) {
indexRecord = indexTable.getRecord(indexKey);
}
else {
indexRecord = indexTable.getRecord(indexKey.getLongValue());
}
if (indexRecord != null) {
// recover position within index buffer
indexBuffer = new IndexBuffer(indexKey, indexRecord.getBinaryData(0));
index = indexBuffer.getIndex(indexPrimaryKey - 1);
if (index < 0) {
index = -index - 1;
if (index == 0) {
// previous must be found in previous index buffer below
indexBuffer = null;
}
else {
--index;
indexPrimaryKey = indexBuffer.getPrimaryKey(index);
hasPrev = true;
}
}
else {
indexPrimaryKey = indexBuffer.getPrimaryKey(index);
hasPrev = true;
}
}
else {
indexBuffer = null;
}
hasNext = false;
}
if (!hasPrev) {
// Goto previous index buffer
if ((indexBuffer == null || index == 0)) {
// get previous index buffer
Record indexRecord = indexIterator.previous();
if (indexRecord != null) {
if (!reverse) {
indexRecord = indexIterator.previous();
reverse = true;
}
forward = false;
if (indexRecord != null) {
indexBuffer =
new IndexBuffer(fieldType.newField(indexRecord.getKeyField()),
indexRecord.getBinaryData(0));
index = indexBuffer.keyCount - 1;
indexPrimaryKey = indexBuffer.getPrimaryKey(index);
hasNext = false;
hasPrev = true;
}
}
}
// Step within current index buffer
else {
--index;
indexPrimaryKey = indexBuffer.getPrimaryKey(index);
hasNext = false;
hasPrev = true;
}
}
expectedModCount = indexTable.modCount;
return hasPrev;
}
}
@Override
public long next() throws IOException {
if (hasNext || hasNext()) {
long key = indexBuffer.getPrimaryKey(index);
lastKey = new LongField(key);
hasNext = false;
hasPrev = true;
return key;
}
throw new NoSuchElementException();
}
@Override
public long previous() throws IOException {
if (hasPrev || hasPrevious()) {
long key = indexBuffer.getPrimaryKey(index);
lastKey = new LongField(key);
hasNext = true;
hasPrev = false;
return key;
}
throw new NoSuchElementException();
}
/**
* WARNING: This could be slow since the index buffer must be read
* after each record deletion.
* @see db.DBLongIterator#delete()
*/
@Override
public boolean delete() throws IOException {
if (lastKey == null) {
return false;
}
synchronized (db) {
long key = lastKey.getLongValue();
primaryTable.deleteRecord(key);
lastKey = null;
return true;
}
}
}
} }

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,15 +15,30 @@
*/ */
package db; package db;
import ghidra.util.exception.AssertException;
import java.io.IOException; import java.io.IOException;
import db.buffers.DataBuffer;
/** /**
* <code>IntField</code> provides a wrapper for 4-byte signed integer data * <code>IntField</code> provides a wrapper for 4-byte signed integer data
* which is read or written to a Record. * which is read or written to a Record.
*/ */
public class IntField extends Field { public final class IntField extends Field {
/**
* Minimum integer field value
*/
public static final IntField MIN_VALUE = new IntField(Integer.MIN_VALUE, true);
/**
* Maximum integer field value
*/
public static final IntField MAX_VALUE = new IntField(Integer.MAX_VALUE, true);
/**
* Instance intended for defining a {@link Table} {@link Schema}
*/
public static final IntField INSTANCE = MIN_VALUE;
private int value; private int value;
@ -39,69 +53,57 @@ public class IntField extends Field {
* @param i initial value * @param i initial value
*/ */
public IntField(int i) { public IntField(int i) {
value = i; this(i, false);
} }
/** /**
* @see db.Field#getIntValue() * Construct an integer field with an initial value of i.
* @param i initial value
* @param immutable true if field value is immutable
*/ */
IntField(int i, boolean immutable) {
super(immutable);
value = i;
}
@Override @Override
public int getIntValue() { public int getIntValue() {
return value; return value;
} }
/**
* @see db.Field#setIntValue(int)
*/
@Override @Override
public void setIntValue(int value) { public void setIntValue(int value) {
checkImmutable();
this.value = value; this.value = value;
} }
/**
* @see db.Field#length()
*/
@Override @Override
int length() { int length() {
return 4; return 4;
} }
/**
* @see db.Field#write(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int write(Buffer buf, int offset) throws IOException { int write(Buffer buf, int offset) throws IOException {
return buf.putInt(offset, value); return buf.putInt(offset, value);
} }
/**
* @see db.Field#read(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int read(Buffer buf, int offset) throws IOException { int read(Buffer buf, int offset) throws IOException {
checkImmutable();
value = buf.getInt(offset); value = buf.getInt(offset);
return offset + 4; return offset + 4;
} }
/**
* @see db.Field#readLength(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int readLength(Buffer buf, int offset) throws IOException { int readLength(Buffer buf, int offset) throws IOException {
return 4; return 4;
} }
/**
* @see db.Field#getFieldType()
*/
@Override @Override
protected byte getFieldType() { byte getFieldType() {
return INT_TYPE; return INT_TYPE;
} }
/**
* @see java.lang.Object#toString()
*/
@Override @Override
public String toString() { public String toString() {
return "IntField: " + Integer.toString(value); return "IntField: " + Integer.toString(value);
@ -109,12 +111,9 @@ public class IntField extends Field {
@Override @Override
public String getValueAsString() { public String getValueAsString() {
return Integer.toHexString(value); return "0x" + Integer.toHexString(value);
} }
/**
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override @Override
public boolean equals(Object obj) { public boolean equals(Object obj) {
if (obj == null || !(obj instanceof IntField)) if (obj == null || !(obj instanceof IntField))
@ -122,9 +121,6 @@ public class IntField extends Field {
return ((IntField) obj).value == value; return ((IntField) obj).value == value;
} }
/**
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override @Override
public int compareTo(Field o) { public int compareTo(Field o) {
IntField f = (IntField) o; IntField f = (IntField) o;
@ -135,54 +131,64 @@ public class IntField extends Field {
return 1; return 1;
} }
/**
* @see db.Field#newField(docking.widgets.fieldpanel.Field)
*/
@Override @Override
public Field newField(Field fieldValue) { int compareTo(DataBuffer buffer, int offset) {
if (fieldValue.isVariableLength()) int otherValue = buffer.getInt(offset);
throw new AssertException(); if (value == otherValue)
return new IntField((int) fieldValue.getLongValue()); return 0;
else if (value < otherValue)
return -1;
return 1;
} }
/**
* @see db.Field#newField()
*/
@Override @Override
public Field newField() { public IntField copyField() {
return new IntField((int) getLongValue());
}
@Override
public IntField newField() {
return new IntField(); return new IntField();
} }
/**
* @see db.Field#getLongValue()
*/
@Override @Override
public long getLongValue() { public long getLongValue() {
return value; return value;
} }
/**
* @see db.Field#setLongValue(long)
*/
@Override @Override
public void setLongValue(long value) { public void setLongValue(long value) {
this.value = (int) value; setIntValue((int) value);
} }
/**
* @see db.Field#getBinaryData()
*/
@Override @Override
public byte[] getBinaryData() { public byte[] getBinaryData() {
return new byte[] { (byte) (value >> 24), (byte) (value >> 16), (byte) (value >> 8), return new byte[] { (byte) (value >> 24), (byte) (value >> 16), (byte) (value >> 8),
(byte) value }; (byte) value };
} }
/** @Override
* @see java.lang.Object#hashCode() public void setBinaryData(byte[] bytes) {
*/ checkImmutable();
if (bytes.length != 4) {
throw new IllegalFieldAccessException();
}
value = ((bytes[0] & 0xff) << 24) | ((bytes[1] & 0xff) << 16) | ((bytes[2] & 0xff) << 8) |
(bytes[3] & 0xff);
}
@Override @Override
public int hashCode() { public int hashCode() {
return value; return value;
} }
@Override
IntField getMinValue() {
return MIN_VALUE;
}
@Override
IntField getMaxValue() {
return MAX_VALUE;
}
} }

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -14,8 +13,11 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package ghidra.feature.vt.api.db; package db;
public class VTAppliedMarkupTableDBAdapterV0 {
/**
* Marker interface for {@link Table} interior nodes within the BTree structure.
*/
public interface InteriorNode extends BTreeNode {
// marker interface only
} }

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -24,7 +23,7 @@ import java.util.NoSuchElementException;
*/ */
public class KeyToRecordIterator implements RecordIterator { public class KeyToRecordIterator implements RecordIterator {
private DBLongIterator keyIter; private DBFieldIterator keyIter;
private Table table; private Table table;
private DBHandle db; private DBHandle db;
@ -32,7 +31,7 @@ public class KeyToRecordIterator implements RecordIterator {
* Construct a record iterator from a secondary index key iterator. * Construct a record iterator from a secondary index key iterator.
* @param keyIter key iterator. * @param keyIter key iterator.
*/ */
public KeyToRecordIterator(Table table, DBLongIterator keyIter) { public KeyToRecordIterator(Table table, DBFieldIterator keyIter) {
this.table = table; this.table = table;
this.db = table.getDBHandle(); this.db = table.getDBHandle();
this.keyIter = keyIter; this.keyIter = keyIter;
@ -41,6 +40,7 @@ public class KeyToRecordIterator implements RecordIterator {
/** /**
* @see db.RecordIterator#hasNext() * @see db.RecordIterator#hasNext()
*/ */
@Override
public boolean hasNext() throws IOException { public boolean hasNext() throws IOException {
synchronized (db) { synchronized (db) {
return keyIter.hasNext(); return keyIter.hasNext();
@ -50,6 +50,7 @@ public class KeyToRecordIterator implements RecordIterator {
/** /**
* @see db.RecordIterator#hasPrevious() * @see db.RecordIterator#hasPrevious()
*/ */
@Override
public boolean hasPrevious() throws IOException { public boolean hasPrevious() throws IOException {
synchronized (db) { synchronized (db) {
return keyIter.hasPrevious(); return keyIter.hasPrevious();
@ -59,11 +60,13 @@ public class KeyToRecordIterator implements RecordIterator {
/** /**
* @see db.RecordIterator#next() * @see db.RecordIterator#next()
*/ */
@Override
public Record next() throws IOException { public Record next() throws IOException {
synchronized (db) { synchronized (db) {
try { try {
return table.getRecord(keyIter.next()); return table.getRecord(keyIter.next());
} catch (NoSuchElementException e) { }
catch (NoSuchElementException e) {
return null; return null;
} }
} }
@ -72,11 +75,13 @@ public class KeyToRecordIterator implements RecordIterator {
/** /**
* @see db.RecordIterator#previous() * @see db.RecordIterator#previous()
*/ */
@Override
public Record previous() throws IOException { public Record previous() throws IOException {
synchronized (db) { synchronized (db) {
try { try {
return table.getRecord(keyIter.previous()); return table.getRecord(keyIter.previous());
} catch (NoSuchElementException e) { }
catch (NoSuchElementException e) {
return null; return null;
} }
} }
@ -85,6 +90,7 @@ public class KeyToRecordIterator implements RecordIterator {
/** /**
* @see db.RecordIterator#delete() * @see db.RecordIterator#delete()
*/ */
@Override
public boolean delete() throws IOException { public boolean delete() throws IOException {
return keyIter.delete(); return keyIter.delete();
} }

View file

@ -0,0 +1,61 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
/**
* <code>LegacyIndexField</code> supports legacy index tables where the indexed
* field was a {@link LongField} and improperly employed a variable-length
* index storage scheme when the primary key was a LongField.
*/
class LegacyIndexField extends IndexField {
/**
* Constructor
* @param indexField primary table field type being indexed
*/
LegacyIndexField(Field indexField) {
super(indexField, new LongField());
}
private LegacyIndexField(Field indexField, LongField primaryKey) {
super(indexField, primaryKey);
}
@Override
public boolean isVariableLength() {
// NOTE: while fixed-length IndexFields are possible this past
// oversight failed to override this method for fixed-length cases
// (e.g., indexing fixed-length field with long primary key).
// To preserve backward compatibility this can not be changed for
// long primary keys.
return true;
}
@Override
public boolean equals(Object obj) {
return (obj instanceof LegacyIndexField) && super.equals(obj);
}
@Override
LegacyIndexField newIndexField(Field indexValue, Field primaryKey) {
if (!indexValue.isSameType(getIndexedField()) || !(primaryKey instanceof LongField)) {
throw new IllegalArgumentException("incorrect index value or key type");
}
return new LegacyIndexField(indexValue, (LongField) primaryKey);
}
}

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,15 +15,30 @@
*/ */
package db; package db;
import ghidra.util.exception.AssertException;
import java.io.IOException; import java.io.IOException;
import db.buffers.DataBuffer;
/** /**
* <code>LongField</code> provides a wrapper for 8-byte signed long data * <code>LongField</code> provides a wrapper for 8-byte signed long data
* which is read or written to a Record. * which is read or written to a Record.
*/ */
public class LongField extends Field { public final class LongField extends Field {
/**
* Minimum long field value
*/
public static final LongField MIN_VALUE = new LongField(Long.MIN_VALUE, true);
/**
* Maximum long field value
*/
public static final LongField MAX_VALUE = new LongField(Long.MAX_VALUE, true);
/**
* Instance intended for defining a {@link Table} {@link Schema}
*/
public static final LongField INSTANCE = MIN_VALUE;
private long value; private long value;
@ -39,69 +53,57 @@ public class LongField extends Field {
* @param l initial value * @param l initial value
*/ */
public LongField(long l) { public LongField(long l) {
this(l, false);
}
/**
* Construct a long field with an initial value of l.
* @param l initial value
* @param immutable true if field value is immutable
*/
LongField(long l, boolean immutable) {
super(immutable);
value = l; value = l;
} }
/*
* @see ghidra.framework.store.db.Field#getLongValue()
*/
@Override @Override
public long getLongValue() { public long getLongValue() {
return value; return value;
} }
/*
* @see ghidra.framework.store.db.Field#setLongValue(long)
*/
@Override @Override
public void setLongValue(long value) { public void setLongValue(long value) {
checkImmutable();
this.value = value; this.value = value;
} }
/*
* @see ghidra.framework.store.db.Field#length()
*/
@Override @Override
int length() { int length() {
return 8; return 8;
} }
/*
* @see ghidra.framework.store.db.Field#write(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int write(Buffer buf, int offset) throws IOException { int write(Buffer buf, int offset) throws IOException {
return buf.putLong(offset, value); return buf.putLong(offset, value);
} }
/*
* @see ghidra.framework.store.db.Field#read(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int read(Buffer buf, int offset) throws IOException { int read(Buffer buf, int offset) throws IOException {
checkImmutable();
value = buf.getLong(offset); value = buf.getLong(offset);
return offset + 8; return offset + 8;
} }
/*
* @see ghidra.framework.store.db.Field#readLength(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int readLength(Buffer buf, int offset) throws IOException { int readLength(Buffer buf, int offset) throws IOException {
return 8; return 8;
} }
/*
* @see ghidra.framework.store.db.Field#getFieldType()
*/
@Override @Override
protected byte getFieldType() { byte getFieldType() {
return LONG_TYPE; return LONG_TYPE;
} }
/*
* @see java.lang.Object#toString()
*/
@Override @Override
public String toString() { public String toString() {
return "LongField: " + Long.toString(value); return "LongField: " + Long.toString(value);
@ -109,12 +111,9 @@ public class LongField extends Field {
@Override @Override
public String getValueAsString() { public String getValueAsString() {
return Long.toHexString(value); return "0x" + Long.toHexString(value);
} }
/*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override @Override
public boolean equals(Object obj) { public boolean equals(Object obj) {
if (obj == null || !(obj instanceof LongField)) if (obj == null || !(obj instanceof LongField))
@ -122,11 +121,11 @@ public class LongField extends Field {
return ((LongField) obj).value == value; return ((LongField) obj).value == value;
} }
/*
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override @Override
public int compareTo(Field o) { public int compareTo(Field o) {
if (!(o instanceof LongField)) {
throw new UnsupportedOperationException("may only compare similar Field types");
}
LongField f = (LongField) o; LongField f = (LongField) o;
if (value == f.value) if (value == f.value)
return 0; return 0;
@ -135,27 +134,26 @@ public class LongField extends Field {
return 1; return 1;
} }
/*
* @see ghidra.framework.store.db.Field#newField(ghidra.framework.store.db.Field)
*/
@Override @Override
public Field newField(Field fieldValue) { int compareTo(DataBuffer buffer, int offset) {
if (fieldValue.isVariableLength()) long otherValue = buffer.getLong(offset);
throw new AssertException(); if (value == otherValue)
return new LongField(fieldValue.getLongValue()); return 0;
else if (value < otherValue)
return -1;
return 1;
} }
/*
* @see ghidra.framework.store.db.Field#newField()
*/
@Override @Override
public Field newField() { public LongField copyField() {
return new LongField(getLongValue());
}
@Override
public LongField newField() {
return new LongField(); return new LongField();
} }
/*
* @see ghidra.framework.store.db.Field#getBinaryData()
*/
@Override @Override
public byte[] getBinaryData() { public byte[] getBinaryData() {
return new byte[] { (byte) (value >> 56), (byte) (value >> 48), (byte) (value >> 40), return new byte[] { (byte) (value >> 56), (byte) (value >> 48), (byte) (value >> 40),
@ -163,12 +161,31 @@ public class LongField extends Field {
(byte) value }; (byte) value };
} }
/* @Override
* @see java.lang.Object#hashCode() public void setBinaryData(byte[] bytes) {
*/ checkImmutable();
if (bytes.length != 8) {
throw new IllegalFieldAccessException();
}
value = (((long) bytes[0] & 0xff) << 56) | (((long) bytes[1] & 0xff) << 48) |
(((long) bytes[2] & 0xff) << 40) | (((long) bytes[3] & 0xff) << 32) |
(((long) bytes[4] & 0xff) << 24) | (((long) bytes[5] & 0xff) << 16) |
(((long) bytes[6] & 0xff) << 8) | ((long) bytes[7] & 0xff);
}
@Override @Override
public int hashCode() { public int hashCode() {
return (int) (value ^ (value >>> 32)); return (int) (value ^ (value >>> 32));
} }
@Override
LongField getMinValue() {
return MIN_VALUE;
}
@Override
LongField getMaxValue() {
return MAX_VALUE;
}
} }

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,15 +15,14 @@
*/ */
package db; package db;
import java.io.IOException;
import db.buffers.DataBuffer;
import ghidra.util.Msg; import ghidra.util.Msg;
import ghidra.util.exception.AssertException; import ghidra.util.exception.AssertException;
import ghidra.util.exception.CancelledException; import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor; import ghidra.util.task.TaskMonitor;
import java.io.IOException;
import db.buffers.DataBuffer;
/** /**
* <code>LongKeyInteriorNode</code> stores a BTree node for use as an interior * <code>LongKeyInteriorNode</code> stores a BTree node for use as an interior
* node when searching for Table records within the database. This type of node * node when searching for Table records within the database. This type of node
@ -33,7 +31,7 @@ import db.buffers.DataBuffer;
* | NodeType(1) | KeyCount(4) | Key0(8) | ID0(4) | ... | KeyN(8) | IDN(4) | * | NodeType(1) | KeyCount(4) | Key0(8) | ID0(4) | ... | KeyN(8) | IDN(4) |
* </pre> * </pre>
*/ */
class LongKeyInteriorNode extends LongKeyNode { class LongKeyInteriorNode extends LongKeyNode implements InteriorNode {
private static final int BASE = LONGKEY_NODE_HEADER_SIZE; private static final int BASE = LONGKEY_NODE_HEADER_SIZE;
@ -63,7 +61,8 @@ class LongKeyInteriorNode extends LongKeyNode {
* @param id2 right child node buffer ID * @param id2 right child node buffer ID
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
*/ */
LongKeyInteriorNode(NodeMgr nodeMgr, long key1, int id1, long key2, int id2) throws IOException { LongKeyInteriorNode(NodeMgr nodeMgr, long key1, int id1, long key2, int id2)
throws IOException {
super(nodeMgr, NodeMgr.LONGKEY_INTERIOR_NODE); super(nodeMgr, NodeMgr.LONGKEY_INTERIOR_NODE);
maxKeyCount = (buffer.length() - BASE) / ENTRY_SIZE; maxKeyCount = (buffer.length() - BASE) / ENTRY_SIZE;
setKeyCount(2); setKeyCount(2);
@ -73,6 +72,11 @@ class LongKeyInteriorNode extends LongKeyNode {
putEntry(1, key2, id2); putEntry(1, key2, id2);
} }
@Override
public LongKeyInteriorNode getParent() {
return parent;
}
/** /**
* Construct a new empty long-key interior node. * Construct a new empty long-key interior node.
* Node must be initialized with a minimum of two keys. * Node must be initialized with a minimum of two keys.
@ -86,16 +90,16 @@ class LongKeyInteriorNode extends LongKeyNode {
void logConsistencyError(String tableName, String msg, Throwable t) { void logConsistencyError(String tableName, String msg, Throwable t) {
Msg.debug(this, "Consistency Error (" + tableName + "): " + msg); Msg.debug(this, "Consistency Error (" + tableName + "): " + msg);
Msg.debug(this, " parent.key[0]=" + Long.toHexString(getKey(0)) + " bufferID=" + Msg.debug(this,
getBufferId()); " parent.key[0]=" + Long.toHexString(getKey(0)) + " bufferID=" + getBufferId());
if (t != null) { if (t != null) {
Msg.error(this, "Consistency Error (" + tableName + ")", t); Msg.error(this, "Consistency Error (" + tableName + ")", t);
} }
} }
@Override @Override
public boolean isConsistent(String tableName, TaskMonitor monitor) throws IOException, public boolean isConsistent(String tableName, TaskMonitor monitor)
CancelledException { throws IOException, CancelledException {
boolean consistent = true; boolean consistent = true;
long lastMinKey = 0; long lastMinKey = 0;
long lastMaxKey = 0; long lastMaxKey = 0;
@ -106,23 +110,21 @@ class LongKeyInteriorNode extends LongKeyNode {
if (i != 0) { if (i != 0) {
if (key <= lastMinKey) { if (key <= lastMinKey) {
consistent = false; consistent = false;
logConsistencyError(tableName, "child[" + i + "].minKey <= child[" + (i - 1) + logConsistencyError(tableName,
"].minKey", null); "child[" + i + "].minKey <= child[" + (i - 1) + "].minKey", null);
Msg.debug(this, " child[" + i + "].minKey = 0x" + Long.toHexString(key) + Msg.debug(this, " child[" + i + "].minKey = 0x" + Long.toHexString(key) +
" bufferID=" + getBufferId(i)); " bufferID=" + getBufferId(i));
Msg.debug(this, Msg.debug(this, " child[" + (i - 1) + "].minKey = 0x" +
" child[" + (i - 1) + "].minKey = 0x" + Long.toHexString(lastMinKey) + Long.toHexString(lastMinKey) + " bufferID=" + getBufferId(i - 1));
" bufferID=" + getBufferId(i - 1));
} }
else if (key <= lastMaxKey) { else if (key <= lastMaxKey) {
consistent = false; consistent = false;
logConsistencyError(tableName, "child[" + i + "].minKey <= child[" + (i - 1) + logConsistencyError(tableName,
"].maxKey", null); "child[" + i + "].minKey <= child[" + (i - 1) + "].maxKey", null);
Msg.debug(this, " child[" + i + "].minKey = 0x" + Long.toHexString(key) + Msg.debug(this, " child[" + i + "].minKey = 0x" + Long.toHexString(key) +
" bufferID=" + getBufferId(i)); " bufferID=" + getBufferId(i));
Msg.debug(this, Msg.debug(this, " child[" + (i - 1) + "].maxKey = 0x" +
" child[" + (i - 1) + "].maxKey = 0x" + Long.toHexString(lastMaxKey) + Long.toHexString(lastMaxKey) + " bufferID=" + getBufferId(i - 1));
" bufferID=" + getBufferId(i - 1));
} }
} }
@ -155,8 +157,8 @@ class LongKeyInteriorNode extends LongKeyNode {
long childKey0 = node.getKey(0); long childKey0 = node.getKey(0);
if (key != childKey0) { if (key != childKey0) {
consistent = false; consistent = false;
logConsistencyError(tableName, "parent key entry mismatch with child[" + i + logConsistencyError(tableName,
"].minKey", null); "parent key entry mismatch with child[" + i + "].minKey", null);
Msg.debug(this, " child[" + i + "].minKey = 0x" + Long.toHexString(childKey0) + Msg.debug(this, " child[" + i + "].minKey = 0x" + Long.toHexString(childKey0) +
" bufferID=" + getBufferId(i - 1)); " bufferID=" + getBufferId(i - 1));
Msg.debug(this, " parent key entry = 0x" + Long.toHexString(key)); Msg.debug(this, " parent key entry = 0x" + Long.toHexString(key));
@ -178,9 +180,15 @@ class LongKeyInteriorNode extends LongKeyNode {
/** /**
* Perform a binary search to locate the specified key and derive an index * Perform a binary search to locate the specified key and derive an index
* into the Buffer ID storage. * into the Buffer ID storage. This method is intended to locate the child
* @param key * node which contains the specified key. The returned index corresponds
* @return int buffer ID index. * to a child's stored buffer/node ID and may correspond to another interior
* node or a leaf record node. Each stored key within this interior node
* effectively identifies the maximum key contained within the corresponding
* child node.
* @param key key to search for
* @return int buffer ID index of child node. An existing positive index
* value will always be returned.
*/ */
int getIdIndex(long key) { int getIdIndex(long key) {
@ -203,10 +211,18 @@ class LongKeyInteriorNode extends LongKeyNode {
return max; return max;
} }
@Override
public int getKeyIndex(Field key) throws IOException {
return getKeyIndex(key.getLongValue());
}
/** /**
* Perform a binary search to locate the specified key and derive an index * Perform a binary search to locate the specified key and derive an index
* into the Buffer ID storage. * into the Buffer ID storage. This method is intended to find the insertion
* @param key * index or exact match for a child key. A negative value will be returned
* when an exact match is not found and may be transformed into an
* insertion index (insetIndex = -returnedIndex-1).
* @param key key to search for
* @return int buffer ID index. * @return int buffer ID index.
*/ */
private int getKeyIndex(long key) { private int getKeyIndex(long key) {
@ -230,9 +246,6 @@ class LongKeyInteriorNode extends LongKeyNode {
return -(min + 1); return -(min + 1);
} }
/*
* @see ghidra.framework.store.db.LongKeyNode#getKey(int)
*/
@Override @Override
long getKey(int index) { long getKey(int index) {
return buffer.getLong(BASE + (index * ENTRY_SIZE)); return buffer.getLong(BASE + (index * ENTRY_SIZE));
@ -329,10 +342,11 @@ class LongKeyInteriorNode extends LongKeyNode {
if (index < 0) { if (index < 0) {
throw new AssertException(); throw new AssertException();
} }
// Update key
putKey(index, newKey);
if (index == 0 && parent != null) { if (index == 0 && parent != null) {
parent.keyChanged(oldKey, newKey); parent.keyChanged(oldKey, newKey);
} }
putKey(index, newKey);
} }
/** /**
@ -395,9 +409,6 @@ class LongKeyInteriorNode extends LongKeyNode {
newNode.getBufferId()); newNode.getBufferId());
} }
/*
* @see ghidra.framework.store.db.LongKeyNode#getLeafNode(long)
*/
@Override @Override
LongKeyRecordNode getLeafNode(long key) throws IOException { LongKeyRecordNode getLeafNode(long key) throws IOException {
LongKeyNode node = nodeMgr.getLongKeyNode(getBufferId(getIdIndex(key))); LongKeyNode node = nodeMgr.getLongKeyNode(getBufferId(getIdIndex(key)));
@ -544,9 +555,6 @@ class LongKeyInteriorNode extends LongKeyNode {
} }
} }
/*
* @see ghidra.framework.store.db.LongKeyNode#delete()
*/
@Override @Override
public void delete() throws IOException { public void delete() throws IOException {
@ -559,9 +567,7 @@ class LongKeyInteriorNode extends LongKeyNode {
nodeMgr.deleteNode(this); nodeMgr.deleteNode(this);
} }
/* @Override
* @see ghidra.framework.store.db.BTreeNode#getBufferReferences()
*/
public int[] getBufferReferences() { public int[] getBufferReferences() {
int[] ids = new int[keyCount]; int[] ids = new int[keyCount];
for (int i = 0; i < keyCount; i++) { for (int i = 0; i < keyCount; i++) {

View file

@ -101,6 +101,11 @@ abstract class LongKeyNode implements BTreeNode {
*/ */
abstract long getKey(int index); abstract long getKey(int index);
@Override
public final Field getKeyField(int index) throws IOException {
return new LongField(getKey(index));
}
/** /**
* Get the leaf node which contains the specified key. * Get the leaf node which contains the specified key.
* @param key key value * @param key key value

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,20 +15,24 @@
*/ */
package db; package db;
import java.io.IOException;
import db.buffers.DataBuffer;
import ghidra.util.Msg; import ghidra.util.Msg;
import ghidra.util.exception.CancelledException; import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor; import ghidra.util.task.TaskMonitor;
import java.io.IOException;
import db.buffers.DataBuffer;
/** /**
* <code>LongKeyRecordNode</code> is an abstract implementation of a BTree leaf node * <code>LongKeyRecordNode</code> is an abstract implementation of a BTree leaf node
* which utilizes long key values and stores records. * which utilizes long key values and stores records.
* <p>
* This type of node has the following partial layout within a single DataBuffer
* (field size in bytes):
* <pre>
* | NodeType(1) | KeyCount(4) | PrevLeafId(4) | NextLeafId(4) | ...
* </pre>
*/ */
abstract class LongKeyRecordNode extends LongKeyNode { abstract class LongKeyRecordNode extends LongKeyNode implements RecordNode {
private static final int ID_SIZE = 4; private static final int ID_SIZE = 4;
@ -55,7 +58,8 @@ abstract class LongKeyRecordNode extends LongKeyNode {
* @param nextLeafId node buffer id for next leaf - right sibling ( &lt; 0 : no leaf) * @param nextLeafId node buffer id for next leaf - right sibling ( &lt; 0 : no leaf)
* @throws IOException thrown if an IO error occurs * @throws IOException thrown if an IO error occurs
*/ */
LongKeyRecordNode(NodeMgr nodeMgr, byte nodeType, int prevLeafId, int nextLeafId) throws IOException { LongKeyRecordNode(NodeMgr nodeMgr, byte nodeType, int prevLeafId, int nextLeafId)
throws IOException {
super(nodeMgr, nodeType); super(nodeMgr, nodeType);
// Initialize header // Initialize header
@ -63,6 +67,11 @@ abstract class LongKeyRecordNode extends LongKeyNode {
buffer.putInt(NEXT_LEAF_ID_OFFSET, nextLeafId); buffer.putInt(NEXT_LEAF_ID_OFFSET, nextLeafId);
} }
@Override
public LongKeyInteriorNode getParent() {
return parent;
}
void logConsistencyError(String tableName, String msg, Throwable t) { void logConsistencyError(String tableName, String msg, Throwable t) {
Msg.debug(this, "Consistency Error (" + tableName + "): " + msg); Msg.debug(this, "Consistency Error (" + tableName + "): " + msg);
Msg.debug(this, " bufferID=" + getBufferId() + " key[0]=0x" + Long.toHexString(getKey(0))); Msg.debug(this, " bufferID=" + getBufferId() + " key[0]=0x" + Long.toHexString(getKey(0)));
@ -72,7 +81,8 @@ abstract class LongKeyRecordNode extends LongKeyNode {
} }
@Override @Override
public boolean isConsistent(String tableName, TaskMonitor monitor) throws IOException, CancelledException { public boolean isConsistent(String tableName, TaskMonitor monitor)
throws IOException, CancelledException {
boolean consistent = true; boolean consistent = true;
long prevKey = 0; long prevKey = 0;
for (int i = 0; i < keyCount; i++) { for (int i = 0; i < keyCount; i++) {
@ -83,7 +93,8 @@ abstract class LongKeyRecordNode extends LongKeyNode {
consistent = false; consistent = false;
logConsistencyError(tableName, "key[" + i + "] <= key[" + (i - 1) + "]", null); logConsistencyError(tableName, "key[" + i + "] <= key[" + (i - 1) + "]", null);
Msg.debug(this, " key[" + i + "].minKey = 0x" + Long.toHexString(key)); Msg.debug(this, " key[" + i + "].minKey = 0x" + Long.toHexString(key));
Msg.debug(this, " key[" + (i-1) + "].minKey = 0x" + Long.toHexString(prevKey)); Msg.debug(this,
" key[" + (i - 1) + "].minKey = 0x" + Long.toHexString(prevKey));
} }
} }
prevKey = key; prevKey = key;
@ -116,9 +127,6 @@ abstract class LongKeyRecordNode extends LongKeyNode {
return consistent; return consistent;
} }
/*
* @see ghidra.framework.store.db.LongKeyNode#getLeafNode(long)
*/
@Override @Override
LongKeyRecordNode getLeafNode(long key) throws IOException { LongKeyRecordNode getLeafNode(long key) throws IOException {
return this; return this;
@ -179,6 +187,11 @@ abstract class LongKeyRecordNode extends LongKeyNode {
return -(min + 1); return -(min + 1);
} }
@Override
public int getKeyIndex(Field key) throws IOException {
return getKeyIndex(key.getLongValue());
}
/** /**
* Split this leaf node in half and update tree. * Split this leaf node in half and update tree.
* When a split is performed, the next operation must be performed * When a split is performed, the next operation must be performed
@ -209,13 +222,14 @@ abstract class LongKeyRecordNode extends LongKeyNode {
} }
// New parent node becomes root // New parent node becomes root
return new LongKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), newLeaf.getKey(0), newBufId); return new LongKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), newLeaf.getKey(0),
newBufId);
} }
/** /**
* Append a leaf which contains one or more keys and update tree. Leaf is inserted * Append a leaf which contains one or more keys and update tree. Leaf is inserted
* as the new right sibling of this leaf. * as the new right sibling of this leaf.
* @param newLeaf new right sibling leaf (must be same node type as this leaf) * @param leaf new right sibling leaf (must be same node type as this leaf)
* @return root node which may have changed. * @return root node which may have changed.
* @throws IOException thrown if an IO error occurs * @throws IOException thrown if an IO error occurs
*/ */
@ -242,7 +256,8 @@ abstract class LongKeyRecordNode extends LongKeyNode {
} }
// New parent node becomes root // New parent node becomes root
return new LongKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), leaf.getKey(0), newBufId); return new LongKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), leaf.getKey(0),
newBufId);
} }
/** /**
@ -281,8 +296,8 @@ abstract class LongKeyRecordNode extends LongKeyNode {
/** /**
* Create a new leaf and add to the node manager. * Create a new leaf and add to the node manager.
* The new leaf's parent is unknown. * The new leaf's parent is unknown.
* @param prevLeafId node buffer id for previous leaf - left sibling ( &lt; 0: no leaf) * @param prevNodeId node buffer id for previous leaf - left sibling ( &lt; 0: no leaf)
* @param nextLeafId node buffer id for next leaf - right sibling ( &lt; 0 : no leaf) * @param nextNodeId node buffer id for next leaf - right sibling ( &lt; 0 : no leaf)
* @return new leaf node. * @return new leaf node.
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
*/ */
@ -391,7 +406,6 @@ abstract class LongKeyRecordNode extends LongKeyNode {
*/ */
abstract void remove(int index) throws IOException; abstract void remove(int index) throws IOException;
/** /**
* Inserts the record at the given index if there is sufficient space in * Inserts the record at the given index if there is sufficient space in
* the buffer. * the buffer.
@ -423,7 +437,7 @@ abstract class LongKeyRecordNode extends LongKeyNode {
/** /**
* Get the record located at the specified index. * Get the record located at the specified index.
* @param schema record data schema * @param schema record data schema
* @param keyIndex key index * @param index key index
* @return Record * @return Record
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
*/ */
@ -516,7 +530,7 @@ abstract class LongKeyRecordNode extends LongKeyNode {
/** /**
* Create a new record node with no siblings attached. * Create a new record node with no siblings attached.
* @param nodeMgr table node manager instance * @param nodeMgr table node manager instance
* @param fixedRecordLength length of fixed-length record, 0 = variable length * @param schema record schema
* @return new record leaf node * @return new record leaf node
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
*/ */

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,7 +15,6 @@
*/ */
package db; package db;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
@ -31,6 +29,7 @@ class MasterTable {
private TableRecord masterRecord; private TableRecord masterRecord;
private DBHandle dbh;
private DBParms dbParms; private DBParms dbParms;
private Table table; private Table table;
@ -41,24 +40,26 @@ class MasterTable {
/** /**
* Construct an existing master table. * Construct an existing master table.
* @param db database handle * @param dbh database handle
* @throws IOException database IO error
*/ */
MasterTable(DBHandle db) throws IOException { MasterTable(DBHandle dbh) throws IOException {
this.dbParms = db.getDBParms(); this.dbh = dbh;
this.dbParms = dbh.getDBParms();
masterRecord = new TableRecord(0, "MASTER", masterRecord = new TableRecord(0, "MASTER", TableRecord.getTableRecordSchema(), -1);
TableRecord.getTableRecordSchema(), -1);
try { try {
masterRecord.setRootBufferId(dbParms.get(DBParms.MASTER_TABLE_ROOT_BUFFER_ID_PARM)); masterRecord.setRootBufferId(dbParms.get(DBParms.MASTER_TABLE_ROOT_BUFFER_ID_PARM));
} catch (ArrayIndexOutOfBoundsException e) { }
throw new IOException("Corrupt database parameters"); catch (ArrayIndexOutOfBoundsException e) {
throw new IOException("Corrupt database parameters", e);
} }
table = new Table(db, masterRecord); table = new Table(dbh, masterRecord);
ArrayList<TableRecord> trList = new ArrayList<TableRecord>(); ArrayList<TableRecord> trList = new ArrayList<TableRecord>();
RecordIterator it = table.iterator(); RecordIterator it = table.iterator();
while (it.hasNext()) { while (it.hasNext()) {
trList.add(new TableRecord(it.next())); trList.add(new TableRecord(dbh, it.next()));
} }
tableRecords = new TableRecord[trList.size()]; tableRecords = new TableRecord[trList.size()];
trList.toArray(tableRecords); trList.toArray(tableRecords);
@ -78,8 +79,10 @@ class MasterTable {
* @param tableSchema table schema * @param tableSchema table schema
* @param indexedColumn primary table index key column, or -1 for primary table * @param indexedColumn primary table index key column, or -1 for primary table
* @return new table record * @return new table record
* @throws IOException database IO error
*/ */
TableRecord createTableRecord(String name, Schema tableSchema, int indexedColumn) throws IOException { TableRecord createTableRecord(String name, Schema tableSchema, int indexedColumn)
throws IOException {
// Create new table record // Create new table record
TableRecord tableRecord = new TableRecord(nextTableNum++, name, tableSchema, indexedColumn); TableRecord tableRecord = new TableRecord(nextTableNum++, name, tableSchema, indexedColumn);
@ -102,7 +105,8 @@ class MasterTable {
* Remove the master table record associated with the specified table name. * Remove the master table record associated with the specified table name.
* This method may only be invoked while a database transaction * This method may only be invoked while a database transaction
* is in progress. * is in progress.
* @param id table name * @param tableNum table number (key within master table)
* @throws IOException database IO error
*/ */
void deleteTableRecord(long tableNum) throws IOException { void deleteTableRecord(long tableNum) throws IOException {
@ -115,7 +119,8 @@ class MasterTable {
tableRecords[i].invalidate(); tableRecords[i].invalidate();
// Update master root which may have changed // Update master root which may have changed
dbParms.set(DBParms.MASTER_TABLE_ROOT_BUFFER_ID_PARM, masterRecord.getRootBufferId()); dbParms.set(DBParms.MASTER_TABLE_ROOT_BUFFER_ID_PARM,
masterRecord.getRootBufferId());
// Update tableRecord list // Update tableRecord list
TableRecord[] newList = new TableRecord[tableRecords.length - 1]; TableRecord[] newList = new TableRecord[tableRecords.length - 1];
@ -141,6 +146,7 @@ class MasterTable {
* Refresh table data from the master table. * Refresh table data from the master table.
* Records are returned in the list ordered by their table number key. * Records are returned in the list ordered by their table number key.
* @return the update list of master table records. * @return the update list of master table records.
* @throws IOException database IO error
*/ */
TableRecord[] refreshTableRecords() throws IOException { TableRecord[] refreshTableRecords() throws IOException {
@ -150,8 +156,9 @@ class MasterTable {
masterRecord.setRootBufferId(masterRootId); masterRecord.setRootBufferId(masterRootId);
table.tableRecordChanged(); table.tableRecordChanged();
} }
} catch (ArrayIndexOutOfBoundsException e) { }
throw new IOException("Corrupt database parameters"); catch (ArrayIndexOutOfBoundsException e) {
throw new IOException("Corrupt database parameters", e);
} }
ArrayList<TableRecord> trList = new ArrayList<TableRecord>(); ArrayList<TableRecord> trList = new ArrayList<TableRecord>();
@ -168,10 +175,10 @@ class MasterTable {
} }
if (ix == oldTableCnt || tablenum < tableRecords[ix].getTableNum()) { if (ix == oldTableCnt || tablenum < tableRecords[ix].getTableNum()) {
trList.add(new TableRecord(rec)); // new table trList.add(new TableRecord(dbh, rec)); // new table
} }
else if (tablenum == tableRecords[ix].getTableNum()) { else if (tablenum == tableRecords[ix].getTableNum()) {
tableRecords[ix].setRecord(rec); tableRecords[ix].setRecord(dbh, rec);
trList.add(tableRecords[ix++]); // update existing table trList.add(tableRecords[ix++]); // update existing table
} }
} }
@ -188,7 +195,7 @@ class MasterTable {
* Flush all unsaved table changes to the underlying buffer mgr. * Flush all unsaved table changes to the underlying buffer mgr.
* This method may only be invoked while a database transaction * This method may only be invoked while a database transaction
* is in progress. * is in progress.
* @throws IOException * @throws IOException database IO error
*/ */
void flush() throws IOException { void flush() throws IOException {
for (int i = 0; i < tableRecords.length; i++) { for (int i = 0; i < tableRecords.length; i++) {
@ -201,8 +208,8 @@ class MasterTable {
/** /**
* Change the name of a table and its associated indexes. * Change the name of a table and its associated indexes.
* @param oldName * @param oldName old table name
* @param newName * @param newName new tablename
*/ */
void changeTableName(String oldName, String newName) { void changeTableName(String oldName, String newName) {
for (int i = 0; i < tableRecords.length; i++) { for (int i = 0; i < tableRecords.length; i++) {
@ -213,4 +220,3 @@ class MasterTable {
} }
} }

View file

@ -15,13 +15,12 @@
*/ */
package db; package db;
import ghidra.util.datastruct.IntObjectHashtable;
import ghidra.util.exception.AssertException;
import java.io.IOException; import java.io.IOException;
import java.util.HashMap;
import db.buffers.BufferMgr; import db.buffers.BufferMgr;
import db.buffers.DataBuffer; import db.buffers.DataBuffer;
import ghidra.util.exception.AssertException;
/** /**
* The <code>NodeMgr</code> manages all database nodes associated with * The <code>NodeMgr</code> manages all database nodes associated with
@ -30,6 +29,25 @@ import db.buffers.DataBuffer;
* buffer allocations, retrievals and releases as required. The NodeMgr * buffer allocations, retrievals and releases as required. The NodeMgr
* also performs hard caching of all buffers until the releaseNodes * also performs hard caching of all buffers until the releaseNodes
* method is invoked. * method is invoked.
*
* Legacy Issues (prior to Ghidra 9.2):
* <ul>
* <li>Legacy {@link Table} implementation incorrectly employed {@link VarKeyNode}
* storage with primitive fixed-length primary keys other than {@link LongField}
* (e.g., {@link ByteField}). With improved support for fixed-length keys
* legacy data poses a backward capatibility issue. This has been
* addressed through the use of a hack whereby a {@link Schema} is forced to
* treat the primary key as variable length
* (see {@link Schema#forceUseOfVariableLengthKeyNodes()}. The detection
* for this rare condition is provided by {@link TableRecord} during
* schema instantiation.</li>
*
* <li>Legacy {@link Table} implementation incorrectly employed variable
* length storage when both primary key and indexed fields were
* LongField types. This issue has been addressed by treating the
* {@link Field#LEGACY_INDEX_LONG_TYPE} (0x8) as variable-length (see
* implementation {@link LegacyIndexField}).</li>
* </ul>
*/ */
class NodeMgr { class NodeMgr {
@ -70,6 +88,24 @@ class NodeMgr {
*/ */
static final byte VARKEY_REC_NODE = 4; static final byte VARKEY_REC_NODE = 4;
/**
* Node type for fixed-length key interior tree nodes
* @see db.FixedKeyInteriorNode
*/
static final byte FIXEDKEY_INTERIOR_NODE = 5;
/**
* Node type for fixed-length key variable-length record leaf nodes
* @see db.FixedKeyVarRecNode
*/
static final byte FIXEDKEY_VAR_REC_NODE = 6;
/**
* Node type for fixed-length key fixed-length record leaf nodes
* @see db.FixedKeyFixedRecNode
*/
static final byte FIXEDKEY_FIXED_REC_NODE = 7;
/** /**
* Node type for chained buffer index nodes * Node type for chained buffer index nodes
* @see db.DBBuffer * @see db.DBBuffer
@ -84,21 +120,21 @@ class NodeMgr {
private BufferMgr bufferMgr; private BufferMgr bufferMgr;
private Schema schema; private Schema schema;
private String tableName;
private int leafRecordCnt = 0; private int leafRecordCnt = 0;
private IntObjectHashtable<BTreeNode> nodeTable = new IntObjectHashtable<BTreeNode>(10); private HashMap<Integer, BTreeNode> nodeTable = new HashMap<>();
// private ArrayList<BTreeNode> nodeList = new ArrayList<BTreeNode>(10);
/** /**
* Construct a node manager for a specific table. * Construct a node manager for a specific table.
* @param table associated table
* @param bufferMgr buffer manager. * @param bufferMgr buffer manager.
* @param schema table schema (required for Table use)
*/ */
NodeMgr(BufferMgr bufferMgr, Schema schema) { NodeMgr(Table table, BufferMgr bufferMgr) {
this.bufferMgr = bufferMgr; this.bufferMgr = bufferMgr;
this.schema = schema; this.schema = table.getSchema();
this.tableName = table.getName();
} }
/** /**
@ -109,21 +145,36 @@ class NodeMgr {
return bufferMgr; return bufferMgr;
} }
/**
* Get the table schema associated with this node manager
* @return table schema
*/
Schema getTableSchema() {
return schema;
}
/**
* Get the table name associated with this node manager
* @return table name
*/
String getTableName() {
return tableName;
}
/** /**
* Release all nodes held by this node manager. * Release all nodes held by this node manager.
* This method must be invoked before a database transaction can be committed. * This method must be invoked before a database transaction can be committed.
* @return the change in record count (+/-) * @return the change in record count (+/-)
* @throws IOException if IO error occurs on database
*/ */
int releaseNodes() throws IOException { int releaseNodes() throws IOException {
int[] bufferIds = nodeTable.getKeys(); for (BTreeNode node : nodeTable.values()) {
for (int bufferId : bufferIds) { if (node instanceof RecordNode) {
BTreeNode node = nodeTable.get(bufferId);
if (node instanceof LongKeyRecordNode || node instanceof VarKeyRecordNode) {
leafRecordCnt -= node.getKeyCount(); leafRecordCnt -= node.getKeyCount();
} }
bufferMgr.releaseBuffer(node.getBuffer()); bufferMgr.releaseBuffer(node.getBuffer());
} }
nodeTable.removeAll(); nodeTable = new HashMap<>();
int result = -leafRecordCnt; int result = -leafRecordCnt;
leafRecordCnt = 0; leafRecordCnt = 0;
return result; return result;
@ -133,8 +184,8 @@ class NodeMgr {
* Release a specific read-only buffer node. * Release a specific read-only buffer node.
* WARNING! This method may only be used to release read-only buffers, * WARNING! This method may only be used to release read-only buffers,
* if a release buffer has been modified an IOException will be thrown. * if a release buffer has been modified an IOException will be thrown.
* @param bufferId * @param bufferId buffer ID
* @throws IOException * @throws IOException if IO error occurs on database
*/ */
void releaseReadOnlyNode(int bufferId) throws IOException { void releaseReadOnlyNode(int bufferId) throws IOException {
BTreeNode node = nodeTable.get(bufferId); BTreeNode node = nodeTable.get(bufferId);
@ -142,7 +193,7 @@ class NodeMgr {
// There is a possible leafRecordCount error if buffer is released multiple times // There is a possible leafRecordCount error if buffer is released multiple times
throw new IOException("Releasing modified buffer node as read-only"); throw new IOException("Releasing modified buffer node as read-only");
} }
if (node instanceof LongKeyRecordNode || node instanceof VarKeyRecordNode) { if (node instanceof RecordNode) {
leafRecordCnt -= node.getKeyCount(); leafRecordCnt -= node.getKeyCount();
} }
bufferMgr.releaseBuffer(node.getBuffer()); bufferMgr.releaseBuffer(node.getBuffer());
@ -170,11 +221,32 @@ class NodeMgr {
bufferMgr.deleteBuffer(bufferId); bufferMgr.deleteBuffer(bufferId);
} }
/**
* Perform a test of the specified buffer to determine if it is
* a VarKeyNode type. It is important that the specified buffer
* not be in use.
* @param bufferMgr buffer manager
* @param bufferId buffer ID
* @return true if node found and is a VarKeyNode type
* @throws IOException thrown if an IO error occurs
*/
static boolean isVarKeyNode(BufferMgr bufferMgr, int bufferId) throws IOException {
DataBuffer buf = bufferMgr.getBuffer(bufferId);
try {
int nodeType = getNodeType(buf);
return nodeType == VARKEY_REC_NODE || nodeType == VARKEY_INTERIOR_NODE;
}
finally {
bufferMgr.releaseBuffer(buf);
}
}
/** /**
* Get a LongKeyNode object for a specified buffer * Get a LongKeyNode object for a specified buffer
* @param bufferId buffer ID * @param bufferId buffer ID
* @return LongKeyNode instance * @return LongKeyNode instance
* @throws ClassCastException if node type is incorrect. * @throws ClassCastException if node type is incorrect.
* @throws IOException if IO error occurs on database
*/ */
LongKeyNode getLongKeyNode(int bufferId) throws IOException { LongKeyNode getLongKeyNode(int bufferId) throws IOException {
LongKeyNode node = (LongKeyNode) nodeTable.get(bufferId); LongKeyNode node = (LongKeyNode) nodeTable.get(bufferId);
@ -197,8 +269,44 @@ class NodeMgr {
node = new LongKeyInteriorNode(this, buf); node = new LongKeyInteriorNode(this, buf);
break; break;
default: default:
throw new AssertException("Unexpected Node Type (" + nodeType + bufferMgr.releaseBuffer(buf);
") found, expecting LongKeyNode"); throw new AssertException(
"Unexpected Node Type (" + nodeType + ") found, expecting LongKeyNode");
}
return node;
}
/**
* Get a FixedKeyNode object for a specified buffer
* @param bufferId buffer ID
* @return LongKeyNode instance
* @throws ClassCastException if node type is incorrect.
* @throws IOException if IO error occurs on database
*/
FixedKeyNode getFixedKeyNode(int bufferId) throws IOException {
FixedKeyNode node = (FixedKeyNode) nodeTable.get(bufferId);
if (node != null) {
return node;
}
DataBuffer buf = bufferMgr.getBuffer(bufferId);
int nodeType = getNodeType(buf);
switch (nodeType) {
case FIXEDKEY_VAR_REC_NODE:
node = new FixedKeyVarRecNode(this, buf);
leafRecordCnt += node.keyCount;
break;
case FIXEDKEY_FIXED_REC_NODE:
node = new FixedKeyFixedRecNode(this, buf);
leafRecordCnt += node.keyCount;
break;
case FIXEDKEY_INTERIOR_NODE:
node = new FixedKeyInteriorNode(this, buf);
break;
default:
bufferMgr.releaseBuffer(buf);
throw new IOException(
"Unexpected Node Type (" + nodeType + ") found, expecting FixedKeyNode");
} }
return node; return node;
} }
@ -208,6 +316,7 @@ class NodeMgr {
* @param bufferId buffer ID * @param bufferId buffer ID
* @return VarKeyNode instance * @return VarKeyNode instance
* @throws ClassCastException if node type is incorrect. * @throws ClassCastException if node type is incorrect.
* @throws IOException if IO error occurs on database
*/ */
VarKeyNode getVarKeyNode(int bufferId) throws IOException { VarKeyNode getVarKeyNode(int bufferId) throws IOException {
VarKeyNode node = (VarKeyNode) nodeTable.get(bufferId); VarKeyNode node = (VarKeyNode) nodeTable.get(bufferId);
@ -226,8 +335,9 @@ class NodeMgr {
node = new VarKeyInteriorNode(this, buf); node = new VarKeyInteriorNode(this, buf);
break; break;
default: default:
throw new AssertException("Unexpected Node Type (" + nodeType + bufferMgr.releaseBuffer(buf);
") found, expecting VarKeyNode"); throw new AssertException(
"Unexpected Node Type (" + nodeType + ") found, expecting VarKeyNode");
} }
return node; return node;
} }

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,10 +15,10 @@
*/ */
package db; package db;
import ghidra.util.ObjectStorage;
import java.util.ArrayList; import java.util.ArrayList;
import ghidra.util.ObjectStorage;
/** /**
* <code>ObjectStorageAdapterDB</code> provides an ObjectStorage * <code>ObjectStorageAdapterDB</code> provides an ObjectStorage
* implementation for use by Saveable objects. This allows Saveable objects * implementation for use by Saveable objects. This allows Saveable objects
@ -53,81 +52,63 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
} }
} }
/* @Override
* @see ghidra.util.ObjectStorage#putInt(int)
*/
public void putInt(int value) { public void putInt(int value) {
if (readOnly) if (readOnly)
throw new IllegalStateException(); throw new IllegalStateException();
fieldList.add(new IntField(value)); fieldList.add(new IntField(value));
} }
/* @Override
* @see ghidra.util.ObjectStorage#putByte(byte)
*/
public void putByte(byte value) { public void putByte(byte value) {
if (readOnly) if (readOnly)
throw new IllegalStateException(); throw new IllegalStateException();
fieldList.add(new ByteField(value)); fieldList.add(new ByteField(value));
} }
/* @Override
* @see ghidra.util.ObjectStorage#putShort(short)
*/
public void putShort(short value) { public void putShort(short value) {
if (readOnly) if (readOnly)
throw new IllegalStateException(); throw new IllegalStateException();
fieldList.add(new ShortField(value)); fieldList.add(new ShortField(value));
} }
/* @Override
* @see ghidra.util.ObjectStorage#putLong(long)
*/
public void putLong(long value) { public void putLong(long value) {
if (readOnly) if (readOnly)
throw new IllegalStateException(); throw new IllegalStateException();
fieldList.add(new LongField(value)); fieldList.add(new LongField(value));
} }
/* @Override
* @see ghidra.util.ObjectStorage#putString(java.lang.String)
*/
public void putString(String value) { public void putString(String value) {
if (readOnly) if (readOnly)
throw new IllegalStateException(); throw new IllegalStateException();
fieldList.add(new StringField(value)); fieldList.add(new StringField(value));
} }
/* @Override
* @see ghidra.util.ObjectStorage#putBoolean(boolean)
*/
public void putBoolean(boolean value) { public void putBoolean(boolean value) {
if (readOnly) if (readOnly)
throw new IllegalStateException(); throw new IllegalStateException();
fieldList.add(new BooleanField(value)); fieldList.add(new BooleanField(value));
} }
/* @Override
* @see ghidra.util.ObjectStorage#putFloat(float)
*/
public void putFloat(float value) { public void putFloat(float value) {
if (readOnly) if (readOnly)
throw new IllegalStateException(); throw new IllegalStateException();
fieldList.add(new BinaryCodedField(value)); fieldList.add(new BinaryCodedField(value));
} }
/* @Override
* @see ghidra.util.ObjectStorage#putDouble(double)
*/
public void putDouble(double value) { public void putDouble(double value) {
if (readOnly) if (readOnly)
throw new IllegalStateException(); throw new IllegalStateException();
fieldList.add(new BinaryCodedField(value)); fieldList.add(new BinaryCodedField(value));
} }
/* @Override
* @see ghidra.util.ObjectStorage#getInt()
*/
public int getInt() { public int getInt() {
try { try {
return fieldList.get(col++).getIntValue(); return fieldList.get(col++).getIntValue();
@ -137,9 +118,7 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
} }
} }
/* @Override
* @see ghidra.util.ObjectStorage#getByte()
*/
public byte getByte() { public byte getByte() {
try { try {
return fieldList.get(col++).getByteValue(); return fieldList.get(col++).getByteValue();
@ -149,9 +128,7 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
} }
} }
/* @Override
* @see ghidra.util.ObjectStorage#getShort()
*/
public short getShort() { public short getShort() {
try { try {
return fieldList.get(col++).getShortValue(); return fieldList.get(col++).getShortValue();
@ -161,9 +138,7 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
} }
} }
/* @Override
* @see ghidra.util.ObjectStorage#getLong()
*/
public long getLong() { public long getLong() {
try { try {
return fieldList.get(col++).getLongValue(); return fieldList.get(col++).getLongValue();
@ -173,9 +148,7 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
} }
} }
/* @Override
* @see ghidra.util.ObjectStorage#getBoolean()
*/
public boolean getBoolean() { public boolean getBoolean() {
try { try {
return fieldList.get(col++).getBooleanValue(); return fieldList.get(col++).getBooleanValue();
@ -185,9 +158,7 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
} }
} }
/* @Override
* @see ghidra.util.ObjectStorage#getString()
*/
public String getString() { public String getString() {
try { try {
return fieldList.get(col++).getString(); return fieldList.get(col++).getString();
@ -197,9 +168,7 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
} }
} }
/* @Override
* @see ghidra.util.ObjectStorage#getFloat()
*/
public float getFloat() { public float getFloat() {
try { try {
BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++)); BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++));
@ -210,9 +179,7 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
} }
} }
/* @Override
* @see ghidra.util.ObjectStorage#getDouble()
*/
public double getDouble() { public double getDouble() {
try { try {
BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++)); BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++));
@ -223,72 +190,56 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
} }
} }
/* @Override
* @see ghidra.util.ObjectStorage#putInts(int[])
*/
public void putInts(int[] value) { public void putInts(int[] value) {
if (readOnly) if (readOnly)
throw new IllegalStateException(); throw new IllegalStateException();
fieldList.add(new BinaryCodedField(value)); fieldList.add(new BinaryCodedField(value));
} }
/* @Override
* @see ghidra.util.ObjectStorage#putBytes(byte[])
*/
public void putBytes(byte[] value) { public void putBytes(byte[] value) {
if (readOnly) if (readOnly)
throw new IllegalStateException(); throw new IllegalStateException();
fieldList.add(new BinaryCodedField(value)); fieldList.add(new BinaryCodedField(value));
} }
/* @Override
* @see ghidra.util.ObjectStorage#putShorts(short[])
*/
public void putShorts(short[] value) { public void putShorts(short[] value) {
if (readOnly) if (readOnly)
throw new IllegalStateException(); throw new IllegalStateException();
fieldList.add(new BinaryCodedField(value)); fieldList.add(new BinaryCodedField(value));
} }
/* @Override
* @see ghidra.util.ObjectStorage#putLongs(long[])
*/
public void putLongs(long[] value) { public void putLongs(long[] value) {
if (readOnly) if (readOnly)
throw new IllegalStateException(); throw new IllegalStateException();
fieldList.add(new BinaryCodedField(value)); fieldList.add(new BinaryCodedField(value));
} }
/* @Override
* @see ghidra.util.ObjectStorage#putFloats(float[])
*/
public void putFloats(float[] value) { public void putFloats(float[] value) {
if (readOnly) if (readOnly)
throw new IllegalStateException(); throw new IllegalStateException();
fieldList.add(new BinaryCodedField(value)); fieldList.add(new BinaryCodedField(value));
} }
/* @Override
* @see ghidra.util.ObjectStorage#putDoubles(double[])
*/
public void putDoubles(double[] value) { public void putDoubles(double[] value) {
if (readOnly) if (readOnly)
throw new IllegalStateException(); throw new IllegalStateException();
fieldList.add(new BinaryCodedField(value)); fieldList.add(new BinaryCodedField(value));
} }
/* @Override
* @see ghidra.util.ObjectStorage#putStrings(java.lang.String[])
*/
public void putStrings(String[] value) { public void putStrings(String[] value) {
if (readOnly) if (readOnly)
throw new IllegalStateException(); throw new IllegalStateException();
fieldList.add(new BinaryCodedField(value)); fieldList.add(new BinaryCodedField(value));
} }
/* @Override
* @see ghidra.util.ObjectStorage#getInts()
*/
public int[] getInts() { public int[] getInts() {
try { try {
BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++)); BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++));
@ -299,9 +250,7 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
} }
} }
/* @Override
* @see ghidra.util.ObjectStorage#getBytes()
*/
public byte[] getBytes() { public byte[] getBytes() {
try { try {
BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++)); BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++));
@ -312,9 +261,7 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
} }
} }
/* @Override
* @see ghidra.util.ObjectStorage#getShorts()
*/
public short[] getShorts() { public short[] getShorts() {
try { try {
BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++)); BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++));
@ -325,9 +272,7 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
} }
} }
/* @Override
* @see ghidra.util.ObjectStorage#getLongs()
*/
public long[] getLongs() { public long[] getLongs() {
try { try {
BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++)); BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++));
@ -338,9 +283,7 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
} }
} }
/* @Override
* @see ghidra.util.ObjectStorage#getFloats()
*/
public float[] getFloats() { public float[] getFloats() {
try { try {
BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++)); BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++));
@ -351,9 +294,7 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
} }
} }
/* @Override
* @see ghidra.util.ObjectStorage#getDoubles()
*/
public double[] getDoubles() { public double[] getDoubles() {
try { try {
BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++)); BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++));
@ -364,9 +305,7 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
} }
} }
/* @Override
* @see ghidra.util.ObjectStorage#getStrings()
*/
public String[] getStrings() { public String[] getStrings() {
try { try {
BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++)); BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++));
@ -383,13 +322,13 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
* @return Schema * @return Schema
*/ */
public Schema getSchema(int version) { public Schema getSchema(int version) {
Class<?>[] fieldClasses = new Class<?>[fieldList.size()]; Field[] fields = new Field[fieldList.size()];
String[] fieldNames = new String[fieldClasses.length]; String[] fieldNames = new String[fields.length];
for (int i = 0; i < fieldClasses.length; i++) { for (int i = 0; i < fields.length; i++) {
fieldClasses[i] = fieldList.get(i).getClass(); fields[i] = fieldList.get(i).newField();
fieldNames[i] = Integer.toString(i); fieldNames[i] = Integer.toString(i);
} }
return new Schema(version, "key", fieldClasses, fieldNames); return new Schema(version, "key", fields, fieldNames);
} }
/** /**

View file

@ -107,17 +107,19 @@ public class Record implements Comparable<Record> {
/** /**
* Determine if this record's schema is compatible with the specified schema. * Determine if this record's schema is compatible with the specified schema.
* This check factors column count and column field types only. * This check factors column count and column field types only.
* @param schema * @param schema other schema
* @return true if records schemas are the same * @return true if records schemas are the same
*/ */
public boolean hasSameSchema(Schema schema) { public boolean hasSameSchema(Schema schema) {
if (fieldValues.length != schema.getFieldCount()) { if (fieldValues.length != schema.getFieldCount()) {
return false; return false;
} }
Class<?>[] schemaFieldClasses = schema.getFieldClasses(); if (!key.isSameType(schema.getKeyFieldType())) {
return false;
}
Field[] otherFields = schema.getFields();
for (int i = 0; i < fieldValues.length; i++) { for (int i = 0; i < fieldValues.length; i++) {
if (!fieldValues[i].getClass().equals(schemaFieldClasses[i])) { if (!fieldValues[i].isSameType(otherFields[i])) {
return false; return false;
} }
} }
@ -139,7 +141,7 @@ public class Record implements Comparable<Record> {
*/ */
public Field getFieldValue(int columnIndex) { public Field getFieldValue(int columnIndex) {
Field f = fieldValues[columnIndex]; Field f = fieldValues[columnIndex];
return f.newField(f); return f.copyField();
} }
/** /**
@ -202,11 +204,11 @@ public class Record implements Comparable<Record> {
*/ */
public Record copy() { public Record copy() {
Field newKey = key.newField(key); Field newKey = key.copyField();
Field[] fields = new Field[fieldValues.length]; Field[] fields = new Field[fieldValues.length];
for (int i = 0; i < fields.length; i++) { for (int i = 0; i < fields.length; i++) {
Field f = fieldValues[i]; Field f = fieldValues[i];
fields[i] = f.newField(f); fields[i] = f.copyField();
} }
return new Record(newKey, fields); return new Record(newKey, fields);
} }
@ -349,6 +351,7 @@ public class Record implements Comparable<Record> {
* @param colIndex field index * @param colIndex field index
* @param bytes field value * @param bytes field value
* @throws IllegalFieldAccessException if field does support binary data access * @throws IllegalFieldAccessException if field does support binary data access
* or incorrect number of bytes provided
*/ */
public void setBinaryData(int colIndex, byte[] bytes) { public void setBinaryData(int colIndex, byte[] bytes) {
dirty = true; dirty = true;
@ -417,6 +420,7 @@ public class Record implements Comparable<Record> {
public int hashCode() { public int hashCode() {
return key.hashCode(); return key.hashCode();
} }
/** /**
* Compare the content of two Records for equality. * Compare the content of two Records for equality.
* @see java.lang.Object#equals(java.lang.Object) * @see java.lang.Object#equals(java.lang.Object)
@ -439,4 +443,9 @@ public class Record implements Comparable<Record> {
return key.compareTo(otherRec.key); return key.compareTo(otherRec.key);
} }
@Override
public String toString() {
return "{key:" + key + "}";
}
} }

View file

@ -0,0 +1,42 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
/**
* {@link Table} record leaf nodes within the BTree structure.
*/
public interface RecordNode extends BTreeNode {
/**
* Get the record offset within the node's data buffer
* @param index key/record index
* @return positive record offset within buffer, or a negative bufferID for
* indirect record storage in a dedicated buffer
* @throws IOException if IO error occurs
*/
int getRecordOffset(int index) throws IOException;
/**
* Get the key offset within the node's data buffer
* @param index key/record index
* @return positive record offset within buffer
* @throws IOException if IO error occurs
*/
int getKeyOffset(int index) throws IOException;
}

View file

@ -33,42 +33,42 @@ public class Schema {
private Field keyType; private Field keyType;
private String keyName; private String keyName;
private Class<?>[] fieldClasses; private Field[] fields;
private String[] fieldNames; private String[] fieldNames;
private boolean isVariableLength; private boolean isVariableLength;
private int fixedLength; private int fixedLength;
private boolean forceUseVariableLengthKeyNodes;
/** /**
* Construct a new Schema. * Construct a new Schema.
* @param version * @param version schema version
* @param keyFieldClass Field class associated with primary key. If the * @param keyField field associated with primary key (representative instance)
* class is LongField, the long key methods on Table must be used. Specifying any * @param keyName primary key name
* other Field class requires the use of the Field key methods on Table. * @param fields array of column fields (representative instances)
* @param keyName * @param fieldNames array of column field names
* @param fieldClasses * @throws IllegalArgumentException invalid parameters
* @param fieldNames
*/ */
public Schema(int version, Class<? extends Field> keyFieldClass, String keyName, public Schema(int version, Field keyField, String keyName, Field[] fields,
Class<?>[] fieldClasses, String[] fieldNames) { String[] fieldNames) {
this.version = version; this.version = version;
this.keyType = getField(keyFieldClass); this.keyType = keyField;
this.keyName = keyName; this.keyName = keyName;
this.fieldClasses = new Class<?>[fieldClasses.length]; this.fields = fields;
this.fieldNames = fieldNames; this.fieldNames = fieldNames;
if (fieldClasses.length != fieldNames.length) if (fields.length != fieldNames.length)
throw new IllegalArgumentException(); throw new IllegalArgumentException("fieldNames and fields lengths differ");
isVariableLength = false; isVariableLength = false;
fixedLength = 0; fixedLength = 0;
for (int i = 0; i < fieldClasses.length; i++) { for (int colIndex = 0; colIndex < fields.length; colIndex++) {
this.fieldClasses[i] = fieldClasses[i]; Field field = fields[colIndex];
Field field = getField(fieldClasses[i]);
if (field.isVariableLength()) { if (field.isVariableLength()) {
isVariableLength = true; isVariableLength = true;
} }
fixedLength += field.length(); fixedLength += field.length();
if (fieldNames[i].indexOf(NAME_SEPARATOR) >= 0) if (fieldNames[colIndex].indexOf(NAME_SEPARATOR) >= 0)
throw new IllegalArgumentException(); throw new IllegalArgumentException("field names may not contain ';'");
} }
if (isVariableLength) { if (isVariableLength) {
fixedLength = 0; fixedLength = 0;
@ -76,46 +76,96 @@ public class Schema {
} }
/** /**
* Construct a new Schema which uses a long key. The Field key methods on Table * Construct a new Schema which uses a long key.
* should not be used. * @param version schema version
* @param version * @param keyName primary key name
* @param keyName * @param fields array of column fields (representative instances)
* @param fieldClasses * @param fieldNames array of column field names
* @param fieldNames * @throws IllegalArgumentException invalid parameters
*/
public Schema(int version, String keyName, Field[] fields, String[] fieldNames) {
this(version, LongField.INSTANCE, keyName, fields, fieldNames);
}
/**
* Construct a new Schema.
* @param version schema version
* @param keyClass field class associated with primary key
* @param keyName primary key name
* @param fieldClasses array of column field classes
* @param fieldNames array of column field names
* @throws IllegalArgumentException invalid parameters
*/
public Schema(int version, Class<?> keyClass, String keyName, Class<?>[] fieldClasses,
String[] fieldNames) {
this(version, getField(keyClass), keyName, getFields(fieldClasses), fieldNames);
}
/**
* Construct a new Schema which uses a long key.
* @param version schema version
* @param keyName primary key name
* @param fieldClasses array of column field classes
* @param fieldNames array of column field names
* @throws IllegalArgumentException invalid parameters
*/ */
public Schema(int version, String keyName, Class<?>[] fieldClasses, String[] fieldNames) { public Schema(int version, String keyName, Class<?>[] fieldClasses, String[] fieldNames) {
this(version, LongField.class, keyName, fieldClasses, fieldNames); this(version, LongField.INSTANCE, keyName, getFields(fieldClasses), fieldNames);
} }
/** /**
* Construct a new Schema with the given number of columns * Construct a new Schema with the given number of columns
* @param version * @param version schema version
* @param fieldTypes * @param encodedKeyFieldType key field type
* @param encodedFieldTypes encoded field types array.
* @param packedFieldNames packed list of field names separated by ';'. * @param packedFieldNames packed list of field names separated by ';'.
* The first field name corresponds to the key name. * The first field name corresponds to the key name.
* @throws UnsupportedFieldException if unsupported fieldType specified * @throws UnsupportedFieldException if unsupported fieldType specified
*/ */
Schema(int version, byte keyFieldType, byte[] fieldTypes, String packedFieldNames) Schema(int version, byte encodedKeyFieldType, byte[] encodedFieldTypes, String packedFieldNames)
throws UnsupportedFieldException { throws UnsupportedFieldException {
this.version = version; this.version = version;
this.keyType = Field.getField(keyFieldType); this.keyType = Field.getField(encodedKeyFieldType);
parseNames(packedFieldNames); parseNames(packedFieldNames);
if (fieldTypes.length != fieldNames.length)
throw new IllegalArgumentException();
this.fieldClasses = new Class[fieldTypes.length];
isVariableLength = false; isVariableLength = false;
fixedLength = 0; fixedLength = 0;
for (int i = 0; i < fieldTypes.length; i++) { fields = new Field[encodedFieldTypes.length];
Field field = Field.getField(fieldTypes[i]); for (int i = 0; i < encodedFieldTypes.length; i++) {
fieldClasses[i] = field.getClass(); byte b = encodedFieldTypes[i];
if (field.isVariableLength()) { Field f = Field.getField(b);
fields[i] = f;
if (f.isVariableLength()) {
isVariableLength = true; isVariableLength = true;
} }
fixedLength += field.length(); fixedLength += f.length();
} }
if (isVariableLength) { if (isVariableLength) {
fixedLength = 0; fixedLength = 0;
} }
if (fieldNames.length != encodedFieldTypes.length) {
throw new IllegalArgumentException("fieldNames and column types differ in length");
}
}
private static Field getField(Class<?> fieldClass) {
if (!Field.class.isAssignableFrom(fieldClass) || fieldClass == Field.class ||
IndexField.class.isAssignableFrom(fieldClass)) {
throw new IllegalArgumentException("Invalid Field class: " + fieldClass.getName());
}
try {
return (Field) fieldClass.getConstructor().newInstance();
}
catch (Exception e) {
throw new RuntimeException("Failed to construct: " + fieldClass.getName(), e);
}
}
private static Field[] getFields(Class<?>[] fieldClasses) {
Field[] fields = new Field[fieldClasses.length];
for (int i = 0; i < fieldClasses.length; i++) {
fields[i] = getField(fieldClasses[i]);
}
return fields;
} }
/** /**
@ -123,22 +173,43 @@ public class Schema {
* @return true if LongKeyNode's can be used to store records produced with this schema. * @return true if LongKeyNode's can be used to store records produced with this schema.
*/ */
boolean useLongKeyNodes() { boolean useLongKeyNodes() {
return keyType instanceof LongField; return !forceUseVariableLengthKeyNodes && keyType instanceof LongField;
} }
/** /**
* Get the key Field class * Determine if this schema uses VarKeyNode's within a table.
* @return key Field classes * @return true if VarKeyNode's are be used to store records produced with this schema.
*/ */
public Class<? extends Field> getKeyFieldClass() { boolean useVariableKeyNodes() {
return keyType.getClass(); return forceUseVariableLengthKeyNodes || keyType.isVariableLength();
}
/**
* Determine if this schema can use FixedKeyNode's within a table.
* @return true if FixedKeyNode's can be used to store records produced with this schema.
*/
boolean useFixedKeyNodes() {
return !useVariableKeyNodes() && !useLongKeyNodes();
}
/**
* Force use of variable-length key nodes.
* <br>
* This method provides a work-around for legacy schemas which
* employ primitive fixed-length keys other than LongField
* and improperly employ a variable-length-key storage schema.
* Although rare, this may be neccessary to ensure backward compatibility
* with legacy DB storage (example ByteField key employed by old table).
*/
void forceUseOfVariableLengthKeyNodes() {
forceUseVariableLengthKeyNodes = true;
} }
/** /**
* Get the Field type for the key. * Get the Field type for the key.
* @return key Field type * @return key Field type
*/ */
Field getKeyFieldType() { public Field getKeyFieldType() {
return keyType; return keyType;
} }
@ -155,8 +226,8 @@ public class Schema {
* The returned list is ordered consistent with the schema definition. * The returned list is ordered consistent with the schema definition.
* @return data Field classes * @return data Field classes
*/ */
public Class<?>[] getFieldClasses() { public Field[] getFields() {
return fieldClasses; return fields;
} }
/** /**
@ -173,7 +244,7 @@ public class Schema {
* @return data Field count * @return data Field count
*/ */
public int getFieldCount() { public int getFieldCount() {
return fieldClasses.length; return fields.length;
} }
/** /**
@ -207,16 +278,21 @@ public class Schema {
return buf.toString(); return buf.toString();
} }
/** byte getEncodedKeyFieldType() {
* Get the schema field types as a byte array. return keyType.getFieldType();
* @return byte[] field type list
*/
byte[] getFieldTypes() {
byte[] fieldTypes = new byte[fieldClasses.length];
for (int i = 0; i < fieldClasses.length; i++) {
fieldTypes[i] = getField(fieldClasses[i]).getFieldType();
} }
return fieldTypes;
/**
* Get the schema field types as an encoded byte array.
* @return byte[] field type list as an encoded byte array.
*/
byte[] getEncodedFieldTypes() {
byte[] encodedFieldTypes = new byte[fields.length];
for (int colIndex = 0; colIndex < fields.length; colIndex++) {
encodedFieldTypes[colIndex] = fields[colIndex].getFieldType();
}
return encodedFieldTypes;
} }
/** /**
@ -245,8 +321,8 @@ public class Schema {
/** /**
* Create an empty record for the specified key. * Create an empty record for the specified key.
* @param key * @param key long key
* @return Record * @return new record
*/ */
public Record createRecord(long key) { public Record createRecord(long key) {
return createRecord(new LongField(key)); return createRecord(new LongField(key));
@ -254,21 +330,20 @@ public class Schema {
/** /**
* Create an empty record for the specified key. * Create an empty record for the specified key.
* @param key * @param key record key field
* @return new record * @return new record
*/ */
public Record createRecord(Field key) { public Record createRecord(Field key) {
if (!getKeyFieldClass().equals(key.getClass())) { if (!keyType.isSameType(key)) {
throw new IllegalArgumentException( throw new IllegalArgumentException("key differs from schema key type");
"expected key field type of " + keyType.getClass().getSimpleName());
} }
Field[] fieldValues = new Field[fieldClasses.length]; Field[] fieldValues = new Field[fields.length];
for (int i = 0; i < fieldClasses.length; i++) { for (int colIndex = 0; colIndex < fields.length; colIndex++) {
try { try {
fieldValues[i] = (Field) fieldClasses[i].newInstance(); fieldValues[colIndex] = fields[colIndex].newField();
} }
catch (Exception e) { catch (Exception e) {
throw new AssertException(); throw new AssertException(e);
} }
} }
return new Record(key, fieldValues); return new Record(key, fieldValues);
@ -281,48 +356,13 @@ public class Schema {
*/ */
Field getField(int colIndex) { Field getField(int colIndex) {
try { try {
return (Field) fieldClasses[colIndex].newInstance(); return fields[colIndex].newField();
} }
catch (Exception e) { catch (Exception e) {
throw new AssertException(e.getMessage()); throw new AssertException(e.getMessage());
} }
} }
/**
* Get a new instance of a data Field object for the specified Field class.
* @param fieldClass Field implementation class
* @return new Field object suitable for data reading/writing.
*/
private Field getField(Class<?> fieldClass) {
try {
return (Field) fieldClass.newInstance();
}
catch (Exception e) {
throw new AssertException(e.getMessage());
}
}
/**
* Compare two schemas for equality.
* Field names are ignored in this comparison.
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (!(obj instanceof Schema))
return false;
Schema otherSchema = (Schema) obj;
if (version != otherSchema.version ||
!keyType.getClass().equals(otherSchema.keyType.getClass()) ||
fieldClasses.length != otherSchema.fieldClasses.length)
return false;
for (int i = 0; i < fieldClasses.length; i++) {
if (!fieldClasses[i].getClass().equals(otherSchema.fieldClasses[i].getClass()))
return false;
}
return true;
}
@Override @Override
public String toString() { public String toString() {
StringBuilder buf = new StringBuilder(); StringBuilder buf = new StringBuilder();
@ -334,7 +374,7 @@ public class Schema {
buf.append("\n"); buf.append("\n");
buf.append(fieldNames[i]); buf.append(fieldNames[i]);
buf.append("("); buf.append("(");
buf.append(fieldClasses[i].getSimpleName()); buf.append(fields[i].getClass().getSimpleName());
buf.append(")"); buf.append(")");
} }
return buf.toString(); return buf.toString();

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,15 +15,30 @@
*/ */
package db; package db;
import ghidra.util.exception.AssertException;
import java.io.IOException; import java.io.IOException;
import db.buffers.DataBuffer;
/** /**
* <code>ShortField</code> provides a wrapper for 2-byte signed short data * <code>ShortField</code> provides a wrapper for 2-byte signed short data
* which is read or written to a Record. * which is read or written to a Record.
*/ */
public class ShortField extends Field { public final class ShortField extends Field {
/**
* Minimum short field value
*/
public static final ShortField MIN_VALUE = new ShortField(Short.MIN_VALUE, true);
/**
* Maximum short field value
*/
public static final ShortField MAX_VALUE = new ShortField(Short.MAX_VALUE, true);
/**
* Instance intended for defining a {@link Table} {@link Schema}
*/
public static final ShortField INSTANCE = MIN_VALUE;
private short value; private short value;
@ -39,69 +53,57 @@ public class ShortField extends Field {
* @param s initial value * @param s initial value
*/ */
public ShortField(short s) { public ShortField(short s) {
this(s, false);
}
/**
* Construct a short field with an initial value of s.
* @param s initial value
* @param immutable true if field value is immutable
*/
ShortField(short s, boolean immutable) {
super(immutable);
value = s; value = s;
} }
/*
* @see ghidra.framework.store.db.Field#getShortValue()
*/
@Override @Override
public short getShortValue() { public short getShortValue() {
return value; return value;
} }
/*
* @see ghidra.framework.store.db.Field#setShortValue(short)
*/
@Override @Override
public void setShortValue(short value) { public void setShortValue(short value) {
checkImmutable();
this.value = value; this.value = value;
} }
/*
* @see ghidra.framework.store.db.Field#length()
*/
@Override @Override
int length() { int length() {
return 2; return 2;
} }
/*
* @see ghidra.framework.store.db.Field#write(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int write(Buffer buf, int offset) throws IOException { int write(Buffer buf, int offset) throws IOException {
return buf.putShort(offset, value); return buf.putShort(offset, value);
} }
/*
* @see ghidra.framework.store.db.Field#read(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int read(Buffer buf, int offset) throws IOException { int read(Buffer buf, int offset) throws IOException {
checkImmutable();
value = buf.getShort(offset); value = buf.getShort(offset);
return offset + 2; return offset + 2;
} }
/*
* @see ghidra.framework.store.db.Field#readLength(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int readLength(Buffer buf, int offset) throws IOException { int readLength(Buffer buf, int offset) throws IOException {
return 2; return 2;
} }
/*
* @see ghidra.framework.store.db.Field#getFieldType()
*/
@Override @Override
protected byte getFieldType() { byte getFieldType() {
return SHORT_TYPE; return SHORT_TYPE;
} }
/*
* @see java.lang.Object#toString()
*/
@Override @Override
public String toString() { public String toString() {
return "ShortField: " + Short.toString(value); return "ShortField: " + Short.toString(value);
@ -109,12 +111,9 @@ public class ShortField extends Field {
@Override @Override
public String getValueAsString() { public String getValueAsString() {
return Integer.toHexString(value); return "0x" + Integer.toHexString(value);
} }
/*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override @Override
public boolean equals(Object obj) { public boolean equals(Object obj) {
if (obj == null || !(obj instanceof ShortField)) if (obj == null || !(obj instanceof ShortField))
@ -122,9 +121,6 @@ public class ShortField extends Field {
return ((ShortField) obj).value == value; return ((ShortField) obj).value == value;
} }
/*
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override @Override
public int compareTo(Field o) { public int compareTo(Field o) {
ShortField f = (ShortField) o; ShortField f = (ShortField) o;
@ -135,54 +131,63 @@ public class ShortField extends Field {
return 1; return 1;
} }
/*
* @see ghidra.framework.store.db.Field#newField(ghidra.framework.store.db.Field)
*/
@Override @Override
public Field newField(Field fieldValue) { int compareTo(DataBuffer buffer, int offset) {
if (fieldValue.isVariableLength()) short otherValue = buffer.getShort(offset);
throw new AssertException(); if (value == otherValue)
return new ShortField((short) fieldValue.getLongValue()); return 0;
else if (value < otherValue)
return -1;
return 1;
} }
/*
* @see ghidra.framework.store.db.Field#newField()
*/
@Override @Override
public Field newField() { public ShortField copyField() {
return new ShortField((short) getLongValue());
}
@Override
public ShortField newField() {
return new ShortField(); return new ShortField();
} }
/*
* @see ghidra.framework.store.db.Field#getLongValue()
*/
@Override @Override
public long getLongValue() { public long getLongValue() {
return value; return value;
} }
/*
* @see ghidra.framework.store.db.Field#setLongValue(long)
*/
@Override @Override
public void setLongValue(long value) { public void setLongValue(long value) {
this.value = (short) value; setShortValue((short) value);
} }
/*
* @see ghidra.framework.store.db.Field#getBinaryData()
*/
@Override @Override
public byte[] getBinaryData() { public byte[] getBinaryData() {
return new byte[] { (byte) (value >> 8), (byte) value }; return new byte[] { (byte) (value >> 8), (byte) value };
} }
/* @Override
* @see java.lang.Object#hashCode() public void setBinaryData(byte[] bytes) {
*/ checkImmutable();
if (bytes.length != 2) {
throw new IllegalFieldAccessException();
}
value = (short) (((bytes[0] & 0xff) << 8) | (bytes[1] & 0xff));
}
@Override @Override
public int hashCode() { public int hashCode() {
return value; return value;
} }
@Override
ShortField getMinValue() {
return MIN_VALUE;
}
@Override
ShortField getMaxValue() {
return MAX_VALUE;
}
} }

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,16 +15,22 @@
*/ */
package db; package db;
import ghidra.util.exception.AssertException;
import java.io.IOException; import java.io.IOException;
import java.io.UnsupportedEncodingException; import java.io.UnsupportedEncodingException;
import db.buffers.DataBuffer;
import ghidra.util.exception.AssertException;
/** /**
* <code>StringField</code> provides a wrapper for variable length String data which is read or * <code>StringField</code> provides a wrapper for variable length String data which is read or
* written to a Record. Strings are always encoded as UTF-8. * written to a Record. Strings are always encoded as UTF-8.
*/ */
public class StringField extends Field { public final class StringField extends Field {
/**
* Instance intended for defining a {@link Table} {@link Schema}
*/
public static final StringField INSTANCE = new StringField(null, true);
private static String ENCODING = "UTF-8"; private static String ENCODING = "UTF-8";
@ -40,45 +45,48 @@ public class StringField extends Field {
/** /**
* Construct a String field with an initial value of s. * Construct a String field with an initial value of s.
* @param s initial value * @param str initial string value or null
*/ */
public StringField(String s) { public StringField(String str) {
setString(s); this(str, false);
} }
/* /**
* @see ghidra.framework.store.db.Field#getString() * Construct a String field with an initial value of s.
* @param str initial string value or null
* @param immutable true if field value is immutable
*/ */
StringField(String str, boolean immutable) {
super(immutable);
doSetString(str);
}
@Override @Override
public String getString() { public String getString() {
return str; return str;
} }
/*
* @see ghidra.framework.store.db.Field#setString(java.lang.String)
*/
@Override @Override
public void setString(String str) { public void setString(String str) {
checkImmutable();
doSetString(str);
}
private void doSetString(String str) {
this.str = str; this.str = str;
try { try {
bytes = (str != null ? str.getBytes(ENCODING) : null); bytes = (str != null ? str.getBytes(ENCODING) : null);
} }
catch (UnsupportedEncodingException e) { catch (UnsupportedEncodingException e) {
throw new AssertException(); throw new AssertException(e);
} }
} }
/*
* @see ghidra.framework.store.db.Field#length()
*/
@Override @Override
int length() { int length() {
return (bytes == null) ? 4 : (bytes.length + 4); return (bytes == null) ? 4 : (bytes.length + 4);
} }
/*
* @see ghidra.framework.store.db.Field#write(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int write(Buffer buf, int offset) throws IOException { int write(Buffer buf, int offset) throws IOException {
if (bytes == null) { if (bytes == null) {
@ -88,11 +96,9 @@ public class StringField extends Field {
return buf.put(offset, bytes); return buf.put(offset, bytes);
} }
/*
* @see ghidra.framework.store.db.Field#read(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int read(Buffer buf, int offset) throws IOException { int read(Buffer buf, int offset) throws IOException {
checkImmutable();
int len = buf.getInt(offset); int len = buf.getInt(offset);
offset += 4; offset += 4;
if (len < 0) { if (len < 0) {
@ -107,34 +113,22 @@ public class StringField extends Field {
return offset; return offset;
} }
/*
* @see ghidra.framework.store.db.Field#readLength(ghidra.framework.store.Buffer, int)
*/
@Override @Override
int readLength(Buffer buf, int offset) throws IOException { int readLength(Buffer buf, int offset) throws IOException {
int len = buf.getInt(offset); int len = buf.getInt(offset);
return (len < 0 ? 0 : len) + 4; return (len < 0 ? 0 : len) + 4;
} }
/*
* @see ghidra.framework.store.db.Field#isVariableLength()
*/
@Override @Override
public boolean isVariableLength() { public boolean isVariableLength() {
return true; return true;
} }
/*
* @see ghidra.framework.store.db.Field#getFieldType()
*/
@Override @Override
protected byte getFieldType() { byte getFieldType() {
return STRING_TYPE; return STRING_TYPE;
} }
/*
* @see java.lang.Object#toString()
*/
@Override @Override
public String toString() { public String toString() {
return "StringField: " + str; return "StringField: " + str;
@ -168,9 +162,6 @@ public class StringField extends Field {
// return value; // return value;
// } // }
/*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override @Override
public boolean equals(Object obj) { public boolean equals(Object obj) {
if (obj == null || !(obj instanceof StringField)) if (obj == null || !(obj instanceof StringField))
@ -182,19 +173,14 @@ public class StringField extends Field {
return str.equals(f.str); return str.equals(f.str);
} }
/*
* @see ghidra.framework.store.db.Field#getBinaryData()
*/
@Override @Override
public byte[] getBinaryData() { public byte[] getBinaryData() {
return bytes; return bytes;
} }
/*
* @see ghidra.framework.store.db.Field#setBinaryData(byte[])
*/
@Override @Override
public void setBinaryData(byte[] bytes) { public void setBinaryData(byte[] bytes) {
checkImmutable();
if (bytes == null) { if (bytes == null) {
str = null; str = null;
} }
@ -204,14 +190,11 @@ public class StringField extends Field {
str = new String(bytes, ENCODING); str = new String(bytes, ENCODING);
} }
catch (UnsupportedEncodingException e) { catch (UnsupportedEncodingException e) {
throw new AssertException(); throw new AssertException(e);
} }
} }
} }
/*
* @see ghidra.framework.store.db.Field#truncate(int)
*/
@Override @Override
void truncate(int length) { void truncate(int length) {
int maxLen = length - 4; int maxLen = length - 4;
@ -220,9 +203,6 @@ public class StringField extends Field {
} }
} }
/*
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override @Override
public int compareTo(Field o) { public int compareTo(Field o) {
StringField f = (StringField) o; StringField f = (StringField) o;
@ -237,36 +217,41 @@ public class StringField extends Field {
return str.compareTo(f.str); return str.compareTo(f.str);
} }
/*
* @see ghidra.framework.store.db.Field#newField(ghidra.framework.store.db.Field)
*/
@Override @Override
public Field newField(Field fieldValue) { int compareTo(DataBuffer buffer, int offset) {
if (fieldValue instanceof StringField) { StringField f = new StringField();
return new StringField(fieldValue.getString());
}
try { try {
return new StringField(new String(fieldValue.getBinaryData(), ENCODING)); f.read(buffer, offset);
} }
catch (UnsupportedEncodingException e) { catch (IOException e) {
throw new AssertException(e); // DataBuffer does not throw IOException
} }
throw new AssertException(); return compareTo(f);
} }
/*
* @see ghidra.framework.store.db.Field#newField()
*/
@Override @Override
public Field newField() { public StringField copyField() {
return new StringField(str);
}
@Override
public StringField newField() {
return new StringField(); return new StringField();
} }
/*
* @see java.lang.Object#hashCode()
*/
@Override @Override
public int hashCode() { public int hashCode() {
return str.hashCode(); return str.hashCode();
} }
@Override
StringField getMinValue() {
throw new UnsupportedOperationException();
}
@Override
StringField getMaxValue() {
throw new UnsupportedOperationException();
}
} }

File diff suppressed because it is too large Load diff

View file

@ -15,6 +15,8 @@
*/ */
package db; package db;
import java.io.IOException;
import db.Field.UnsupportedFieldException; import db.Field.UnsupportedFieldException;
/** /**
@ -33,24 +35,27 @@ class TableRecord implements Comparable<TableRecord> {
private static final int MAX_KEY_COLUMN = 7; private static final int MAX_KEY_COLUMN = 7;
private static final int RECORD_COUNT_COLUMN = 8; private static final int RECORD_COUNT_COLUMN = 8;
private static Class<?>[] fieldClasses = { StringField.class, // name of table //@formatter:off
IntField.class, // Schema version private static Field[] fields = {
IntField.class, // Root buffer ID (first buffer) StringField.INSTANCE, // name of table
ByteField.class, // Key field type IntField.INSTANCE, // Schema version
BinaryField.class, // Schema field types IntField.INSTANCE, // Root buffer ID (first buffer)
StringField.class, // Schema key/field names ByteField.INSTANCE, // Key field type
IntField.class, // indexing column (-1 = primary) BinaryField.INSTANCE, // Schema field types
LongField.class, // max primary key value ever used StringField.INSTANCE, // Schema key/field names
IntField.class // number of records IntField.INSTANCE, // indexing column (-1 = primary)
LongField.INSTANCE, // max primary key value ever used
IntField.INSTANCE // number of records
}; };
//@formatter:on
private static String[] tableRecordFieldNames = { "TableName", "SchemaVersion", "RootBufferId", private static String[] tableRecordFieldNames = { "TableName", "SchemaVersion", "RootBufferId",
"KeyType", "FieldTypes", "FieldNames", "IndexColumn", "MaxKey", "RecordCount" }; "KeyType", "FieldTypes", "FieldNames", "IndexColumn", "MaxKey", "RecordCount" };
private static Schema schema = new Schema(0, "TableNum", fieldClasses, tableRecordFieldNames); private static Schema schema = new Schema(0, "TableNum", fields, tableRecordFieldNames);
private Record record; private Record record;
private Schema tableSchema;
private Table table; private Table table;
/** /**
@ -61,10 +66,11 @@ class TableRecord implements Comparable<TableRecord> {
* @param indexedColumn primary table index key column, or -1 for primary table * @param indexedColumn primary table index key column, or -1 for primary table
*/ */
TableRecord(long tableNum, String name, Schema tableSchema, int indexedColumn) { TableRecord(long tableNum, String name, Schema tableSchema, int indexedColumn) {
this.tableSchema = tableSchema;
record = schema.createRecord(tableNum); record = schema.createRecord(tableNum);
record.setString(NAME_COLUMN, name); record.setString(NAME_COLUMN, name);
record.setByteValue(KEY_TYPE_COLUMN, tableSchema.getKeyFieldType().getFieldType()); record.setByteValue(KEY_TYPE_COLUMN, tableSchema.getEncodedKeyFieldType());
record.setBinaryData(FIELD_TYPES_COLUMN, tableSchema.getFieldTypes()); record.setBinaryData(FIELD_TYPES_COLUMN, tableSchema.getEncodedFieldTypes());
record.setString(FIELD_NAMES_COLUMN, tableSchema.getPackedFieldNames()); record.setString(FIELD_NAMES_COLUMN, tableSchema.getPackedFieldNames());
record.setIntValue(VERSION_COLUMN, tableSchema.getVersion()); record.setIntValue(VERSION_COLUMN, tableSchema.getVersion());
record.setIntValue(COLUMN_INDEXED_COLUMN, indexedColumn); record.setIntValue(COLUMN_INDEXED_COLUMN, indexedColumn);
@ -75,9 +81,13 @@ class TableRecord implements Comparable<TableRecord> {
/** /**
* Construct an existing master table storage record. * Construct an existing master table storage record.
* @param dbh database handle
* @param record master table storage record. * @param record master table storage record.
* @throws UnsupportedFieldException stored schema contains unsupported field
* @throws IOException if IO error occurs
*/ */
TableRecord(Record record) { TableRecord(DBHandle dbh, Record record) throws IOException {
this.tableSchema = parseSchema(dbh, record);
this.record = record; this.record = record;
} }
@ -100,9 +110,13 @@ class TableRecord implements Comparable<TableRecord> {
/** /**
* Set the storage record for this instance. * Set the storage record for this instance.
* Data is refreshed from the record provided. * Data is refreshed from the record provided.
* @param dbh database handle
* @param record master table storage record. * @param record master table storage record.
* @throws UnsupportedFieldException stored schema contains unsupported field
* @throws IOException if IO error occurs
*/ */
void setRecord(Record record) { void setRecord(DBHandle dbh, Record record) throws IOException {
this.tableSchema = parseSchema(dbh, record);
this.record = record; this.record = record;
if (table != null) { if (table != null) {
table.tableRecordChanged(); table.tableRecordChanged();
@ -120,6 +134,7 @@ class TableRecord implements Comparable<TableRecord> {
table = null; table = null;
} }
this.record = null; this.record = null;
this.tableSchema = null;
} }
/** /**
@ -140,20 +155,62 @@ class TableRecord implements Comparable<TableRecord> {
/** /**
* Set the table name * Set the table name
* @param name * @param name table name
*/ */
void setName(String name) { void setName(String name) {
record.setString(NAME_COLUMN, name); record.setString(NAME_COLUMN, name);
} }
/**
*
* @param dbh database handle
* @param record record which defines table schema
* @return table schema
* @throws UnsupportedFieldException stored schema contains unsupported field
* @throws IOException if IO error occurs
*/
private static Schema parseSchema(DBHandle dbh, Record record) throws IOException {
Schema tableSchema =
new Schema(record.getIntValue(VERSION_COLUMN), record.getByteValue(KEY_TYPE_COLUMN),
record.getBinaryData(FIELD_TYPES_COLUMN), record.getString(FIELD_NAMES_COLUMN));
forceUseOfVariableLengthKeyNodesIfNeeded(dbh, tableSchema,
record.getIntValue(BUFFER_ID_COLUMN));
return tableSchema;
}
/**
* Determine if legacy schema should be forced to use {@link VarKeyNode}
* table storage for compatibility. Root buffer node for applicable
* primitive fixed-length key types will be checked.
* @param dbh database handle
* @param tableSchema table schema to be checked
* @param rootBufferId table root buffer ID
* @throws IOException if IO error occurs
*/
private static void forceUseOfVariableLengthKeyNodesIfNeeded(DBHandle dbh, Schema tableSchema,
int rootBufferId) throws IOException {
if (rootBufferId < 0) {
return;
}
Field keyType = tableSchema.getKeyFieldType();
if (keyType.isVariableLength()) {
return;
}
if (keyType instanceof LongField || keyType instanceof IndexField ||
keyType instanceof FixedField) {
return;
}
if (NodeMgr.isVarKeyNode(dbh.getBufferMgr(), rootBufferId)) {
tableSchema.forceUseOfVariableLengthKeyNodes();
}
}
/** /**
* Get the table schema * Get the table schema
* @return table schema * @return table schema
* @throws UnsupportedFieldException if unsupported schema field encountered
*/ */
Schema getSchema() throws UnsupportedFieldException { Schema getSchema() {
return new Schema(record.getIntValue(VERSION_COLUMN), record.getByteValue(KEY_TYPE_COLUMN), return tableSchema;
record.getBinaryData(FIELD_TYPES_COLUMN), record.getString(FIELD_NAMES_COLUMN));
} }
/** /**

View file

@ -1,323 +0,0 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
/**
* The <code>VarIndexTable</code> provides a secondary index on a variable-length table column
* (e.g., StringField). For each unique secondary index value, an IndexBuffer is
* stored within an underlying index table record. The secondary index value is used as the long
* key to access this record. Within a single IndexBuffer is stored all primary keys which
* correspond to an index value.
*/
class VarIndexTable extends IndexTable {
private static final Class<?>[] fieldClasses = { BinaryField.class, // index data
};
private static final String[] fieldNames = { "IndexBuffer" };
private Schema indexSchema;
/**
* Construct a new secondary index which is based upon a field within the
* primary table specified by name.
* @param primaryTable primary table.
* @param colIndex identifies the indexed column within the primary table.
* @throws IOException thrown if an IO error occurs
*/
VarIndexTable(Table primaryTable, int colIndex) throws IOException {
this(primaryTable,
primaryTable.getDBHandle().getMasterTable().createTableRecord(primaryTable.getName(),
new Schema(0, primaryTable.getSchema().getField(colIndex).getClass(), "IndexKey",
fieldClasses, fieldNames),
colIndex));
}
/**
* Construct a new or existing secondary index. An existing index must have
* its root ID specified within the tableRecord.
* @param primaryTable primary table.
* @param indexTableRecord specifies the index parameters.
* @throws IOException thrown if an IO error occurs
*/
VarIndexTable(Table primaryTable, TableRecord indexTableRecord) throws IOException {
super(primaryTable, indexTableRecord);
this.indexSchema = indexTable.getSchema();
}
/**
* Find all primary keys which correspond to the specified indexed field
* value.
* @param indexValue the field value to search for.
* @return list of primary keys
* @throws IOException thrown if an IO error occurs
*/
@Override
long[] findPrimaryKeys(Field indexValue) throws IOException {
if (!indexValue.getClass().equals(fieldType.getClass())) {
throw new IllegalArgumentException("Incorrect indexed field type");
}
Record indexRecord = indexTable.getRecord(indexValue);
if (indexRecord == null) {
return emptyKeyArray;
}
IndexBuffer indexBuffer = new IndexBuffer(indexValue, indexRecord.getBinaryData(0));
return indexBuffer.getPrimaryKeys();
}
/**
* Get the number of primary keys which correspond to the specified indexed field
* value.
* @param indexValue the field value to search for.
* @return key count
* @throws IOException thrown if an IO error occurs
*/
@Override
int getKeyCount(Field indexValue) throws IOException {
if (!indexValue.getClass().equals(fieldType.getClass())) {
throw new IllegalArgumentException("Incorrect indexed field type");
}
Record indexRecord = indexTable.getRecord(indexValue);
if (indexRecord == null) {
return 0;
}
IndexBuffer indexBuffer = new IndexBuffer(indexValue, indexRecord.getBinaryData(0));
return indexBuffer.keyCount;
}
/*
* @see ghidra.framework.store.db.IndexTable#addEntry(ghidra.framework.store.db.Record)
*/
@Override
void addEntry(Record record) throws IOException {
Field indexField = record.getField(colIndex);
Record indexRecord = indexTable.getRecord(indexField);
if (indexRecord == null) {
indexRecord = indexSchema.createRecord(indexField);
}
IndexBuffer indexBuffer = new IndexBuffer(indexField, indexRecord.getBinaryData(0));
indexBuffer.addEntry(record.getKey());
indexRecord.setBinaryData(0, indexBuffer.getData());
indexTable.putRecord(indexRecord);
}
/*
* @see ghidra.framework.store.db.IndexTable#deleteEntry(ghidra.framework.store.db.Record)
*/
@Override
void deleteEntry(Record record) throws IOException {
Field indexField = record.getField(colIndex);
Record indexRecord = indexTable.getRecord(indexField);
if (indexRecord != null) {
IndexBuffer indexBuffer = new IndexBuffer(indexField, indexRecord.getBinaryData(0));
indexBuffer.deleteEntry(record.getKey());
byte[] data = indexBuffer.getData();
if (data == null) {
indexTable.deleteRecord(indexField);
}
else {
indexRecord.setBinaryData(0, data);
indexTable.putRecord(indexRecord);
}
}
}
/**
* Get the index buffer associated with the specified index key
* @param indexKey index key
* @return index buffer or null if not found
* @throws IOException thrown if IO error occurs
*/
private IndexBuffer getIndexBuffer(Field indexKey) throws IOException {
Record indexRec = indexTable.getRecord(indexKey);
return indexRec != null ? new IndexBuffer(indexKey, indexRec.getBinaryData(0)) : null;
}
/*
* @see ghidra.framework.store.db.IndexTable#indexIterator()
*/
@Override
DBFieldIterator indexIterator() throws IOException {
return new IndexVarFieldIterator();
}
/*
* @see ghidra.framework.store.db.IndexTable#indexIterator(ghidra.framework.store.db.Field, ghidra.framework.store.db.Field, boolean)
*/
@Override
DBFieldIterator indexIterator(Field minField, Field maxField, boolean before)
throws IOException {
return new IndexVarFieldIterator(minField, maxField, before);
}
/*
* @see db.IndexTable#indexIterator(db.Field, db.Field, db.Field, boolean)
*/
@Override
DBFieldIterator indexIterator(Field minField, Field maxField, Field startField, boolean before)
throws IOException {
return new IndexVarFieldIterator(minField, maxField, startField, before);
}
/**
* Iterates over index field values within a specified range.
*/
class IndexVarFieldIterator implements DBFieldIterator {
private Field lastKey;
private Field keyField;
private DBFieldIterator indexIterator;
private boolean hasNext = false;
private boolean hasPrev = false;
/**
* Construct an index field iterator starting with the minimum index value.
*/
IndexVarFieldIterator() throws IOException {
this(null, null, true);
}
/**
* Construct an index field iterator. The iterator is positioned at index
* value identified by startValue.
* @param minValue minimum index value. Null corresponds to minimum indexed value.
* @param maxValue maximum index value. Null corresponds to maximum indexed value.
* @param before if true initial position is before minValue, else position
* is after maxValue.
* @throws IOException
*/
IndexVarFieldIterator(Field minValue, Field maxValue, boolean before) throws IOException {
indexIterator = indexTable.fieldKeyIterator(minValue, maxValue, before);
if (indexIterator.hasNext()) {
indexIterator.next();
if (before) {
indexIterator.previous();
}
}
}
/**
* Construct an index field iterator. The iterator is positioned at index
* value identified by startValue.
* @param minValue minimum index value. Null corresponds to minimum indexed value.
* @param maxValue maximum index value. Null corresponds to maximum indexed value.
* @param startValue identify initial position by value
* @param before if true initial position is before minValue, else position
* is after maxValue.
* @throws IOException
*/
IndexVarFieldIterator(Field minValue, Field maxValue, Field startValue, boolean before)
throws IOException {
if (startValue == null) {
throw new IllegalArgumentException("starting index value required");
}
indexIterator = indexTable.fieldKeyIterator(minValue, maxValue, startValue);
if (indexIterator.hasNext()) {
Field f = indexIterator.next();
if (before || !f.equals(startValue)) {
indexIterator.previous();
}
}
}
@Override
public boolean hasNext() throws IOException {
if (hasNext) {
return true;
}
Field key = indexIterator.next();
if (key == null) {
return false;
}
keyField = key;
hasNext = true;
hasPrev = false;
return true;
}
@Override
public boolean hasPrevious() throws IOException {
if (hasPrev) {
return true;
}
Field key = indexIterator.previous();
if (key == null) {
return false;
}
keyField = key;
hasNext = false;
hasPrev = true;
return true;
}
@Override
public Field next() throws IOException {
if (hasNext || hasNext()) {
hasNext = false;
hasPrev = true;
lastKey = keyField;
return keyField;
}
return null;
}
@Override
public Field previous() throws IOException {
if (hasPrev || hasPrevious()) {
hasNext = true;
hasPrev = false;
lastKey = keyField;
return keyField;
}
return null;
}
/**
* Delete all primary records which have the current
* index value (lastKey).
* @see db.DBFieldIterator#delete()
*/
@Override
public boolean delete() throws IOException {
if (lastKey == null) {
return false;
}
synchronized (db) {
IndexBuffer indexBuf = getIndexBuffer(lastKey);
if (indexBuf != null) {
long[] keys = indexBuf.getPrimaryKeys();
for (long key : keys) {
primaryTable.deleteRecord(key);
}
// The following does not actually delete the index record since it
// should already have been removed with the removal of all associated
// primary records. Invoking this method allows the iterator to
// recover from the index table change.
// indexIterator.delete();
}
lastKey = null;
return true;
}
}
}
}

View file

@ -29,9 +29,9 @@ import ghidra.util.task.TaskMonitor;
* has the following layout within a single DataBuffer (field size in bytes): * has the following layout within a single DataBuffer (field size in bytes):
* *
* | NodeType(1) | KeyType(1) | KeyCount(4) | KeyOffset0(4) | ID0(4) | ... | KeyOffsetN(4) | IDN(4) | * | NodeType(1) | KeyType(1) | KeyCount(4) | KeyOffset0(4) | ID0(4) | ... | KeyOffsetN(4) | IDN(4) |
* ...<FreeSpace>... | KeyN | ... | Key0 | * ...&lt;FreeSpace&gt;... | KeyN | ... | Key0 |
*/ */
class VarKeyInteriorNode extends VarKeyNode { class VarKeyInteriorNode extends VarKeyNode implements FieldKeyInteriorNode {
private static final int BASE = VARKEY_NODE_HEADER_SIZE; private static final int BASE = VARKEY_NODE_HEADER_SIZE;
@ -83,7 +83,7 @@ class VarKeyInteriorNode extends VarKeyNode {
void logConsistencyError(String tableName, String msg, Throwable t) throws IOException { void logConsistencyError(String tableName, String msg, Throwable t) throws IOException {
Msg.debug(this, "Consistency Error (" + tableName + "): " + msg); Msg.debug(this, "Consistency Error (" + tableName + "): " + msg);
Msg.debug(this, " parent.key[0]=" + getKey(0) + " bufferID=" + getBufferId()); Msg.debug(this, " parent.key[0]=" + getKeyField(0) + " bufferID=" + getBufferId());
if (t != null) { if (t != null) {
Msg.error(this, "Consistency Error (" + tableName + ")", t); Msg.error(this, "Consistency Error (" + tableName + ")", t);
} }
@ -98,26 +98,24 @@ class VarKeyInteriorNode extends VarKeyNode {
for (int i = 0; i < keyCount; i++) { for (int i = 0; i < keyCount; i++) {
// Compare each key entry with the previous entries key-range // Compare each key entry with the previous entries key-range
Field key = getKey(i); Field key = getKeyField(i);
if (i != 0) { if (lastMinKey != null && key.compareTo(lastMinKey) <= 0) {
if (key.compareTo(lastMinKey) <= 0) {
consistent = false; consistent = false;
logConsistencyError(tableName, logConsistencyError(tableName,
"child[" + i + "].minKey <= child[" + (i - 1) + "].minKey", null); "child[" + i + "].minKey <= child[" + (i - 1) + "].minKey", null);
Msg.debug(this, Msg.debug(this,
" child[" + i + "].minKey = " + key + " bufferID=" + getBufferId(i)); " child[" + i + "].minKey = " + key + " bufferID=" + getBufferId(i));
Msg.debug(this, " child[" + (i - 1) + "].minKey = " + lastMinKey + Msg.debug(this, " child[" + (i - 1) + "].minKey = " + lastMinKey + " bufferID=" +
" bufferID=" + getBufferId(i - 1)); getBufferId(i - 1));
} }
else if (key.compareTo(lastMaxKey) <= 0) { else if (lastMaxKey != null && key.compareTo(lastMaxKey) <= 0) {
consistent = false; consistent = false;
logConsistencyError(tableName, logConsistencyError(tableName,
"child[" + i + "].minKey <= child[" + (i - 1) + "].maxKey", null); "child[" + i + "].minKey <= child[" + (i - 1) + "].maxKey", null);
Msg.debug(this, Msg.debug(this,
" child[" + i + "].minKey = " + key + " bufferID=" + getBufferId(i)); " child[" + i + "].minKey = " + key + " bufferID=" + getBufferId(i));
Msg.debug(this, " child[" + (i - 1) + "].maxKey = " + lastMaxKey + Msg.debug(this, " child[" + (i - 1) + "].maxKey = " + lastMaxKey + " bufferID=" +
" bufferID=" + getBufferId(i - 1)); getBufferId(i - 1));
}
} }
lastMinKey = key; lastMinKey = key;
@ -143,10 +141,10 @@ class VarKeyInteriorNode extends VarKeyNode {
continue; // skip child continue; // skip child
} }
lastMaxKey = node.getKey(node.getKeyCount() - 1); lastMaxKey = node.getKeyField(node.getKeyCount() - 1);
// Verify key match-up between parent and child // Verify key match-up between parent and child
Field childKey0 = node.getKey(0); Field childKey0 = node.getKeyField(0);
if (!key.equals(childKey0)) { if (!key.equals(childKey0)) {
consistent = false; consistent = false;
logConsistencyError(tableName, logConsistencyError(tableName,
@ -182,10 +180,16 @@ class VarKeyInteriorNode extends VarKeyNode {
/** /**
* Perform a binary search to locate the specified key and derive an index * Perform a binary search to locate the specified key and derive an index
* into the Buffer ID storage. This method is used to identify the child * into the Buffer ID storage. This method is intended to locate the child
* node which contains the specified record key. * node which contains the specified key. The returned index corresponds
* @param key * to a child's stored buffer/node ID and may correspond to another interior
* @return int buffer ID index. * node or a leaf record node. Each stored key within this interior node
* effectively identifies the maximum key contained within the corresponding
* child node.
* @param key key to search for
* @return int buffer ID index of child node. An existing positive index
* value will always be returned.
* @throws IOException if IO error occurs
*/ */
int getIdIndex(Field key) throws IOException { int getIdIndex(Field key) throws IOException {
@ -194,12 +198,11 @@ class VarKeyInteriorNode extends VarKeyNode {
while (min <= max) { while (min <= max) {
int i = (min + max) / 2; int i = (min + max) / 2;
Field k = getKey(i); int rc = compareKeyField(key, i);
int rc = k.compareTo(key);
if (rc == 0) { if (rc == 0) {
return i; return i;
} }
else if (rc < 0) { else if (rc > 0) {
min = i + 1; min = i + 1;
} }
else { else {
@ -209,26 +212,19 @@ class VarKeyInteriorNode extends VarKeyNode {
return max; return max;
} }
/** @Override
* Perform a binary search to locate the specified key and derive an index public int getKeyIndex(Field key) throws IOException {
* into the Buffer ID storage. This method is intended to find the insertion
* index or exact match for a child key.
* @param key
* @return int buffer ID index.
*/
private int getKeyIndex(Field key) throws IOException {
int min = 0; int min = 0;
int max = keyCount - 1; int max = keyCount - 1;
while (min <= max) { while (min <= max) {
int i = (min + max) / 2; int i = (min + max) / 2;
Field k = getKey(i); int rc = compareKeyField(key, i);
int rc = k.compareTo(key);
if (rc == 0) { if (rc == 0) {
return i; return i;
} }
else if (rc < 0) { else if (rc > 0) {
min = i + 1; min = i + 1;
} }
else { else {
@ -271,7 +267,8 @@ class VarKeyInteriorNode extends VarKeyNode {
* @param index key index * @param index key index
* @return record key offset * @return record key offset
*/ */
private int getKeyOffset(int index) { @Override
public int getKeyOffset(int index) {
return buffer.getInt(BASE + (index * ENTRY_SIZE)); return buffer.getInt(BASE + (index * ENTRY_SIZE));
} }
@ -284,11 +281,8 @@ class VarKeyInteriorNode extends VarKeyNode {
buffer.putInt(BASE + (index * ENTRY_SIZE), offset); buffer.putInt(BASE + (index * ENTRY_SIZE), offset);
} }
/*
* @see ghidra.framework.store.db.VarKeyNode#getKey(int)
*/
@Override @Override
Field getKey(int index) throws IOException { public Field getKeyField(int index) throws IOException {
Field key = keyType.newField(); Field key = keyType.newField();
key.read(buffer, buffer.getInt(BASE + (index * ENTRY_SIZE))); key.read(buffer, buffer.getInt(BASE + (index * ENTRY_SIZE)));
return key; return key;
@ -313,15 +307,6 @@ class VarKeyInteriorNode extends VarKeyNode {
return buffer.getInt(BASE + (index * ENTRY_SIZE) + KEY_OFFSET_SIZE); return buffer.getInt(BASE + (index * ENTRY_SIZE) + KEY_OFFSET_SIZE);
} }
// /**
// * Store the child node buffer ID associated with the specified key index
// * @param index child key index
// * @param id child node buffer ID
// */
// private void putBufferId(int index, int id) {
// buffer.putInt(BASE + (index * ENTRY_SIZE) + KEY_OFFSET_SIZE, id);
// }
/** /**
* @return unused free space within node * @return unused free space within node
*/ */
@ -432,8 +417,11 @@ class VarKeyInteriorNode extends VarKeyNode {
* Callback method for when a child node's leftmost key changes. * Callback method for when a child node's leftmost key changes.
* @param oldKey previous leftmost key. * @param oldKey previous leftmost key.
* @param newKey new leftmost key. * @param newKey new leftmost key.
* @param node child node containing oldKey
* @throws IOException if IO error occurs
*/ */
void keyChanged(Field oldKey, Field newKey, VarKeyNode node) throws IOException { @Override
public void keyChanged(Field oldKey, Field newKey, FieldKeyNode node) throws IOException {
int index = getKeyIndex(oldKey); int index = getKeyIndex(oldKey);
if (index < 0) { if (index < 0) {
@ -443,7 +431,7 @@ class VarKeyInteriorNode extends VarKeyNode {
int lenChange = newKey.length() - oldKey.length(); int lenChange = newKey.length() - oldKey.length();
if (lenChange > 0 && lenChange > getFreeSpace()) { if (lenChange > 0 && lenChange > getFreeSpace()) {
// Split node if updated key won't fit // Split node if updated key won't fit
split(index, oldKey, newKey, node); split(index, oldKey, newKey, (VarKeyNode) node);
} }
else { else {
@ -461,7 +449,8 @@ class VarKeyInteriorNode extends VarKeyNode {
* @param oldIndex index of key to be updated * @param oldIndex index of key to be updated
* @param oldKey old key value stored at oldIndex * @param oldKey old key value stored at oldIndex
* @param newKey new key value * @param newKey new key value
* @throws IOException thrown if IO error occurs * @param node child node containing oldKey
* @throws IOException if IO error occurs
*/ */
private void split(int oldIndex, Field oldKey, Field newKey, VarKeyNode node) private void split(int oldIndex, Field oldKey, Field newKey, VarKeyNode node)
throws IOException { throws IOException {
@ -497,7 +486,7 @@ class VarKeyInteriorNode extends VarKeyNode {
parent.insert(newNode); parent.insert(newNode);
if (newNode.parent != parent) { if (newNode.parent != parent) {
// Fix my parent // Fix my parent
if (parent.getKeyIndex(getKey(0)) < 0) { if (parent.getKeyIndex(getKeyField(0)) < 0) {
parent = newNode.parent; parent = newNode.parent;
} }
} }
@ -505,8 +494,8 @@ class VarKeyInteriorNode extends VarKeyNode {
} }
// New parent node becomes root // New parent node becomes root
parent = new VarKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), newNode.getKey(0), parent = new VarKeyInteriorNode(nodeMgr, getKeyField(0), buffer.getId(),
newNode.getBufferId()); newNode.getKeyField(0), newNode.getBufferId());
newNode.parent = parent; newNode.parent = parent;
} }
@ -518,7 +507,7 @@ class VarKeyInteriorNode extends VarKeyNode {
*/ */
VarKeyNode insert(VarKeyNode node) throws IOException { VarKeyNode insert(VarKeyNode node) throws IOException {
Field key = node.getKey(0); Field key = node.getKeyField(0);
int id = node.getBufferId(); int id = node.getBufferId();
// Split this node if full // Split this node if full
@ -536,6 +525,7 @@ class VarKeyInteriorNode extends VarKeyNode {
* @param key leftmost key associated with new node. * @param key leftmost key associated with new node.
* @param node child node which corresponds to the id and key. * @param node child node which corresponds to the id and key.
* @return root node. * @return root node.
* @throws IOException thrown if an IO error occurs
*/ */
VarKeyNode insert(int id, Field key, VarKeyNode node) throws IOException { VarKeyNode insert(int id, Field key, VarKeyNode node) throws IOException {
@ -549,7 +539,7 @@ class VarKeyInteriorNode extends VarKeyNode {
node.parent = this; node.parent = this;
if (index == 0 && parent != null) { if (index == 0 && parent != null) {
parent.keyChanged(getKey(1), key, this); parent.keyChanged(getKeyField(1), key, this);
} }
return getRoot(); return getRoot();
@ -568,7 +558,6 @@ class VarKeyInteriorNode extends VarKeyNode {
// Create new interior node // Create new interior node
VarKeyInteriorNode newNode = new VarKeyInteriorNode(nodeMgr, keyType); VarKeyInteriorNode newNode = new VarKeyInteriorNode(nodeMgr, keyType);
// DataBuffer newBuf = newNode.buffer;
int halfway = int halfway =
((keyCount == 0 ? buffer.length() : getKeyOffset(keyCount - 1)) + buffer.length()) / 2; ((keyCount == 0 ? buffer.length() : getKeyOffset(keyCount - 1)) + buffer.length()) / 2;
@ -576,7 +565,7 @@ class VarKeyInteriorNode extends VarKeyNode {
moveKeysRight(this, newNode, keyCount - getOffsetIndex(halfway)); moveKeysRight(this, newNode, keyCount - getOffsetIndex(halfway));
// Insert new key/id // Insert new key/id
Field rightKey = newNode.getKey(0); Field rightKey = newNode.getKeyField(0);
if (newKey.compareTo(rightKey) < 0) { if (newKey.compareTo(rightKey) < 0) {
insert(newId, newKey, node); insert(newId, newKey, node);
} }
@ -588,7 +577,7 @@ class VarKeyInteriorNode extends VarKeyNode {
VarKeyNode rootNode = parent.insert(newNode); VarKeyNode rootNode = parent.insert(newNode);
if (newNode.parent != parent) { if (newNode.parent != parent) {
// Fix my parent // Fix my parent
if (parent.getKeyIndex(getKey(0)) < 0) { if (parent.getKeyIndex(getKeyField(0)) < 0) {
parent = newNode.parent; parent = newNode.parent;
} }
} }
@ -596,33 +585,27 @@ class VarKeyInteriorNode extends VarKeyNode {
} }
// New parent node becomes root // New parent node becomes root
parent = new VarKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), rightKey, parent = new VarKeyInteriorNode(nodeMgr, getKeyField(0), buffer.getId(), rightKey,
newNode.getBufferId()); newNode.getBufferId());
newNode.parent = parent; newNode.parent = parent;
return parent; return parent;
} }
/*
* @see ghidra.framework.store.db.VarKeyNode#getLeafNode(long)
*/
@Override @Override
VarKeyRecordNode getLeafNode(Field key) throws IOException { public VarKeyRecordNode getLeafNode(Field key) throws IOException {
VarKeyNode node = nodeMgr.getVarKeyNode(getBufferId(getIdIndex(key))); VarKeyNode node = nodeMgr.getVarKeyNode(getBufferId(getIdIndex(key)));
node.parent = this; node.parent = this;
return node.getLeafNode(key); return node.getLeafNode(key);
} }
/*
* @see ghidra.framework.store.db.VarKeyNode#getLeftmostLeafNode()
*/
@Override @Override
VarKeyRecordNode getLeftmostLeafNode() throws IOException { public VarKeyRecordNode getLeftmostLeafNode() throws IOException {
VarKeyNode node = nodeMgr.getVarKeyNode(getBufferId(0)); VarKeyNode node = nodeMgr.getVarKeyNode(getBufferId(0));
return node.getLeftmostLeafNode(); return node.getLeftmostLeafNode();
} }
@Override @Override
VarKeyRecordNode getRightmostLeafNode() throws IOException { public VarKeyRecordNode getRightmostLeafNode() throws IOException {
VarKeyNode node = nodeMgr.getVarKeyNode(getBufferId(keyCount - 1)); VarKeyNode node = nodeMgr.getVarKeyNode(getBufferId(keyCount - 1));
return node.getRightmostLeafNode(); return node.getRightmostLeafNode();
} }
@ -654,7 +637,7 @@ class VarKeyInteriorNode extends VarKeyNode {
// Delete child entry // Delete child entry
deleteEntry(index); deleteEntry(index);
if (index == 0 && parent != null) { if (index == 0 && parent != null) {
parent.keyChanged(key, getKey(0), this); parent.keyChanged(key, getKeyField(0), this);
} }
return (parent != null) ? parent.balanceChild(this) : this; return (parent != null) ? parent.balanceChild(this) : this;
@ -676,7 +659,7 @@ class VarKeyInteriorNode extends VarKeyNode {
// balance with right sibling except if node corresponds to the right-most // balance with right sibling except if node corresponds to the right-most
// key within this interior node - in that case balance with left sibling. // key within this interior node - in that case balance with left sibling.
int index = getIdIndex(node.getKey(0)); int index = getIdIndex(node.getKeyField(0));
if (index == (keyCount - 1)) { if (index == (keyCount - 1)) {
return balanceChild((VarKeyInteriorNode) nodeMgr.getVarKeyNode(getBufferId(index - 1)), return balanceChild((VarKeyInteriorNode) nodeMgr.getVarKeyNode(getBufferId(index - 1)),
node); node);
@ -700,14 +683,11 @@ class VarKeyInteriorNode extends VarKeyNode {
int leftKeyCount = leftNode.keyCount; int leftKeyCount = leftNode.keyCount;
int rightKeyCount = rightNode.keyCount; int rightKeyCount = rightNode.keyCount;
// if (leftKeyCount == rightKeyCount) {
// return getRoot();
// }
int len = buffer.length(); int len = buffer.length();
int leftKeySpace = len - leftNode.getKeyOffset(leftKeyCount - 1); int leftKeySpace = len - leftNode.getKeyOffset(leftKeyCount - 1);
int rightKeySpace = len - rightNode.getKeyOffset(rightKeyCount - 1); int rightKeySpace = len - rightNode.getKeyOffset(rightKeyCount - 1);
Field rightKey = rightNode.getKey(0); Field rightKey = rightNode.getKeyField(0);
// Can right keys fit within left node // Can right keys fit within left node
if ((rightKeySpace + (rightKeyCount * ENTRY_SIZE)) <= (len - BASE - leftKeySpace - if ((rightKeySpace + (rightKeyCount * ENTRY_SIZE)) <= (len - BASE - leftKeySpace -
@ -731,7 +711,7 @@ class VarKeyInteriorNode extends VarKeyNode {
balanced = moveKeysLeft(leftNode, rightNode, rightKeyCount - index - 1); balanced = moveKeysLeft(leftNode, rightNode, rightKeyCount - index - 1);
} }
if (balanced) { if (balanced) {
this.keyChanged(rightKey, rightNode.getKey(0), rightNode); this.keyChanged(rightKey, rightNode.getKeyField(0), rightNode);
} }
return getRoot(); return getRoot();
} }
@ -775,16 +755,6 @@ class VarKeyInteriorNode extends VarKeyNode {
return true; return true;
} }
//private static void checkKeyOffsets(VarKeyInteriorNode node) {
// for (int i = 0; i < node.keyCount; i++) {
// int length = node.buffer.getInt(node.getKeyOffset(i));
//
// if (length < -1 || length > 40) {
// throw new ArrayIndexOutOfBoundsException();
// }
// }
//}
/** /**
* Move some or all of the keys from the right node into the left node. * Move some or all of the keys from the right node into the left node.
* If all keys are moved, the caller is responsible for deleting the right * If all keys are moved, the caller is responsible for deleting the right
@ -802,9 +772,6 @@ class VarKeyInteriorNode extends VarKeyNode {
int rightOffset = rightNode.getKeyOffset(count - 1); int rightOffset = rightNode.getKeyOffset(count - 1);
int len = rightNode.buffer.length() - rightOffset; int len = rightNode.buffer.length() - rightOffset;
int leftOffset = leftNode.getKeyOffset(leftKeyCount - 1) - len; int leftOffset = leftNode.getKeyOffset(leftKeyCount - 1) - len;
//if ((len + (ENTRY_SIZE * count)) > leftNode.getFreeSpace()) {
// throw new ArrayIndexOutOfBoundsException();
//}
// Move key data to left node // Move key data to left node
leftNode.buffer.copy(leftOffset, rightNode.buffer, rightOffset, len); leftNode.buffer.copy(leftOffset, rightNode.buffer, rightOffset, len);
@ -831,9 +798,6 @@ class VarKeyInteriorNode extends VarKeyNode {
return true; return true;
} }
/*
* @see ghidra.framework.store.db.VarKeyNode#delete()
*/
@Override @Override
public void delete() throws IOException { public void delete() throws IOException {
@ -846,9 +810,6 @@ class VarKeyInteriorNode extends VarKeyNode {
nodeMgr.deleteNode(this); nodeMgr.deleteNode(this);
} }
/*
* @see ghidra.framework.store.db.BTreeNode#getBufferReferences()
*/
@Override @Override
public int[] getBufferReferences() { public int[] getBufferReferences() {
int[] ids = new int[keyCount]; int[] ids = new int[keyCount];
@ -871,7 +832,7 @@ class VarKeyInteriorNode extends VarKeyNode {
public boolean isRightmostKey(Field key) throws IOException { public boolean isRightmostKey(Field key) throws IOException {
if (getIdIndex(key) == (keyCount - 1)) { if (getIdIndex(key) == (keyCount - 1)) {
if (parent != null) { if (parent != null) {
return parent.isRightmostKey(getKey(0)); return parent.isRightmostKey(getKeyField(0));
} }
return true; return true;
} }

View file

@ -22,8 +22,11 @@ import db.buffers.DataBuffer;
/** /**
* <code>VarKeyNode</code> is an abstract implementation of a BTree node * <code>VarKeyNode</code> is an abstract implementation of a BTree node
* which utilizes variable-length Field key values. * which utilizes variable-length Field key values.
* <pre>
* | NodeType(1) | KeyType(1) | KeyCount(4) | ...
* </pre>
*/ */
abstract class VarKeyNode implements BTreeNode { abstract class VarKeyNode implements FieldKeyNode {
private static final int KEY_TYPE_SIZE = 1; private static final int KEY_TYPE_SIZE = 1;
private static final int KEY_COUNT_SIZE = 4; private static final int KEY_COUNT_SIZE = 4;
@ -62,7 +65,7 @@ abstract class VarKeyNode implements BTreeNode {
* @param nodeMgr table node manager. * @param nodeMgr table node manager.
* @param nodeType node type * @param nodeType node type
* @param keyType key Field type * @param keyType key Field type
* @throws IOException thrown if IO error occurs * @throws IOException if IO error occurs
*/ */
VarKeyNode(NodeMgr nodeMgr, byte nodeType, Field keyType) throws IOException { VarKeyNode(NodeMgr nodeMgr, byte nodeType, Field keyType) throws IOException {
this.nodeMgr = nodeMgr; this.nodeMgr = nodeMgr;
@ -75,6 +78,11 @@ abstract class VarKeyNode implements BTreeNode {
nodeMgr.addNode(this); nodeMgr.addNode(this);
} }
@Override
public VarKeyInteriorNode getParent() {
return parent;
}
@Override @Override
public int getBufferId() { public int getBufferId() {
return buffer.getId(); return buffer.getId();
@ -91,8 +99,9 @@ abstract class VarKeyNode implements BTreeNode {
* @return TableNode * @return TableNode
*/ */
VarKeyNode getRoot() { VarKeyNode getRoot() {
if (parent != null) if (parent != null) {
return parent.getRoot(); return parent.getRoot();
}
return this; return this;
} }
@ -107,13 +116,26 @@ abstract class VarKeyNode implements BTreeNode {
buffer.putInt(KEY_COUNT_OFFSET, keyCount); buffer.putInt(KEY_COUNT_OFFSET, keyCount);
} }
@Override
public int compareKeyField(Field k, int keyIndex) {
return k.compareTo(buffer, getKeyOffset(keyIndex));
}
/**
* Get the key offset within the buffer
* @param index key index
* @return record key offset
*/
public abstract int getKeyOffset(int index);
/** /**
* Get the key value at a specific index. * Get the key value at a specific index.
* @param index key index * @param index key index
* @return key value * @return key value
* @throws IOException thrown if an IO error occurs * @throws IOException thrown if an IO error occurs
*/ */
abstract Field getKey(int index) throws IOException; @Override
public abstract Field getKeyField(int index) throws IOException;
/** /**
* Get the leaf node which contains the specified key. * Get the leaf node which contains the specified key.
@ -121,20 +143,23 @@ abstract class VarKeyNode implements BTreeNode {
* @return leaf node * @return leaf node
* @throws IOException thrown if an IO error occurs * @throws IOException thrown if an IO error occurs
*/ */
abstract VarKeyRecordNode getLeafNode(Field key) throws IOException; @Override
public abstract VarKeyRecordNode getLeafNode(Field key) throws IOException;
/** /**
* Get the left-most leaf node within the tree. * Get the left-most leaf node within the tree.
* @return left-most leaf node. * @return left-most leaf node.
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
*/ */
abstract VarKeyRecordNode getLeftmostLeafNode() throws IOException; @Override
public abstract VarKeyRecordNode getLeftmostLeafNode() throws IOException;
/** /**
* Get the right-most leaf node within the tree. * Get the right-most leaf node within the tree.
* @return right-most leaf node. * @return right-most leaf node.
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
*/ */
abstract VarKeyRecordNode getRightmostLeafNode() throws IOException; @Override
public abstract VarKeyRecordNode getRightmostLeafNode() throws IOException;
} }

View file

@ -25,19 +25,19 @@ import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor; import ghidra.util.task.TaskMonitor;
/** /**
* <code>LongKeyRecordNode</code> is an implementation of a BTree leaf node * <code>VarKeyRecordNode</code> is an implementation of a BTree leaf node
* which utilizes variable-length key values and stores variable-length records. * which utilizes variable-length key values and stores variable-length records.
* This type of node has the following layout within a single DataBuffer * This type of node has the following layout within a single DataBuffer
* (field size in bytes): * (field size in bytes):
* <pre> * <pre>
* | NodeType(1) | KeyType(1) | KeyCount(4) | PrevLeafId(4) | NextLeafId(4) | KeyOffset0(4) | IndFlag0(1) |... * | NodeType(1) | KeyType(1) | KeyCount(4) | PrevLeafId(4) | NextLeafId(4) | KeyOffset0(4) | IndFlag0(1) |...
* *
* | KeyOffsetN(4) | IndFlagN(1) |...<FreeSpace>... | KeyN | RecN |... | Key0 | Rec0 | * | KeyOffsetN(4) | IndFlagN(1) |...&lt;FreeSpace&gt;... | KeyN | RecN |... | Key0 | Rec0 |
* </pre> * </pre>
* IndFlag - if not zero the record has been stored within a chained DBBuffer * IndFlag - if not zero the record has been stored within a chained DBBuffer
* whose 4-byte integer buffer ID has been stored within this leaf at the record offset. * whose 4-byte integer buffer ID has been stored within this leaf at the record offset.
*/ */
class VarKeyRecordNode extends VarKeyNode { class VarKeyRecordNode extends VarKeyNode implements FieldKeyRecordNode {
private static final int ID_SIZE = 4; private static final int ID_SIZE = 4;
@ -94,7 +94,7 @@ class VarKeyRecordNode extends VarKeyNode {
void logConsistencyError(String tableName, String msg, Throwable t) throws IOException { void logConsistencyError(String tableName, String msg, Throwable t) throws IOException {
Msg.debug(this, "Consistency Error (" + tableName + "): " + msg); Msg.debug(this, "Consistency Error (" + tableName + "): " + msg);
Msg.debug(this, " bufferID=" + getBufferId() + " key[0]=" + getKey(0)); Msg.debug(this, " bufferID=" + getBufferId() + " key[0]=" + getKeyField(0));
if (t != null) { if (t != null) {
Msg.error(this, "Consistency Error (" + tableName + ")", t); Msg.error(this, "Consistency Error (" + tableName + ")", t);
} }
@ -107,7 +107,7 @@ class VarKeyRecordNode extends VarKeyNode {
Field prevKey = null; Field prevKey = null;
for (int i = 0; i < keyCount; i++) { for (int i = 0; i < keyCount; i++) {
// Compare each key entry with the previous key // Compare each key entry with the previous key
Field key = getKey(i); Field key = getKeyField(i);
if (i != 0) { if (i != 0) {
if (key.compareTo(prevKey) <= 0) { if (key.compareTo(prevKey) <= 0) {
consistent = false; consistent = false;
@ -119,14 +119,14 @@ class VarKeyRecordNode extends VarKeyNode {
prevKey = key; prevKey = key;
} }
if ((parent == null || parent.isLeftmostKey(getKey(0))) && getPreviousLeaf() != null) { if ((parent == null || parent.isLeftmostKey(getKeyField(0))) && getPreviousLeaf() != null) {
consistent = false; consistent = false;
logConsistencyError(tableName, "previous-leaf should not exist", null); logConsistencyError(tableName, "previous-leaf should not exist", null);
} }
VarKeyRecordNode node = getNextLeaf(); VarKeyRecordNode node = getNextLeaf();
if (node != null) { if (node != null) {
if (parent == null || parent.isRightmostKey(getKey(0))) { if (parent == null || parent.isRightmostKey(getKeyField(0))) {
consistent = false; consistent = false;
logConsistencyError(tableName, "next-leaf should not exist", null); logConsistencyError(tableName, "next-leaf should not exist", null);
} }
@ -138,7 +138,7 @@ class VarKeyRecordNode extends VarKeyNode {
} }
} }
} }
else if (parent != null && !parent.isRightmostKey(getKey(0))) { else if (parent != null && !parent.isRightmostKey(getKeyField(0))) {
consistent = false; consistent = false;
logConsistencyError(tableName, "this leaf is not linked to next-leaf", null); logConsistencyError(tableName, "this leaf is not linked to next-leaf", null);
} }
@ -146,35 +146,36 @@ class VarKeyRecordNode extends VarKeyNode {
return consistent; return consistent;
} }
/*
* @see ghidra.framework.store.db.VarKeyNode#getLeafNode(long)
*/
@Override @Override
VarKeyRecordNode getLeafNode(Field key) throws IOException { public VarKeyRecordNode getLeafNode(Field key) throws IOException {
return this; return this;
} }
/*
* @see ghidra.framework.store.db2.VarKeyNode#getLeftmostLeafNode()
*/
@Override @Override
VarKeyRecordNode getLeftmostLeafNode() throws IOException { public VarKeyRecordNode getLeftmostLeafNode() throws IOException {
VarKeyRecordNode leaf = getPreviousLeaf(); VarKeyRecordNode leaf = getPreviousLeaf();
return leaf != null ? leaf.getLeftmostLeafNode() : this; return leaf != null ? leaf.getLeftmostLeafNode() : this;
} }
@Override @Override
VarKeyRecordNode getRightmostLeafNode() throws IOException { public VarKeyRecordNode getRightmostLeafNode() throws IOException {
VarKeyRecordNode leaf = getNextLeaf(); VarKeyRecordNode leaf = getNextLeaf();
return leaf != null ? leaf.getRightmostLeafNode() : this; return leaf != null ? leaf.getRightmostLeafNode() : this;
} }
@Override
public boolean hasNextLeaf() throws IOException {
int nextLeafId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
return (nextLeafId >= 0);
}
/** /**
* Get this leaf node's right sibling * Get this leaf node's right sibling
* @return this leaf node's right sibling or null if right sibling does not exist. * @return this leaf node's right sibling or null if right sibling does not exist.
* @throws IOException thrown if an IO error occurs * @throws IOException thrown if an IO error occurs
*/ */
VarKeyRecordNode getNextLeaf() throws IOException { @Override
public VarKeyRecordNode getNextLeaf() throws IOException {
VarKeyRecordNode leaf = null; VarKeyRecordNode leaf = null;
int nextLeafId = buffer.getInt(NEXT_LEAF_ID_OFFSET); int nextLeafId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
if (nextLeafId >= 0) { if (nextLeafId >= 0) {
@ -183,40 +184,40 @@ class VarKeyRecordNode extends VarKeyNode {
return leaf; return leaf;
} }
@Override
public boolean hasPreviousLeaf() throws IOException {
int prevLeafId = buffer.getInt(PREV_LEAF_ID_OFFSET);
return (prevLeafId >= 0);
}
/** /**
* Get this leaf node's left sibling * Get this leaf node's left sibling
* @return this leaf node's left sibling or null if left sibling does not exist. * @return this leaf node's left sibling or null if left sibling does not exist.
* @throws IOException thrown if an IO error occurs * @throws IOException if an IO error occurs
*/ */
VarKeyRecordNode getPreviousLeaf() throws IOException { @Override
public VarKeyRecordNode getPreviousLeaf() throws IOException {
VarKeyRecordNode leaf = null; VarKeyRecordNode leaf = null;
int nextLeafId = buffer.getInt(PREV_LEAF_ID_OFFSET); int prevLeafId = buffer.getInt(PREV_LEAF_ID_OFFSET);
if (nextLeafId >= 0) { if (prevLeafId >= 0) {
leaf = (VarKeyRecordNode) nodeMgr.getVarKeyNode(nextLeafId); leaf = (VarKeyRecordNode) nodeMgr.getVarKeyNode(prevLeafId);
} }
return leaf; return leaf;
} }
/** @Override
* Perform a binary search to locate the specified key and derive an index public int getKeyIndex(Field key) throws IOException {
* into the Buffer ID storage.
* @param key
* @return int buffer ID index.
* @throws IOException thrown if an IO error occurs
*/
int getKeyIndex(Field key) throws IOException {
int min = 0; int min = 0;
int max = keyCount - 1; int max = keyCount - 1;
while (min <= max) { while (min <= max) {
int i = (min + max) / 2; int i = (min + max) / 2;
Field k = getKey(i); int rc = compareKeyField(key, i);
int rc = k.compareTo(key);
if (rc == 0) { if (rc == 0) {
return i; return i;
} }
else if (rc < 0) { else if (rc > 0) {
min = i + 1; min = i + 1;
} }
else { else {
@ -256,14 +257,14 @@ class VarKeyRecordNode extends VarKeyNode {
} }
// New parent node becomes root // New parent node becomes root
return new VarKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), newLeaf.getKey(0), return new VarKeyInteriorNode(nodeMgr, getKeyField(0), buffer.getId(),
newBufId); newLeaf.getKeyField(0), newBufId);
} }
/** /**
* Append a leaf which contains one or more keys and update tree. Leaf is inserted * Append a leaf which contains one or more keys and update tree. Leaf is inserted
* as the new right sibling of this leaf. * as the new right sibling of this leaf.
* @param newLeaf new right sibling leaf (must be same node type as this leaf) * @param leaf new right sibling leaf (must be same node type as this leaf)
* @return root node which may have changed. * @return root node which may have changed.
* @throws IOException thrown if an IO error occurs * @throws IOException thrown if an IO error occurs
*/ */
@ -290,17 +291,12 @@ class VarKeyRecordNode extends VarKeyNode {
} }
// New parent node becomes root // New parent node becomes root
return new VarKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), leaf.getKey(0), newBufId); return new VarKeyInteriorNode(nodeMgr, getKeyField(0), buffer.getId(), leaf.getKeyField(0),
newBufId);
} }
/** @Override
* Insert or Update a record. public VarKeyNode putRecord(Record record, Table table) throws IOException {
* @param record data record with long key
* @param table table which will be notified when record is inserted or updated.
* @return root node which may have changed.
* @throws IOException thrown if IO error occurs
*/
VarKeyNode putRecord(Record record, Table table) throws IOException {
Field key = record.getKeyField(); Field key = record.getKeyField();
int index = getKeyIndex(key); int index = getKeyIndex(key);
@ -318,7 +314,7 @@ class VarKeyRecordNode extends VarKeyNode {
index = -index - 1; index = -index - 1;
if (insertRecord(index, record)) { if (insertRecord(index, record)) {
if (index == 0 && parent != null) { if (index == 0 && parent != null) {
parent.keyChanged(getKey(1), key, this); parent.keyChanged(getKeyField(1), key, this);
} }
if (table != null) { if (table != null) {
table.insertedRecord(record); table.insertedRecord(record);
@ -359,7 +355,8 @@ class VarKeyRecordNode extends VarKeyNode {
* @return root node which may have changed. * @return root node which may have changed.
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
*/ */
VarKeyNode deleteRecord(Field key, Table table) throws IOException { @Override
public VarKeyNode deleteRecord(Field key, Table table) throws IOException {
// Handle non-existent key - do nothing // Handle non-existent key - do nothing
int index = getKeyIndex(key); int index = getKeyIndex(key);
@ -382,20 +379,14 @@ class VarKeyRecordNode extends VarKeyNode {
// Notify parent of leftmost key change // Notify parent of leftmost key change
if (index == 0 && parent != null) { if (index == 0 && parent != null) {
parent.keyChanged(key, getKey(0), this); parent.keyChanged(key, getKeyField(0), this);
} }
return getRoot(); return getRoot();
} }
/** @Override
* Get the first record whoose key is less than the specified key. public Record getRecordBefore(Field key, Schema schema) throws IOException {
* @param key record key
* @param schema record data schema
* @return Record requested or null if record not found.
* @throws IOException thrown if IO error occurs
*/
Record getRecordBefore(Field key, Schema schema) throws IOException {
int index = getKeyIndex(key); int index = getKeyIndex(key);
if (index < 0) { if (index < 0) {
index = -index - 2; index = -index - 2;
@ -410,14 +401,8 @@ class VarKeyRecordNode extends VarKeyNode {
return getRecord(schema, index); return getRecord(schema, index);
} }
/** @Override
* Get the first record whoose key is greater than the specified key. public Record getRecordAfter(Field key, Schema schema) throws IOException {
* @param key record key
* @param schema record data schema
* @return Record requested or null if record not found.
* @throws IOException thrown if IO error occurs
*/
Record getRecordAfter(Field key, Schema schema) throws IOException {
int index = getKeyIndex(key); int index = getKeyIndex(key);
if (index < 0) { if (index < 0) {
index = -(index + 1); index = -(index + 1);
@ -432,15 +417,8 @@ class VarKeyRecordNode extends VarKeyNode {
return getRecord(schema, index); return getRecord(schema, index);
} }
/** @Override
* Get the first record whoose key is less than or equal to the specified public Record getRecordAtOrBefore(Field key, Schema schema) throws IOException {
* key.
* @param key record key
* @param schema record data schema
* @return Record requested or null if record not found.
* @throws IOException thrown if IO error occurs
*/
Record getRecordAtOrBefore(Field key, Schema schema) throws IOException {
int index = getKeyIndex(key); int index = getKeyIndex(key);
if (index < 0) { if (index < 0) {
index = -index - 2; index = -index - 2;
@ -452,15 +430,8 @@ class VarKeyRecordNode extends VarKeyNode {
return getRecord(schema, index); return getRecord(schema, index);
} }
/** @Override
* Get the first record whoose key is greater than or equal to the specified public Record getRecordAtOrAfter(Field key, Schema schema) throws IOException {
* key.
* @param key record key
* @param schema record data schema
* @return Record requested or null if record not found.
* @throws IOException thrown if IO error occurs
*/
Record getRecordAtOrAfter(Field key, Schema schema) throws IOException {
int index = getKeyIndex(key); int index = getKeyIndex(key);
if (index < 0) { if (index < 0) {
index = -(index + 1); index = -(index + 1);
@ -484,23 +455,25 @@ class VarKeyRecordNode extends VarKeyNode {
return new VarKeyRecordNode(nodeMgr, prevLeafId, nextLeafId, keyType); return new VarKeyRecordNode(nodeMgr, prevLeafId, nextLeafId, keyType);
} }
/*
* @see ghidra.framework.store.db.VarKeyNode#getKey(int)
*/
@Override @Override
Field getKey(int index) throws IOException { public Field getKeyField(int index) throws IOException {
Field key = keyType.newField(); Field key = keyType.newField();
key.read(buffer, buffer.getInt(HEADER_SIZE + (index * ENTRY_SIZE))); key.read(buffer, getKeyOffset(index));
return key; return key;
} }
@Override
public int getKeyOffset(int index) {
return buffer.getInt(HEADER_SIZE + (index * ENTRY_SIZE));
}
/** /**
* Get the record data offset within the buffer * Get the record data offset within the buffer
* @param index key index * @param index key index
* @return record data offset * @return record data offset
*/ */
private int getRecordDataOffset(int index) throws IOException { private int getRecordDataOffset(int index) throws IOException {
int offset = buffer.getInt(HEADER_SIZE + (index * ENTRY_SIZE)); int offset = getKeyOffset(index);
return offset + keyType.readLength(buffer, offset); return offset + keyType.readLength(buffer, offset);
} }
@ -552,13 +525,13 @@ class VarKeyRecordNode extends VarKeyNode {
/** /**
* Get the length of a stored record with key. * Get the length of a stored record with key.
* @param keyIndex key index associated with record. * @param index key index associated with record.
*/ */
private int getFullRecordLength(int keyIndex) { private int getFullRecordLength(int index) {
if (keyIndex == 0) { if (index == 0) {
return buffer.length() - getRecordKeyOffset(0); return buffer.length() - getRecordKeyOffset(0);
} }
return getRecordKeyOffset(keyIndex - 1) - getRecordKeyOffset(keyIndex); return getRecordKeyOffset(index - 1) - getRecordKeyOffset(index);
} }
/** /**
@ -600,8 +573,9 @@ class VarKeyRecordNode extends VarKeyNode {
* @param index key index * @param index key index
* @return Record * @return Record
*/ */
Record getRecord(Schema schema, int index) throws IOException { @Override
Field key = getKey(index); public Record getRecord(Schema schema, int index) throws IOException {
Field key = getKeyField(index);
Record record = schema.createRecord(key); Record record = schema.createRecord(key);
if (hasIndirectStorage(index)) { if (hasIndirectStorage(index)) {
int bufId = buffer.getInt(getRecordDataOffset(index)); int bufId = buffer.getInt(getRecordDataOffset(index));
@ -614,14 +588,16 @@ class VarKeyRecordNode extends VarKeyNode {
return record; return record;
} }
/** @Override
* Get the record identified by the specified key. public int getRecordOffset(int index) throws IOException {
* @param key record key if (hasIndirectStorage(index)) {
* @param schema record data schema return -buffer.getInt(getRecordDataOffset(index));
* @return Record requested or null if record not found. }
* @throws IOException thrown if IO error occurs return getRecordDataOffset(index);
*/ }
Record getRecord(Field key, Schema schema) throws IOException {
@Override
public Record getRecord(Field key, Schema schema) throws IOException {
int index = getKeyIndex(key); int index = getKeyIndex(key);
if (index < 0) if (index < 0)
return null; return null;
@ -658,7 +634,7 @@ class VarKeyRecordNode extends VarKeyNode {
/** /**
* Split the contents of this leaf node; placing the right half of the records into the * Split the contents of this leaf node; placing the right half of the records into the
* empty leaf node provided. * empty leaf node provided.
* @param newRightLeaf empty right sibling leaf * @param rightNode empty right sibling leaf
*/ */
private void splitData(VarKeyRecordNode rightNode) { private void splitData(VarKeyRecordNode rightNode) {
@ -752,12 +728,12 @@ class VarKeyRecordNode extends VarKeyNode {
/** /**
* Inserts the record at the given index if there is sufficient space in * Inserts the record at the given index if there is sufficient space in
* the buffer. * the buffer.
* @param keyIndex insertion index * @param index insertion index
* @param record record to be inserted * @param record record to be inserted
* @return true if the record was successfully inserted. * @return true if the record was successfully inserted.
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
*/ */
private boolean insertRecord(int keyIndex, Record record) throws IOException { private boolean insertRecord(int index, Record record) throws IOException {
Field key = record.getKeyField(); Field key = record.getKeyField();
int keyLen = key.length(); int keyLen = key.length();
@ -776,11 +752,11 @@ class VarKeyRecordNode extends VarKeyNode {
return false; // insufficient space for record storage return false; // insufficient space for record storage
// Make room for new record // Make room for new record
int offset = moveRecords(keyIndex, -(len + keyLen)); int offset = moveRecords(index, -(len + keyLen));
// Make room for new key/offset entry // Make room for new key/offset entry
int start = HEADER_SIZE + (keyIndex * ENTRY_SIZE); int start = HEADER_SIZE + (index * ENTRY_SIZE);
len = (keyCount - keyIndex) * ENTRY_SIZE; len = (keyCount - index) * ENTRY_SIZE;
buffer.move(start, start + ENTRY_SIZE, len); buffer.move(start, start + ENTRY_SIZE, len);
// Store new record key/offset // Store new record key/offset
@ -798,7 +774,7 @@ class VarKeyRecordNode extends VarKeyNode {
else { else {
record.write(buffer, offset + keyLen); record.write(buffer, offset + keyLen);
} }
enableIndirectStorage(keyIndex, useIndirect); enableIndirectStorage(index, useIndirect);
return true; return true;
} }
@ -809,7 +785,8 @@ class VarKeyRecordNode extends VarKeyNode {
* @param index record index * @param index record index
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
*/ */
void remove(int index) throws IOException { @Override
public void remove(int index) throws IOException {
if (index < 0 || index >= keyCount) if (index < 0 || index >= keyCount)
throw new AssertException(); throw new AssertException();
@ -833,7 +810,8 @@ class VarKeyRecordNode extends VarKeyNode {
* @return root node which may have changed. * @return root node which may have changed.
* @throws IOException thrown if IO error occurs * @throws IOException thrown if IO error occurs
*/ */
VarKeyNode removeLeaf() throws IOException { @Override
public VarKeyNode removeLeaf() throws IOException {
// Remove all chained buffers associated with this leaf // Remove all chained buffers associated with this leaf
for (int index = 0; index < keyCount; ++index) { for (int index = 0; index < keyCount; ++index) {
@ -842,7 +820,7 @@ class VarKeyRecordNode extends VarKeyNode {
} }
} }
Field key = getKey(0); Field key = getKeyField(0);
int prevBufferId = buffer.getInt(PREV_LEAF_ID_OFFSET); int prevBufferId = buffer.getInt(PREV_LEAF_ID_OFFSET);
int nextBufferId = buffer.getInt(NEXT_LEAF_ID_OFFSET); int nextBufferId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
if (prevBufferId >= 0) { if (prevBufferId >= 0) {
@ -870,9 +848,6 @@ class VarKeyRecordNode extends VarKeyNode {
chainedBuffer.delete(); chainedBuffer.delete();
} }
/*
* @see ghidra.framework.store.db.VarKeyNode#delete()
*/
@Override @Override
public void delete() throws IOException { public void delete() throws IOException {
@ -890,9 +865,6 @@ class VarKeyRecordNode extends VarKeyNode {
nodeMgr.deleteNode(this); nodeMgr.deleteNode(this);
} }
/*
* @see ghidra.framework.store.db.BTreeNode#getBufferReferences()
*/
@Override @Override
public int[] getBufferReferences() { public int[] getBufferReferences() {
IntArrayList idList = new IntArrayList(); IntArrayList idList = new IntArrayList();
@ -903,6 +875,7 @@ class VarKeyRecordNode extends VarKeyNode {
idList.add(buffer.getInt(offset)); idList.add(buffer.getInt(offset));
} }
catch (IOException e) { catch (IOException e) {
// ignore
} }
} }
} }

View file

@ -15,12 +15,11 @@
*/ */
package db; package db;
import ghidra.util.datastruct.IntArrayList;
import ghidra.util.exception.AssertException;
import java.io.IOException; import java.io.IOException;
import db.buffers.DataBuffer; import db.buffers.DataBuffer;
import ghidra.util.datastruct.IntArrayList;
import ghidra.util.exception.AssertException;
/** /**
* <code>VarRecNode</code> is an implementation of a BTree leaf node * <code>VarRecNode</code> is an implementation of a BTree leaf node
@ -70,37 +69,27 @@ class VarRecNode extends LongKeyRecordNode {
super(nodeMgr, NodeMgr.LONGKEY_VAR_REC_NODE, prevLeafId, nextLeafId); super(nodeMgr, NodeMgr.LONGKEY_VAR_REC_NODE, prevLeafId, nextLeafId);
} }
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#createNewLeaf()
*/
@Override @Override
LongKeyRecordNode createNewLeaf(int prevLeafId, int nextLeafId) throws IOException { LongKeyRecordNode createNewLeaf(int prevLeafId, int nextLeafId) throws IOException {
return new VarRecNode(nodeMgr, prevLeafId, nextLeafId); return new VarRecNode(nodeMgr, prevLeafId, nextLeafId);
} }
/*
* @see ghidra.framework.store.db.LongKeyNode#getKey(int)
*/
@Override @Override
long getKey(int index) { long getKey(int index) {
return buffer.getLong(KEY_BASE_OFFSET + (index * ENTRY_SIZE)); return buffer.getLong(getKeyOffset(index));
} }
// /** @Override
// * Store a key at the specified index public int getKeyOffset(int index) {
// * @param index key index return KEY_BASE_OFFSET + (index * ENTRY_SIZE);
// * @param key key value }
// */
// private void putKey(int index, long key) {
// buffer.putLong(KEY_BASE_OFFSET + (index * ENTRY_SIZE), key);
// }
/** /**
* Get the record offset within the buffer * Get the record offset within the buffer
* @param index key index * @param index key index
* @return record offset * @return record offset
*/ */
private int getRecordOffset(int index) { int getRecordDataOffset(int index) {
return buffer.getInt(DATA_OFFSET_BASE_OFFSET + (index * ENTRY_SIZE)); return buffer.getInt(DATA_OFFSET_BASE_OFFSET + (index * ENTRY_SIZE));
} }
@ -109,7 +98,7 @@ class VarRecNode extends LongKeyRecordNode {
* @param index key index * @param index key index
* @param offset record offset * @param offset record offset
*/ */
private void putRecordOffset(int index, int offset) { private void putRecordDataOffset(int index, int offset) {
buffer.putInt(DATA_OFFSET_BASE_OFFSET + (index * ENTRY_SIZE), offset); buffer.putInt(DATA_OFFSET_BASE_OFFSET + (index * ENTRY_SIZE), offset);
} }
@ -128,40 +117,39 @@ class VarRecNode extends LongKeyRecordNode {
* @param state indirect storage used (true) or not used (false) * @param state indirect storage used (true) or not used (false)
*/ */
private void enableIndirectStorage(int index, boolean state) { private void enableIndirectStorage(int index, boolean state) {
buffer.putByte(IND_OPTION_BASE_OFFSET + (index * ENTRY_SIZE), buffer.putByte(IND_OPTION_BASE_OFFSET + (index * ENTRY_SIZE), state ? (byte) 1 : (byte) 0);
state ? (byte)1 : (byte)0);
} }
/** /**
* @return unused free space within node * @return unused free space within node
*/ */
private int getFreeSpace() { private int getFreeSpace() {
return (keyCount == 0 ? buffer.length() : getRecordOffset(keyCount - 1)) return (keyCount == 0 ? buffer.length() : getRecordDataOffset(keyCount - 1)) -
- (keyCount * ENTRY_SIZE) - RECORD_LEAF_HEADER_SIZE; (keyCount * ENTRY_SIZE) - RECORD_LEAF_HEADER_SIZE;
} }
/** /**
* Get the length of a stored record. * Get the length of a stored record.
* @param keyIndex key index associated with record. * @param index index associated with record.
*/ */
private int getRecordLength(int keyIndex) { private int getRecordLength(int index) {
if (keyIndex == 0) { if (index == 0) {
return buffer.length() - getRecordOffset(0); return buffer.length() - getRecordDataOffset(0);
} }
return getRecordOffset(keyIndex - 1) - getRecordOffset(keyIndex); return getRecordDataOffset(index - 1) - getRecordDataOffset(index);
} }
/** /**
* Get the length of a stored record. Optimized if record offset * Get the length of a stored record. Optimized if record offset
* already known. * already known.
* @param keyIndex key index associated with record. * @param index index associated with record.
* @param offset record offset * @param offset record offset
*/ */
private int getRecordLength(int keyIndex, int offset) { private int getRecordLength(int index, int offset) {
if (keyIndex == 0) { if (index == 0) {
return buffer.length() - offset; return buffer.length() - offset;
} }
return getRecordOffset(keyIndex - 1) - offset; return getRecordDataOffset(index - 1) - offset;
} }
/** /**
@ -182,12 +170,12 @@ class VarRecNode extends LongKeyRecordNode {
if (index == 0) { if (index == 0) {
return buffer.length() + offset; return buffer.length() + offset;
} }
return getRecordOffset(lastIndex) + offset; return getRecordDataOffset(lastIndex) + offset;
} }
// Determine block to be moved // Determine block to be moved
int start = getRecordOffset(lastIndex); int start = getRecordDataOffset(lastIndex);
int end = (index == 0) ? buffer.length() : getRecordOffset(index - 1); int end = (index == 0) ? buffer.length() : getRecordDataOffset(index - 1);
int len = end - start; int len = end - start;
// Move record data // Move record data
@ -195,33 +183,34 @@ class VarRecNode extends LongKeyRecordNode {
// Adjust stored offsets // Adjust stored offsets
for (int i = index; i < keyCount; i++) { for (int i = index; i < keyCount; i++) {
putRecordOffset(i, getRecordOffset(i) + offset); putRecordDataOffset(i, getRecordDataOffset(i) + offset);
} }
return end + offset; return end + offset;
} }
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#getRecord(ghidra.framework.store.db.Schema, int)
*/
@Override @Override
Record getRecord(Schema schema, int index) throws IOException { public Record getRecord(Schema schema, int index) throws IOException {
long key = getKey(index); long key = getKey(index);
Record record = schema.createRecord(key); Record record = schema.createRecord(key);
if (hasIndirectStorage(index)) { if (hasIndirectStorage(index)) {
int bufId = buffer.getInt(getRecordOffset(index)); int bufId = buffer.getInt(getRecordDataOffset(index));
ChainedBuffer chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(), ChainedBuffer chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(), bufId);
bufId);
record.read(chainedBuffer, 0); record.read(chainedBuffer, 0);
} }
else { else {
record.read(buffer, getRecordOffset(index)); record.read(buffer, getRecordDataOffset(index));
} }
return record; return record;
} }
/* @Override
* @see ghidra.framework.store.db.LongKeyRecordNode#getRecord(long, ghidra.framework.store.db.Schema) public int getRecordOffset(int index) throws IOException {
*/ if (hasIndirectStorage(index)) {
return -buffer.getInt(getRecordDataOffset(index));
}
return getRecordDataOffset(index);
}
@Override @Override
Record getRecord(long key, Schema schema) throws IOException { Record getRecord(long key, Schema schema) throws IOException {
int index = getKeyIndex(key); int index = getKeyIndex(key);
@ -236,14 +225,14 @@ class VarRecNode extends LongKeyRecordNode {
*/ */
private int getSplitIndex() { private int getSplitIndex() {
int halfway = ((keyCount == 0 ? buffer.length() : getRecordOffset(keyCount - 1)) int halfway = ((keyCount == 0 ? buffer.length() : getRecordDataOffset(keyCount - 1)) +
+ buffer.length()) / 2; buffer.length()) / 2;
int min = 1; int min = 1;
int max = keyCount - 1; int max = keyCount - 1;
while (min < max) { while (min < max) {
int i = (min + max) / 2; int i = (min + max) / 2;
int offset = getRecordOffset(i); int offset = getRecordDataOffset(i);
if (offset == halfway) { if (offset == halfway) {
return i; return i;
} }
@ -257,9 +246,6 @@ class VarRecNode extends LongKeyRecordNode {
return min; return min;
} }
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#splitData(ghidra.framework.store.db.LongKeyRecordNode)
*/
@Override @Override
void splitData(LongKeyRecordNode newRightLeaf) { void splitData(LongKeyRecordNode newRightLeaf) {
@ -267,20 +253,21 @@ class VarRecNode extends LongKeyRecordNode {
int splitIndex = getSplitIndex(); int splitIndex = getSplitIndex();
int count = keyCount - splitIndex; int count = keyCount - splitIndex;
int start = getRecordOffset(keyCount - 1); // start of block to be moved int start = getRecordDataOffset(keyCount - 1); // start of block to be moved
int end = getRecordOffset(splitIndex - 1); // end of block to be moved int end = getRecordDataOffset(splitIndex - 1); // end of block to be moved
int splitLen = end - start; // length of block to be moved int splitLen = end - start; // length of block to be moved
int rightOffset = buffer.length() - splitLen; // data offset within new leaf node int rightOffset = buffer.length() - splitLen; // data offset within new leaf node
// Copy data to new leaf node // Copy data to new leaf node
DataBuffer newBuf = rightNode.buffer; DataBuffer newBuf = rightNode.buffer;
newBuf.copy(rightOffset, buffer, start, splitLen); newBuf.copy(rightOffset, buffer, start, splitLen);
newBuf.copy(KEY_BASE_OFFSET, buffer, KEY_BASE_OFFSET + (splitIndex * ENTRY_SIZE), count * ENTRY_SIZE); newBuf.copy(KEY_BASE_OFFSET, buffer, KEY_BASE_OFFSET + (splitIndex * ENTRY_SIZE),
count * ENTRY_SIZE);
// Fix record offsets in new leaf node // Fix record offsets in new leaf node
int offsetCorrection = buffer.length() - end; int offsetCorrection = buffer.length() - end;
for (int i = 0; i < count; i++) { for (int i = 0; i < count; i++) {
rightNode.putRecordOffset(i, rightNode.getRecordOffset(i) + offsetCorrection); rightNode.putRecordDataOffset(i, rightNode.getRecordDataOffset(i) + offsetCorrection);
} }
// Adjust key counts // Adjust key counts
@ -288,13 +275,10 @@ class VarRecNode extends LongKeyRecordNode {
rightNode.setKeyCount(count); rightNode.setKeyCount(count);
} }
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#updateRecord(int, ghidra.framework.store.db.Record)
*/
@Override @Override
LongKeyNode updateRecord(int index, Record record) throws IOException { LongKeyNode updateRecord(int index, Record record) throws IOException {
int offset = getRecordOffset(index); int offset = getRecordDataOffset(index);
int oldLen = getRecordLength(index, offset); int oldLen = getRecordLength(index, offset);
int len = record.length(); int len = record.length();
@ -308,8 +292,7 @@ class VarRecNode extends LongKeyRecordNode {
len = 4; len = 4;
ChainedBuffer chainedBuffer = null; ChainedBuffer chainedBuffer = null;
if (wasIndirect) { if (wasIndirect) {
chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(), chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(), buffer.getInt(offset));
buffer.getInt(offset));
chainedBuffer.setSize(record.length(), false); chainedBuffer.setSize(record.length(), false);
} }
else { else {
@ -331,7 +314,7 @@ class VarRecNode extends LongKeyRecordNode {
int dataShift = oldLen - len; int dataShift = oldLen - len;
if (dataShift != 0) { if (dataShift != 0) {
offset = moveRecords(index + 1, dataShift); offset = moveRecords(index + 1, dataShift);
putRecordOffset(index, offset); putRecordDataOffset(index, offset);
} }
if (!useIndirect) { if (!useIndirect) {
record.write(buffer, offset); record.write(buffer, offset);
@ -345,17 +328,8 @@ class VarRecNode extends LongKeyRecordNode {
return leaf.putRecord(record, null); return leaf.putRecord(record, null);
} }
/**
* Insert the specified record at the specified key index.
* Existing data may be shifted within the buffer to make room for
* the new record. Parent must be notified if this changes the leftmost
* key.
* @param keyIndex
* @param record
* @throws IOException
*/
@Override @Override
boolean insertRecord(int keyIndex, Record record) throws IOException { boolean insertRecord(int index, Record record) throws IOException {
// Check for use of indirect chained record node(s) // Check for use of indirect chained record node(s)
int len = record.length(); int len = record.length();
@ -369,11 +343,11 @@ class VarRecNode extends LongKeyRecordNode {
return false; // insufficient space for record storage return false; // insufficient space for record storage
// Make room for new record // Make room for new record
int offset = moveRecords(keyIndex, -len); int offset = moveRecords(index, -len);
// Make room for new key/offset entry // Make room for new key/offset entry
int start = KEY_BASE_OFFSET + (keyIndex * ENTRY_SIZE); int start = KEY_BASE_OFFSET + (index * ENTRY_SIZE);
len = (keyCount - keyIndex) * ENTRY_SIZE; len = (keyCount - index) * ENTRY_SIZE;
buffer.move(start, start + ENTRY_SIZE, len); buffer.move(start, start + ENTRY_SIZE, len);
// Store new record key/offset // Store new record key/offset
@ -383,29 +357,27 @@ class VarRecNode extends LongKeyRecordNode {
// Store record data // Store record data
if (useIndirect) { if (useIndirect) {
ChainedBuffer chainedBuffer = new ChainedBuffer(record.length(), nodeMgr.getBufferMgr()); ChainedBuffer chainedBuffer =
new ChainedBuffer(record.length(), nodeMgr.getBufferMgr());
buffer.putInt(offset, chainedBuffer.getId()); buffer.putInt(offset, chainedBuffer.getId());
record.write(chainedBuffer, 0); record.write(chainedBuffer, 0);
} }
else { else {
record.write(buffer, offset); record.write(buffer, offset);
} }
enableIndirectStorage(keyIndex, useIndirect); enableIndirectStorage(index, useIndirect);
return true; return true;
} }
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#remove(int)
*/
@Override @Override
void remove(int index) throws IOException { public void remove(int index) throws IOException {
if (index < 0 || index >= keyCount) if (index < 0 || index >= keyCount)
throw new AssertException(); throw new AssertException();
if (hasIndirectStorage(index)) { if (hasIndirectStorage(index)) {
removeChainedBuffer(buffer.getInt(getRecordOffset(index))); removeChainedBuffer(buffer.getInt(getRecordDataOffset(index)));
enableIndirectStorage(index, false); enableIndirectStorage(index, false);
} }
@ -418,18 +390,17 @@ throw new AssertException();
setKeyCount(keyCount - 1); setKeyCount(keyCount - 1);
} }
/** /**
* Removes this leaf and all associated chained buffers. * Removes this leaf and all associated chained buffers.
* @see db.LongKeyRecordNode#removeLeaf() * @see db.LongKeyRecordNode#removeLeaf()
*/ */
@Override @Override
LongKeyNode removeLeaf() throws IOException { public LongKeyNode removeLeaf() throws IOException {
// Remove all chained buffers associated with this leaf // Remove all chained buffers associated with this leaf
for (int index = 0; index < keyCount; ++index) { for (int index = 0; index < keyCount; ++index) {
if (hasIndirectStorage(index)) { if (hasIndirectStorage(index)) {
removeChainedBuffer(buffer.getInt(getRecordOffset(index))); removeChainedBuffer(buffer.getInt(getRecordDataOffset(index)));
} }
} }
return super.removeLeaf(); return super.removeLeaf();
@ -444,16 +415,13 @@ throw new AssertException();
chainedBuffer.delete(); chainedBuffer.delete();
} }
/*
* @see ghidra.framework.store.db.LongKeyNode#delete()
*/
@Override @Override
public void delete() throws IOException { public void delete() throws IOException {
// Remove all chained buffers associated with this node. // Remove all chained buffers associated with this node.
for (int index = 0; index < keyCount; index++) { for (int index = 0; index < keyCount; index++) {
if (hasIndirectStorage(index)) { if (hasIndirectStorage(index)) {
int offset = getRecordOffset(index); int offset = getRecordDataOffset(index);
int bufferId = buffer.getInt(offset); int bufferId = buffer.getInt(offset);
removeChainedBuffer(bufferId); removeChainedBuffer(bufferId);
buffer.putInt(offset, -1); buffer.putInt(offset, -1);
@ -464,14 +432,12 @@ throw new AssertException();
nodeMgr.deleteNode(this); nodeMgr.deleteNode(this);
} }
/* @Override
* @see ghidra.framework.store.db.BTreeNode#getBufferReferences()
*/
public int[] getBufferReferences() { public int[] getBufferReferences() {
IntArrayList idList = new IntArrayList(); IntArrayList idList = new IntArrayList();
for (int i = 0; i < keyCount; i++) { for (int i = 0; i < keyCount; i++) {
if (hasIndirectStorage(i)) { if (hasIndirectStorage(i)) {
int offset = getRecordOffset(i); int offset = getRecordDataOffset(i);
idList.add(buffer.getInt(offset)); idList.add(buffer.getInt(offset));
} }
} }

View file

@ -28,7 +28,6 @@ import ghidra.util.SystemUtilities;
import ghidra.util.datastruct.ObjectArray; import ghidra.util.datastruct.ObjectArray;
import ghidra.util.exception.*; import ghidra.util.exception.*;
import ghidra.util.task.TaskMonitor; import ghidra.util.task.TaskMonitor;
import ghidra.util.task.TaskMonitorAdapter;
/** /**
* <code>BufferMgr</code> provides low-level buffer management and caching. * <code>BufferMgr</code> provides low-level buffer management and caching.
@ -176,7 +175,7 @@ public class BufferMgr {
* @param sourceFile buffer file * @param sourceFile buffer file
* @throws IOException if source or cache file access error occurs * @throws IOException if source or cache file access error occurs
*/ */
public BufferMgr(BufferFile sourceFile) throws FileNotFoundException, IOException { public BufferMgr(BufferFile sourceFile) throws IOException {
this(sourceFile, DEFAULT_BUFFER_SIZE, DEFAULT_CACHE_SIZE, DEFAULT_CHECKPOINT_COUNT); this(sourceFile, DEFAULT_BUFFER_SIZE, DEFAULT_CACHE_SIZE, DEFAULT_CHECKPOINT_COUNT);
} }
@ -188,8 +187,7 @@ public class BufferMgr {
* @param maxUndos maximum number of checkpoints retained for undo (Minimum=1). * @param maxUndos maximum number of checkpoints retained for undo (Minimum=1).
* @throws IOException if source or cache file access error occurs * @throws IOException if source or cache file access error occurs
*/ */
public BufferMgr(BufferFile sourceFile, long approxCacheSize, int maxUndos) public BufferMgr(BufferFile sourceFile, long approxCacheSize, int maxUndos) throws IOException {
throws FileNotFoundException, IOException {
this(sourceFile, 0, approxCacheSize, maxUndos); this(sourceFile, 0, approxCacheSize, maxUndos);
} }
@ -202,9 +200,9 @@ public class BufferMgr {
* @param maxUndos maximum number of checkpoints retained for undo (Minimum=1). * @param maxUndos maximum number of checkpoints retained for undo (Minimum=1).
* @throws IOException if source or cache file access error occurs * @throws IOException if source or cache file access error occurs
*/ */
private BufferMgr(BufferFile sourceFile, int requestedbufferSize, long approxCacheSize, private BufferMgr(BufferFile sourceFile, int requestedBufferSize, long approxCacheSize,
int maxUndos) throws FileNotFoundException, IOException { int maxUndos) throws FileNotFoundException, IOException {
bufferSize = requestedbufferSize; bufferSize = requestedBufferSize;
if (sourceFile != null) { if (sourceFile != null) {
this.sourceFile = sourceFile; this.sourceFile = sourceFile;
int cnt = sourceFile.getIndexCount(); int cnt = sourceFile.getIndexCount();
@ -362,6 +360,9 @@ public class BufferMgr {
/** /**
* Get file parameter * Get file parameter
* @param name parameter name/key
* @return parameter value
* @throws NoSuchElementException if parameter not found
*/ */
int getParameter(String name) throws NoSuchElementException { int getParameter(String name) throws NoSuchElementException {
return cacheFile.getParameter(name); return cacheFile.getParameter(name);
@ -369,8 +370,8 @@ public class BufferMgr {
/** /**
* Set file parameter * Set file parameter
* @param name * @param name parameter name/key
* @param value * @param value parameter value
*/ */
void setParameter(String name, int value) { void setParameter(String name, int value) {
cacheFile.setParameter(name, value); cacheFile.setParameter(name, value);
@ -391,7 +392,8 @@ public class BufferMgr {
* buffer file. * buffer file.
* This method should be called when this buffer manager instance * This method should be called when this buffer manager instance
* is no longer needed. * is no longer needed.
* @param keepRecoveryData * @param keepRecoveryData true if existing snapshot recovery files
* should not be deleted.
*/ */
public void dispose(boolean keepRecoveryData) { public void dispose(boolean keepRecoveryData) {
@ -626,7 +628,6 @@ public class BufferMgr {
/** /**
* Remove a buffer node from memory cache. * Remove a buffer node from memory cache.
* @param node buffer node * @param node buffer node
* @return buffer object, or null if buffer node was not cached
*/ */
private void removeFromCache(BufferNode node) { private void removeFromCache(BufferNode node) {
if (node.buffer != null) { if (node.buffer != null) {
@ -1010,9 +1011,11 @@ public class BufferMgr {
} }
/** /**
* Return buffer. * Release buffer back to buffer manager.
* After invoking this method, the buffer object should not * After invoking this method, the buffer object should not
* be used and all references should be dropped. * be used and all references should be dropped.
* @param buf data buffer
* @throws IOException if IO error occurs
*/ */
public void releaseBuffer(DataBuffer buf) throws IOException { public void releaseBuffer(DataBuffer buf) throws IOException {
@ -1031,9 +1034,9 @@ public class BufferMgr {
/** /**
* Handle exception which indicates a potential corruption of the BufferMgr state * Handle exception which indicates a potential corruption of the BufferMgr state
* @param exception * @param exception exception
* @param errorText * @param errorText associated error text
* @throws IOException * @throws IOException exception thrown if instance of IOException
*/ */
private void handleCorruptionException(Exception exception, String errorText) private void handleCorruptionException(Exception exception, String errorText)
throws IOException { throws IOException {
@ -1182,7 +1185,7 @@ public class BufferMgr {
} }
/** /**
* Returns true if unsaved "buffer" changes exist. * @return true if unsaved "buffer" changes exist.
* If no changes have been made, or all changes have been * If no changes have been made, or all changes have been
* "undone", false will be returned. Parameter changes * "undone", false will be returned. Parameter changes
* are no considered. * are no considered.
@ -1194,9 +1197,6 @@ public class BufferMgr {
/** /**
* Create a new checkpoint node list. * Create a new checkpoint node list.
* The redo stack will be cleared. * The redo stack will be cleared.
* @param force if true the checkpoint will be performed regardless of
* the lock count.
* @return true if checkpoint successful, or false if buffers are read-only
*/ */
private void startCheckpoint() { private void startCheckpoint() {
@ -1235,21 +1235,25 @@ public class BufferMgr {
} }
/** /**
* Returns number of undo-able transactions * @return number of undo-able transactions
*/ */
public int getAvailableUndoCount() { public int getAvailableUndoCount() {
return checkpointHeads.size() - 1; return checkpointHeads.size() - 1;
} }
/** /**
* Returns the number of redo-able transactions * @return the number of redo-able transactions
*/ */
public int getAvailableRedoCount() { public int getAvailableRedoCount() {
return redoCheckpointHeads.size(); return redoCheckpointHeads.size();
} }
/** /**
* Backup to previous checkpoint. * Backup to previous checkpoint. Method should not be invoked
* when one or more buffers are locked.
* @param redoable true if currrent checkpoint should be moved to redo stack
* @return true if successful else false
* @throws IOException if IO error occurs
*/ */
public boolean undo(boolean redoable) throws IOException { public boolean undo(boolean redoable) throws IOException {
synchronized (snapshotLock) { synchronized (snapshotLock) {
@ -1337,7 +1341,9 @@ public class BufferMgr {
} }
/** /**
* Redo next checkpoint. * Redo next checkpoint. Method should not be invoked
* when one or more buffers are locked.
* @return true if successful else false
*/ */
public boolean redo() { public boolean redo() {
synchronized (snapshotLock) { synchronized (snapshotLock) {
@ -1414,7 +1420,8 @@ public class BufferMgr {
} }
/** /**
* Returns true if save operation can be performed. * @return true if save operation can be performed.
* @throws IOException if IO error occurs
*/ */
public boolean canSave() throws IOException { public boolean canSave() throws IOException {
if (corruptedState) { if (corruptedState) {
@ -1427,7 +1434,7 @@ public class BufferMgr {
} }
/** /**
* Returns true if buffers have been modified since opening or since * @return true if buffers have been modified since opening or since
* last snapshot. * last snapshot.
*/ */
public synchronized boolean modifiedSinceSnapshot() { public synchronized boolean modifiedSinceSnapshot() {
@ -1440,6 +1447,8 @@ public class BufferMgr {
* made since the last version. * made since the last version.
* @param monitor task monitor * @param monitor task monitor
* @return true if snapshot successful, false if * @return true if snapshot successful, false if
* @throws IOException if IO error occurs
* @throws CancelledException if task monitor is cancelled
*/ */
public boolean takeRecoverySnapshot(DBChangeSet changeSet, TaskMonitor monitor) public boolean takeRecoverySnapshot(DBChangeSet changeSet, TaskMonitor monitor)
throws IOException, CancelledException { throws IOException, CancelledException {
@ -1548,7 +1557,8 @@ public class BufferMgr {
* Returns the recovery changeSet data file for reading or null if one is not available. * Returns the recovery changeSet data file for reading or null if one is not available.
* The caller must dispose of the returned file before peforming generating any new * The caller must dispose of the returned file before peforming generating any new
* recovery snapshots. * recovery snapshots.
* @throws IOException * @return recovery change set buffer file
* @throws IOException if IO error occurs
*/ */
public LocalBufferFile getRecoveryChangeSetFile() throws IOException { public LocalBufferFile getRecoveryChangeSetFile() throws IOException {
if (recoveryMgr != null) { if (recoveryMgr != null) {
@ -1580,6 +1590,9 @@ public class BufferMgr {
* If recovery is cancelled, this buffer manager must be disposed. * If recovery is cancelled, this buffer manager must be disposed.
* since the underlying state will be corrupt. * since the underlying state will be corrupt.
* @param monitor task monitor * @param monitor task monitor
* @return true if recovery successful else false
* @throws IOException if IO error occurs
* @throws CancelledException if task monitor is cancelled
*/ */
public boolean recover(TaskMonitor monitor) throws IOException, CancelledException { public boolean recover(TaskMonitor monitor) throws IOException, CancelledException {
synchronized (snapshotLock) { synchronized (snapshotLock) {
@ -1600,9 +1613,12 @@ public class BufferMgr {
/** /**
* Recover data from recovery file * Recover data from recovery file
* @param recoveryFile * @param recoveryFile recovery file
* @param monitor * @param recoveryIndex recovery index (0 or 1) which corresponds to
* @throws CancelledException * recoveryFile.
* @param monitor task monitor
* @throws IOException if IO error occurs
* @throws CancelledException if task monitor is cancelled
*/ */
synchronized void recover(RecoveryFile recoveryFile, int recoveryIndex, TaskMonitor monitor) synchronized void recover(RecoveryFile recoveryFile, int recoveryIndex, TaskMonitor monitor)
throws IOException, CancelledException { throws IOException, CancelledException {
@ -1755,7 +1771,7 @@ public class BufferMgr {
} }
if (monitor == null) { if (monitor == null) {
monitor = TaskMonitorAdapter.DUMMY_MONITOR; monitor = TaskMonitor.DUMMY;
} }
boolean oldCancelState = monitor.isCancelEnabled(); boolean oldCancelState = monitor.isCancelEnabled();
@ -1840,7 +1856,7 @@ public class BufferMgr {
} }
if (monitor == null) { if (monitor == null) {
monitor = TaskMonitorAdapter.DUMMY_MONITOR; monitor = TaskMonitor.DUMMY;
} }
int indexCnt = indexProvider.getIndexCount(); int indexCnt = indexProvider.getIndexCount();
@ -1871,7 +1887,7 @@ public class BufferMgr {
* Write all changes to the specified outFile * Write all changes to the specified outFile
* @param outFile output buffer file * @param outFile output buffer file
* @param monitor task monitor * @param monitor task monitor
* @throws IOException * @throws IOException if IO error occurs
* @throws CancelledException thrown if task cancelled * @throws CancelledException thrown if task cancelled
*/ */
private void doSave(BufferFile outFile, TaskMonitor monitor) private void doSave(BufferFile outFile, TaskMonitor monitor)
@ -1880,7 +1896,7 @@ public class BufferMgr {
int preSaveCnt = outFile.getIndexCount(); int preSaveCnt = outFile.getIndexCount();
if (monitor == null) { if (monitor == null) {
monitor = TaskMonitorAdapter.DUMMY_MONITOR; monitor = TaskMonitor.DUMMY;
} }
monitor.initialize(indexCnt); monitor.initialize(indexCnt);
monitor.setMessage("Saving file..."); monitor.setMessage("Saving file...");

View file

@ -143,34 +143,22 @@ public class DataBuffer implements Buffer, Externalizable {
empty = state; empty = state;
} }
/*
* @see ghidra.framework.store.Buffer#length()
*/
@Override @Override
public int length() { public int length() {
return data.length; return data.length;
} }
/*
* @see ghidra.framework.store.Buffer#get(int, byte[], int, int)
*/
@Override @Override
public void get(int offset, byte[] bytes, int dataOffset, int length) public void get(int offset, byte[] bytes, int dataOffset, int length)
throws ArrayIndexOutOfBoundsException { throws ArrayIndexOutOfBoundsException {
System.arraycopy(data, offset, bytes, dataOffset, length); System.arraycopy(data, offset, bytes, dataOffset, length);
} }
/*
* @see ghidra.framework.store.Buffer#get(int, byte[])
*/
@Override @Override
public void get(int offset, byte[] bytes) { public void get(int offset, byte[] bytes) {
System.arraycopy(data, offset, bytes, 0, bytes.length); System.arraycopy(data, offset, bytes, 0, bytes.length);
} }
/*
* @see ghidra.framework.store.Buffer#get(int, int)
*/
@Override @Override
public byte[] get(int offset, int length) throws ArrayIndexOutOfBoundsException { public byte[] get(int offset, int length) throws ArrayIndexOutOfBoundsException {
byte[] bytes = new byte[length]; byte[] bytes = new byte[length];
@ -178,34 +166,22 @@ public class DataBuffer implements Buffer, Externalizable {
return bytes; return bytes;
} }
/*
* @see ghidra.framework.store.Buffer#getByte(int)
*/
@Override @Override
public byte getByte(int offset) { public byte getByte(int offset) {
return data[offset]; return data[offset];
} }
/*
* @see ghidra.framework.store.Buffer#getInt(int)
*/
@Override @Override
public int getInt(int offset) { public int getInt(int offset) {
return ((data[offset] & 0xff) << 24) | ((data[++offset] & 0xff) << 16) | return ((data[offset] & 0xff) << 24) | ((data[++offset] & 0xff) << 16) |
((data[++offset] & 0xff) << 8) | (data[++offset] & 0xff); ((data[++offset] & 0xff) << 8) | (data[++offset] & 0xff);
} }
/*
* @see ghidra.framework.store.Buffer#getShort(int)
*/
@Override @Override
public short getShort(int offset) { public short getShort(int offset) {
return (short) (((data[offset] & 0xff) << 8) | (data[++offset] & 0xff)); return (short) (((data[offset] & 0xff) << 8) | (data[++offset] & 0xff));
} }
/*
* @see ghidra.framework.store.Buffer#getLong(int)
*/
@Override @Override
public long getLong(int offset) { public long getLong(int offset) {
return (((long) data[offset] & 0xff) << 56) | (((long) data[++offset] & 0xff) << 48) | return (((long) data[offset] & 0xff) << 56) | (((long) data[++offset] & 0xff) << 48) |
@ -214,9 +190,6 @@ public class DataBuffer implements Buffer, Externalizable {
(((long) data[++offset] & 0xff) << 8) | ((long) data[++offset] & 0xff); (((long) data[++offset] & 0xff) << 8) | ((long) data[++offset] & 0xff);
} }
/*
* @see ghidra.framework.store.Buffer#put(int, byte[], int, int)
*/
@Override @Override
public int put(int offset, byte[] bytes, int dataOffset, int length) { public int put(int offset, byte[] bytes, int dataOffset, int length) {
dirty = true; dirty = true;
@ -224,9 +197,6 @@ public class DataBuffer implements Buffer, Externalizable {
return offset + length; return offset + length;
} }
/*
* @see ghidra.framework.store.Buffer#put(int, byte[])
*/
@Override @Override
public int put(int offset, byte[] bytes) { public int put(int offset, byte[] bytes) {
dirty = true; dirty = true;
@ -234,9 +204,6 @@ public class DataBuffer implements Buffer, Externalizable {
return offset + bytes.length; return offset + bytes.length;
} }
/*
* @see ghidra.framework.store.Buffer#putByte(int, byte)
*/
@Override @Override
public int putByte(int offset, byte b) { public int putByte(int offset, byte b) {
dirty = true; dirty = true;
@ -244,9 +211,6 @@ public class DataBuffer implements Buffer, Externalizable {
return ++offset; return ++offset;
} }
/*
* @see ghidra.framework.store.Buffer#putInt(int, int)
*/
@Override @Override
public int putInt(int offset, int v) { public int putInt(int offset, int v) {
dirty = true; dirty = true;
@ -257,9 +221,6 @@ public class DataBuffer implements Buffer, Externalizable {
return ++offset; return ++offset;
} }
/*
* @see ghidra.framework.store.Buffer#putShort(int, short)
*/
@Override @Override
public int putShort(int offset, short v) { public int putShort(int offset, short v) {
dirty = true; dirty = true;
@ -268,9 +229,6 @@ public class DataBuffer implements Buffer, Externalizable {
return ++offset; return ++offset;
} }
/*
* @see ghidra.framework.store.Buffer#putLong(int, long)
*/
@Override @Override
public int putLong(int offset, long v) { public int putLong(int offset, long v) {
dirty = true; dirty = true;
@ -380,9 +338,8 @@ public class DataBuffer implements Buffer, Externalizable {
int compressedDataOffset = 0; int compressedDataOffset = 0;
while (!deflate.finished() && compressedDataOffset < compressedData.length) { while (!deflate.finished() && compressedDataOffset < compressedData.length) {
compressedDataOffset += compressedDataOffset += deflate.deflate(compressedData, compressedDataOffset,
deflate.deflate(compressedData, compressedDataOffset, compressedData.length - compressedData.length - compressedDataOffset, Deflater.SYNC_FLUSH);
compressedDataOffset, Deflater.SYNC_FLUSH);
} }
if (!deflate.finished()) { if (!deflate.finished()) {
@ -423,6 +380,30 @@ public class DataBuffer implements Buffer, Externalizable {
} }
} }
/**
* Perform an unsigned data comparison
* @param otherData other data to be compared
* @param offset offset within this buffer
* @param len length of data within this buffer
* @return unsigned comparison result
* @throws ArrayIndexOutOfBoundsException if specified region is not
* contained within this buffer.
*/
public int unsignedCompareTo(byte[] otherData, int offset, int len) {
int otherLen = otherData.length;
int otherOffset = 0;
int n = Math.min(len, otherLen);
while (n-- != 0) {
int b = data[offset++] & 0xff;
int otherByte = otherData[otherOffset++] & 0xff;
if (b != otherByte) {
return b - otherByte;
}
}
return len - otherLen;
}
/** /**
* Inflate compressedData into a properly sized data array. * Inflate compressedData into a properly sized data array.
* @param compressedData array containing compressed data * @param compressedData array containing compressed data

View file

@ -1,140 +0,0 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.NoSuchElementException;
import org.junit.*;
import generic.test.AbstractGenericTest;
import ghidra.util.LongIterator;
public class DBFieldMapTest extends AbstractGenericTest {
private DBFieldMap map;
public DBFieldMapTest() {
}
/*
* @see TestCase#setUp()
*/
@Before
public void setUp() throws Exception {
map = new DBFieldMap(StringField.class, 1);
}
/*
* @see TestCase#tearDown()
*/
@After
public void tearDown() throws Exception {
if (map != null) {
map.dispose();
}
}
private void addEntries() {
map.addEntry(new StringField("f3"), 5);
map.addEntry(new StringField("f2"), 3);
map.addEntry(new StringField("f1"), 1);
map.addEntry(new StringField("f2"), 2);
map.addEntry(new StringField("f4"), 6);
map.addEntry(new StringField("f3"), 4);
}
@Test
public void testAddEntry() {
addEntries();
assertTrue(map.hasEntry(new StringField("f1"), 1));
assertTrue(map.hasEntry(new StringField("f2"), 2));
assertTrue(map.hasEntry(new StringField("f2"), 3));
assertTrue(map.hasEntry(new StringField("f3"), 4));
assertTrue(map.hasEntry(new StringField("f3"), 5));
assertTrue(map.hasEntry(new StringField("f4"), 6));
}
@Test
public void testDeleteEntry() {
addEntries();
map.deleteEntry(new StringField("f2"), 2);
map.deleteEntry(new StringField("f3"), 4);
assertTrue(map.hasEntry(new StringField("f1"), 1));
assertTrue(!map.hasEntry(new StringField("f2"), 2));
assertTrue(map.hasEntry(new StringField("f2"), 3));
assertTrue(!map.hasEntry(new StringField("f3"), 4));
assertTrue(map.hasEntry(new StringField("f3"), 5));
assertTrue(map.hasEntry(new StringField("f4"), 6));
}
@Test
public void testIterator() {
addEntries();
LongIterator iter = map.iterator();
assertEquals(1, iter.next());
assertEquals(2, iter.next());
assertEquals(3, iter.next());
assertEquals(4, iter.next());
assertEquals(5, iter.next());
assertEquals(6, iter.next());
try {
iter.next();
Assert.fail();
}
catch (NoSuchElementException e) {
// expected
}
assertEquals(6, iter.previous());
assertEquals(5, iter.previous());
assertEquals(4, iter.previous());
assertEquals(3, iter.previous());
assertEquals(2, iter.previous());
assertEquals(1, iter.previous());
try {
iter.previous();
Assert.fail();
}
catch (NoSuchElementException e) {
// expected
}
assertEquals(1, iter.next());
assertEquals(2, iter.next());
assertEquals(3, iter.next());
assertEquals(4, iter.next());
assertEquals(4, iter.previous());
assertEquals(3, iter.previous());
assertEquals(2, iter.previous());
assertEquals(1, iter.previous());
}
}

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -15,8 +15,7 @@
*/ */
package db; package db;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.*;
import static org.junit.Assert.assertTrue;
import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
@ -27,6 +26,7 @@ import org.junit.*;
import db.buffers.*; import db.buffers.*;
import generic.test.AbstractGenericTest; import generic.test.AbstractGenericTest;
import ghidra.util.exception.CancelledException; import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
import utilities.util.FileUtilities; import utilities.util.FileUtilities;
public class DBIndexedTableTest extends AbstractGenericTest { public class DBIndexedTableTest extends AbstractGenericTest {
@ -130,7 +130,7 @@ public class DBIndexedTableTest extends AbstractGenericTest {
return recs; return recs;
} }
private long[] matchingKeys(Record[] recs, int columnIx, Record matchRec) { private Field[] matchingKeys(Record[] recs, int columnIx, Record matchRec) {
ArrayList<Record> recList = new ArrayList<>(); ArrayList<Record> recList = new ArrayList<>();
Field f = matchRec.getField(columnIx); Field f = matchRec.getField(columnIx);
for (Record rec : recs) { for (Record rec : recs) {
@ -138,12 +138,12 @@ public class DBIndexedTableTest extends AbstractGenericTest {
recList.add(rec); recList.add(rec);
} }
} }
long[] keys = new long[recList.size()]; Field[] keys = new Field[recList.size()];
Iterator<Record> iter = recList.iterator(); Iterator<Record> iter = recList.iterator();
int i = 0; int i = 0;
while (iter.hasNext()) { while (iter.hasNext()) {
Record rec = iter.next(); Record rec = iter.next();
keys[i++] = rec.getKey(); keys[i++] = rec.getKeyField();
} }
Arrays.sort(keys); Arrays.sort(keys);
return keys; return keys;
@ -156,15 +156,15 @@ public class DBIndexedTableTest extends AbstractGenericTest {
saveAsAndReopen(dbName); saveAsAndReopen(dbName);
} }
Table table = dbh.getTable(table1Name); Table table = dbh.getTable(table1Name);
int[] indexedColumns = table.getIndexedColumns();
assertEquals(table.getSchema().getFieldClasses().length, indexedColumns.length);
int step = recordCnt / findCnt; int step = recordCnt / findCnt;
for (int n = 0; n < indexedColumns.length; n++) { for (int indexColumn : table.getIndexedColumns()) {
for (int i = 0; i < recordCnt; i += step) { for (int i = 0; i < recordCnt; i += step) {
long[] keys = table.findRecords(recs[i].getField(n), n); Field[] keys = table.findRecords(recs[i].getField(indexColumn), indexColumn);
Arrays.sort(keys); Arrays.sort(keys);
assertTrue(Arrays.equals(matchingKeys(recs, n, recs[i]), keys)); assertTrue(Arrays.equals(matchingKeys(recs, indexColumn, recs[i]), keys));
assertEquals(keys.length, table.getMatchingRecordCount(recs[i].getField(n), n)); assertEquals(keys.length,
table.getMatchingRecordCount(recs[i].getField(indexColumn), indexColumn));
} }
} }
} }
@ -181,11 +181,6 @@ public class DBIndexedTableTest extends AbstractGenericTest {
dbh.undo(); dbh.undo();
dbh.redo(); dbh.redo();
int[] indexedColumns = table.getIndexedColumns();
assertEquals(table.getSchema().getFieldClasses().length, indexedColumns.length);
int max = indexedColumns.length > 3 ? 3 : indexedColumns.length;
for (int n = 0; n < max; n++) {
long startKey = 1500L; long startKey = 1500L;
long minKey = 100L; long minKey = 100L;
long maxKey = 5000L; long maxKey = 5000L;
@ -211,7 +206,6 @@ public class DBIndexedTableTest extends AbstractGenericTest {
assertTrue(!iter.hasPrevious()); assertTrue(!iter.hasPrevious());
assertTrue(!iter.hasNext()); assertTrue(!iter.hasNext());
} }
}
@Test @Test
public void testFindRecordsSmallVLR() throws IOException { public void testFindRecordsSmallVLR() throws IOException {
@ -260,12 +254,15 @@ public class DBIndexedTableTest extends AbstractGenericTest {
// Find string and binary columns // Find string and binary columns
int strColumn = -1; int strColumn = -1;
int binColumn = -1; int binColumn = -1;
Class<?>[] fieldClasses = table.getSchema().getFieldClasses(); Field[] fields = table.getSchema().getFields();
for (int i = 0; i < fieldClasses.length; i++) { for (int i = 0; i < fields.length; i++) {
if (fieldClasses[i].equals(StringField.class)) { if (!fields[i].isVariableLength()) {
continue;
}
if (fields[i] instanceof StringField) {
strColumn = i; strColumn = i;
} }
else if (fieldClasses[i].equals(BinaryField.class)) { else if (fields[i] instanceof BinaryField) {
binColumn = i; binColumn = i;
} }
} }
@ -302,9 +299,7 @@ public class DBIndexedTableTest extends AbstractGenericTest {
assertEquals(recordCnt, table.getRecordCount()); assertEquals(recordCnt, table.getRecordCount());
int[] indexedColumns = table.getIndexedColumns(); for (int colIx : table.getIndexedColumns()) {
assertEquals(table.getSchema().getFieldClasses().length, indexedColumns.length);
for (int colIx : indexedColumns) {
Arrays.sort(recs, new RecColumnComparator(colIx)); Arrays.sort(recs, new RecColumnComparator(colIx));
@ -319,8 +314,8 @@ public class DBIndexedTableTest extends AbstractGenericTest {
// Forward iteration (start in middle - specify primary key) // Forward iteration (start in middle - specify primary key)
recIx = recordCnt / 2; recIx = recordCnt / 2;
iter = iter = table.indexIteratorBefore(colIx, recs[recIx].getField(colIx),
table.indexIteratorBefore(colIx, recs[recIx].getField(colIx), recs[recIx].getKey()); recs[recIx].getKeyField());
while (iter.hasNext()) { while (iter.hasNext()) {
Record rec = iter.next(); Record rec = iter.next();
assertEquals(recs[recIx++], rec); assertEquals(recs[recIx++], rec);
@ -329,8 +324,8 @@ public class DBIndexedTableTest extends AbstractGenericTest {
// Reverse iteration (end - specify primary key) // Reverse iteration (end - specify primary key)
recIx = recordCnt - 1; recIx = recordCnt - 1;
iter = iter = table.indexIteratorAfter(colIx, recs[recIx].getField(colIx),
table.indexIteratorAfter(colIx, recs[recIx].getField(colIx), recs[recIx].getKey()); recs[recIx].getKeyField());
while (iter.hasPrevious()) { while (iter.hasPrevious()) {
Record rec = iter.previous(); Record rec = iter.previous();
assertEquals(recs[recIx--], rec); assertEquals(recs[recIx--], rec);
@ -339,8 +334,8 @@ public class DBIndexedTableTest extends AbstractGenericTest {
// Reverse iteration (start in middle - specify primary key) // Reverse iteration (start in middle - specify primary key)
recIx = recordCnt / 2; recIx = recordCnt / 2;
iter = iter = table.indexIteratorAfter(colIx, recs[recIx].getField(colIx),
table.indexIteratorAfter(colIx, recs[recIx].getField(colIx), recs[recIx].getKey()); recs[recIx].getKeyField());
while (iter.hasPrevious()) { while (iter.hasPrevious()) {
Record rec = iter.previous(); Record rec = iter.previous();
assertEquals(recs[recIx--], rec); assertEquals(recs[recIx--], rec);
@ -437,9 +432,7 @@ public class DBIndexedTableTest extends AbstractGenericTest {
assertEquals(recordCnt, table.getRecordCount()); assertEquals(recordCnt, table.getRecordCount());
} }
int[] indexedColumns = table.getIndexedColumns(); for (int colIx : table.getIndexedColumns()) {
assertEquals(table.getSchema().getFieldClasses().length, indexedColumns.length);
for (int colIx : indexedColumns) {
Arrays.sort(recs, new RecColumnComparator(colIx)); Arrays.sort(recs, new RecColumnComparator(colIx));
int recIx; int recIx;
@ -491,7 +484,7 @@ public class DBIndexedTableTest extends AbstractGenericTest {
startIx = 0; startIx = 0;
recIx = findStart(recs, startIx, colIx); recIx = findStart(recs, startIx, colIx);
iter = table.indexIteratorBefore(colIx, recs[startIx].getField(colIx), iter = table.indexIteratorBefore(colIx, recs[startIx].getField(colIx),
recs[startIx].getKey()); recs[startIx].getKeyField());
while (iter.hasNext()) { while (iter.hasNext()) {
Record rec = iter.next(); Record rec = iter.next();
assertEquals(recs[recIx++], rec); assertEquals(recs[recIx++], rec);
@ -502,7 +495,7 @@ public class DBIndexedTableTest extends AbstractGenericTest {
startIx = 0; startIx = 0;
recIx = findStart(recs, startIx, colIx); recIx = findStart(recs, startIx, colIx);
iter = table.indexIteratorBefore(colIx, recs[startIx].getField(colIx), iter = table.indexIteratorBefore(colIx, recs[startIx].getField(colIx),
recs[startIx].getKey()); recs[startIx].getKeyField());
assertTrue(!iter.hasPrevious()); assertTrue(!iter.hasPrevious());
// Forward iteration (before first) // Forward iteration (before first)
@ -521,14 +514,14 @@ public class DBIndexedTableTest extends AbstractGenericTest {
// Forward iteration (end - specify primary key) // Forward iteration (end - specify primary key)
recIx = recordCnt - 1; recIx = recordCnt - 1;
iter = iter = table.indexIteratorAfter(colIx, recs[recIx].getField(colIx),
table.indexIteratorAfter(colIx, recs[recIx].getField(colIx), recs[recIx].getKey()); recs[recIx].getKeyField());
assertTrue(!iter.hasNext()); assertTrue(!iter.hasNext());
// Backward iteration (end - specify primary key) // Backward iteration (end - specify primary key)
recIx = recordCnt - 1; recIx = recordCnt - 1;
iter = iter = table.indexIteratorAfter(colIx, recs[recIx].getField(colIx),
table.indexIteratorAfter(colIx, recs[recIx].getField(colIx), recs[recIx].getKey()); recs[recIx].getKeyField());
while (iter.hasPrevious()) { while (iter.hasPrevious()) {
Record rec = iter.previous(); Record rec = iter.previous();
assertEquals(recs[recIx--], rec); assertEquals(recs[recIx--], rec);
@ -551,8 +544,8 @@ public class DBIndexedTableTest extends AbstractGenericTest {
// Forward iteration (start in middle - specify primary key) // Forward iteration (start in middle - specify primary key)
recIx = recordCnt / 2; recIx = recordCnt / 2;
iter = iter = table.indexIteratorBefore(colIx, recs[recIx].getField(colIx),
table.indexIteratorBefore(colIx, recs[recIx].getField(colIx), recs[recIx].getKey()); recs[recIx].getKeyField());
while (iter.hasNext()) { while (iter.hasNext()) {
Record rec = iter.next(); Record rec = iter.next();
assertEquals(recs[recIx++], rec); assertEquals(recs[recIx++], rec);
@ -561,8 +554,8 @@ public class DBIndexedTableTest extends AbstractGenericTest {
// Backward iteration (start in middle - specify primary key) // Backward iteration (start in middle - specify primary key)
recIx = recordCnt / 2; recIx = recordCnt / 2;
iter = iter = table.indexIteratorAfter(colIx, recs[recIx].getField(colIx),
table.indexIteratorAfter(colIx, recs[recIx].getField(colIx), recs[recIx].getKey()); recs[recIx].getKeyField());
while (iter.hasPrevious()) { while (iter.hasPrevious()) {
Record rec = iter.previous(); Record rec = iter.previous();
assertEquals(recs[recIx--], rec); assertEquals(recs[recIx--], rec);
@ -943,7 +936,7 @@ public class DBIndexedTableTest extends AbstractGenericTest {
* @throws IOException * @throws IOException
*/ */
private void deleteIteratedIndexFields(int recordCnt, int testColIx, long keyIncrement, private void deleteIteratedIndexFields(int recordCnt, int testColIx, long keyIncrement,
int varDataSize) throws IOException { int varDataSize) throws Exception {
Record[] recs = null; Record[] recs = null;
if (keyIncrement == 0) { if (keyIncrement == 0) {
@ -989,6 +982,10 @@ public class DBIndexedTableTest extends AbstractGenericTest {
assertEquals(fieldCnt, cnt); assertEquals(fieldCnt, cnt);
assertEquals(0, table.getRecordCount()); assertEquals(0, table.getRecordCount());
} }
catch (Exception e) {
e.printStackTrace();
throw e;
}
finally { finally {
dbh.deleteTable(table1Name); dbh.deleteTable(table1Name);
dbh.endTransaction(txId, true); dbh.endTransaction(txId, true);
@ -1066,7 +1063,7 @@ public class DBIndexedTableTest extends AbstractGenericTest {
private class RecColumnComparator implements Comparator<Record> { private class RecColumnComparator implements Comparator<Record> {
int columnIx; final int columnIx;
RecColumnComparator(int columnIx) { RecColumnComparator(int columnIx) {
this.columnIx = columnIx; this.columnIx = columnIx;
@ -1160,12 +1157,12 @@ public class DBIndexedTableTest extends AbstractGenericTest {
public void testRecordIteratorExtents() throws IOException { public void testRecordIteratorExtents() throws IOException {
Record[] recs = null; Record[] recs = null;
recs = createOrderedRecordRange(DBTestUtils.SINGLE_BYTE, 30, 2, 1); recs = createOrderedRecordRange(DBTestUtils.SINGLE_SHORT, 30, 2, 1);
Table table = dbh.getTable(table1Name); Table table = dbh.getTable(table1Name);
assertEquals(recs.length, table.getRecordCount()); assertEquals(recs.length, table.getRecordCount());
int[] indexedColumns = table.getIndexedColumns(); int[] indexedColumns = table.getIndexedColumns();
assertEquals(table.getSchema().getFieldClasses().length, indexedColumns.length); assertEquals(1, indexedColumns.length);
// Backward Range iterator // Backward Range iterator
int colIx = 0; int colIx = 0;
@ -1173,8 +1170,8 @@ public class DBIndexedTableTest extends AbstractGenericTest {
int recIx = recs.length - 1; int recIx = recs.length - 1;
// RecordIterator iter = table.indexIterator(colIx, recs[minIx].getField(colIx), // RecordIterator iter = table.indexIterator(colIx, recs[minIx].getField(colIx),
// recs[maxIx].getField(colIx), false); // recs[maxIx].getField(colIx), false);
Field minField = new ByteField(Byte.MIN_VALUE); Field minField = new ShortField(Short.MIN_VALUE);
Field maxField = new ByteField(Byte.MAX_VALUE); Field maxField = new ShortField(Short.MAX_VALUE);
RecordIterator iter = table.indexIterator(colIx, minField, maxField, false); RecordIterator iter = table.indexIterator(colIx, minField, maxField, false);
while (iter.hasPrevious()) { while (iter.hasPrevious()) {
Record rec = iter.previous(); Record rec = iter.previous();
@ -1202,22 +1199,67 @@ public class DBIndexedTableTest extends AbstractGenericTest {
@Test @Test
public void testRecordIteratorDelete() throws IOException { public void testRecordIteratorDelete() throws IOException {
for (int colIx = 0; colIx < 6; colIx++) { for (int colIx : DBTestUtils.getIndexedColumns(DBTestUtils.ALL_TYPES)) {
deleteIteratedRecords(ITER_REC_CNT, colIx, 1, 1); deleteIteratedRecords(ITER_REC_CNT, colIx, 1, 1);
} }
for (int colIx = 0; colIx < 6; colIx++) { for (int colIx : DBTestUtils.getIndexedColumns(DBTestUtils.ALL_TYPES)) {
deleteIteratedRecords(ITER_REC_CNT, colIx, 0, 1); deleteIteratedRecords(ITER_REC_CNT, colIx, 0, 1);
} }
} }
@Test @Test
public void testIndexFieldIteratorDelete() throws IOException { public void testIndexFieldIteratorDelete() throws Exception {
for (int colIx = 0; colIx < 6; colIx++) { for (int colIx : DBTestUtils.getIndexedColumns(DBTestUtils.ALL_TYPES)) {
deleteIteratedIndexFields(ITER_REC_CNT, colIx, 1, 1); deleteIteratedIndexFields(ITER_REC_CNT, colIx, 1, 1);
} }
for (int colIx = 0; colIx < 6; colIx++) { for (int colIx : DBTestUtils.getIndexedColumns(DBTestUtils.ALL_TYPES)) {
deleteIteratedIndexFields(ITER_REC_CNT, colIx, 0, 1); deleteIteratedIndexFields(ITER_REC_CNT, colIx, 0, 1);
} }
} }
@Test
public void testConsistencyAndIndexRebuild() throws IOException {
Record[] recs = createRandomTableRecords(DBTestUtils.ALL_TYPES, ITER_REC_CNT, 10);
long txId = dbh.startTransaction();
try {
assertTrue(dbh.isConsistent(TaskMonitor.DUMMY));
dbh.rebuild(TaskMonitor.DUMMY);
}
catch (CancelledException e) {
fail("unexpected cancel exception");
}
finally {
dbh.endTransaction(txId, true);
}
Table table = dbh.getTable(table1Name);
for (int colIx : table.getIndexedColumns()) {
Arrays.sort(recs, new RecColumnComparator(colIx));
int recIx = 0;
RecordIterator iter = table.indexIterator(colIx);
while (iter.hasNext()) {
Record rec = iter.next();
assertEquals(recs[recIx++], rec);
}
assertEquals(ITER_REC_CNT, recIx);
}
saveAsAndReopen(dbName);
table = dbh.getTable(table1Name);
for (int colIx : table.getIndexedColumns()) {
Arrays.sort(recs, new RecColumnComparator(colIx));
int recIx = 0;
RecordIterator iter = table.indexIterator(colIx);
while (iter.hasNext()) {
Record rec = iter.next();
assertEquals(recs[recIx++], rec);
}
assertEquals(ITER_REC_CNT, recIx);
}
}
} }

View file

@ -69,8 +69,8 @@ public class DBLongKeyChainedBufferUseTest extends AbstractGenericTest {
long txId = dbh.startTransaction(); long txId = dbh.startTransaction();
Schema schema = new Schema(0, "Enum ID", Schema schema = new Schema(0, "Enum ID",
new Class[] { StringField.class, StringField.class, LongField.class, ByteField.class, new Field[] { StringField.INSTANCE, StringField.INSTANCE, LongField.INSTANCE,
ShortField.class, IntField.class }, ByteField.INSTANCE, ShortField.INSTANCE, IntField.INSTANCE },
new String[] { "str1", "str2", "long", "byte", "short", "int" }); new String[] { "str1", "str2", "long", "byte", "short", "int" });
Table table = dbh.createTable("TABLE1", schema); Table table = dbh.createTable("TABLE1", schema);
@ -108,8 +108,8 @@ public class DBLongKeyChainedBufferUseTest extends AbstractGenericTest {
long txId = dbh.startTransaction(); long txId = dbh.startTransaction();
Schema schema = new Schema(0, "Enum ID", Schema schema = new Schema(0, "Enum ID",
new Class[] { StringField.class, StringField.class, LongField.class, ByteField.class, new Field[] { StringField.INSTANCE, StringField.INSTANCE, LongField.INSTANCE,
ShortField.class, IntField.class }, ByteField.INSTANCE, ShortField.INSTANCE, IntField.INSTANCE },
new String[] { "str1", "str2", "long", "byte", "short", "int" }); new String[] { "str1", "str2", "long", "byte", "short", "int" });
Table table = dbh.createTable("TABLE1", schema); Table table = dbh.createTable("TABLE1", schema);

View file

@ -265,8 +265,7 @@ public class DBTest extends AbstractGenericTest {
for (TableRecord tableRecord : tableRecords) { for (TableRecord tableRecord : tableRecords) {
if (tableRecord.getIndexedColumn() < 0) { if (tableRecord.getIndexedColumn() < 0) {
if (tableCnt > 0) { if (tableCnt > 0) {
Schema schema = lastTable.getSchema(); assertEquals(DBTestUtils.getIndexedColumnCount(tableCnt - 1), indexCnt);
assertEquals(schema.getFieldClasses().length, indexCnt);
} }
String name = "TABLE" + tableCnt; String name = "TABLE" + tableCnt;
lastTable = dbh.getTable(name); lastTable = dbh.getTable(name);
@ -281,7 +280,9 @@ public class DBTest extends AbstractGenericTest {
if (lastTable == null) { if (lastTable == null) {
Assert.fail(); Assert.fail();
} }
assertEquals(indexCnt, tableRecord.getIndexedColumn()); int[] indexedColumns = DBTestUtils.getIndexedColumns(tableCnt - 1);
assertTrue(indexCnt < indexedColumns.length);
assertEquals(indexedColumns[indexCnt], tableRecord.getIndexedColumn());
assertEquals(lastTable.getName(), tableRecord.getName()); assertEquals(lastTable.getName(), tableRecord.getName());
assertEquals(Long.MIN_VALUE, tableRecord.getMaxKey()); assertEquals(Long.MIN_VALUE, tableRecord.getMaxKey());
assertEquals(0, tableRecord.getRecordCount()); assertEquals(0, tableRecord.getRecordCount());
@ -290,8 +291,7 @@ public class DBTest extends AbstractGenericTest {
} }
} }
Schema schema = lastTable.getSchema(); assertEquals(DBTestUtils.getIndexedColumnCount(tableCnt - 1), indexCnt);
assertEquals(schema.getFieldClasses().length, indexCnt);
assertEquals(DBTestUtils.MAX_SCHEMA_TYPE + 1, tableCnt); assertEquals(DBTestUtils.MAX_SCHEMA_TYPE + 1, tableCnt);
} }
@ -387,8 +387,7 @@ public class DBTest extends AbstractGenericTest {
for (TableRecord tableRecord : tableRecords) { for (TableRecord tableRecord : tableRecords) {
if (tableRecord.getIndexedColumn() < 0) { if (tableRecord.getIndexedColumn() < 0) {
if (tableCnt > 0) { if (tableCnt > 0) {
Schema schema = lastTable.getSchema(); assertEquals(DBTestUtils.getIndexedColumnCount(2 * (tableCnt - 1)), indexCnt);
assertEquals(schema.getFieldClasses().length, indexCnt);
} }
String name = "TABLE" + (2 * tableCnt); String name = "TABLE" + (2 * tableCnt);
lastTable = dbh.getTable(name); lastTable = dbh.getTable(name);
@ -403,7 +402,9 @@ public class DBTest extends AbstractGenericTest {
if (lastTable == null) { if (lastTable == null) {
Assert.fail(); Assert.fail();
} }
assertEquals(indexCnt, tableRecord.getIndexedColumn()); int[] indexedColumns = DBTestUtils.getIndexedColumns(2 * (tableCnt - 1));
assertTrue(indexCnt < indexedColumns.length);
assertEquals(indexedColumns[indexCnt], tableRecord.getIndexedColumn());
assertEquals(lastTable.getName(), tableRecord.getName()); assertEquals(lastTable.getName(), tableRecord.getName());
assertEquals(Long.MIN_VALUE, tableRecord.getMaxKey()); assertEquals(Long.MIN_VALUE, tableRecord.getMaxKey());
assertEquals(0, tableRecord.getRecordCount()); assertEquals(0, tableRecord.getRecordCount());
@ -413,7 +414,7 @@ public class DBTest extends AbstractGenericTest {
} }
Schema schema = lastTable.getSchema(); Schema schema = lastTable.getSchema();
assertEquals(schema.getFieldClasses().length, indexCnt); assertEquals(schema.getFields().length, indexCnt);
assertEquals(totalTableCnt, tableCnt); assertEquals(totalTableCnt, tableCnt);
} }

View file

@ -17,6 +17,8 @@ package db;
import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Random; import java.util.Random;
import org.junit.Assert; import org.junit.Assert;
@ -31,50 +33,85 @@ public class DBTestUtils {
// Schema Types // Schema Types
static final int EMPTY = 0; static final int EMPTY = 0;
static final int SINGLE_BYTE = 1; static final int SINGLE_BOOLEAN = 1;
static final int SINGLE_INT = 2; static final int SINGLE_BYTE = 2;
static final int SINGLE_SHORT = 3; static final int SINGLE_INT = 3;
static final int SINGLE_LONG = 4; static final int SINGLE_SHORT = 4;
static final int SINGLE_STRING = 5; static final int SINGLE_LONG = 5;
static final int SINGLE_BINARY = 6; static final int SINGLE_STRING = 6;
static final int ALL_TYPES = 7; static final int SINGLE_BINARY = 7;
static final int SINGLE_FIXED = 8;
static final int ALL_TYPES = 9;
static final int MAX_SCHEMA_TYPE = 7; static final int MAX_SCHEMA_TYPE = 9;
private static Class<?>[][] schemaFields = { {}, // no columns //@formatter:off
{ ByteField.class }, { IntField.class }, { ShortField.class }, { LongField.class }, private static final Field[][] schemaFields = {
{ StringField.class }, { BinaryField.class }, { ByteField.class, IntField.class, {}, // no columns
ShortField.class, LongField.class, StringField.class, BinaryField.class } }; { BooleanField.INSTANCE },
{ ByteField.INSTANCE },
{ IntField.INSTANCE },
{ ShortField.INSTANCE },
{ LongField.INSTANCE },
{ StringField.INSTANCE },
{ BinaryField.INSTANCE },
{ FixedField10.INSTANCE },
{ BooleanField.INSTANCE, ByteField.INSTANCE, IntField.INSTANCE, ShortField.INSTANCE,
LongField.INSTANCE, StringField.INSTANCE, BinaryField.INSTANCE, FixedField10.INSTANCE } };
//@formatter:on
private static String[][] schemaFieldNames = { {}, // no columns private static final int[][] schemaIndexedColumns =
{ "Byte" }, { "Int" }, { "Short" }, { "Long" }, { "String" }, { "Binary" }, { {}, {}, {}, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 2, 3, 4, 5, 6, 7 } };
{ "Byte", "Int", "Short", "Long", "String", "Binary" } };
private static Schema[] longKeySchemas = //@formatter:off
{ new Schema(0, "LongKey", schemaFields[0], schemaFieldNames[0]), private static final String[][] schemaFieldNames = {
new Schema(0, "LongKey", schemaFields[1], schemaFieldNames[1]), {}, // no columns
new Schema(0, "LongKey", schemaFields[2], schemaFieldNames[2]), { "Boolean" }, { "Byte" }, { "Int" }, { "Short" }, { "Long" },
new Schema(0, "LongKey", schemaFields[3], schemaFieldNames[3]), { "String" }, { "Binary" }, { "Fixed" },
new Schema(0, "LongKey", schemaFields[4], schemaFieldNames[4]), { "Boolean", "Byte", "Int", "Short", "Long", "String", "Binary", "Fixed" }
new Schema(0, "LongKey", schemaFields[5], schemaFieldNames[5]), };
new Schema(0, "LongKey", schemaFields[6], schemaFieldNames[6]), //@formatter:on
new Schema(0, "LongKey", schemaFields[7], schemaFieldNames[7]) };
private static Field varKeyType = new BinaryField(); private static final Schema[] longKeySchemas;
private static Class<? extends Field> varKeyClass = varKeyType.getClass(); static {
longKeySchemas = new Schema[MAX_SCHEMA_TYPE + 1];
for (int i = 0; i < longKeySchemas.length; i++) {
longKeySchemas[i] = new Schema(0, "LongKey", schemaFields[i], schemaFieldNames[i]);
}
}
private static Schema[] binaryKeySchemas = private static final Field fixedKeyType = new FixedField10();
{ new Schema(0, varKeyClass, "VarKey", schemaFields[0], schemaFieldNames[0]),
new Schema(0, varKeyClass, "VarKey", schemaFields[1], schemaFieldNames[1]), private static final Schema[] fixedKeySchemas;
new Schema(0, varKeyClass, "VarKey", schemaFields[2], schemaFieldNames[2]), static {
new Schema(0, varKeyClass, "VarKey", schemaFields[3], schemaFieldNames[3]), fixedKeySchemas = new Schema[MAX_SCHEMA_TYPE + 1];
new Schema(0, varKeyClass, "VarKey", schemaFields[4], schemaFieldNames[4]), for (int i = 0; i < fixedKeySchemas.length; i++) {
new Schema(0, varKeyClass, "VarKey", schemaFields[5], schemaFieldNames[5]), fixedKeySchemas[i] =
new Schema(0, varKeyClass, "VarKey", schemaFields[6], schemaFieldNames[6]), new Schema(0, fixedKeyType, "FixedKey", schemaFields[i], schemaFieldNames[i]);
new Schema(0, varKeyClass, "VarKey", schemaFields[7], schemaFieldNames[7]) }; }
}
private static final Field varKeyType = new BinaryField();
private static final Schema[] binaryKeySchemas;
static {
binaryKeySchemas = new Schema[MAX_SCHEMA_TYPE + 1];
for (int i = 0; i < binaryKeySchemas.length; i++) {
binaryKeySchemas[i] =
new Schema(0, varKeyType, "VarKey", schemaFields[i], schemaFieldNames[i]);
}
}
static Random random = new Random(0x123456789L); static Random random = new Random(0x123456789L);
static int[] getIndexedColumns(int schemaType) {
return schemaIndexedColumns[schemaType];
}
static int getIndexedColumnCount(int schemaType) {
return schemaIndexedColumns[schemaType].length;
}
/** /**
* Create a new long-keyed table within the specified database. * Create a new long-keyed table within the specified database.
* @param db database handle * @param db database handle
@ -89,11 +126,8 @@ public class DBTestUtils {
Table t; Table t;
int indexCnt = 0; int indexCnt = 0;
if (createIndex) { if (createIndex) {
indexCnt = schemaFields[schemaType].length; indexCnt = getIndexedColumnCount(schemaType);
int[] indexedColumns = new int[indexCnt]; int[] indexedColumns = getAllowedIndexColumns(schemaFields[schemaType]);
for (int i = 0; i < indexedColumns.length; i++) {
indexedColumns[i] = i;
}
t = db.createTable(name, longKeySchemas[schemaType], indexedColumns); t = db.createTable(name, longKeySchemas[schemaType], indexedColumns);
} }
else { else {
@ -108,6 +142,49 @@ public class DBTestUtils {
return t; return t;
} }
static int[] getAllowedIndexColumns(Field[] columnFields) {
ArrayList<Integer> list = new ArrayList<>();
for (int i = 0; i < columnFields.length; i++) {
if (Field.canIndex(columnFields[i])) {
list.add(i);
}
}
int[] columnIndexes = new int[list.size()];
for (int i = 0; i < columnIndexes.length; i++) {
columnIndexes[i] = list.get(i);
}
return columnIndexes;
}
/**
 * Create a new FixedField-keyed table within the specified database and
 * verify its initial state (name, empty record count, schema, key type).
 * @param db database handle
 * @param name name of table
 * @param schemaType type of schema (use static identifier)
 * @param createIndex all indexable fields will be indexed if true
 * @return Table new table
 * @throws IOException if a database I/O error occurs
 */
static Table createFixedKeyTable(DBHandle db, String name, int schemaType, boolean createIndex)
		throws IOException {
	Table table;
	if (createIndex) {
		int[] indexColumns = getAllowedIndexColumns(schemaFields[schemaType]);
		table = db.createTable(name, fixedKeySchemas[schemaType], indexColumns);
		Assert.assertArrayEquals(schemaIndexedColumns[schemaType], table.getIndexedColumns());
	}
	else {
		table = db.createTable(name, fixedKeySchemas[schemaType]);
		Assert.assertEquals(0, table.getIndexedColumns().length);
	}
	Assert.assertEquals(name, table.getName());
	Assert.assertEquals(Long.MIN_VALUE, table.getMaxKey());
	Assert.assertEquals(0, table.getRecordCount());
	Assert.assertEquals(fixedKeySchemas[schemaType], table.getSchema());
	Assert.assertFalse(table.useLongKeys());
	return table;
}
/** /**
* Create a new BinaryField-keyed table within the specified database. * Create a new BinaryField-keyed table within the specified database.
* @param db database handle * @param db database handle
@ -122,11 +199,7 @@ public class DBTestUtils {
Table t; Table t;
int indexCnt = 0; int indexCnt = 0;
if (createIndex) { if (createIndex) {
indexCnt = schemaFields[schemaType].length; int[] indexedColumns = getAllowedIndexColumns(schemaFields[schemaType]);
int[] indexedColumns = new int[indexCnt];
for (int i = 0; i < indexedColumns.length; i++) {
indexedColumns[i] = i;
}
t = db.createTable(name, binaryKeySchemas[schemaType], indexedColumns); t = db.createTable(name, binaryKeySchemas[schemaType], indexedColumns);
} }
else { else {
@ -181,6 +254,33 @@ public class DBTestUtils {
} }
} }
/**
 * Create a new record keyed by a random FixedField key; on the (rare) event
 * of a random key collision, a fresh key is generated and the attempt is
 * repeated until it succeeds.
 * @param table table to receive the record
 * @param varDataSize size hint for variable-length column data
 * @param doInsert record is inserted into table if true
 * @return Record the newly created record
 * @throws IOException if a database I/O error occurs
 * @throws DuplicateKeyException declared for signature compatibility
 */
static Record createFixedKeyRecord(Table table, int varDataSize, boolean doInsert)
		throws IOException, DuplicateKeyException {
	int keyLength = 10;
	byte[] keyBytes = new byte[keyLength];
	while (true) {
		random.nextBytes(keyBytes);
		Field key = fixedKeyType.newField();
		key.setBinaryData(keyBytes);
		try {
			Record rec = createRecord(table, key, varDataSize, doInsert);
			Assert.assertEquals(key, rec.getKeyField());
			return rec;
		}
		catch (DuplicateKeyException e) {
			// random key collided with an existing record — retry with a new key
		}
	}
}
/** /**
* Create a new random-BinaryField-keyed record. * Create a new random-BinaryField-keyed record.
* @param table * @param table
@ -278,6 +378,31 @@ public class DBTestUtils {
return rec; return rec;
} }
/**
 * Produce a new FixedField whose unsigned value equals the given field's
 * value plus {@code increment}, preserving the field's fixed byte length.
 * @param fixedField source field (must be a FixedField)
 * @param increment amount to add to the field's unsigned value
 * @return new FixedField of the same length holding the sum
 * @throws UnsupportedOperationException if the result does not fit the
 *         fixed length (overflow) or is negative (underflow)
 */
static FixedField addToFixedField(Field fixedField, long increment) {
	FixedField f = (FixedField) fixedField;
	byte[] valueBytes = f.getBinaryData();
	BigInteger v = new BigInteger(1, valueBytes);
	v = v.add(BigInteger.valueOf(increment));
	if (v.signum() < 0) {
		// A negative sum cannot be represented as an unsigned fixed-length value;
		// the old code would have silently zero-padded the two's-complement bytes.
		throw new UnsupportedOperationException("underflow in test data");
	}
	byte[] resultBytes = v.toByteArray();
	if (resultBytes.length > valueBytes.length) {
		// toByteArray() may prepend at most one zero sign byte; any other
		// growth means the magnitude no longer fits the fixed length.
		// (The old code silently dropped low-order bytes in that case.)
		if (resultBytes[0] != 0 || resultBytes.length > valueBytes.length + 1) {
			throw new UnsupportedOperationException("overflow in test data");
		}
		byte[] b = new byte[valueBytes.length];
		System.arraycopy(resultBytes, 1, b, 0, valueBytes.length);
		resultBytes = b;
	}
	else if (resultBytes.length < valueBytes.length) {
		// Left-pad with zeros to restore the fixed length
		byte[] b = new byte[valueBytes.length];
		System.arraycopy(resultBytes, 0, b, valueBytes.length - resultBytes.length,
			resultBytes.length);
		resultBytes = b;
	}
	FixedField r = f.newField();
	r.setBinaryData(resultBytes);
	return r;
}
/** /**
* Create a new record whose value is in the center portion of the valid * Create a new record whose value is in the center portion of the valid
* values range for byte, short, int, or long. * values range for byte, short, int, or long.
@ -359,7 +484,10 @@ public class DBTestUtils {
Field[] fields = rec.getFields(); Field[] fields = rec.getFields();
for (int i = 0; i < fields.length; i++) { for (int i = 0; i < fields.length; i++) {
if (fields[i] instanceof ByteField) { if (fields[i] instanceof BooleanField) {
rec.setBooleanValue(i, (random.nextInt() % 2) == 0);
}
else if (fields[i] instanceof ByteField) {
rec.setByteValue(i, (byte) random.nextInt()); rec.setByteValue(i, (byte) random.nextInt());
} }
else if (fields[i] instanceof ShortField) { else if (fields[i] instanceof ShortField) {
@ -389,7 +517,7 @@ public class DBTestUtils {
} }
} }
else if (fields[i] instanceof BinaryField) { else if (fields[i] instanceof BinaryField) {
int size = varDataSize; int size = fields[i].isVariableLength() ? varDataSize : fields[i].length();
if (size < 0) { if (size < 0) {
size = random.nextInt(6) - 1; size = random.nextInt(6) - 1;
} }

View file

@ -35,18 +35,19 @@ public class TableTest extends AbstractGenericTest {
private static final int BUFFER_SIZE = 256; private static final int BUFFER_SIZE = 256;
private static final int CACHE_SIZE = 4 * 1024 * 1024; private static final int CACHE_SIZE = 4 * 1024 * 1024;
private static final Class<?>[] FIXED_SIZE_SCHEMA_FIELD_CLASSES = private static final Field[] FIXED_SIZE_SCHEMA_FIELDS = new Field[] { LongField.INSTANCE,
new Class[] { LongField.class, IntField.class, ShortField.class }; IntField.INSTANCE, ShortField.INSTANCE, FixedField10.INSTANCE };
private static final Class<?>[] VARIABLE_SIZE_SCHEMA_FIELD_CLASSES = private static final Field[] VARIABLE_SIZE_SCHEMA_FIELDS =
new Class[] { StringField.class, }; new Field[] { StringField.INSTANCE, };
private static final String[] FIXED_SIZE_SCHEMA_COLUMN_NAMES = { "Long1", "Int2", "Short3" }; private static final String[] FIXED_SIZE_SCHEMA_COLUMN_NAMES =
{ "Long1", "Int2", "Short3", "Fixed4" };
private static final String[] VARIABLE_SIZE_SCHEMA_COLUMN_NAMES = { "String" }; private static final String[] VARIABLE_SIZE_SCHEMA_COLUMN_NAMES = { "String" };
private static final Schema FIXED_SIZE_SCHEMA = private static final Schema FIXED_SIZE_SCHEMA =
new Schema(0, "LongKey", FIXED_SIZE_SCHEMA_FIELD_CLASSES, FIXED_SIZE_SCHEMA_COLUMN_NAMES); new Schema(0, "LongKey", FIXED_SIZE_SCHEMA_FIELDS, FIXED_SIZE_SCHEMA_COLUMN_NAMES);
private static final Schema VARIABLE_SIZE_SCHEMA = new Schema(0, "LongKey", private static final Schema VARIABLE_SIZE_SCHEMA =
VARIABLE_SIZE_SCHEMA_FIELD_CLASSES, VARIABLE_SIZE_SCHEMA_COLUMN_NAMES); new Schema(0, "LongKey", VARIABLE_SIZE_SCHEMA_FIELDS, VARIABLE_SIZE_SCHEMA_COLUMN_NAMES);
private static final int BUFFER_COUNT = 5; private static final int BUFFER_COUNT = 5;
private static final int FIRST_KEY = 0; private static final int FIRST_KEY = 0;
private static final int END_KEY = BUFFER_COUNT * 100 - 10; private static final int END_KEY = BUFFER_COUNT * 100 - 10;
@ -272,6 +273,9 @@ public class TableTest extends AbstractGenericTest {
Record rec = schema.createRecord(i * RECORD_KEY_SPACING); Record rec = schema.createRecord(i * RECORD_KEY_SPACING);
if (fixedSize) { if (fixedSize) {
rec.setLongValue(0, i); rec.setLongValue(0, i);
rec.setIntValue(1, i);
rec.setShortValue(2, (short) i);
rec.setField(3, FixedField10.INSTANCE.getMaxValue());
} }
else { else {
rec.setString(0, "abcdef"); rec.setString(0, "abcdef");

View file

@ -600,7 +600,7 @@ public abstract class AbstractLocalFileSystemTest extends AbstractGenericTest {
DBHandle dbh = new DBHandle(); DBHandle dbh = new DBHandle();
long id = dbh.startTransaction(); long id = dbh.startTransaction();
dbh.createTable("test", dbh.createTable("test",
new Schema(0, "key", new Class[] { IntField.class }, new String[] { "dummy" })); new Schema(0, "key", new Field[] { IntField.INSTANCE }, new String[] { "dummy" }));
dbh.endTransaction(id, true); dbh.endTransaction(id, true);
BufferFile bf = BufferFile bf =
fs.createDatabase("/abc", "fred", null, "Database", dbh.getBufferSize(), "bob", null); fs.createDatabase("/abc", "fred", null, "Database", dbh.getBufferSize(), "bob", null);
@ -741,7 +741,7 @@ public abstract class AbstractLocalFileSystemTest extends AbstractGenericTest {
DBHandle dbh = new DBHandle(); DBHandle dbh = new DBHandle();
long id = dbh.startTransaction(); long id = dbh.startTransaction();
dbh.createTable("test", dbh.createTable("test",
new Schema(0, "key", new Class[] { IntField.class }, new String[] { "dummy" })); new Schema(0, "key", new Field[] { IntField.INSTANCE }, new String[] { "dummy" }));
dbh.endTransaction(id, true); dbh.endTransaction(id, true);
BufferFile bf = BufferFile bf =
fs.createDatabase("/abc", "greg", "123", "Database", dbh.getBufferSize(), "test", null); fs.createDatabase("/abc", "greg", "123", "Database", dbh.getBufferSize(), "test", null);
@ -789,7 +789,7 @@ public abstract class AbstractLocalFileSystemTest extends AbstractGenericTest {
DBHandle dbh = new DBHandle(); DBHandle dbh = new DBHandle();
long id = dbh.startTransaction(); long id = dbh.startTransaction();
dbh.createTable("test", dbh.createTable("test",
new Schema(0, "key", new Class[] { IntField.class }, new String[] { "dummy" })); new Schema(0, "key", new Field[] { IntField.INSTANCE }, new String[] { "dummy" }));
dbh.endTransaction(id, true); dbh.endTransaction(id, true);
BufferFile bf = BufferFile bf =
fs.createDatabase("/abc", "greg", "123", "Database", dbh.getBufferSize(), "test", null); fs.createDatabase("/abc", "greg", "123", "Database", dbh.getBufferSize(), "test", null);
@ -933,7 +933,7 @@ public abstract class AbstractLocalFileSystemTest extends AbstractGenericTest {
DBHandle dbh = new DBHandle(); DBHandle dbh = new DBHandle();
long id = dbh.startTransaction(); long id = dbh.startTransaction();
dbh.createTable("test", dbh.createTable("test",
new Schema(0, "key", new Class[] { IntField.class }, new String[] { "dummy" })); new Schema(0, "key", new Field[] { IntField.INSTANCE }, new String[] { "dummy" }));
dbh.endTransaction(id, true); dbh.endTransaction(id, true);
BufferFile bf = fs.createDatabase(folderPath, itemName, fileId, "Database", BufferFile bf = fs.createDatabase(folderPath, itemName, fileId, "Database",
dbh.getBufferSize(), "test", null); dbh.getBufferSize(), "test", null);

View file

@ -35,9 +35,10 @@ public class RecoveryDBTest extends AbstractGenericTest {
private static int RECORD_COUNT = 1000; private static int RECORD_COUNT = 1000;
private static Schema SCHEMA = private static Schema SCHEMA =
new Schema(1, "key", new Class[] { StringField.class }, new String[] { "field1" }); new Schema(1, "key", new Field[] { StringField.INSTANCE }, new String[] { "field1" });
private static final File testDir = new File(AbstractGenericTest.getTestDirectoryPath(), "test"); private static final File testDir =
new File(AbstractGenericTest.getTestDirectoryPath(), "test");
private LocalFileSystem fileSystem; private LocalFileSystem fileSystem;

View file

@ -33,7 +33,7 @@ import utilities.util.FileUtilities;
public class PackedDatabaseTest extends AbstractGenericTest { public class PackedDatabaseTest extends AbstractGenericTest {
private static final Schema TEST_SCHEMA = private static final Schema TEST_SCHEMA =
new Schema(1, "Key", new Class[] { StringField.class }, new String[] { "Col1" }); new Schema(1, "Key", new Field[] { StringField.INSTANCE }, new String[] { "Col1" });
private File packedDbFile; private File packedDbFile;
private PackedDatabase db; private PackedDatabase db;

View file

@ -32,8 +32,9 @@ class OptionsDB extends AbstractOptions {
private static final String PROPERTY_TABLE_NAME = "Property Table"; private static final String PROPERTY_TABLE_NAME = "Property Table";
private final static Schema PROPERTY_SCHEMA = new Schema(0, StringField.class, "Property Name", private final static Schema PROPERTY_SCHEMA = new Schema(0, StringField.INSTANCE,
new Class[] { StringField.class, ByteField.class }, new String[] { "Value", "Type" }); "Property Name", new Field[] { StringField.INSTANCE, ByteField.INSTANCE },
new String[] { "Value", "Type" });
private static final int VALUE_COL = 0; private static final int VALUE_COL = 0;
private static final int TYPE_COL = 1; private static final int TYPE_COL = 1;
@ -81,8 +82,8 @@ class OptionsDB extends AbstractOptions {
throw new IllegalArgumentException("property alteration old-path may not be null"); throw new IllegalArgumentException("property alteration old-path may not be null");
} }
if (path != null && path.endsWith(DELIMITER_STRING)) { if (path != null && path.endsWith(DELIMITER_STRING)) {
throw new IllegalArgumentException("property alteration paths must not end with '" + throw new IllegalArgumentException(
DELIMITER + "': " + path); "property alteration paths must not end with '" + DELIMITER + "': " + path);
} }
} }
@ -118,8 +119,8 @@ class OptionsDB extends AbstractOptions {
String keyName = ((StringField) rec.getKeyField()).getString(); String keyName = ((StringField) rec.getKeyField()).getString();
if (keyName.startsWith(oldSubListPath)) { if (keyName.startsWith(oldSubListPath)) {
iterator.delete(); iterator.delete();
rec.setKey(new StringField(newSubListPath + rec.setKey(
keyName.substring(oldSubListPath.length()))); new StringField(newSubListPath + keyName.substring(oldSubListPath.length())));
list.add(rec); list.add(rec);
} }
else { else {

View file

@ -158,6 +158,7 @@ public class DBObjectCache<T extends DatabaseObject> {
* within the specified keyRanges. * within the specified keyRanges.
* @param keyRanges key ranges to delete * @param keyRanges key ranges to delete
*/ */
//TODO: Discourage large cases by only allowing a single range to be specified
public synchronized void delete(List<KeyRange> keyRanges) { public synchronized void delete(List<KeyRange> keyRanges) {
hardCache.clear(); hardCache.clear();
processQueue(); processQueue();

View file

@ -45,8 +45,11 @@ public class DataTypeArchiveDB extends DomainObjectAdapterDB
* database schema associated with any of the managers. * database schema associated with any of the managers.
* 18-Sep-2008 - version 1 - added fields for synchronizing program data types with project archives. * 18-Sep-2008 - version 1 - added fields for synchronizing program data types with project archives.
* 03-Dec-2009 - version 2 - Added source archive updating (consolidating windows.gdt, clib.gdt, ntddk.gdt) * 03-Dec-2009 - version 2 - Added source archive updating (consolidating windows.gdt, clib.gdt, ntddk.gdt)
* 14-Nov-2019 - version 3 - Corrected fixed length indexing implementation causing
* change in index table low-level storage for newly
* created tables.
*/ */
static final int DB_VERSION = 2; static final int DB_VERSION = 3;
/** /**
* UPGRADE_REQUIRED_BEFORE_VERSION should be changed to DB_VERSION any time the * UPGRADE_REQUIRED_BEFORE_VERSION should be changed to DB_VERSION any time the
@ -76,10 +79,10 @@ public class DataTypeArchiveDB extends DomainObjectAdapterDB
private static final String DEFAULT_POINTER_SIZE = "Default Pointer Size"; private static final String DEFAULT_POINTER_SIZE = "Default Pointer Size";
private final static Class<?>[] COL_CLASS = new Class[] { StringField.class }; private final static Field[] COL_FIELDS = new Field[] { StringField.INSTANCE };
private final static String[] COL_TYPES = new String[] { "Value" }; private final static String[] COL_TYPES = new String[] { "Value" };
private final static Schema SCHEMA = private final static Schema SCHEMA =
new Schema(0, StringField.class, "Key", COL_CLASS, COL_TYPES); new Schema(0, StringField.INSTANCE, "Key", COL_FIELDS, COL_TYPES);
private ProjectDataTypeManager dataTypeManager; private ProjectDataTypeManager dataTypeManager;

View file

@ -30,7 +30,7 @@ import ghidra.program.model.listing.DataTypeArchiveChangeSet;
class DataTypeArchiveDBChangeSet implements DataTypeArchiveChangeSet, DomainObjectDBChangeSet { class DataTypeArchiveDBChangeSet implements DataTypeArchiveChangeSet, DomainObjectDBChangeSet {
private static final Schema STORED_ID_SCHEMA = private static final Schema STORED_ID_SCHEMA =
new Schema(0, "Key", new Class[] { LongField.class }, new String[] { "value" }); new Schema(0, "Key", new Field[] { LongField.INSTANCE }, new String[] { "value" });
private static final String DATATYPE_ADDITIONS = "DataType Additions"; private static final String DATATYPE_ADDITIONS = "DataType Additions";
private static final String DATATYPE_CHANGES = "DataType Changes"; private static final String DATATYPE_CHANGES = "DataType Changes";

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,6 +15,10 @@
*/ */
package ghidra.program.database; package ghidra.program.database;
import java.util.ConcurrentModificationException;
import db.*;
import db.util.ErrorHandler;
import ghidra.program.database.map.AddressMap; import ghidra.program.database.map.AddressMap;
import ghidra.program.database.util.AddressRangeMapDB; import ghidra.program.database.util.AddressRangeMapDB;
import ghidra.program.model.address.*; import ghidra.program.model.address.*;
@ -25,11 +28,6 @@ import ghidra.util.exception.CancelledException;
import ghidra.util.exception.DuplicateNameException; import ghidra.util.exception.DuplicateNameException;
import ghidra.util.task.TaskMonitor; import ghidra.util.task.TaskMonitor;
import java.util.ConcurrentModificationException;
import db.*;
import db.util.ErrorHandler;
public class IntRangeMapDB implements IntRangeMap { public class IntRangeMapDB implements IntRangeMap {
private static final String MY_PREFIX = "IntMap - "; private static final String MY_PREFIX = "IntMap - ";
@ -65,8 +63,8 @@ public class IntRangeMapDB implements IntRangeMap {
DBHandle dbh = program.getDBHandle(); DBHandle dbh = program.getDBHandle();
String tableName = TABLE_PREFIX + mapName; String tableName = TABLE_PREFIX + mapName;
if (dbh.getTable(tableName) != null) { if (dbh.getTable(tableName) != null) {
throw new DuplicateNameException("Address Set Property Map named " + mapName + throw new DuplicateNameException(
" already exists."); "Address Set Property Map named " + mapName + " already exists.");
} }
return new IntRangeMapDB(program, mapName, program, addrMap, lock); return new IntRangeMapDB(program, mapName, program, addrMap, lock);
@ -82,9 +80,8 @@ public class IntRangeMapDB implements IntRangeMap {
this.mapName = mapName; this.mapName = mapName;
this.lock = lock; this.lock = lock;
propertyMap = propertyMap = new AddressRangeMapDB(program.getDBHandle(), program.getAddressMap(),
new AddressRangeMapDB(program.getDBHandle(), program.getAddressMap(), program.getLock(), MY_PREFIX + mapName, errHandler, IntField.INSTANCE, true);
program.getLock(), MY_PREFIX + mapName, errHandler, IntField.class, true);
} }

View file

@ -30,7 +30,8 @@ import ghidra.util.exception.DuplicateNameException;
class OverlaySpaceAdapterDB { class OverlaySpaceAdapterDB {
private static String TABLE_NAME = "Overlay Spaces"; private static String TABLE_NAME = "Overlay Spaces";
static final Schema SCHEMA = new Schema(0, "ID", static final Schema SCHEMA = new Schema(0, "ID",
new Class[] { StringField.class, StringField.class, LongField.class, LongField.class }, new Field[] { StringField.INSTANCE, StringField.INSTANCE, LongField.INSTANCE,
LongField.INSTANCE },
new String[] { "Overlay Space", "Template Space", "Minimum Offset", "Maximum Offset" }); new String[] { "Overlay Space", "Template Space", "Minimum Offset", "Maximum Offset" });
private static final int OV_SPACE_NAME_COL = 0; private static final int OV_SPACE_NAME_COL = 0;

View file

@ -93,8 +93,11 @@ public class ProgramDB extends DomainObjectAdapterDB implements Program, ChangeM
* Read of old symbol data3 format does not require upgrade. * Read of old symbol data3 format does not require upgrade.
* 14-May-2020 - version 21 - added support for overlay mapped blocks and byte mapping * 14-May-2020 - version 21 - added support for overlay mapped blocks and byte mapping
* schemes other than the default 1:1 * schemes other than the default 1:1
* 19-Jun-2020 - version 22 - Corrected fixed length indexing implementation causing
* change in index table low-level storage for newly
* created tables.
*/ */
static final int DB_VERSION = 21; static final int DB_VERSION = 22;
/** /**
* UPGRADE_REQUIRED_BFORE_VERSION should be changed to DB_VERSION anytime the * UPGRADE_REQUIRED_BFORE_VERSION should be changed to DB_VERSION anytime the
@ -133,10 +136,10 @@ public class ProgramDB extends DomainObjectAdapterDB implements Program, ChangeM
private static final String EXECUTE_FORMAT = "Execute Format"; private static final String EXECUTE_FORMAT = "Execute Format";
private static final String IMAGE_OFFSET = "Image Offset"; private static final String IMAGE_OFFSET = "Image Offset";
private final static Class<?>[] COL_CLASS = new Class[] { StringField.class }; private final static Field[] COL_FIELDS = new Field[] { StringField.INSTANCE };
private final static String[] COL_TYPES = new String[] { "Value" }; private final static String[] COL_TYPES = new String[] { "Value" };
private final static Schema SCHEMA = private final static Schema SCHEMA =
new Schema(0, StringField.class, "Key", COL_CLASS, COL_TYPES); new Schema(0, StringField.INSTANCE, "Key", COL_FIELDS, COL_TYPES);
// //
// The numbering of managers controls the order in which they are notified. // The numbering of managers controls the order in which they are notified.

View file

@ -34,10 +34,10 @@ import ghidra.program.model.listing.ProgramChangeSet;
class ProgramDBChangeSet implements ProgramChangeSet, DomainObjectDBChangeSet { class ProgramDBChangeSet implements ProgramChangeSet, DomainObjectDBChangeSet {
private static final Schema STORED_ID_SCHEMA = private static final Schema STORED_ID_SCHEMA =
new Schema(0, "Key", new Class[] { LongField.class }, new String[] { "value" }); new Schema(0, "Key", new Field[] { LongField.INSTANCE }, new String[] { "value" });
private static final Schema STORED_ADDRESS_RANGE_SCHEMA = new Schema(0, "Key", private static final Schema STORED_ADDRESS_RANGE_SCHEMA = new Schema(0, "Key",
new Class[] { LongField.class, LongField.class }, new String[] { "addr1", "addr2" }); new Field[] { LongField.INSTANCE, LongField.INSTANCE }, new String[] { "addr1", "addr2" });
private static final String DATATYPE_ADDITIONS = "DataType Additions"; private static final String DATATYPE_ADDITIONS = "DataType Additions";
private static final String DATATYPE_CHANGES = "DataType Changes"; private static final String DATATYPE_CHANGES = "DataType Changes";

View file

@ -51,7 +51,7 @@ class ProgramUserDataDB extends DomainObjectAdapterDB implements ProgramUserData
* DB_VERSION should be incremented any time a change is made to the overall * DB_VERSION should be incremented any time a change is made to the overall
* database schema associated with any of the managers. * database schema associated with any of the managers.
*/ */
static final int DB_VERSION = 1; static final int DB_VERSION = 2;
/** /**
* UPGRADE_REQUIRED_BFORE_VERSION should be changed to DB_VERSION any time the * UPGRADE_REQUIRED_BFORE_VERSION should be changed to DB_VERSION any time the
@ -59,13 +59,13 @@ class ProgramUserDataDB extends DomainObjectAdapterDB implements ProgramUserData
* until upgrade is performed). It is assumed that read-only mode is supported * until upgrade is performed). It is assumed that read-only mode is supported
* if the data's version is &gt;= UPGRADE_REQUIRED_BEFORE_VERSION and &lt;= DB_VERSION. * if the data's version is &gt;= UPGRADE_REQUIRED_BEFORE_VERSION and &lt;= DB_VERSION.
*/ */
private static final int UPGRADE_REQUIRED_BEFORE_VERSION = 1; private static final int UPGRADE_REQUIRED_BEFORE_VERSION = 2;
private static final String TABLE_NAME = "ProgramUserData"; private static final String TABLE_NAME = "ProgramUserData";
private final static Class<?>[] COL_CLASS = new Class[] { StringField.class }; private final static Field[] COL_FIELDS = new Field[] { StringField.INSTANCE };
private final static String[] COL_NAMES = new String[] { "Value" }; private final static String[] COL_NAMES = new String[] { "Value" };
private final static Schema SCHEMA = private final static Schema SCHEMA =
new Schema(0, StringField.class, "Key", COL_CLASS, COL_NAMES); new Schema(0, StringField.INSTANCE, "Key", COL_FIELDS, COL_NAMES);
private static final int VALUE_COL = 0; private static final int VALUE_COL = 0;
private static final String STORED_DB_VERSION = "DB Version"; private static final String STORED_DB_VERSION = "DB Version";
@ -73,12 +73,12 @@ class ProgramUserDataDB extends DomainObjectAdapterDB implements ProgramUserData
private static final String LANGUAGE_ID = "Language ID"; private static final String LANGUAGE_ID = "Language ID";
private static final String REGISTRY_TABLE_NAME = "PropertyRegistry"; private static final String REGISTRY_TABLE_NAME = "PropertyRegistry";
private final static Class<?>[] REGISTRY_COL_CLASS = private final static Field[] REGISTRY_COL_FIELDS = new Field[] { StringField.INSTANCE,
new Class[] { StringField.class, StringField.class, IntField.class, StringField.class }; StringField.INSTANCE, IntField.INSTANCE, StringField.INSTANCE };
private final static String[] REGISTRY_COL_NAMES = private final static String[] REGISTRY_COL_NAMES =
new String[] { "Owner", "PropertyName", "PropertyType", "SaveableClass" }; new String[] { "Owner", "PropertyName", "PropertyType", "SaveableClass" };
private final static Schema REGISTRY_SCHEMA = private final static Schema REGISTRY_SCHEMA =
new Schema(0, "ID", REGISTRY_COL_CLASS, REGISTRY_COL_NAMES); new Schema(0, "ID", REGISTRY_COL_FIELDS, REGISTRY_COL_NAMES);
private static final int PROPERTY_OWNER_COL = 0; private static final int PROPERTY_OWNER_COL = 0;
private static final int PROPERTY_NAME_COL = 1; private static final int PROPERTY_NAME_COL = 1;
private static final int PROPERTY_TYPE_COL = 2; private static final int PROPERTY_TYPE_COL = 2;
@ -467,7 +467,8 @@ class ProgramUserDataDB extends DomainObjectAdapterDB implements ProgramUserData
Class<?> saveableClass, boolean create) throws PropertyTypeMismatchException { Class<?> saveableClass, boolean create) throws PropertyTypeMismatchException {
try { try {
for (long key : registryTable.findRecords(new StringField(owner), PROPERTY_OWNER_COL)) { for (Field key : registryTable.findRecords(new StringField(owner),
PROPERTY_OWNER_COL)) {
Record rec = registryTable.getRecord(key); Record rec = registryTable.getRecord(key);
if (propertyName.equals(rec.getString(PROPERTY_NAME_COL))) { if (propertyName.equals(rec.getString(PROPERTY_NAME_COL))) {
int type = rec.getIntValue(PROPERTY_TYPE_COL); int type = rec.getIntValue(PROPERTY_TYPE_COL);
@ -573,7 +574,8 @@ class ProgramUserDataDB extends DomainObjectAdapterDB implements ProgramUserData
public synchronized List<PropertyMap> getProperties(String owner) { public synchronized List<PropertyMap> getProperties(String owner) {
List<PropertyMap> list = new ArrayList<PropertyMap>(); List<PropertyMap> list = new ArrayList<PropertyMap>();
try { try {
for (long key : registryTable.findRecords(new StringField(owner), PROPERTY_OWNER_COL)) { for (Field key : registryTable.findRecords(new StringField(owner),
PROPERTY_OWNER_COL)) {
Record rec = registryTable.getRecord(key); Record rec = registryTable.getRecord(key);
list.add(getPropertyMap(rec)); list.add(getPropertyMap(rec));
} }

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,15 +15,14 @@
*/ */
package ghidra.program.database.bookmark; package ghidra.program.database.bookmark;
import ghidra.program.database.map.AddressMap;
import ghidra.program.database.util.EmptyRecordIterator;
import ghidra.program.model.address.*;
import ghidra.util.exception.VersionException;
import java.io.IOException; import java.io.IOException;
import java.util.HashSet; import java.util.HashSet;
import db.*; import db.*;
import ghidra.program.database.map.AddressMap;
import ghidra.program.database.util.EmptyRecordIterator;
import ghidra.program.model.address.*;
import ghidra.util.exception.VersionException;
public class BookmarkDBAdapterV3 extends BookmarkDBAdapter { public class BookmarkDBAdapterV3 extends BookmarkDBAdapter {
@ -35,8 +33,9 @@ public class BookmarkDBAdapterV3 extends BookmarkDBAdapter {
static final int V3_COMMENT_COL = 2; static final int V3_COMMENT_COL = 2;
static final int VERSION = 3; static final int VERSION = 3;
static final Schema V3_SCHEMA = new Schema(VERSION, "ID", new Class[] { LongField.class, static final Schema V3_SCHEMA = new Schema(VERSION, "ID",
StringField.class, StringField.class }, new String[] { "Address", "Category", "Comment" }); new Field[] { LongField.INSTANCE, StringField.INSTANCE, StringField.INSTANCE },
new String[] { "Address", "Category", "Comment" });
static int[] INDEXED_COLUMNS = new int[] { V3_ADDRESS_COL, V3_CATEGORY_COL }; static int[] INDEXED_COLUMNS = new int[] { V3_ADDRESS_COL, V3_CATEGORY_COL };

View file

@ -296,9 +296,8 @@ public class BookmarkDBManager implements BookmarkManager, ErrorHandler, Manager
bm.setComment(comment); bm.setComment(comment);
} }
else { else {
Record rec = Record rec = bookmarkAdapter.createBookmark(typeId, category,
bookmarkAdapter.createBookmark(typeId, category, addrMap.getKey(addr, true), addrMap.getKey(addr, true), comment);
comment);
bm = new BookmarkDB(this, cache, rec); bm = new BookmarkDB(this, cache, rec);
// fire event // fire event
@ -606,8 +605,7 @@ public class BookmarkDBManager implements BookmarkManager, ErrorHandler, Manager
RecordIterator it; RecordIterator it;
try { try {
if (bmt != null && bmt.hasBookmarks()) { if (bmt != null && bmt.hasBookmarks()) {
it = it = bookmarkAdapter.getRecordsByTypeStartingAtAddress(bmt.getTypeId(),
bookmarkAdapter.getRecordsByTypeStartingAtAddress(bmt.getTypeId(),
addrMap.getKey(startAddress, false), forward); addrMap.getKey(startAddress, false), forward);
} }
else { else {
@ -761,11 +759,10 @@ public class BookmarkDBManager implements BookmarkManager, ErrorHandler, Manager
try { try {
Table table = bookmarkAdapter.getTable(typeId); Table table = bookmarkAdapter.getTable(typeId);
if (table != null) { if (table != null) {
DBLongIterator it = DBFieldIterator it = new AddressIndexPrimaryKeyIterator(table,
new AddressIndexPrimaryKeyIterator(table, BookmarkDBAdapter.ADDRESS_COL, BookmarkDBAdapter.ADDRESS_COL, addrMap, set, true);
addrMap, set, true);
while (it.hasNext()) { while (it.hasNext()) {
BookmarkDB bm = (BookmarkDB) getBookmark(it.next()); BookmarkDB bm = (BookmarkDB) getBookmark(it.next().getLongValue());
if (category == null || category.equals(bm.getCategory())) { if (category == null || category.equals(bm.getCategory())) {
doRemoveBookmark(bm); doRemoveBookmark(bm);
} }

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -16,11 +15,10 @@
*/ */
package ghidra.program.database.bookmark; package ghidra.program.database.bookmark;
import ghidra.util.exception.VersionException;
import java.io.IOException; import java.io.IOException;
import db.*; import db.*;
import ghidra.util.exception.VersionException;
abstract class BookmarkTypeDBAdapter { abstract class BookmarkTypeDBAdapter {
@ -28,8 +26,8 @@ abstract class BookmarkTypeDBAdapter {
static final int TYPE_NAME_COL = 0; static final int TYPE_NAME_COL = 0;
static final Schema SCHEMA = new Schema(0, "ID", new Class[] { StringField.class }, static final Schema SCHEMA =
new String[] { "Name" }); new Schema(0, "ID", new Field[] { StringField.INSTANCE }, new String[] { "Name" });
static BookmarkTypeDBAdapter getAdapter(DBHandle dbHandle, int openMode) static BookmarkTypeDBAdapter getAdapter(DBHandle dbHandle, int openMode)
throws VersionException, IOException { throws VersionException, IOException {
@ -58,8 +56,8 @@ abstract class BookmarkTypeDBAdapter {
return new BookmarkTypeDBAdapterNoTable(dbHandle); return new BookmarkTypeDBAdapterNoTable(dbHandle);
} }
private static BookmarkTypeDBAdapter upgrade(DBHandle dbHandle, BookmarkTypeDBAdapter oldAdapter) private static BookmarkTypeDBAdapter upgrade(DBHandle dbHandle,
throws VersionException, IOException { BookmarkTypeDBAdapter oldAdapter) throws VersionException, IOException {
return new BookmarkTypeDBAdapterV0(dbHandle, true); return new BookmarkTypeDBAdapterV0(dbHandle, true);
} }

View file

@ -32,8 +32,8 @@ abstract class CommentHistoryAdapter {
static final String COMMENT_HISTORY_TABLE_NAME = "Comment History"; static final String COMMENT_HISTORY_TABLE_NAME = "Comment History";
static final Schema COMMENT_HISTORY_SCHEMA = new Schema(0, "Key", static final Schema COMMENT_HISTORY_SCHEMA = new Schema(0, "Key",
new Class[] { LongField.class, ByteField.class, IntField.class, IntField.class, new Field[] { LongField.INSTANCE, ByteField.INSTANCE, IntField.INSTANCE, IntField.INSTANCE,
StringField.class, StringField.class, LongField.class }, StringField.INSTANCE, StringField.INSTANCE, LongField.INSTANCE },
new String[] { "Address", "Comment Type", "Pos1", "Pos2", "String Data", "User", "Date" }); new String[] { "Address", "Comment Type", "Pos1", "Pos2", "String Data", "User", "Date" });
static final int HISTORY_ADDRESS_COL = 0; static final int HISTORY_ADDRESS_COL = 0;

View file

@ -57,8 +57,8 @@ abstract class CommentsDBAdapter {
NAMES[REPEATABLE_COMMENT_COL] = "Repeatable"; NAMES[REPEATABLE_COMMENT_COL] = "Repeatable";
COMMENTS_SCHEMA = COMMENTS_SCHEMA =
new Schema(1, "Address", new Class[] { StringField.class, StringField.class, new Schema(1, "Address", new Field[] { StringField.INSTANCE, StringField.INSTANCE,
StringField.class, StringField.class, StringField.class }, NAMES); StringField.INSTANCE, StringField.INSTANCE, StringField.INSTANCE }, NAMES);
} }
// /** comment type for end of line */ // /** comment type for end of line */
@ -110,8 +110,8 @@ abstract class CommentsDBAdapter {
} }
private static CommentsDBAdapter upgrade(DBHandle dbHandle, AddressMap addrMap, private static CommentsDBAdapter upgrade(DBHandle dbHandle, AddressMap addrMap,
CommentsDBAdapter oldAdapter, TaskMonitor monitor) throws VersionException, CommentsDBAdapter oldAdapter, TaskMonitor monitor)
IOException, CancelledException { throws VersionException, IOException, CancelledException {
AddressMap oldAddrMap = addrMap.getOldAddressMap(); AddressMap oldAddrMap = addrMap.getOldAddressMap();

View file

@ -103,6 +103,8 @@ class DataDB extends CodeUnitDB implements Data {
DataType dt; DataType dt;
if (rec != null) { if (rec != null) {
// ensure that record provided corresponds to a DataDB record // ensure that record provided corresponds to a DataDB record
// since following an undo/redo the record could correspond to
// a different type of code unit (hopefully with a different record schema)
if (!rec.hasSameSchema(DataDBAdapter.DATA_SCHEMA)) { if (!rec.hasSameSchema(DataDBAdapter.DATA_SCHEMA)) {
return true; return true;
} }

View file

@ -1,6 +1,5 @@
/* ### /* ###
* IP: GHIDRA * IP: GHIDRA
* REVIEWED: YES
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -19,6 +18,9 @@
*/ */
package ghidra.program.database.code; package ghidra.program.database.code;
import java.io.IOException;
import db.*;
import ghidra.program.database.map.AddressKeyIterator; import ghidra.program.database.map.AddressKeyIterator;
import ghidra.program.database.map.AddressMap; import ghidra.program.database.map.AddressMap;
import ghidra.program.model.address.Address; import ghidra.program.model.address.Address;
@ -27,10 +29,6 @@ import ghidra.util.exception.CancelledException;
import ghidra.util.exception.VersionException; import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor; import ghidra.util.task.TaskMonitor;
import java.io.IOException;
import db.*;
/** /**
* Adapter to access the Data table. * Adapter to access the Data table.
*/ */
@ -38,7 +36,7 @@ abstract class DataDBAdapter {
static final String DATA_TABLE_NAME = "Data"; static final String DATA_TABLE_NAME = "Data";
static final Schema DATA_SCHEMA = new Schema(0, "Address", new Class[] { LongField.class }, static final Schema DATA_SCHEMA = new Schema(0, "Address", new Field[] { LongField.INSTANCE },
new String[] { "Data Type ID" }); new String[] { "Data Type ID" });
static final int DATA_TYPE_ID_COL = 0; static final int DATA_TYPE_ID_COL = 0;
@ -75,8 +73,8 @@ abstract class DataDBAdapter {
} }
private static DataDBAdapter upgrade(DBHandle dbHandle, AddressMap addrMap, private static DataDBAdapter upgrade(DBHandle dbHandle, AddressMap addrMap,
DataDBAdapter oldAdapter, TaskMonitor monitor) throws VersionException, IOException, DataDBAdapter oldAdapter, TaskMonitor monitor)
CancelledException { throws VersionException, IOException, CancelledException {
AddressMap oldAddrMap = addrMap.getOldAddressMap(); AddressMap oldAddrMap = addrMap.getOldAddressMap();

Some files were not shown because too many files have changed in this diff Show more