GT-3294 Added support for DB FixedField with improved indexing.

This commit is contained in:
ghidra1 2020-02-24 18:02:01 -05:00
parent 14d4c87ef4
commit fcb3151f94
224 changed files with 9574 additions and 7913 deletions

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -15,12 +14,11 @@
* limitations under the License.
*/
// Performs database consistency check on the current program
import db.DBHandle;
import ghidra.app.script.GhidraScript;
import ghidra.app.services.ProgramManager;
import ghidra.framework.model.DomainFile;
import ghidra.program.database.ProgramDB;
import ghidra.program.model.listing.Program;
import db.DBHandle;
public class ConsistencyCheck extends GhidraScript {
@ -56,6 +54,8 @@ public class ConsistencyCheck extends GhidraScript {
return;
}
monitor.checkCanceled();
if (!df.canSave() || !currentProgram.hasExclusiveAccess()) {
popup("Program database is NOT consistent!\nRebuild requires exclusive checkout.");
return;
@ -67,19 +67,22 @@ public class ConsistencyCheck extends GhidraScript {
}
end(false);
ProgramDB program = (ProgramDB) df.getDomainObject(this, false, false, monitor);
programMgr.closeProgram(currentProgram, true);
currentProgram = (Program) df.getDomainObject(this, false, false, monitor);
dbh = ((ProgramDB) currentProgram).getDBHandle();
monitor.clearCanceled(); // compensate for Script Manager cancelling task on program close
dbh = program.getDBHandle();
try {
boolean success = false;
long txId = dbh.startTransaction();
int txId = program.startTransaction("Rebuild DB Indexes");
try {
success = dbh.rebuild(monitor);
}
finally {
dbh.endTransaction(txId, success);
program.endTransaction(txId, success);
}
if (!success) {
@ -92,11 +95,12 @@ public class ConsistencyCheck extends GhidraScript {
return;
}
currentProgram.save("DB Rebuild", monitor);
program.save("DB Rebuild", monitor);
}
finally {
currentProgram.release(this);
currentProgram = programMgr.openProgram(df);
programMgr.openProgram(program);
program.release(this);
currentProgram = program;
start();
}
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,9 +15,6 @@
*/
package ghidra.app.plugin.debug.dbtable;
import ghidra.util.Msg;
import ghidra.util.exception.AssertException;
import java.io.IOException;
import java.util.*;
@ -26,6 +22,8 @@ import javax.swing.event.TableModelListener;
import javax.swing.table.TableModel;
import db.*;
import ghidra.util.Msg;
import ghidra.util.exception.AssertException;
public class DbLargeTableModel implements TableModel {
private ArrayList<TableModelListener> listeners = new ArrayList<TableModelListener>();
@ -43,7 +41,7 @@ public class DbLargeTableModel implements TableModel {
this.table = table;
schema = table.getSchema();
try {
keyType = schema.getKeyFieldClass().newInstance();
keyType = schema.getKeyFieldType();
}
catch (Exception e) {
Msg.error(this, "Unexpected Exception: " + e.getMessage(), e);
@ -59,39 +57,39 @@ public class DbLargeTableModel implements TableModel {
Msg.error(this, "Unexpected Exception: " + e.getMessage(), e);
}
columns.add(getColumn(schema.getKeyFieldClass()));
columns.add(getColumn(schema.getKeyFieldType()));
Class<?>[] classes = schema.getFieldClasses();
int fieldCount = schema.getFieldCount();
for (int i = 0; i < fieldCount; i++) {
columns.add(getColumn(classes[i]));
Field[] fields = schema.getFields();
for (Field field : fields) {
columns.add(getColumn(field));
}
}
private AbstractColumnAdapter getColumn(Class<?> c) {
if (c == ByteField.class) {
private AbstractColumnAdapter getColumn(Field field) {
if (field instanceof ByteField) {
return new ByteColumnAdapter();
}
else if (c == BooleanField.class) {
else if (field instanceof BooleanField) {
return new BooleanColumnAdapter();
}
else if (c == ShortField.class) {
else if (field instanceof ShortField) {
return new ShortColumnAdapter();
}
else if (c == IntField.class) {
else if (field instanceof IntField) {
return new IntegerColumnAdapter();
}
else if (c == LongField.class) {
else if (field instanceof LongField) {
return new LongColumnAdapter();
}
else if (c == StringField.class) {
else if (field instanceof StringField) {
return new StringColumnAdapter();
}
else if (c == BinaryField.class) {
else if (field instanceof BinaryField) {
return new BinaryColumnAdapter();
}
throw new AssertException("New, unexpected DB column class type: " + c);
throw new AssertException(
"New, unexpected DB column type: " + field.getClass().getSimpleName());
}
private void findMinKey() throws IOException {

View file

@ -36,12 +36,11 @@ public class DbSmallTableModel extends AbstractSortedTableModel<Record> {
records = new ArrayList<>(table.getRecordCount());
columns.add(getColumn(schema.getKeyFieldClass()));
columns.add(getColumn(schema.getKeyFieldType()));
Class<?>[] classes = schema.getFieldClasses();
int fieldCount = schema.getFieldCount();
for (int i = 0; i < fieldCount; i++) {
columns.add(getColumn(classes[i]));
Field[] fields = schema.getFields();
for (Field field : fields) {
columns.add(getColumn(field));
}
try {
@ -55,29 +54,30 @@ public class DbSmallTableModel extends AbstractSortedTableModel<Record> {
}
}
private AbstractColumnAdapter getColumn(Class<?> c) {
if (c == ByteField.class) {
private AbstractColumnAdapter getColumn(Field field) {
if (field instanceof ByteField) {
return new ByteColumnAdapter();
}
else if (c == BooleanField.class) {
else if (field instanceof BooleanField) {
return new BooleanColumnAdapter();
}
else if (c == ShortField.class) {
else if (field instanceof ShortField) {
return new ShortColumnAdapter();
}
else if (c == IntField.class) {
else if (field instanceof IntField) {
return new IntegerColumnAdapter();
}
else if (c == LongField.class) {
else if (field instanceof LongField) {
return new LongColumnAdapter();
}
else if (c == StringField.class) {
else if (field instanceof StringField) {
return new StringColumnAdapter();
}
else if (c == BinaryField.class) {
else if (field instanceof BinaryField) {
return new BinaryColumnAdapter();
}
throw new AssertException("New, unexpected DB column class type: " + c);
throw new AssertException(
"New, unexpected DB column type: " + field.getClass().getSimpleName());
}
@Override

View file

@ -66,7 +66,7 @@ public class AddressIndexPrimaryKeyIteratorTest extends AbstractGhidraHeadedInte
// Create table with indexed address column
Schema schema =
new Schema(0, "id", new Class[] { LongField.class }, new String[] { "addr" });
new Schema(0, "id", new Field[] { LongField.INSTANCE }, new String[] { "addr" });
DBHandle handle = program.getDBHandle();
myTable = handle.createTable("MyTable", schema, new int[] { 0 });

View file

@ -32,7 +32,7 @@ import ghidra.util.datastruct.LongArray;
public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest {
private static Schema SCHEMA =
new Schema(0, "addr", new Class[] { StringField.class }, new String[] { "str" });
new Schema(0, "addr", new Field[] { StringField.INSTANCE }, new String[] { "str" });
private ProgramDB program;
private AddressSpace space;
@ -114,7 +114,7 @@ public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest
}
@Test
public void testIterator0() throws Exception {
public void testIterator0() throws Exception {
AddressKeyIterator it = new AddressKeyIterator();
assertTrue(!it.hasNext());
assertTrue(!it.hasPrevious());
@ -133,7 +133,7 @@ public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest
}
@Test
public void testIterator1() throws Exception {
public void testIterator1() throws Exception {
int index = 0;
AddressKeyIterator it = new AddressKeyIterator(myTable, addrMap, true);
while (it.hasNext()) {
@ -144,7 +144,7 @@ public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest
}
@Test
public void testIterator2() throws Exception {
public void testIterator2() throws Exception {
int index = 0x10;
AddressKeyIterator it = new AddressKeyIterator(myTable, addrMap, addr(0x4000), true);
while (it.hasNext()) {
@ -155,7 +155,7 @@ public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest
}
@Test
public void testIterator3() throws Exception {
public void testIterator3() throws Exception {
int index = 0x11;
AddressKeyIterator it = new AddressKeyIterator(myTable, addrMap, addr(0x5000), false);
while (it.hasNext()) {
@ -166,7 +166,7 @@ public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest
}
@Test
public void testIterator4() throws Exception {
public void testIterator4() throws Exception {
int index = 0x10;
AddressKeyIterator it = new AddressKeyIterator(myTable, addrMap, addr(0x5000), true);
while (it.hasNext()) {
@ -177,7 +177,7 @@ public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest
}
@Test
public void testIterator5() throws Exception {
public void testIterator5() throws Exception {
int index = 0x0f;
AddressKeyIterator it = new AddressKeyIterator(myTable, addrMap, addr(0x5000), true);
while (it.hasPrevious()) {
@ -188,7 +188,7 @@ public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest
}
@Test
public void testIterator6() throws Exception {
public void testIterator6() throws Exception {
int index = 0x10;
AddressKeyIterator it = new AddressKeyIterator(myTable, addrMap, addr(0x5000), false);
while (it.hasPrevious()) {
@ -199,7 +199,7 @@ public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest
}
@Test
public void testIterator7() throws Exception {
public void testIterator7() throws Exception {
AddressSet set = new AddressSet();
set.addRange(addr(0x3008), addr(0x5008));
set.addRange(addr(0x9008), addr(0x10000));
@ -216,7 +216,7 @@ public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest
}
@Test
public void testIterator8() throws Exception {
public void testIterator8() throws Exception {
AddressSet set = new AddressSet();
set.addRange(addr(0x3008), addr(0x5008));
set.addRange(addr(0x9008), addr(0x10000));
@ -233,7 +233,7 @@ public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest
}
@Test
public void testIterator9() throws Exception {
public void testIterator9() throws Exception {
AddressSet set = new AddressSet();
set.addRange(addr(0x3008), addr(0x5008));
set.addRange(addr(0x9008), addr(0x10000));
@ -247,7 +247,7 @@ public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest
}
@Test
public void testIterator10() throws Exception {
public void testIterator10() throws Exception {
AddressSet set = new AddressSet();
set.addRange(addr(0x3008), addr(0x5008));
set.addRange(addr(0x9008), addr(0x10000));
@ -264,7 +264,7 @@ public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest
}
@Test
public void testIterator11() throws Exception {
public void testIterator11() throws Exception {
AddressSet set = new AddressSet();
set.addRange(addr(0x3008), addr(0x5008));
set.addRange(addr(0x9008), addr(0x10000));
@ -279,7 +279,7 @@ public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest
}
@Test
public void testIterator12() throws Exception {
public void testIterator12() throws Exception {
int index = 0x3f;
AddressKeyIterator it = new AddressKeyIterator(myTable, addrMap, null, false);
while (it.hasPrevious()) {
@ -290,7 +290,7 @@ public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest
}
@Test
public void testIterator13() throws Exception {
public void testIterator13() throws Exception {
AddressSet set = new AddressSet();
set.addRange(addr(0x3008), addr(0x5008));
set.addRange(addr(0x9008), addr(0x10000));
@ -304,7 +304,7 @@ public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest
}
@Test
public void testIteratorCheckWrap1() throws Exception {
public void testIteratorCheckWrap1() throws Exception {
addRecord(addr(0x0));
addRecord(addr(0x0100));
@ -320,7 +320,7 @@ public class AddressKeyIteratorTest extends AbstractGhidraHeadedIntegrationTest
}
@Test
public void testIteratorCheckWrap2() throws Exception {
public void testIteratorCheckWrap2() throws Exception {
addRecord(addr(0x0));
addRecord(addr(0x0100));

View file

@ -35,7 +35,8 @@ import ghidra.util.Lock;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitorAdapter;
public class AddressRangeMapDBTest extends AbstractGhidraHeadedIntegrationTest implements ErrorHandler {
public class AddressRangeMapDBTest extends AbstractGhidraHeadedIntegrationTest
implements ErrorHandler {
private TestEnv env; // needed to discover languages
private ProgramDB program;
@ -83,7 +84,7 @@ public class AddressRangeMapDBTest extends AbstractGhidraHeadedIntegrationTest i
public void testTransaction() {
AddressRangeMapDB map = new AddressRangeMapDB(program.getDBHandle(), addrMap,
new Lock("Test"), "TEST", this, LongField.class, true);
new Lock("Test"), "TEST", this, LongField.INSTANCE, true);
try {
map.paintRange(addr(0), addr(0x1000), ONE);
@ -114,7 +115,7 @@ public class AddressRangeMapDBTest extends AbstractGhidraHeadedIntegrationTest i
public void testPaint() {
AddressRangeMapDB map = new AddressRangeMapDB(program.getDBHandle(), addrMap,
new Lock("Test"), "TEST", this, LongField.class, true);
new Lock("Test"), "TEST", this, LongField.INSTANCE, true);
int id = program.startTransaction("TEST");
try {
@ -152,7 +153,7 @@ public class AddressRangeMapDBTest extends AbstractGhidraHeadedIntegrationTest i
public void testClear() {
AddressRangeMapDB map = new AddressRangeMapDB(program.getDBHandle(), addrMap,
new Lock("Test"), "TEST", this, LongField.class, true);
new Lock("Test"), "TEST", this, LongField.INSTANCE, true);
int id = program.startTransaction("TEST");
try {
@ -186,7 +187,7 @@ public class AddressRangeMapDBTest extends AbstractGhidraHeadedIntegrationTest i
public void testAddressRangeIterator() {
AddressRangeMapDB map = new AddressRangeMapDB(program.getDBHandle(), addrMap,
new Lock("Test"), "TEST", this, LongField.class, true);
new Lock("Test"), "TEST", this, LongField.INSTANCE, true);
int id = program.startTransaction("TEST");
try {
@ -248,7 +249,7 @@ public class AddressRangeMapDBTest extends AbstractGhidraHeadedIntegrationTest i
public void testMove() {
AddressRangeMapDB map = new AddressRangeMapDB(program.getDBHandle(), addrMap,
new Lock("Test"), "TEST", this, LongField.class, true);
new Lock("Test"), "TEST", this, LongField.INSTANCE, true);
int id = program.startTransaction("TEST");
try {

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -21,57 +20,59 @@ import java.util.Random;
import db.*;
public class DatabaseBenchMarks {
static int BUFFER_SIZE = 16*1024;
static int CACHE_SIZE = 32*1024*1024;
static int BUFFER_SIZE = 16 * 1024;
static int CACHE_SIZE = 32 * 1024 * 1024;
public static void main(String[] args) {
TestTimer timer = new TestTimer();
testOrderedIntInsertions(timer,1000);
testOrderedIntInsertions(timer,10000);
testOrderedIntInsertions(timer,100000);
testOrderedIntInsertions(timer,1000000);
testOrderedIntInsertions(timer, 1000);
testOrderedIntInsertions(timer, 10000);
testOrderedIntInsertions(timer, 100000);
testOrderedIntInsertions(timer, 1000000);
System.out.println("");
testOrderedStringInsertions(timer,1000);
testOrderedStringInsertions(timer,10000);
testOrderedStringInsertions(timer,100000);
testOrderedStringInsertions(timer,1000000);
testOrderedStringInsertions(timer, 1000);
testOrderedStringInsertions(timer, 10000);
testOrderedStringInsertions(timer, 100000);
testOrderedStringInsertions(timer, 1000000);
System.out.println("");
testRandomIntInsertions(timer,1000);
testRandomIntInsertions(timer,10000);
testRandomIntInsertions(timer,100000);
testRandomIntInsertions(timer,1000000);
testRandomIntInsertions(timer, 1000);
testRandomIntInsertions(timer, 10000);
testRandomIntInsertions(timer, 100000);
testRandomIntInsertions(timer, 1000000);
System.out.println("");
testIteration(timer);
System.out.println("");
testRandomAccess(timer);
}
private static void testOrderedIntInsertions(TestTimer timer, int numInsertions) {
try {
DBHandle dbh = new DBHandle(BUFFER_SIZE, CACHE_SIZE);
long transactionID = dbh.startTransaction();
Schema schema = new Schema(1, "Key", new Class[]{IntField.class}, new String[]{"Value"});
Schema schema =
new Schema(1, "Key", new Field[] { IntField.INSTANCE }, new String[] { "Value" });
Table table = dbh.createTable("Test", schema);
Record record = schema.createRecord(0);
timer.start("Inserting "+numInsertions+" sorted records with long keys and integer values");
for(int i=0;i<numInsertions;i++) {
timer.start(
"Inserting " + numInsertions + " sorted records with long keys and integer values");
for (int i = 0; i < numInsertions; i++) {
record.setKey(i);
record.setIntValue(0,i);
record.setIntValue(0, i);
table.putRecord(record);
}
timer.end();
dbh.endTransaction(transactionID, true);
dbh.close();
}catch(IOException e) {
e.printStackTrace();
}
catch (IOException e) {
e.printStackTrace();
}
}
@ -79,69 +80,77 @@ public class DatabaseBenchMarks {
try {
DBHandle dbh = new DBHandle(BUFFER_SIZE, CACHE_SIZE);
long transactionID = dbh.startTransaction();
Schema schema = new Schema(1, "Key", new Class[]{StringField.class}, new String[]{"Value"});
Schema schema = new Schema(1, "Key", new Field[] { StringField.INSTANCE },
new String[] { "Value" });
Table table = dbh.createTable("Test", schema);
Record record = schema.createRecord(0);
timer.start("Inserting "+numInsertions+" sorted records with long keys and String (length = 8) values");
for(int i=0;i<numInsertions;i++) {
timer.start("Inserting " + numInsertions +
" sorted records with long keys and String (length = 8) values");
for (int i = 0; i < numInsertions; i++) {
record.setKey(i);
record.setString(0,"abcdefgh");
record.setString(0, "abcdefgh");
table.putRecord(record);
}
timer.end();
dbh.endTransaction(transactionID, true);
dbh.close();
}catch(IOException e) {
e.printStackTrace();
}
catch (IOException e) {
e.printStackTrace();
}
}
private static void testRandomIntInsertions(TestTimer timer, int numInsertions) {
try {
Random random = new Random();
DBHandle dbh = new DBHandle(BUFFER_SIZE, CACHE_SIZE);
long transactionID = dbh.startTransaction();
Schema schema = new Schema(1, "Key", new Class[]{IntField.class}, new String[]{"Value"});
Schema schema =
new Schema(1, "Key", new Field[] { IntField.INSTANCE }, new String[] { "Value" });
Table table = dbh.createTable("Test", schema);
Record record = schema.createRecord(0);
timer.start("Inserting "+numInsertions+" random records with long keys and integer values");
for(int i=0;i<numInsertions;i++) {
timer.start(
"Inserting " + numInsertions + " random records with long keys and integer values");
for (int i = 0; i < numInsertions; i++) {
record.setKey(random.nextLong());
record.setIntValue(0,i);
record.setIntValue(0, i);
table.putRecord(record);
}
timer.end();
dbh.endTransaction(transactionID, true);
dbh.close();
}catch(IOException e) {
e.printStackTrace();
}
catch (IOException e) {
e.printStackTrace();
}
}
private static void testIteration(TestTimer timer) {
try {
DBHandle dbh = new DBHandle(BUFFER_SIZE, CACHE_SIZE);
long transactionID = dbh.startTransaction();
Schema schema = new Schema(1, "Key", new Class[]{IntField.class}, new String[]{"Value"});
Schema schema =
new Schema(1, "Key", new Field[] { IntField.INSTANCE }, new String[] { "Value" });
Table table = dbh.createTable("Test", schema);
Record record = schema.createRecord(0);
System.out.print("building database...");
for(int i=0;i<1000000;i++) {
for (int i = 0; i < 1000000; i++) {
record.setKey(i);
record.setIntValue(0,i);
record.setIntValue(0, i);
table.putRecord(record);
}
timer.start("Iterating over 1000000 int records");
RecordIterator it = table.iterator();
while(it.hasNext()) {
while (it.hasNext()) {
it.next();
}
timer.end();
timer.end();
dbh.endTransaction(transactionID, true);
dbh.close();
}catch(IOException e) {
e.printStackTrace();
}
catch (IOException e) {
e.printStackTrace();
}
}
@ -149,42 +158,43 @@ public class DatabaseBenchMarks {
try {
DBHandle dbh = new DBHandle(BUFFER_SIZE, CACHE_SIZE);
long transactionID = dbh.startTransaction();
Schema schema = new Schema(1, "Key", new Class[]{IntField.class}, new String[]{"Value"});
Schema schema =
new Schema(1, "Key", new Field[] { IntField.INSTANCE }, new String[] { "Value" });
Table table = dbh.createTable("Test", schema);
Record record = schema.createRecord(0);
System.out.print("building database...");
for(int i=0;i<1000000;i++) {
for (int i = 0; i < 1000000; i++) {
record.setKey(i);
record.setIntValue(0,i);
record.setIntValue(0, i);
table.putRecord(record);
}
Random random = new Random();
timer.start("Randomly accessing 1000000 int records");
for(int i=0;i<1000000;i++) {
for (int i = 0; i < 1000000; i++) {
table.getRecord(random.nextInt(1000000));
}
timer.end();
timer.end();
dbh.endTransaction(transactionID, true);
dbh.close();
}catch(IOException e) {
e.printStackTrace();
}
catch (IOException e) {
e.printStackTrace();
}
}
}
class TestTimer {
long start;
void start(String testMsg) {
System.out.print(testMsg+"... ");
System.out.print(testMsg + "... ");
start = System.currentTimeMillis();
}
void end() {
long end = System.currentTimeMillis();
System.out.println(""+(end-start)/1000.0+" seconds");
System.out.println("" + (end - start) / 1000.0 + " seconds");
}
}

View file

@ -23,7 +23,6 @@ import java.io.IOException;
import java.util.*;
import javax.swing.*;
import javax.swing.event.TableModelListener;
import javax.swing.table.TableModel;
import db.buffers.LocalBufferFile;
@ -33,6 +32,8 @@ import docking.widgets.combobox.GComboBox;
import docking.widgets.filechooser.GhidraFileChooser;
import docking.widgets.label.GDLabel;
import docking.widgets.label.GLabel;
import ghidra.app.plugin.debug.dbtable.DbLargeTableModel;
import ghidra.app.plugin.debug.dbtable.DbSmallTableModel;
import ghidra.framework.Application;
import ghidra.framework.store.db.PackedDatabase;
import ghidra.util.Msg;
@ -292,525 +293,3 @@ public class DbViewer extends JFrame {
}
}
/**
 * Converts DB record values (the key and each data column) into objects
 * suitable for display in a Swing table.  The conversion is selected by the
 * DB field class supplied at construction.  Long values are rendered as hex
 * strings and binary values as a truncated comma-separated byte list.
 */
class ColumnAdapter {
	static final int BYTE = 0;
	static final int BOOLEAN = 1;
	static final int SHORT = 2;
	static final int INT = 3;
	static final int LONG = 4;
	static final int STRING = 5;
	static final int BINARY = 6;

	int type;
	Class<?> valueClass;

	/**
	 * @param c the DB field class (e.g. {@code ByteField.class}) held by this column
	 * @throws IllegalArgumentException if {@code c} is not a supported field class
	 */
	ColumnAdapter(Class<?> c) {
		if (c == ByteField.class) {
			type = BYTE;
			valueClass = Byte.class;
		}
		else if (c == BooleanField.class) {
			type = BOOLEAN;
			valueClass = Boolean.class;
		}
		else if (c == ShortField.class) {
			type = SHORT;
			valueClass = Short.class;
		}
		else if (c == IntField.class) {
			type = INT;
			valueClass = Integer.class;
		}
		else if (c == LongField.class) {
			type = LONG;
			// rendered as a hex String rather than a raw Long
			valueClass = String.class;
		}
		else if (c == StringField.class) {
			type = STRING;
			valueClass = String.class;
		}
		else if (c == BinaryField.class) {
			type = BINARY;
			valueClass = String.class;
		}
		else {
			// Fail fast instead of silently producing a broken adapter
			// (previously fell through leaving valueClass null, causing a
			// later NPE in getColumnClass()).
			throw new IllegalArgumentException("Unsupported DB field class: " + c);
		}
	}

	/** @return the Java class used to render values of this column */
	Class<?> getValueClass() {
		return valueClass;
	}

	/**
	 * Converts the record's key field into a displayable object.
	 * @param rec the DB record
	 * @return boxed primitive, String, hex String (long keys), or a
	 *         truncated byte-list String (binary keys)
	 */
	Object getKeyValue(Record rec) {
		switch (type) {
			case BYTE:
				return Byte.valueOf(((ByteField) rec.getKeyField()).getByteValue());
			case BOOLEAN:
				return Boolean.valueOf(((BooleanField) rec.getKeyField()).getBooleanValue());
			case SHORT:
				return Short.valueOf(((ShortField) rec.getKeyField()).getShortValue());
			case INT:
				return Integer.valueOf(((IntField) rec.getKeyField()).getIntValue());
			case LONG:
				return "0x" + Long.toHexString(rec.getKey());
			case STRING:
				return ((StringField) rec.getKeyField()).getString();
			case BINARY:
				// Show at most the first 20 bytes (decimal), then "..."
				byte[] bytes = ((BinaryField) rec.getKeyField()).getBinaryData();
				StringBuilder buf = new StringBuilder(" byte[" + bytes.length + "] = ");
				if (bytes.length > 0) {
					int len = Math.min(bytes.length, 20);
					buf.append(bytes[0]);
					for (int i = 1; i < len; i++) {
						buf.append(",");
						buf.append(bytes[i]);
					}
					if (bytes.length > 20) {
						buf.append("...");
					}
				}
				return buf.toString();
		}
		return "";
	}

	/**
	 * Converts one data column of the record into a displayable object.
	 * @param rec the DB record
	 * @param col the schema column index
	 * @return boxed primitive, String, hex String (long values), or a
	 *         truncated hex byte-list String (binary values)
	 */
	Object getValue(Record rec, int col) {
		switch (type) {
			case BYTE:
				return Byte.valueOf(rec.getByteValue(col));
			case BOOLEAN:
				return Boolean.valueOf(rec.getBooleanValue(col));
			case SHORT:
				return Short.valueOf(rec.getShortValue(col));
			case INT:
				return Integer.valueOf(rec.getIntValue(col));
			case LONG:
				return "0x" + Long.toHexString(rec.getLongValue(col));
			case STRING:
				return " " + rec.getString(col);
			case BINARY:
				// Show at most the first 20 bytes (hex), then "..."
				byte[] bytes = rec.getBinaryData(col);
				StringBuilder buf = new StringBuilder(" byte[" + bytes.length + "] = ");
				if (bytes.length > 0) {
					int len = Math.min(bytes.length, 20);
					buf.append(getByteString(bytes[0]));
					for (int i = 1; i < len; i++) {
						buf.append(",");
						buf.append(getByteString(bytes[i]));
					}
					if (bytes.length > 20) {
						buf.append("...");
					}
				}
				return buf.toString();
		}
		return "";
	}

	/** Formats a byte as a two-hex-digit string, e.g. {@code 0xa} / {@code 0xff}. */
	private String getByteString(byte b) {
		String str = Integer.toHexString(b);
		if (str.length() > 2) {
			str = str.substring(str.length() - 2);
		}
		return "0x" + str;
	}
}
/**
 * {@link TableModel} over a DB {@link Table} small enough to load entirely
 * into memory.  Every record is read once at construction; column 0 shows
 * the record key and the remaining columns follow the schema field order,
 * with an asterisk suffix marking indexed columns.  The model is read-only.
 */
class DbSmallTableModel implements TableModel {
	ArrayList<TableModelListener> listeners = new ArrayList<>();
	Table table;
	Schema schema;
	ColumnAdapter[] colAdapters;
	ColumnAdapter keyAdapter;
	Record[] records;

	DbSmallTableModel(Table table) {
		this.table = table;
		schema = table.getSchema();

		// One adapter for the key column plus one per schema field.
		keyAdapter = new ColumnAdapter(schema.getKeyFieldClass());
		Class<?>[] fieldClasses = schema.getFieldClasses();
		colAdapters = new ColumnAdapter[schema.getFieldCount()];
		for (int col = 0; col < colAdapters.length; col++) {
			colAdapters[col] = new ColumnAdapter(fieldClasses[col]);
		}

		// Snapshot all records up front; the table is never re-read.
		records = new Record[table.getRecordCount()];
		try {
			RecordIterator iter = table.iterator();
			for (int row = 0; row < records.length; row++) {
				records[row] = iter.next();
			}
		}
		catch (IOException e) {
			Msg.error(this, "Unexpected Exception: " + e.getMessage(), e);
		}
	}

	@Override
	public void addTableModelListener(TableModelListener l) {
		listeners.add(l);
	}

	/** Key column renders with the key adapter's class; others per field adapter. */
	@Override
	public Class<?> getColumnClass(int columnIndex) {
		return columnIndex == 0 ? keyAdapter.getValueClass()
				: colAdapters[columnIndex - 1].getValueClass();
	}

	/** @return schema field count plus one for the key column */
	@Override
	public int getColumnCount() {
		return schema.getFieldCount() + 1;
	}

	/** Indexed columns get a {@code *} suffix appended to their name. */
	@Override
	public String getColumnName(int columnIndex) {
		if (columnIndex == 0) {
			return schema.getKeyName();
		}
		int fieldCol = columnIndex - 1;
		String suffix = "";
		for (int indexedCol : table.getIndexedColumns()) {
			if (indexedCol == fieldCol) {
				suffix = "*";
				break;
			}
		}
		return schema.getFieldNames()[fieldCol] + suffix;
	}

	@Override
	public int getRowCount() {
		return table.getRecordCount();
	}

	@Override
	public Object getValueAt(int rowIndex, int columnIndex) {
		Record record = records[rowIndex];
		if (columnIndex == 0) {
			return keyAdapter.getKeyValue(record);
		}
		int fieldCol = columnIndex - 1;
		return colAdapters[fieldCol].getValue(record, fieldCol);
	}

	/** Model is read-only. */
	@Override
	public boolean isCellEditable(int rowIndex, int columnIndex) {
		return false;
	}

	@Override
	public void removeTableModelListener(TableModelListener l) {
		listeners.remove(l);
	}

	/** No-op: the model is read-only. */
	@Override
	public void setValueAt(Object aValue, int rowIndex, int columnIndex) {
	}
}
class DbLargeTableModel implements TableModel {
ArrayList<TableModelListener> listeners = new ArrayList<>();
Table table;
Schema schema;
ColumnAdapter keyAdapter;
ColumnAdapter[] colAdapters;
RecordIterator recIt;
Record lastRecord;
int lastIndex;
Field minKey;
Field maxKey;
Field keyType;
DbLargeTableModel(Table table) {
this.table = table;
schema = table.getSchema();
keyAdapter = new ColumnAdapter(schema.getKeyFieldClass());
try {
keyType = schema.getKeyFieldClass().newInstance();
}
catch (Exception e) {
Msg.error(this, "Unexpected Exception: " + e.getMessage(), e);
}
try {
recIt = table.iterator();
lastRecord = recIt.next();
lastIndex = 0;
findMaxKey();
findMinKey();
}
catch (IOException e) {
Msg.error(this, "Unexpected Exception: " + e.getMessage(), e);
}
colAdapters = new ColumnAdapter[schema.getFieldCount()];
Class<?>[] classes = schema.getFieldClasses();
for (int i = 0; i < colAdapters.length; i++) {
colAdapters[i] = new ColumnAdapter(classes[i]);
}
}
private void findMinKey() throws IOException {
RecordIterator iter = table.iterator();
Record rec = iter.next();
minKey = rec.getKeyField();
}
private void findMaxKey() throws IOException {
Field max = keyType.newField();
if (table.useLongKeys()) {
max.setLongValue(Long.MAX_VALUE);
}
else {
byte[] maxBytes = new byte[128];
Arrays.fill(maxBytes, 0, 128, (byte) 0x7f);
max.setBinaryData(maxBytes);
}
RecordIterator iter = table.iterator(max);
Record rec = iter.previous();
maxKey = rec.getKeyField();
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#addTableModelListener(javax.swing.event.TableModelListener)
*/
@Override
public void addTableModelListener(TableModelListener l) {
listeners.add(l);
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#getColumnClass(int)
*/
@Override
public Class<?> getColumnClass(int columnIndex) {
if (columnIndex == 0) {
return keyAdapter.getValueClass();
}
return colAdapters[columnIndex - 1].getValueClass();
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#getColumnCount()
*/
@Override
public int getColumnCount() {
return schema.getFieldCount() + 1;
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#getColumnName(int)
*/
@Override
public String getColumnName(int columnIndex) {
if (columnIndex == 0) {
return schema.getKeyName();
}
--columnIndex;
int[] indexCols = table.getIndexedColumns();
boolean isIndexed = false;
for (int indexCol : indexCols) {
if (indexCol == columnIndex) {
isIndexed = true;
break;
}
}
return schema.getFieldNames()[columnIndex] + (isIndexed ? "*" : "");
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#getRowCount()
*/
@Override
public int getRowCount() {
return table.getRecordCount();
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#getValueAt(int, int)
*/
@Override
public Object getValueAt(int rowIndex, int columnIndex) {
Record rec = getRecord(rowIndex);
if (rec == null) {
return null;
}
if (columnIndex == 0) {
return keyAdapter.getKeyValue(rec);
}
return colAdapters[columnIndex - 1].getValue(rec, columnIndex - 1);
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#isCellEditable(int, int)
*/
@Override
public boolean isCellEditable(int rowIndex, int columnIndex) {
return false;
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#removeTableModelListener(javax.swing.event.TableModelListener)
*/
@Override
public void removeTableModelListener(TableModelListener l) {
listeners.remove(l);
}
/* (non-Javadoc)
* @see javax.swing.table.TableModel#setValueAt(java.lang.Object, int, int)
*/
@Override
public void setValueAt(Object aValue, int rowIndex, int columnIndex) {
}
/**
 * Returns the record displayed at the given row index.
 * Maintains a cached record iterator position (lastIndex/lastRecord) so
 * that sequential row access advances one step at a time, small backward
 * moves step the iterator back, and large jumps reseek via findRecord.
 * On an exhausted iterator or an I/O error the previously fetched record
 * is returned rather than propagating an exception into Swing.
 * @param index requested row index
 * @return the record at the index, or the last successfully fetched
 *         record if it could not be advanced to
 */
private Record getRecord(int index) {
	try {
		if (index == lastIndex + 1) {
			// common case: table scanned one row forward
			if (!recIt.hasNext()) {
				// Bug fix: the original left this branch empty and fell
				// through to recIt.next(), throwing an uncaught
				// NoSuchElementException when the iterator was exhausted.
				return lastRecord;
			}
			lastRecord = recIt.next();
			lastIndex = index;
		}
		else if (index != lastIndex) {
			if (index < lastIndex && (lastIndex - index) < 200) {
				// short backward move: cheaper to step back than to reseek
				int backup = lastIndex - index + 1;
				for (int i = 0; i < backup; i++) {
					if (recIt.hasPrevious()) {
						recIt.previous();
					}
				}
				lastRecord = recIt.next();
				lastIndex = index;
			}
			else {
				// large jump: reposition the iterator near the target index
				findRecord(index);
				lastRecord = recIt.next();
				lastIndex = index;
			}
		}
	}
	catch (IOException e) {
		Msg.error(this, "Unexpected Exception: " + e.getMessage(), e);
	}
	return lastRecord;
}
/**
 * Repositions recIt so that its next() call yields (approximately) the
 * record at the given row index.
 * Near the start or end of the table the position is exact; in the
 * middle an interpolated key is used, so the resulting position is only
 * approximate (see approxKey).
 * @param index target row index
 * @throws IOException if the database has a problem
 */
private void findRecord(int index) throws IOException {
	if (index < 1000) {
		// near the start: walk forward from the first record
		recIt = table.iterator();
		for (int i = 0; i < index; i++) {
			recIt.next();
		}
	}
	else if (index > table.getRecordCount() - 1000) {
		// near the end: start at the maximum key and walk backward
		recIt = table.iterator(maxKey);
		if (recIt.hasNext()) {
			recIt.next();
		}
		for (int i = 0; i < table.getRecordCount() - index; i++) {
			recIt.previous();
		}
	}
	else {
		// middle of a large table: seek to an interpolated key
		recIt = table.iterator(approxKey(index));
	}
}
/**
 * Builds a key that approximates the key at the given row index by
 * linearly interpolating between the table's minimum and maximum keys.
 * For long keys the interpolation is direct; for binary keys only the
 * leading 8 bytes (big-endian, via getLong) are interpolated, so the
 * resulting position is approximate.
 * @param index target row index
 * @return an interpolated key of the table's key type
 */
private Field approxKey(int index) {
	Field key = keyType.newField();
	if (table.useLongKeys()) {
		long lo = minKey.getLongValue();
		long hi = maxKey.getLongValue();
		key.setLongValue(lo + ((hi - lo) * index / table.getRecordCount()));
	}
	else {
		long lo = getLong(minKey.getBinaryData());
		long hi = getLong(maxKey.getBinaryData());
		long interpolated = lo + ((hi - lo) * index / table.getRecordCount());
		// encode big-endian: most-significant byte first
		byte[] encoded = new byte[8];
		for (int i = 0; i < 8; i++) {
			encoded[7 - i] = (byte) (interpolated >> (8 * i));
		}
		key.setBinaryData(encoded);
	}
	return key;
}
/**
 * Interprets up to the first 8 bytes of the array as a big-endian
 * unsigned value; missing low-order bytes are treated as zero.
 * @param bytes source bytes, may be null or empty
 * @return the decoded value, or 0 for a null/empty array
 */
private long getLong(byte[] bytes) {
	if (bytes == null || bytes.length == 0) {
		return 0;
	}
	long value = 0;
	for (int i = 0; i < 8; i++) {
		long nextByte = (i < bytes.length) ? (bytes[i] & 0xff) : 0;
		value = (value << 8) | nextByte;
	}
	return value;
}
}

View file

@ -44,11 +44,11 @@ public class FunctionsTable {
static final int CACHE_SIZE = 10000;
// @formatter:off
static final Schema SCHEMA = new Schema(LibrariesTable.VERSION, "Function ID", new Class[] {
ShortField.class, LongField.class,
ByteField.class, LongField.class, LongField.class,
LongField.class, LongField.class, LongField.class,
ByteField.class
static final Schema SCHEMA = new Schema(LibrariesTable.VERSION, "Function ID", new Field[] {
ShortField.INSTANCE, LongField.INSTANCE,
ByteField.INSTANCE, LongField.INSTANCE, LongField.INSTANCE,
LongField.INSTANCE, LongField.INSTANCE, LongField.INSTANCE,
ByteField.INSTANCE
}, new String[] {
"Code Unit Size", "Full Hash",
"Specific Hash Additional Size", "Specific Hash", "Library ID",
@ -133,14 +133,15 @@ public class FunctionsTable {
*/
public List<FunctionRecord> getFunctionRecordsByFullHash(long hash) throws IOException {
LongField hashField = new LongField(hash);
DBLongIterator iterator = table.indexKeyIterator(FULL_HASH_COL, hashField, hashField, true);
DBFieldIterator iterator =
table.indexKeyIterator(FULL_HASH_COL, hashField, hashField, true);
if (!iterator.hasNext()) {
return Collections.emptyList();
}
List<FunctionRecord> list = new ArrayList<>();
while (iterator.hasNext()) {
long key = iterator.next();
FunctionRecord functionRecord = functionCache.get(key);
Field key = iterator.next();
FunctionRecord functionRecord = functionCache.get(key.getLongValue());
if (functionRecord == null) {
Record record = table.getRecord(key);
functionRecord = new FunctionRecord(fidDb, functionCache, record);
@ -216,15 +217,15 @@ public class FunctionsTable {
*/
public List<FunctionRecord> getFunctionRecordsByNameSubstring(String nameSearch)
throws IOException {
DBLongIterator iterator = table.indexKeyIterator(NAME_ID_COL);
DBFieldIterator iterator = table.indexKeyIterator(NAME_ID_COL);
if (!iterator.hasNext()) {
return Collections.emptyList();
}
List<FunctionRecord> list = new ArrayList<>();
while (iterator.hasNext()) {
long key = iterator.next();
FunctionRecord functionRecord = functionCache.get(key);
Field key = iterator.next();
FunctionRecord functionRecord = functionCache.get(key.getLongValue());
if (functionRecord == null) {
Record record = table.getRecord(key);
long nameID = record.getLongValue(NAME_ID_COL);
@ -255,15 +256,15 @@ public class FunctionsTable {
*/
public List<FunctionRecord> getFunctionRecordsByNameRegex(String regex) throws IOException {
Matcher matcher = Pattern.compile(regex).matcher("");
DBLongIterator iterator = table.indexKeyIterator(NAME_ID_COL);
DBFieldIterator iterator = table.indexKeyIterator(NAME_ID_COL);
if (!iterator.hasNext()) {
return Collections.emptyList();
}
List<FunctionRecord> list = new ArrayList<>();
while (iterator.hasNext()) {
long key = iterator.next();
FunctionRecord functionRecord = functionCache.get(key);
Field key = iterator.next();
FunctionRecord functionRecord = functionCache.get(key.getLongValue());
if (functionRecord == null) {
Record record = table.getRecord(key);
long nameID = record.getLongValue(NAME_ID_COL);
@ -347,15 +348,15 @@ public class FunctionsTable {
return Collections.emptyList();
}
LongField field = new LongField(stringID);
DBLongIterator iterator = table.indexKeyIterator(NAME_ID_COL, field, field, true);
DBFieldIterator iterator = table.indexKeyIterator(NAME_ID_COL, field, field, true);
if (!iterator.hasNext()) {
return Collections.emptyList();
}
final long libraryKey = library.getLibraryID();
List<FunctionRecord> list = new ArrayList<>();
while (iterator.hasNext()) {
long key = iterator.next();
FunctionRecord functionRecord = functionCache.get(key);
Field key = iterator.next();
FunctionRecord functionRecord = functionCache.get(key.getLongValue());
if (functionRecord == null) {
Record record = table.getRecord(key);
if (record.getLongValue(LIBRARY_ID_COL) == libraryKey) {

View file

@ -49,10 +49,10 @@ public class LibrariesTable {
static final int GHIDRA_COMPILER_SPEC_ID_COL = 7;
// @formatter:off
static final Schema SCHEMA = new Schema(VERSION, "Library ID", new Class[] {
StringField.class, StringField.class, StringField.class,
StringField.class, StringField.class, IntField.class, IntField.class,
StringField.class
static final Schema SCHEMA = new Schema(VERSION, "Library ID", new Field[] {
StringField.INSTANCE, StringField.INSTANCE, StringField.INSTANCE,
StringField.INSTANCE, StringField.INSTANCE, IntField.INSTANCE, IntField.INSTANCE,
StringField.INSTANCE
}, new String[] {
"Library Family Name", "Library Version", "Library Variant",
"Ghidra Version", "Ghidra Language ID", "Ghidra Language Version", "Ghidra Language Minor Version",
@ -90,8 +90,9 @@ public class LibrariesTable {
if (libraryVersion != VERSION) {
String msg = "Expected version " + VERSION + " for table " + LIBRARIES_TABLE +
" but got " + table.getSchema().getVersion();
throw new VersionException(msg, libraryVersion < VERSION
? VersionException.OLDER_VERSION : VersionException.NEWER_VERSION,
throw new VersionException(msg,
libraryVersion < VERSION ? VersionException.OLDER_VERSION
: VersionException.NEWER_VERSION,
false);
}
}
@ -155,14 +156,14 @@ public class LibrariesTable {
public List<LibraryRecord> getLibrariesByName(String name, String version, String variant)
throws IOException {
StringField hashField = new StringField(name);
DBLongIterator iterator =
DBFieldIterator iterator =
table.indexKeyIterator(LIBRARY_FAMILY_NAME_COL, hashField, hashField, true);
if (!iterator.hasNext()) {
return Collections.emptyList();
}
List<LibraryRecord> list = new ArrayList<LibraryRecord>();
while (iterator.hasNext()) {
long key = iterator.next();
Field key = iterator.next();
Record record = table.getRecord(key);
LibraryRecord libraryRecord = new LibraryRecord(record);
if (version != null) {

View file

@ -27,8 +27,8 @@ public class RelationsTable {
// static final int CACHE_SIZE = 10000;
// @formatter:off
static final Schema SCHEMA = new Schema(LibrariesTable.VERSION, "Relation Smash", new Class[] {
}, new String[] {
static final Schema SCHEMA = new Schema(LibrariesTable.VERSION, "Relation Smash",
new Field[] { }, new String[] {
});
// @formatter:on

View file

@ -35,11 +35,9 @@ public class StringsTable {
static final int CACHE_SIZE = 10000;
// @formatter:off
static final Schema SCHEMA = new Schema(LibrariesTable.VERSION, "String ID", new Class[] {
StringField.class
}, new String[] {
"String Value"
});
static final Schema SCHEMA = new Schema(LibrariesTable.VERSION, "String ID",
new Field[] { StringField.INSTANCE },
new String[] { "String Value" });
// @formatter:on
static int[] INDEXED_COLUMNS = new int[] { STRING_VALUE_COL };
@ -69,29 +67,30 @@ public class StringsTable {
* @throws IOException if the database has a problem
*/
long obtainStringID(String value) throws IOException {
long[] records = table.findRecords(new StringField(value), STRING_VALUE_COL);
Field[] records = table.findRecords(new StringField(value), STRING_VALUE_COL);
if (records == null || records.length == 0) {
// create
Record record = SCHEMA.createRecord(UniversalIdGenerator.nextID().getValue());
long key = UniversalIdGenerator.nextID().getValue();
Record record = SCHEMA.createRecord(key);
record.setString(STRING_VALUE_COL, value);
table.putRecord(record);
return record.getKey();
return key;
}
return records[0];
return records[0].getLongValue();
}
/**
* Lookup existing ID or return null for String value.
* @param value the string value
* @return the existing interned string primary key, or null if nonexistent
* @return the existing interned string primary key as LongField, or null if nonexistent
* @throws IOException if the database has a problem
*/
Long lookupStringID(String value) throws IOException {
long[] records = table.findRecords(new StringField(value), STRING_VALUE_COL);
Field[] records = table.findRecords(new StringField(value), STRING_VALUE_COL);
if (records == null || records.length == 0) {
return null;
}
return records[0];
return records[0].getLongValue();
}
/**

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -14,41 +13,41 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db.util;
package ghidra.feature.vt.api.db;
import db.Field;
public class TableColumn {
private final Class<? extends Field> columnClass;
private final Field columnField;
private boolean indexed;
private int ordinal;
private String name;
public TableColumn( Class<? extends Field> columnClass ) {
this( columnClass, false );
public TableColumn(Field columnField) {
this(columnField, false);
}
public TableColumn( Class<? extends Field> columnClass, boolean isIndexed ) {
this.columnClass = columnClass;
public TableColumn(Field columnField, boolean isIndexed) {
this.columnField = columnField;
indexed = isIndexed;
}
void setName( String name ) {
this.name = name;
void setName(String name) {
this.name = name;
}
void setOrdinal( int ordinal ) {
void setOrdinal(int ordinal) {
this.ordinal = ordinal;
}
public boolean isIndexed() {
return indexed;
}
public Class<? extends Field> getColumnClass() {
return columnClass;
public Field getColumnField() {
return columnField;
}
public String name() {
@ -61,6 +60,6 @@ public class TableColumn {
@Override
public String toString() {
return name() + "("+ ordinal +")";
return name() + "(" + ordinal + ")";
}
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -14,63 +13,61 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db.util;
import ghidra.util.Msg;
package ghidra.feature.vt.api.db;
import java.util.*;
import db.Field;
import ghidra.util.Msg;
public class TableDescriptor {
private TableColumn[] columns;
protected TableDescriptor() {
this.columns = discoverTableColumns();
}
private TableColumn[] discoverTableColumns() {
Class<? extends TableDescriptor> clazz = getClass();
java.lang.reflect.Field[] fields = clazz.getFields();
List<TableColumn> list = new ArrayList<TableColumn>(fields.length);
for ( java.lang.reflect.Field field : fields ) {
for (java.lang.reflect.Field field : fields) {
Class<?> type = field.getType();
if ( !TableColumn.class.isAssignableFrom( type ) ) {
if (!TableColumn.class.isAssignableFrom(type)) {
continue;
}
try {
TableColumn column = (TableColumn) field.get( null );
column.setName( field.getName() );
column.setOrdinal( list.size() );
list.add( column );
TableColumn column = (TableColumn) field.get(null);
column.setName(field.getName());
column.setOrdinal(list.size());
list.add(column);
}
catch ( IllegalArgumentException e ) {
catch (IllegalArgumentException e) {
// shouldn't happen
}
catch ( IllegalAccessException e ) {
Msg.showError( this, null, "Class Usage Error", "You must provide public " +
"static members for your TableColumns" );
catch (IllegalAccessException e) {
Msg.showError(this, null, "Class Usage Error",
"You must provide public " + "static members for your TableColumns");
}
}
return list.toArray( new TableColumn[list.size()] );
return list.toArray(new TableColumn[list.size()]);
}
public int[] getIndexedColumns() {
int count = 0;
for ( TableColumn column : columns ) {
for (TableColumn column : columns) {
if (column.isIndexed()) {
count++;
}
}
int[] indexedColumns = new int[count];
count = 0;
for ( TableColumn column : columns ) {
for (TableColumn column : columns) {
if (column.isIndexed()) {
indexedColumns[count++] = column.column();
}
@ -80,19 +77,18 @@ public class TableDescriptor {
public String[] getColumnNames() {
List<String> list = new LinkedList<String>();
for ( TableColumn column : columns ) {
list.add( column.name() );
for (TableColumn column : columns) {
list.add(column.name());
}
return list.toArray( new String[ columns.length ] );
return list.toArray(new String[columns.length]);
}
@SuppressWarnings("unchecked") // we know our class types are safe
public Class<? extends Field>[] getColumnClasses() {
List<Class<? extends Field>> list = new LinkedList<Class<? extends Field>>();
for ( TableColumn column : columns ) {
list.add( column.getColumnClass() );
}
return list.toArray( new Class[ columns.length ] );
public Field[] getColumnFields() {
Field[] fields = new Field[columns.length];
for (int i = 0; i < fields.length; i++) {
fields[i] = columns[i].getColumnField().newField();
}
return fields;
}
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -17,48 +16,50 @@
package ghidra.feature.vt.api.db;
import static ghidra.feature.vt.api.db.VTAddressCorrelatorAdapter.AddressCorrelationTableDescriptor.*;
import ghidra.util.exception.CancelledException;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
import java.io.File;
import java.io.IOException;
import java.util.List;
import db.*;
import db.util.TableColumn;
import ghidra.util.exception.CancelledException;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
public abstract class VTAddressCorrelatorAdapter {
public static class AddressCorrelationTableDescriptor extends db.util.TableDescriptor {
public static class AddressCorrelationTableDescriptor
extends ghidra.feature.vt.api.db.TableDescriptor {
public static TableColumn SOURCE_ENTRY_COL = new TableColumn(LongField.class, true);
public static TableColumn SOURCE_ADDRESS_COL = new TableColumn(LongField.class);
public static TableColumn DESTINATION_ADDRESS_COL = new TableColumn(LongField.class);
public static AddressCorrelationTableDescriptor INSTANCE = new AddressCorrelationTableDescriptor();
public static TableColumn SOURCE_ENTRY_COL = new TableColumn(LongField.INSTANCE, true);
public static TableColumn SOURCE_ADDRESS_COL = new TableColumn(LongField.INSTANCE);
public static TableColumn DESTINATION_ADDRESS_COL = new TableColumn(LongField.INSTANCE);
public static AddressCorrelationTableDescriptor INSTANCE =
new AddressCorrelationTableDescriptor();
}
static String TABLE_NAME = "AddressCorrelationTable";
static Schema TABLE_SCHEMA = new Schema(0, "Key",
INSTANCE.getColumnClasses(), INSTANCE.getColumnNames());
static Schema TABLE_SCHEMA =
new Schema(0, "Key", INSTANCE.getColumnFields(), INSTANCE.getColumnNames());
static int[] TABLE_INDEXES = INSTANCE.getIndexedColumns();
private DBHandle dbHandle;
protected VTAddressCorrelatorAdapter(DBHandle dbHandle) {
this.dbHandle = dbHandle;
}
public static VTAddressCorrelatorAdapter createAdapter(DBHandle dbHandle) throws IOException {
return new VTAddressCorrelationAdapterV0(dbHandle);
}
public static VTAddressCorrelatorAdapter getAdapter(DBHandle dbHandle, TaskMonitor monitor)
public static VTAddressCorrelatorAdapter getAdapter(DBHandle dbHandle, TaskMonitor monitor)
throws VersionException {
return new VTAddressCorrelationAdapterV0(dbHandle, monitor);
}
abstract void createAddressRecord(long sourceEntryLong, long sourceLong, long destinationLong) throws IOException;
abstract void createAddressRecord(long sourceEntryLong, long sourceLong, long destinationLong)
throws IOException;
abstract List<Record> getAddressRecords(long sourceEntryLong) throws IOException;
@ -69,8 +70,9 @@ public abstract class VTAddressCorrelatorAdapter {
void save(TaskMonitor monitor) throws CancelledException, IOException {
dbHandle.save("", null, monitor);
}
void saveAs(File file, TaskMonitor monitor) throws CancelledException, IOException {
dbHandle.saveAs(file, true, monitor);
}
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,35 +15,36 @@
*/
package ghidra.feature.vt.api.db;
import static ghidra.feature.vt.api.db.VTAssociationTableDBAdapter.AssociationTableDescriptor.INSTANCE;
import ghidra.feature.vt.api.main.VTAssociationStatus;
import ghidra.feature.vt.api.main.VTAssociationType;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
import static ghidra.feature.vt.api.db.VTAssociationTableDBAdapter.AssociationTableDescriptor.*;
import java.io.IOException;
import java.util.Set;
import db.*;
import db.util.TableColumn;
import ghidra.feature.vt.api.main.VTAssociationStatus;
import ghidra.feature.vt.api.main.VTAssociationType;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
public abstract class VTAssociationTableDBAdapter {
public static class AssociationTableDescriptor extends db.util.TableDescriptor {
public static class AssociationTableDescriptor
extends ghidra.feature.vt.api.db.TableDescriptor {
public static TableColumn SOURCE_ADDRESS_COL = new TableColumn(LongField.class, true);
public static TableColumn DESTINATION_ADDRESS_COL = new TableColumn(LongField.class, true);
public static TableColumn TYPE_COL = new TableColumn(ByteField.class);
public static TableColumn STATUS_COL = new TableColumn(ByteField.class);
public static TableColumn APPLIED_STATUS_COL = new TableColumn(ByteField.class);
public static TableColumn VOTE_COUNT_COL = new TableColumn(IntField.class);
public static TableColumn SOURCE_ADDRESS_COL = new TableColumn(LongField.INSTANCE, true);
public static TableColumn DESTINATION_ADDRESS_COL =
new TableColumn(LongField.INSTANCE, true);
public static TableColumn TYPE_COL = new TableColumn(ByteField.INSTANCE);
public static TableColumn STATUS_COL = new TableColumn(ByteField.INSTANCE);
public static TableColumn APPLIED_STATUS_COL = new TableColumn(ByteField.INSTANCE);
public static TableColumn VOTE_COUNT_COL = new TableColumn(IntField.INSTANCE);
public static AssociationTableDescriptor INSTANCE = new AssociationTableDescriptor();
}
static String TABLE_NAME = "AssociationTable";
static Schema TABLE_SCHEMA =
new Schema(0, "Key", INSTANCE.getColumnClasses(), INSTANCE.getColumnNames());
new Schema(0, "Key", INSTANCE.getColumnFields(), INSTANCE.getColumnNames());
static int[] TABLE_INDEXES = INSTANCE.getIndexedColumns();
public static VTAssociationTableDBAdapter createAdapter(DBHandle dbHandle) throws IOException {

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,36 +15,35 @@
*/
package ghidra.feature.vt.api.db;
import static ghidra.feature.vt.api.db.VTMatchMarkupItemTableDBAdapter.MarkupTableDescriptor.INSTANCE;
import ghidra.feature.vt.api.impl.MarkupItemStorage;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
import static ghidra.feature.vt.api.db.VTMatchMarkupItemTableDBAdapter.MarkupTableDescriptor.*;
import java.io.IOException;
import db.*;
import db.util.TableColumn;
import ghidra.feature.vt.api.impl.MarkupItemStorage;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
public abstract class VTMatchMarkupItemTableDBAdapter {
public static class MarkupTableDescriptor extends db.util.TableDescriptor {
public static TableColumn ASSOCIATION_KEY_COL = new TableColumn(LongField.class, true);
public static TableColumn ADDRESS_SOURCE_COL = new TableColumn(StringField.class);
public static TableColumn DESTINATION_ADDRESS_COL = new TableColumn(LongField.class);
public static TableColumn MARKUP_TYPE_COL = new TableColumn(ShortField.class);
public static TableColumn SOURCE_ADDRESS_COL = new TableColumn(LongField.class);
public static TableColumn SOURCE_VALUE_COL = new TableColumn(StringField.class);
public static class MarkupTableDescriptor extends ghidra.feature.vt.api.db.TableDescriptor {
public static TableColumn ASSOCIATION_KEY_COL = new TableColumn(LongField.INSTANCE, true);
public static TableColumn ADDRESS_SOURCE_COL = new TableColumn(StringField.INSTANCE);
public static TableColumn DESTINATION_ADDRESS_COL = new TableColumn(LongField.INSTANCE);
public static TableColumn MARKUP_TYPE_COL = new TableColumn(ShortField.INSTANCE);
public static TableColumn SOURCE_ADDRESS_COL = new TableColumn(LongField.INSTANCE);
public static TableColumn SOURCE_VALUE_COL = new TableColumn(StringField.INSTANCE);
public static TableColumn ORIGINAL_DESTINATION_VALUE_COL =
new TableColumn(StringField.class);
public static TableColumn STATUS_COL = new TableColumn(ByteField.class);
public static TableColumn STATUS_DESCRIPTION_COL = new TableColumn(StringField.class);
new TableColumn(StringField.INSTANCE);
public static TableColumn STATUS_COL = new TableColumn(ByteField.INSTANCE);
public static TableColumn STATUS_DESCRIPTION_COL = new TableColumn(StringField.INSTANCE);
public static MarkupTableDescriptor INSTANCE = new MarkupTableDescriptor();
}
protected static String TABLE_NAME = "MatchMarkupItemTable";
static Schema TABLE_SCHEMA =
new Schema(0, "Key", INSTANCE.getColumnClasses(), INSTANCE.getColumnNames());
new Schema(0, "Key", INSTANCE.getColumnFields(), INSTANCE.getColumnNames());
protected static int[] INDEXED_COLUMNS = INSTANCE.getIndexedColumns();
@ -71,6 +69,5 @@ public abstract class VTMatchMarkupItemTableDBAdapter {
public abstract int getRecordCount();
public abstract Record createMarkupItemRecord(MarkupItemStorage markupItem)
throws IOException;
public abstract Record createMarkupItemRecord(MarkupItemStorage markupItem) throws IOException;
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,34 +15,33 @@
*/
package ghidra.feature.vt.api.db;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import db.*;
import ghidra.feature.vt.api.main.VTProgramCorrelator;
import ghidra.program.database.map.AddressMap;
import ghidra.program.model.address.AddressSet;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import db.*;
public abstract class VTMatchSetTableDBAdapter {
public enum ColumnDescription {
CORRELATOR_CLASS_COL(StringField.class),
CORRELATOR_NAME_COL(StringField.class),
OPTIONS_COL(StringField.class);
CORRELATOR_CLASS_COL(StringField.INSTANCE),
CORRELATOR_NAME_COL(StringField.INSTANCE),
OPTIONS_COL(StringField.INSTANCE);
private final Class<? extends Field> columnClass;
private final Field columnField;
private ColumnDescription(Class<? extends Field> columnClass) {
this.columnClass = columnClass;
private ColumnDescription(Field columnField) {
this.columnField = columnField;
}
public Class<? extends Field> getColumnClass() {
return columnClass;
public Field getColumnField() {
return columnField;
}
public int column() {
@ -59,20 +57,18 @@ public abstract class VTMatchSetTableDBAdapter {
return list.toArray(new String[columns.length]);
}
@SuppressWarnings("unchecked")
// we know our class types are safe
private static Class<? extends Field>[] getColumnClasses() {
private static Field[] getColumnFields() {
ColumnDescription[] columns = ColumnDescription.values();
List<Class<? extends Field>> list = new LinkedList<Class<? extends Field>>();
for (ColumnDescription column : columns) {
list.add(column.getColumnClass());
Field[] fields = new Field[columns.length];
for (int i = 0; i < fields.length; i++) {
fields[i] = columns[i].getColumnField();
}
return list.toArray(new Class[columns.length]);
return fields;
}
}
static String TABLE_NAME = "MatchSetTable";
static Schema TABLE_SCHEMA = new Schema(0, "Key", ColumnDescription.getColumnClasses(),
static Schema TABLE_SCHEMA = new Schema(0, "Key", ColumnDescription.getColumnFields(),
ColumnDescription.getColumnNames());
static VTMatchSetTableDBAdapter createAdapter(DBHandle dbHandle) throws IOException {

View file

@ -16,13 +16,6 @@
package ghidra.feature.vt.api.db;
import static ghidra.feature.vt.api.db.VTMatchSetTableDBAdapter.ColumnDescription.*;
import ghidra.feature.vt.api.main.VTProgramCorrelator;
import ghidra.framework.options.ToolOptions;
import ghidra.program.database.map.AddressMap;
import ghidra.program.model.address.*;
import ghidra.program.model.listing.Program;
import ghidra.util.exception.VersionException;
import ghidra.util.xml.GenericXMLOutputter;
import java.io.IOException;
import java.io.StringWriter;
@ -31,13 +24,20 @@ import org.jdom.Element;
import org.jdom.output.XMLOutputter;
import db.*;
import ghidra.feature.vt.api.main.VTProgramCorrelator;
import ghidra.framework.options.ToolOptions;
import ghidra.program.database.map.AddressMap;
import ghidra.program.model.address.*;
import ghidra.program.model.listing.Program;
import ghidra.util.exception.VersionException;
import ghidra.util.xml.GenericXMLOutputter;
public class VTMatchSetTableDBAdapterV0 extends VTMatchSetTableDBAdapter {
private Table table;
private static final Schema STORED_ADDRESS_RANGE_SCHEMA = new Schema(0, "Key", new Class[] {
LongField.class, LongField.class }, new String[] { "addr1", "addr2" });
private static final Schema STORED_ADDRESS_RANGE_SCHEMA = new Schema(0, "Key",
new Field[] { LongField.INSTANCE, LongField.INSTANCE }, new String[] { "addr1", "addr2" });
private final DBHandle dbHandle;
@ -46,7 +46,8 @@ public class VTMatchSetTableDBAdapterV0 extends VTMatchSetTableDBAdapter {
table = dbHandle.createTable(TABLE_NAME, TABLE_SCHEMA);
}
public VTMatchSetTableDBAdapterV0(DBHandle dbHandle, OpenMode openMode) throws VersionException {
public VTMatchSetTableDBAdapterV0(DBHandle dbHandle, OpenMode openMode)
throws VersionException {
this.dbHandle = dbHandle;
table = dbHandle.getTable(TABLE_NAME);
if (table == null) {
@ -59,7 +60,8 @@ public class VTMatchSetTableDBAdapterV0 extends VTMatchSetTableDBAdapter {
}
@Override
public Record createMatchSetRecord(long key, VTProgramCorrelator correlator) throws IOException {
public Record createMatchSetRecord(long key, VTProgramCorrelator correlator)
throws IOException {
Record record = TABLE_SCHEMA.createRecord(key);
record.setString(CORRELATOR_CLASS_COL.column(), correlator.getClass().getName());

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,36 +15,35 @@
*/
package ghidra.feature.vt.api.db;
import ghidra.feature.vt.api.main.VTMatchInfo;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import db.*;
import ghidra.feature.vt.api.main.VTMatchInfo;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
public abstract class VTMatchTableDBAdapter {
public enum ColumnDescription {
TAG_KEY_COL(LongField.class),
MATCH_SET_COL(LongField.class),
SIMILARITY_SCORE_COL(StringField.class),
CONFIDENCE_SCORE_COL(StringField.class),
LENGTH_TYPE(StringField.class),
SOURCE_LENGTH_COL(IntField.class),
DESTINATION_LENGTH_COL(IntField.class),
ASSOCIATION_COL(LongField.class);
TAG_KEY_COL(LongField.INSTANCE),
MATCH_SET_COL(LongField.INSTANCE),
SIMILARITY_SCORE_COL(StringField.INSTANCE),
CONFIDENCE_SCORE_COL(StringField.INSTANCE),
LENGTH_TYPE(StringField.INSTANCE),
SOURCE_LENGTH_COL(IntField.INSTANCE),
DESTINATION_LENGTH_COL(IntField.INSTANCE),
ASSOCIATION_COL(LongField.INSTANCE);
private final Class<? extends Field> columnClass;
private final Field columnField;
private ColumnDescription(Class<? extends Field> columnClass) {
this.columnClass = columnClass;
private ColumnDescription(Field columnField) {
this.columnField = columnField;
}
public Class<? extends Field> getColumnClass() {
return columnClass;
public Field getColumnField() {
return columnField;
}
public int column() {
@ -61,22 +59,19 @@ public abstract class VTMatchTableDBAdapter {
return list.toArray(new String[columns.length]);
}
@SuppressWarnings("unchecked")
// we know our class types are safe
private static Class<? extends Field>[] getColumnClasses() {
private static Field[] getColumnFields() {
ColumnDescription[] columns = ColumnDescription.values();
List<Class<? extends Field>> list = new LinkedList<Class<? extends Field>>();
for (ColumnDescription column : columns) {
list.add(column.getColumnClass());
Field[] fields = new Field[columns.length];
for (int i = 0; i < fields.length; i++) {
fields[i] = columns[i].getColumnField();
}
return list.toArray(new Class[columns.length]);
return fields;
}
}
static String TABLE_NAME = "MatchTable";
static Schema TABLE_SCHEMA =
new Schema(0, "Key", ColumnDescription.getColumnClasses(),
ColumnDescription.getColumnNames());
static Schema TABLE_SCHEMA = new Schema(0, "Key", ColumnDescription.getColumnFields(),
ColumnDescription.getColumnNames());
static VTMatchTableDBAdapter createAdapter(DBHandle dbHandle, long tableID) throws IOException {
return new VTMatchTableDBAdapterV0(dbHandle, tableID);

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,14 +15,13 @@
*/
package ghidra.feature.vt.api.db;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import db.*;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
/**
* Abstract adapter for the database table that holds tags for version tracking matches.
@ -31,16 +29,16 @@ import db.*;
public abstract class VTMatchTagDBAdapter {
public enum ColumnDescription {
TAG_NAME_COL(StringField.class);
TAG_NAME_COL(StringField.INSTANCE);
private final Class<? extends Field> columnClass;
private final Field columnField;
private ColumnDescription(Class<? extends Field> columnClass) {
this.columnClass = columnClass;
private ColumnDescription(Field columnField) {
this.columnField = columnField;
}
public Class<? extends Field> getColumnClass() {
return columnClass;
public Field getColumnField() {
return columnField;
}
public int column() {
@ -56,22 +54,19 @@ public abstract class VTMatchTagDBAdapter {
return list.toArray(new String[columns.length]);
}
@SuppressWarnings("unchecked")
// we know our class types are safe
private static Class<? extends Field>[] getColumnClasses() {
private static Field[] getColumnFields() {
ColumnDescription[] columns = ColumnDescription.values();
List<Class<? extends Field>> list = new LinkedList<Class<? extends Field>>();
for (ColumnDescription column : columns) {
list.add(column.getColumnClass());
Field[] fields = new Field[columns.length];
for (int i = 0; i < fields.length; i++) {
fields[i] = columns[i].getColumnField();
}
return list.toArray(new Class[columns.length]);
return fields;
}
}
static String TABLE_NAME = "MatchTagTable";
static Schema TABLE_SCHEMA =
new Schema(0, LongField.class, "Key", ColumnDescription.getColumnClasses(),
ColumnDescription.getColumnNames());
static Schema TABLE_SCHEMA = new Schema(0, "Key", ColumnDescription.getColumnFields(),
ColumnDescription.getColumnNames());
static VTMatchTagDBAdapter createAdapter(DBHandle dbHandle) throws IOException {
return new VTMatchTagDBAdapterV0(dbHandle);

View file

@ -39,10 +39,10 @@ import ghidra.util.exception.*;
import ghidra.util.task.*;
public class VTSessionDB extends DomainObjectAdapterDB implements VTSession, VTChangeManager {
private final static Class<?>[] COL_CLASS = new Class[] { StringField.class };
private final static Field[] COL_FIELDS = new Field[] { StringField.INSTANCE };
private final static String[] COL_TYPES = new String[] { "Value" };
private final static Schema SCHEMA =
new Schema(0, StringField.class, "Key", COL_CLASS, COL_TYPES);
new Schema(0, StringField.INSTANCE, "Key", COL_FIELDS, COL_TYPES);
private static final String PROGRAM_ID_PROPERTYLIST_NAME = "ProgramIDs";
private static final String SOURCE_PROGRAM_ID_PROPERTY_KEY = "SourceProgramID";
@ -55,7 +55,24 @@ public class VTSessionDB extends DomainObjectAdapterDB implements VTSession, VTC
private static final long IMPLIED_MATCH_SET_ID = -1;
private static final String PROPERTY_TABLE_NAME = "PropertyTable";
private static final String DB_VERSION_PROPERTY_NAME = "DB_VERSION";
private static final int DB_VERSION = 1;
/**
* DB_VERSION should be incremented any time a change is made to the overall
* database schema associated with any of the adapters.
* 14-Nov-2019 - version 2 - Corrected fixed length indexing implementation causing
* change in index table low-level storage for newly
* created tables.
*/
private static final int DB_VERSION = 2;
/**
* UPGRADE_REQUIRED_BFORE_VERSION should be changed to DB_VERSION any time the
* latest version requires a forced upgrade (i.e., Read-only mode not supported
* until upgrade is performed). It is assumed that read-only mode is supported
* if the data's version is >= UPGRADE_REQUIRED_BEFORE_VERSION and <= DB_VERSION.
*/
// NOTE: Schema upgrades are not currently supported
private static final int UPGRADE_REQUIRED_BEFORE_VERSION = 1;
private VTMatchSetTableDBAdapter matchSetTableAdapter;
private AssociationDatabaseManager associationManager;
@ -78,12 +95,11 @@ public class VTSessionDB extends DomainObjectAdapterDB implements VTSession, VTC
int ID = session.startTransaction("Constructing New Version Tracking Match Set");
try {
session.propertyTable = createPropertyTable(session.getDBHandle());
session.matchSetTableAdapter =
VTMatchSetTableDBAdapter.createAdapter(session.getDBHandle());
session.propertyTable = session.dbh.createTable(PROPERTY_TABLE_NAME, SCHEMA);
session.matchSetTableAdapter = VTMatchSetTableDBAdapter.createAdapter(session.dbh);
session.associationManager =
AssociationDatabaseManager.createAssociationManager(session.getDBHandle(), session);
session.matchTagAdapter = VTMatchTagDBAdapter.createAdapter(session.getDBHandle());
AssociationDatabaseManager.createAssociationManager(session.dbh, session);
session.matchTagAdapter = VTMatchTagDBAdapter.createAdapter(session.dbh);
session.initializePrograms(sourceProgram, destinationProgram);
session.createMatchSet(
new ManualMatchProgramCorrelator(sourceProgram, destinationProgram),
@ -91,6 +107,7 @@ public class VTSessionDB extends DomainObjectAdapterDB implements VTSession, VTC
session.createMatchSet(
new ImpliedMatchProgramCorrelator(sourceProgram, destinationProgram),
IMPLIED_MATCH_SET_ID);
session.updateVersion();
}
finally {
session.endTransaction(ID, true);
@ -105,21 +122,29 @@ public class VTSessionDB extends DomainObjectAdapterDB implements VTSession, VTC
return session;
}
private static Table createPropertyTable(DBHandle dbh) throws IOException {
Table table = dbh.createTable(PROPERTY_TABLE_NAME, SCHEMA);
private void updateVersion() throws IOException {
Record record = SCHEMA.createRecord(new StringField(DB_VERSION_PROPERTY_NAME));
record.setString(0, Integer.toString(DB_VERSION));
table.putRecord(record);
return table;
propertyTable.putRecord(record);
}
public static VTSessionDB getVTSession(DBHandle dbHandle, OpenMode openMode, Object consumer,
TaskMonitor monitor) throws VersionException, IOException {
VTSessionDB session = new VTSessionDB(dbHandle, consumer);
if (session.getVersion() < DB_VERSION) {
throw new VersionException("Version Tracking Sessions do not support upgrades.");
int storedVersion = session.getVersion();
if (storedVersion > DB_VERSION) {
throw new VersionException(VersionException.NEWER_VERSION, false);
}
// The following version logic holds true for DB_VERSION=2 which assumes no additional
// DB index tables will be added when open for update/upgrade. This will not hold
// true for future revisions associated with table schema changes in which case the
// UPGRADE_REQUIRED_BEFORE_VERSION value should equal DB_VERSION.
if (storedVersion < UPGRADE_REQUIRED_BEFORE_VERSION) {
throw new VersionException("Version Tracking Sessions do not support schema upgrades.");
}
session.matchSetTableAdapter =
VTMatchSetTableDBAdapter.getAdapter(session.getDBHandle(), openMode, monitor);
session.associationManager =

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,12 +15,11 @@
*/
package db;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
import java.io.IOException;
import db.buffers.DataBuffer;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
/**
* <code>BTreeNode</code> defines a common interface for all types
@ -30,17 +28,22 @@ import db.buffers.DataBuffer;
interface BTreeNode {
/**
* Return the data buffer ID associated with this node.
* @return the parent node or null if this is the root
*/
public InteriorNode getParent();
/**
* @return the data buffer ID associated with this node.
*/
public int getBufferId();
/**
* Return the data buffer associated with this node.
* @return the data buffer associated with this node.
*/
public DataBuffer getBuffer();
/**
* Return the number of keys contained within this node.
* @return the number of keys contained within this node.
*/
public int getKeyCount();
@ -50,6 +53,26 @@ interface BTreeNode {
*/
public void setKeyCount(int cnt);
/**
* Get the key value at a specific index.
* @param index key index
* @return key value
* @throws IOException thrown if an IO error occurs
*/
public Field getKeyField(int index) throws IOException;
/**
* Perform a binary search to locate the specified key and derive an index
* into the Buffer ID storage. This method is intended to find the insertion
* index or exact match for a child key. A negative value will be returned
* when an exact match is not found and may be transformed into an
* insertion index (insetIndex = -returnedIndex-1).
* @param key key to search for
* @return int buffer ID index.
* @throws IOException thrown if an IO error occurs
*/
public int getKeyIndex(Field key) throws IOException;
/**
* Delete this node and all child nodes.
* @throws IOException thrown if IO error occurs
@ -67,11 +90,12 @@ interface BTreeNode {
* Check the consistency of this node and all of its children.
* @return true if consistency check passed, else false
* @param tableName name of table containing this node
* @param monitor
* @throws IOException
* @param monitor task monitor
* @throws IOException if IO error occured
* @throws CancelledException if task cancelled
* @{@link ThrowsTag} CancelledException
*/
public boolean isConsistent(String tableName, TaskMonitor monitor) throws IOException,
CancelledException;
public boolean isConsistent(String tableName, TaskMonitor monitor)
throws IOException, CancelledException;
}

View file

@ -15,11 +15,11 @@
*/
package db;
import ghidra.util.exception.AssertException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import ghidra.util.exception.AssertException;
/**
* Allows various non-database supported data types to be
* encoded within a BinaryField which may be stored within the
@ -31,63 +31,63 @@ import java.util.ArrayList;
* support a byte array.
*/
public class BinaryCodedField extends BinaryField {
/**
* byte[] data type
*/
public static final byte BYTE_ARRAY = 0;
/**
* float data type
*/
public static final byte FLOAT = 1;
/**
* double data type
*/
public static final byte DOUBLE = 2;
/**
* short data type
*/
public static final byte SHORT_ARRAY = 3;
/**
* int[] data type
*/
public static final byte INT_ARRAY = 4;
/**
* long[] data type
*/
public static final byte LONG_ARRAY = 5;
/**
* float[] data type
*/
public static final byte FLOAT_ARRAY = 6;
/**
* double[] data type
*/
public static final byte DOUBLE_ARRAY = 7;
/**
* String[] data type
*/
public static final byte STRING_ARRAY = 8;
private static final int DATA_TYPE_OFFSET = 0;
private static final int DATA_OFFSET = 1;
private static final String STRING_ENCODING = "UTF-8";
/**
* Default constructor
*/
BinaryCodedField() {
}
/**
* Construct a coded field from an existing binary field.
* @param binField the binary field
@ -95,7 +95,7 @@ public class BinaryCodedField extends BinaryField {
public BinaryCodedField(BinaryField binField) {
data = binField.getBinaryData();
}
/**
* Construct a coded field from a double value.
* @param value the double value
@ -106,7 +106,7 @@ public class BinaryCodedField extends BinaryField {
buffer.putLong(DATA_OFFSET, Double.doubleToLongBits(value));
data = buffer.getData();
}
/**
* Construct a coded field from a float value.
* @param value the float value
@ -117,7 +117,7 @@ public class BinaryCodedField extends BinaryField {
buffer.putInt(DATA_OFFSET, Float.floatToIntBits(value));
data = buffer.getData();
}
/**
* Construct a coded field from a byte array.
* @param values byte array
@ -125,121 +125,121 @@ public class BinaryCodedField extends BinaryField {
public BinaryCodedField(byte[] values) {
if (values != null) {
data = new byte[values.length + 2];
data[DATA_OFFSET] = (byte)0;
data[DATA_OFFSET] = (byte) 0;
System.arraycopy(values, 0, data, 2, values.length);
}
else {
data = new byte[2];
data[DATA_OFFSET] = (byte)-1;
data[DATA_OFFSET] = (byte) -1;
}
data[DATA_TYPE_OFFSET] = BYTE_ARRAY;
}
/**
* Construct a coded field from a short array.
* @param values short array
*/
public BinaryCodedField(short[] values) {
int len = (values != null ? (2*values.length) : 0) + 2;
int len = (values != null ? (2 * values.length) : 0) + 2;
BinaryDataBuffer buffer = new BinaryDataBuffer(len);
buffer.putByte(DATA_TYPE_OFFSET, SHORT_ARRAY);
if (values != null) {
int offset = DATA_OFFSET;
buffer.putByte(offset++, (byte)0);
buffer.putByte(offset++, (byte) 0);
for (int i = 0; i < values.length; i++) {
offset = buffer.putShort(offset, values[i]);
}
}
else {
buffer.putByte(DATA_OFFSET, (byte)-1);
buffer.putByte(DATA_OFFSET, (byte) -1);
}
data = buffer.getData();
}
/**
* Construct a coded field from a int array.
* @param values int array
*/
public BinaryCodedField(int[] values) {
int len = (values != null ? (4*values.length) : 0) + 2;
int len = (values != null ? (4 * values.length) : 0) + 2;
BinaryDataBuffer buffer = new BinaryDataBuffer(len);
buffer.putByte(DATA_TYPE_OFFSET, INT_ARRAY);
if (values != null) {
int offset = DATA_OFFSET;
buffer.putByte(offset++, (byte)0);
buffer.putByte(offset++, (byte) 0);
for (int i = 0; i < values.length; i++) {
offset = buffer.putInt(offset, values[i]);
}
}
else {
buffer.putByte(DATA_OFFSET, (byte)-1);
buffer.putByte(DATA_OFFSET, (byte) -1);
}
data = buffer.getData();
}
/**
* Construct a coded field from a long array.
* @param values long array
*/
public BinaryCodedField(long[] values) {
int len = (values != null ? (8*values.length) : 0) + 2;
int len = (values != null ? (8 * values.length) : 0) + 2;
BinaryDataBuffer buffer = new BinaryDataBuffer(len);
buffer.putByte(DATA_TYPE_OFFSET, LONG_ARRAY);
if (values != null) {
int offset = DATA_OFFSET;
buffer.putByte(offset++, (byte)0);
buffer.putByte(offset++, (byte) 0);
for (int i = 0; i < values.length; i++) {
offset = buffer.putLong(offset, values[i]);
}
}
else {
buffer.putByte(DATA_OFFSET, (byte)-1);
buffer.putByte(DATA_OFFSET, (byte) -1);
}
data = buffer.getData();
}
/**
* Construct a coded field from a float array.
* @param values float array
*/
public BinaryCodedField(float[] values) {
int len = (values != null ? (4*values.length) : 0) + 2;
int len = (values != null ? (4 * values.length) : 0) + 2;
BinaryDataBuffer buffer = new BinaryDataBuffer(len);
buffer.putByte(DATA_TYPE_OFFSET, FLOAT_ARRAY);
if (values != null) {
int offset = DATA_OFFSET;
buffer.putByte(offset++, (byte)0);
buffer.putByte(offset++, (byte) 0);
for (int i = 0; i < values.length; i++) {
offset = buffer.putInt(offset, Float.floatToIntBits(values[i]));
}
}
else {
buffer.putByte(DATA_OFFSET, (byte)-1);
buffer.putByte(DATA_OFFSET, (byte) -1);
}
data = buffer.getData();
}
/**
* Construct a coded field from a double array.
* @param values double array
*/
public BinaryCodedField(double[] values) {
int len = (values != null ? (8*values.length) : 0) + 2;
int len = (values != null ? (8 * values.length) : 0) + 2;
BinaryDataBuffer buffer = new BinaryDataBuffer(len);
buffer.putByte(DATA_TYPE_OFFSET, DOUBLE_ARRAY);
if (values != null) {
int offset = DATA_OFFSET;
buffer.putByte(offset++, (byte)0);
buffer.putByte(offset++, (byte) 0);
for (int i = 0; i < values.length; i++) {
offset = buffer.putLong(offset, Double.doubleToLongBits(values[i]));
}
}
else {
buffer.putByte(DATA_OFFSET, (byte)-1);
buffer.putByte(DATA_OFFSET, (byte) -1);
}
data = buffer.getData();
}
/**
* Construct a coded field from a String array.
* @param strings String array
@ -256,29 +256,31 @@ public class BinaryCodedField extends BinaryField {
}
buffer = new BinaryDataBuffer(len);
int offset = DATA_OFFSET;
buffer.putByte(offset++, (byte)0);
buffer.putByte(offset++, (byte) 0);
try {
for (int i = 0; i < strings.length; i++) {
if (strings[i] == null) {
offset = buffer.putInt(offset, -1);
} else {
}
else {
byte[] bytes = strings[i].getBytes(STRING_ENCODING);
offset = buffer.putInt(offset, bytes.length);
offset = buffer.put(offset, bytes);
}
}
} catch (UnsupportedEncodingException e) {
}
catch (UnsupportedEncodingException e) {
throw new AssertException();
}
}
else {
buffer = new BinaryDataBuffer(2);
buffer.putByte(DATA_OFFSET, (byte)-1);
buffer.putByte(DATA_OFFSET, (byte) -1);
}
buffer.putByte(DATA_TYPE_OFFSET, STRING_ARRAY);
data = buffer.getData();
}
/**
* Get the data type associated with this field.
* @return data type
@ -286,7 +288,7 @@ public class BinaryCodedField extends BinaryField {
public byte getDataType() {
return data[DATA_TYPE_OFFSET];
}
/**
* Get the double value contained with this field.
* @return double value
@ -299,7 +301,7 @@ public class BinaryCodedField extends BinaryField {
BinaryDataBuffer buffer = new BinaryDataBuffer(data);
return Double.longBitsToDouble(buffer.getLong(DATA_OFFSET));
}
/**
* Get the float value contained with this field.
* @return float value
@ -312,7 +314,7 @@ public class BinaryCodedField extends BinaryField {
BinaryDataBuffer buffer = new BinaryDataBuffer(data);
return Float.intBitsToFloat(buffer.getInt(DATA_OFFSET));
}
/**
* Get the byte array contained with this field.
* @return byte array
@ -329,7 +331,7 @@ public class BinaryCodedField extends BinaryField {
System.arraycopy(data, 2, values, 0, values.length);
return values;
}
/**
* Get the short array contained with this field.
* @return short array
@ -342,7 +344,7 @@ public class BinaryCodedField extends BinaryField {
if (data[DATA_OFFSET] < 0) {
return null;
}
short[] values = new short[(data.length -2) / 2];
short[] values = new short[(data.length - 2) / 2];
BinaryDataBuffer buffer = new BinaryDataBuffer(data);
int offset = DATA_OFFSET + 1;
for (int i = 0; i < values.length; i++) {
@ -351,7 +353,7 @@ public class BinaryCodedField extends BinaryField {
}
return values;
}
/**
* Get the int array contained with this field.
* @return int array
@ -364,7 +366,7 @@ public class BinaryCodedField extends BinaryField {
if (data[DATA_OFFSET] < 0) {
return null;
}
int[] values = new int[(data.length -2) / 4];
int[] values = new int[(data.length - 2) / 4];
BinaryDataBuffer buffer = new BinaryDataBuffer(data);
int offset = DATA_OFFSET + 1;
for (int i = 0; i < values.length; i++) {
@ -373,7 +375,7 @@ public class BinaryCodedField extends BinaryField {
}
return values;
}
/**
* Get the long array contained with this field.
* @return long array
@ -386,7 +388,7 @@ public class BinaryCodedField extends BinaryField {
if (data[DATA_OFFSET] < 0) {
return null;
}
long[] values = new long[(data.length -2) / 8];
long[] values = new long[(data.length - 2) / 8];
BinaryDataBuffer buffer = new BinaryDataBuffer(data);
int offset = DATA_OFFSET + 1;
for (int i = 0; i < values.length; i++) {
@ -395,7 +397,7 @@ public class BinaryCodedField extends BinaryField {
}
return values;
}
/**
* Get the float array contained with this field.
* @return float array
@ -408,7 +410,7 @@ public class BinaryCodedField extends BinaryField {
if (data[DATA_OFFSET] < 0) {
return null;
}
float[] values = new float[(data.length -2) / 4];
float[] values = new float[(data.length - 2) / 4];
BinaryDataBuffer buffer = new BinaryDataBuffer(data);
int offset = DATA_OFFSET + 1;
for (int i = 0; i < values.length; i++) {
@ -417,7 +419,7 @@ public class BinaryCodedField extends BinaryField {
}
return values;
}
/**
* Get the double array contained with this field.
* @return double array
@ -430,7 +432,7 @@ public class BinaryCodedField extends BinaryField {
if (data[DATA_OFFSET] < 0) {
return null;
}
double[] values = new double[(data.length -2) / 8];
double[] values = new double[(data.length - 2) / 8];
BinaryDataBuffer buffer = new BinaryDataBuffer(data);
int offset = DATA_OFFSET + 1;
for (int i = 0; i < values.length; i++) {
@ -439,7 +441,7 @@ public class BinaryCodedField extends BinaryField {
}
return values;
}
/**
* Get the String array contained with this field.
* @return String array
@ -463,11 +465,13 @@ public class BinaryCodedField extends BinaryField {
byte[] bytes = buffer.get(offset, len);
strList.add(new String(bytes, STRING_ENCODING));
offset += len;
} else {
strList.add(null);
}
else {
strList.add(null);
}
}
} catch (UnsupportedEncodingException e) {
}
catch (UnsupportedEncodingException e) {
throw new AssertException();
}
String[] strings = new String[strList.size()];

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -19,13 +18,21 @@ package db;
import java.io.IOException;
import java.util.Arrays;
import db.buffers.DataBuffer;
/**
* <code>BinaryField</code> provides a wrapper for variable length binary data which is read or
* written to a Record.
*/
public class BinaryField extends Field {
/**
* Instance intended for defining a {@link Table} {@link Schema}
*/
public static final BinaryField INSTANCE = new BinaryField(null, true);
protected byte[] data;
private Integer hashcode;
/**
* Construct a binary data field with an initial value of null.
@ -38,36 +45,41 @@ public class BinaryField extends Field {
* @param data initial value
*/
public BinaryField(byte[] data) {
this(data, false);
}
/**
* Construct a binary data field with an initial value of data.
* @param data initial value
* @param immutable true if field value is immutable
*/
BinaryField(byte[] data, boolean immutable) {
super(immutable);
this.data = data;
}
/*
* @see ghidra.framework.store.db.Field#getBinaryData()
*/
@Override
void checkImmutable() {
super.checkImmutable();
hashcode = null;
}
@Override
public byte[] getBinaryData() {
return data;
}
/*
* @see ghidra.framework.store.db.Field#setBinaryData(byte[])
*/
@Override
public void setBinaryData(byte[] data) {
checkImmutable();
this.data = data;
}
/*
* @see ghidra.framework.store.db.Field#length()
*/
@Override
int length() {
return (data == null) ? 4 : (data.length + 4);
}
/*
* @see ghidra.framework.store.db.Field#write(ghidra.framework.store.Buffer, int)
*/
@Override
int write(Buffer buf, int offset) throws IOException {
if (data == null) {
@ -77,11 +89,9 @@ public class BinaryField extends Field {
return buf.put(offset, data);
}
/*
* @see ghidra.framework.store.db.Field#read(ghidra.framework.store.Buffer, int)
*/
@Override
int read(Buffer buf, int offset) throws IOException {
checkImmutable();
int len = buf.getInt(offset);
offset += 4;
if (len < 0) {
@ -94,97 +104,25 @@ public class BinaryField extends Field {
return offset;
}
/*
* @see ghidra.framework.store.db.Field#readLength(ghidra.framework.store.Buffer, int)
*/
@Override
int readLength(Buffer buf, int offset) throws IOException {
int len = buf.getInt(offset);
return (len < 0 ? 0 : len) + 4;
}
/*
* @see ghidra.framework.store.db.Field#isVariableLength()
*/
@Override
public boolean isVariableLength() {
return true;
}
/*
* @see ghidra.framework.store.db.Field#getFieldType()
*/
@Override
protected byte getFieldType() {
byte getFieldType() {
return BINARY_OBJ_TYPE;
}
/*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
if (data == null) {
return "BinaryField: null";
}
return "BinaryField[" + data.length + "] = " + getValueAsString();
}
@Override
public String getValueAsString() {
StringBuffer buf = new StringBuffer();
int i = 0;
for (; i < 24 && i < data.length; i++) {
String b = Integer.toHexString(data[i] & 0xff);
if (b.length() == 1) {
buf.append('0');
}
buf.append(b);
buf.append(' ');
}
if (i < data.length) {
buf.append("...");
}
return buf.toString();
}
/*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (obj == null || !(obj instanceof BinaryField))
return false;
BinaryField f = (BinaryField) obj;
return Arrays.equals(f.data, data);
}
// /**
// * Get first 8 bytes of data as long value.
// * First data byte corresponds to most significant byte
// * of long value so that proper sign is preserved.
// * If data is null, Long.MIN_VALUE is returned.
// * @see ghidra.framework.store.db.Field#getLongValue()
// */
// public long getLongValue() {
// long value = 0;
// if (data == null) {
// return Long.MIN_VALUE;
// }
// for (int i = 0; i < 8 && i < data.length; i++) {
// value = (value << 8) | ((long)data[i] & 0x000000ff);
// }
// if (data.length < 8) {
// value = value << (8 * (8 - data.length));
// }
// return value;
// }
/*
* @see ghidra.framework.store.db.Field#truncate(int)
*/
@Override
void truncate(int length) {
checkImmutable();
int maxLen = length - 4;
if (data != null && data.length > maxLen) {
byte[] newData = new byte[maxLen];
@ -193,9 +131,6 @@ public class BinaryField extends Field {
}
}
/*
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override
public int compareTo(Field o) {
BinaryField f = (BinaryField) o;
@ -224,28 +159,105 @@ public class BinaryField extends Field {
return len1 - len2;
}
/*
* @see ghidra.framework.store.db.Field#newField(ghidra.framework.store.db.Field)
*/
@Override
public Field newField(Field fieldValue) {
return new BinaryField(fieldValue.getBinaryData());
int compareTo(DataBuffer buffer, int offset) {
int len = buffer.getInt(offset);
if (data == null) {
if (len < 0) {
return 0;
}
return -1;
}
else if (len < 0) {
return 1;
}
return -buffer.unsignedCompareTo(data, offset + 4, len);
}
/*
* @see ghidra.framework.store.db.Field#newField()
*/
@Override
public Field newField() {
public BinaryField copyField() {
return new BinaryField(getBinaryData().clone());
}
@Override
public BinaryField newField() {
return new BinaryField();
}
/*
* @see java.lang.Object#hashCode()
*/
@Override
BinaryField getMinValue() {
throw new UnsupportedOperationException();
}
@Override
BinaryField getMaxValue() {
throw new UnsupportedOperationException();
}
@Override
public boolean equals(Object obj) {
if (obj == null || obj.getClass() != getClass())
return false;
BinaryField f = (BinaryField) obj;
return Arrays.equals(f.data, data);
}
@Override
public int hashCode() {
return data.hashCode();
if (hashcode == null) {
int h = 0;
if (data != null) {
for (byte b : data) {
h = 31 * h + (b & 0xff);
}
}
hashcode = h;
}
return hashcode;
}
/// Methods below should not use data field directly
@Override
public String toString() {
String classname = getClass().getSimpleName();
byte[] d = getBinaryData();
if (d == null) {
return classname + ": null";
}
return classname = "[" + d.length + "] = 0x" + getValueAsString(d);
}
@Override
public String getValueAsString() {
byte[] d = getBinaryData();
if (d == null) {
return "null";
}
return "{" + getValueAsString(d) + "}";
}
/**
* Get format value string for byte array
* @param data byte array
* @return formatted value string
*/
public static String getValueAsString(byte[] data) {
StringBuffer buf = new StringBuffer();
int i = 0;
for (; i < 24 && i < data.length; i++) {
String b = Integer.toHexString(data[i] & 0xff);
if (b.length() == 1) {
buf.append('0');
}
buf.append(b);
buf.append(' ');
}
if (i < data.length) {
buf.append("...");
}
return buf.toString();
}
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,15 +15,30 @@
*/
package db;
import ghidra.util.exception.AssertException;
import java.io.IOException;
import db.buffers.DataBuffer;
/**
* <code>BooleanField</code> provides a wrapper for boolean data which is read or
* written to a Record.
*/
public class BooleanField extends Field {
public final class BooleanField extends Field {
/**
* Minimum boolean field value (FALSE)
*/
public static final BooleanField MIN_VALUE = new BooleanField(false, true);
/**
* Maximum boolean field value (TRUE)
*/
public static final BooleanField MAX_VALUE = new BooleanField(true, true);
/**
* Instance intended for defining a {@link Table} {@link Schema}
*/
public static final BooleanField INSTANCE = MIN_VALUE;
private byte value;
@ -39,70 +53,57 @@ public class BooleanField extends Field {
* @param b initial value
*/
public BooleanField(boolean b) {
this(b, false);
}
/**
* Construct a boolean data field with an initial value of b.
* @param b initial value
* @param immutable true if field value is immutable
*/
BooleanField(boolean b, boolean immutable) {
super(immutable);
value = b ? (byte) 1 : (byte) 0;
}
/*
* @see ghidra.framework.store.db.Field#getBooleanValue()
*/
@Override
public boolean getBooleanValue() {
return (value == 0) ? false : true;
}
/*
* @see ghidra.framework.store.db.Field#setBooleanValue(boolean)
*/
@Override
public void setBooleanValue(boolean b) {
checkImmutable();
this.value = b ? (byte) 1 : (byte) 0;
}
/*
* @see ghidra.framework.store.db.Field#length()
*/
@Override
int length() {
return 1;
}
/*
* @see ghidra.framework.store.db.Field#write(ghidra.framework.store.Buffer, int)
*/
@Override
int write(Buffer buf, int offset) throws IOException {
return buf.putByte(offset, value);
}
/*
* @see ghidra.framework.store.db.Field#read(ghidra.framework.store.Buffer, int)
*/
@Override
int read(Buffer buf, int offset) throws IOException {
checkImmutable();
value = buf.getByte(offset);
return offset + 1;
}
/*
* @see ghidra.framework.store.db.Field#readLength(ghidra.framework.store.Buffer, int)
*/
@Override
int readLength(Buffer buf, int offset) throws IOException {
return 1;
}
/*
* @see ghidra.framework.store.db.Field#getFieldType()
*/
@Override
protected byte getFieldType() {
byte getFieldType() {
return BOOLEAN_TYPE;
}
/*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return "BooleanField: " + Boolean.toString(getBooleanValue());
@ -113,9 +114,6 @@ public class BooleanField extends Field {
return Boolean.toString(getBooleanValue());
}
/*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (obj == null || !(obj instanceof BooleanField))
@ -124,9 +122,6 @@ public class BooleanField extends Field {
return otherField.value == value;
}
/*
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override
public int compareTo(Field o) {
BooleanField f = (BooleanField) o;
@ -137,44 +132,58 @@ public class BooleanField extends Field {
return 1;
}
/*
* @see ghidra.framework.store.db.Field#newField(ghidra.framework.store.db.Field)
*/
@Override
public Field newField(Field fieldValue) {
if (fieldValue.isVariableLength())
throw new AssertException();
return new BooleanField(fieldValue.getLongValue() != 0);
int compareTo(DataBuffer buffer, int offset) {
byte otherValue = buffer.getByte(offset);
if (value == otherValue)
return 0;
else if (value < otherValue)
return -1;
return 1;
}
/*
* @see ghidra.framework.store.db.Field#newField()
*/
@Override
public Field newField() {
public BooleanField copyField() {
return new BooleanField(getLongValue() != 0);
}
@Override
public BooleanField newField() {
return new BooleanField();
}
/*
* @see ghidra.framework.store.db.Field#getLongValue()
*/
@Override
public long getLongValue() {
return value;
}
/*
* @see ghidra.framework.store.db.Field#getBinaryData()
*/
@Override
public byte[] getBinaryData() {
return new byte[] { value };
}
@Override
public void setBinaryData(byte[] bytes) {
checkImmutable();
if (bytes.length != 1) {
throw new IllegalFieldAccessException();
}
value = bytes[0];
}
@Override
public int hashCode() {
// TODO Auto-generated method stub
return value;
}
@Override
BooleanField getMinValue() {
return MIN_VALUE;
}
@Override
BooleanField getMaxValue() {
return MAX_VALUE;
}
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -23,20 +22,20 @@ import java.io.IOException;
* providing various data access methods.
*/
public interface Buffer {
/**
* Get the buffer ID for this buffer.
* @return int
*/
public int getId();
/**
* Get the length of the buffer in bytes. The length reflects the number of
* bytes which have been allocated to the buffer.
* @return length of allocated buffer.
*/
public int length();
/**
* Get the byte data located at the specified offset and store into the
* bytes array provided.
@ -62,10 +61,11 @@ public interface Buffer {
* underlying storage.
*/
public void get(int offset, byte[] data, int dataOffset, int length) throws IOException;
/**
* Get the byte data located at the specified offset.
* @param offset byte offset from start of buffer.
* @param length number of bytes to be read and returned
* @return the byte array.
* @throws ArrayIndexOutOfBoundsException is thrown if an invalid offset is
* specified or the end of the buffer was encountered while reading the
@ -74,7 +74,7 @@ public interface Buffer {
* underlying storage.
*/
public byte[] get(int offset, int length) throws IOException;
/**
* Get the 8-bit byte value located at the specified offset.
* @param offset byte offset from start of buffer.
@ -85,7 +85,7 @@ public interface Buffer {
* underlying storage.
*/
public byte getByte(int offset) throws IOException;
/**
* Get the 32-bit integer value located at the specified offset.
* @param offset byte offset from start of buffer.
@ -97,7 +97,7 @@ public interface Buffer {
* underlying storage.
*/
public int getInt(int offset) throws IOException;
/**
* Get the 16-bit short value located at the specified offset.
* @param offset byte offset from start of buffer.
@ -109,7 +109,7 @@ public interface Buffer {
* underlying storage.
*/
public short getShort(int offset) throws IOException;
/**
* Get the 64-bit long value located at the specified offset.
* @param offset byte offset from start of buffer.
@ -121,7 +121,7 @@ public interface Buffer {
* underlying storage.
*/
public long getLong(int offset) throws IOException;
/**
* Put a specified number of bytes from the array provided into the buffer
* at the specified offset. The number of bytes stored is specified by the
@ -153,7 +153,7 @@ public interface Buffer {
* underlying storage.
*/
public int put(int offset, byte[] bytes) throws IOException;
/**
* Put the 8-bit byte value into the buffer at the specified offset.
* @param offset byte offset from start of buffer.
@ -165,7 +165,7 @@ public interface Buffer {
* underlying storage.
*/
public int putByte(int offset, byte b) throws IOException;
/**
* Put the 32-bit integer value into the buffer at the specified offset.
* @param offset byte offset from start of buffer.
@ -178,7 +178,7 @@ public interface Buffer {
* underlying storage.
*/
public int putInt(int offset, int v) throws IOException;
/**
* Put the 16-bit short value into the buffer at the specified offset.
* @param offset byte offset from start of buffer.
@ -191,7 +191,7 @@ public interface Buffer {
* underlying storage.
*/
public int putShort(int offset, short v) throws IOException;
/**
* Put the 64-bit long value into the buffer at the specified offset.
* @param offset byte offset from start of buffer.
@ -204,5 +204,5 @@ public interface Buffer {
* underlying storage.
*/
public int putLong(int offset, long v) throws IOException;
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,15 +15,30 @@
*/
package db;
import ghidra.util.exception.AssertException;
import java.io.IOException;
import db.buffers.DataBuffer;
/**
* <code>ByteField</code> provides a wrapper for single signed byte data
* which is read or written to a Record.
*/
public class ByteField extends Field {
public final class ByteField extends Field {
/**
* Minimum byte field value
*/
public static final ByteField MIN_VALUE = new ByteField(Byte.MIN_VALUE, true);
/**
* Maximum byte field value
*/
public static final ByteField MAX_VALUE = new ByteField(Byte.MAX_VALUE, true);
/**
* Instance intended for defining a {@link Table} {@link Schema}
*/
public static final ByteField INSTANCE = MIN_VALUE;
private byte value;
@ -39,69 +53,57 @@ public class ByteField extends Field {
* @param b initial value
*/
public ByteField(byte b) {
this(b, false);
}
/**
* Construct a byte field with an initial value of b.
* @param b initial value
* @param immutable true if field value is immutable
*/
ByteField(byte b, boolean immutable) {
super(immutable);
value = b;
}
/*
* @see ghidra.framework.store.db.Field#getByteValue()
*/
@Override
public byte getByteValue() {
return value;
}
/*
* @see ghidra.framework.store.db.Field#setByteValue(byte)
*/
@Override
public void setByteValue(byte value) {
checkImmutable();
this.value = value;
}
/*
* @see ghidra.framework.store.db.Field#length()
*/
@Override
int length() {
return 1;
}
/*
* @see ghidra.framework.store.db.Field#write(ghidra.framework.store.Buffer, int)
*/
@Override
int write(Buffer buf, int offset) throws IOException {
return buf.putByte(offset, value);
}
/*
* @see ghidra.framework.store.db.Field#read(ghidra.framework.store.Buffer, int)
*/
@Override
int read(Buffer buf, int offset) throws IOException {
checkImmutable();
value = buf.getByte(offset);
return offset + 1;
}
/*
* @see ghidra.framework.store.db.Field#readLength(ghidra.framework.store.Buffer, int)
*/
@Override
int readLength(Buffer buf, int offset) throws IOException {
return 1;
}
/*
* @see ghidra.framework.store.db.Field#getFieldType()
*/
@Override
protected byte getFieldType() {
byte getFieldType() {
return BYTE_TYPE;
}
/*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return "Byte: " + Byte.toString(value);
@ -109,12 +111,9 @@ public class ByteField extends Field {
@Override
public String getValueAsString() {
return Integer.toHexString(value);
return "0x" + Integer.toHexString(value);
}
/*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (obj == null || !(obj instanceof ByteField))
@ -122,9 +121,6 @@ public class ByteField extends Field {
return ((ByteField) obj).value == value;
}
/*
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override
public int compareTo(Field o) {
ByteField f = (ByteField) o;
@ -135,54 +131,63 @@ public class ByteField extends Field {
return 1;
}
/*
* @see ghidra.framework.store.db.Field#newField(ghidra.framework.store.db.Field)
*/
@Override
public Field newField(Field fieldValue) {
if (fieldValue.isVariableLength())
throw new AssertException();
return new ByteField((byte) fieldValue.getLongValue());
int compareTo(DataBuffer buffer, int offset) {
byte otherValue = buffer.getByte(offset);
if (value == otherValue)
return 0;
else if (value < otherValue)
return -1;
return 1;
}
/*
* @see ghidra.framework.store.db.Field#newField()
*/
@Override
public Field newField() {
public ByteField copyField() {
return new ByteField((byte) getLongValue());
}
@Override
public ByteField newField() {
return new ByteField();
}
/*
* @see ghidra.framework.store.db.Field#getLongValue()
*/
@Override
public long getLongValue() {
return value;
}
/*
* @see ghidra.framework.store.db.Field#setLongValue(long)
*/
@Override
public void setLongValue(long value) {
this.value = (byte) value;
setByteValue((byte) value);
}
/*
* @see ghidra.framework.store.db.Field#getBinaryData()
*/
@Override
public byte[] getBinaryData() {
return new byte[] { value };
}
/*
* @see java.lang.Object#hashCode()
*/
@Override
public void setBinaryData(byte[] bytes) {
checkImmutable();
if (bytes.length != 1) {
throw new IllegalFieldAccessException();
}
value = bytes[0];
}
@Override
public int hashCode() {
return value;
}
@Override
ByteField getMinValue() {
return MIN_VALUE;
}
@Override
ByteField getMaxValue() {
return MAX_VALUE;
}
}

View file

@ -123,7 +123,7 @@ public class ChainedBuffer implements Buffer {
* @param unintializedDataSourceOffset uninitialized data source offset which corresponds to
* this buffers contents.
* @param bufferMgr database buffer manager
* @throws IOException
* @throws IOException thrown if an IO error occurs
*/
public ChainedBuffer(int size, boolean enableObfuscation, Buffer uninitializedDataSource,
int unintializedDataSourceOffset, BufferMgr bufferMgr) throws IOException {
@ -171,7 +171,7 @@ public class ChainedBuffer implements Buffer {
* @param size {@literal buffer size (0 < size <= 0x7fffffff)}
* @param enableObfuscation true to enable xor-ing of stored data to facilitate data obfuscation.
* @param bufferMgr database buffer manager
* @throws IOException
* @throws IOException thrown if an IO error occurs
*/
public ChainedBuffer(int size, boolean enableObfuscation, BufferMgr bufferMgr)
throws IOException {
@ -183,7 +183,7 @@ public class ChainedBuffer implements Buffer {
* This method may only be invoked while a database transaction is in progress.
* @param size {@literal buffer size (0 < size <= 0x7fffffff)}
* @param bufferMgr database buffer manager
* @throws IOException
* @throws IOException thrown if an IO error occurs
*/
public ChainedBuffer(int size, BufferMgr bufferMgr) throws IOException {
this(size, false, null, 0, bufferMgr);
@ -198,7 +198,7 @@ public class ChainedBuffer implements Buffer {
* This should not be specified if buffer will be completely filled/initialized.
* @param unintializedDataSourceOffset uninitialized data source offset which corresponds to
* this buffers contents.
* @throws IOException
* @throws IOException thrown if an IO error occurs
*/
public ChainedBuffer(BufferMgr bufferMgr, int bufferId, Buffer uninitializedDataSource,
int unintializedDataSourceOffset) throws IOException {
@ -238,6 +238,7 @@ public class ChainedBuffer implements Buffer {
* Construct an existing chained buffer.
* @param bufferMgr database buffer manager
* @param bufferId database buffer ID which corresponds to a stored ChainedBuffer
* @throws IOException thrown if an IO error occurs
*/
public ChainedBuffer(BufferMgr bufferMgr, int bufferId) throws IOException {
this(bufferMgr, bufferId, null, 0);
@ -249,12 +250,12 @@ public class ChainedBuffer implements Buffer {
}
/**
* Generate the XOR value for the specified byteValue which is located at the
* Generate the XOR'd value for the specified byteValue which is located at the
* specified bufferOffset.
* @param bufferOffset offset within a single chained buffer, valid values are in the
* range 0 to (dataSpace-1).
* @param byteValue
* @return
* range 0 to (dataSpace-1). This value is used to determine the appropriate XOR mask.
* @param byteValue value to be XOR'd against appropriate mask value
* @return XOR'd value
*/
private byte xorMaskByte(int bufferOffset, byte byteValue) {
byte maskByte = XOR_MASK_BYTES[bufferOffset % XOR_MASK_BYTES.length];
@ -267,7 +268,7 @@ public class ChainedBuffer implements Buffer {
* @param bufferOffset offset within a single chained buffer, valid values are in the
* range 0 to (dataSpace-1). The value (bufferOffset+len-1) must be less than dataSpace.
* @param len mask length (2, 4, or 8)
* @return
* @return XOR mask of specified length which corresponds to specified bufferOffset.
*/
private long getXorMask(int bufferOffset, int len) {
long mask = 0;
@ -284,8 +285,9 @@ public class ChainedBuffer implements Buffer {
* The same uninitialized read-only dataSource used for a chained buffer should be re-applied
* anytime this chained buffer is re-instantiated.
*
* @param dataSource
* @param dataSourceOffset
* @param dataSource data source for unitilized bytes
* @param dataSourceOffset offset within dataSource which corresponds to first byte of
* this chained buffer.
*/
private void setUnintializedDataSource(Buffer dataSource, int dataSourceOffset) {
@ -321,6 +323,7 @@ public class ChainedBuffer implements Buffer {
/**
* Return the maximum number of buffers consumed by the storage of this DBBuffer object.
* The actual number may be less if data has not been written to the entire buffer.
* @return total number of buffers consumed by this ChaninedBuffer.
*/
int getBufferCount() {
return dataBufferIdTable.length +
@ -734,7 +737,7 @@ public class ChainedBuffer implements Buffer {
* The index buffer provided is always released.
* @param indexBuffer the last index buffer.
* @return DataBuffer
* @throws IOException
* @throws IOException thrown if an IO error occurs
*/
private DataBuffer appendIndexBuffer(DataBuffer indexBuffer) throws IOException {
try {
@ -856,6 +859,7 @@ public class ChainedBuffer implements Buffer {
/**
* Delete and release all underlying DataBuffers.
* @throws IOException thrown if an IO error occurs
*/
public synchronized void delete() throws IOException {
if (readOnly) {
@ -1115,6 +1119,7 @@ public class ChainedBuffer implements Buffer {
* @param startOffset starting offset, inclusive
* @param endOffset ending offset, exclusive
* @param fillByte byte value
* @throws IOException thrown if an IO error occurs
*/
public synchronized void fill(int startOffset, int endOffset, byte fillByte)
throws IOException {
@ -1160,7 +1165,7 @@ public class ChainedBuffer implements Buffer {
* @return int actual number of bytes written.
* This could be smaller than length if the end of buffer is
* encountered while writing data.
* @throws IOException
* @throws IOException thrown if an IO error occurs
*/
private int putBytes(int index, int bufferDataOffset, byte[] data, int dataOffset, int length)
throws IOException {
@ -1370,9 +1375,6 @@ public class ChainedBuffer implements Buffer {
return offset + 8;
}
/*
* @see ghidra.framework.store.Buffer#putShort(int, short)
*/
@Override
public synchronized int putShort(int offset, short v) throws IOException {
if (readOnly) {
@ -1406,7 +1408,7 @@ public class ChainedBuffer implements Buffer {
* Get a data buffer.
* @param index index of within buffer chain
* @return requested data buffer.
* @throws IOException
* @throws IOException thrown if an IO error occurs
*/
private DataBuffer getBuffer(int index) throws IOException {
// if databufferIdTable is null, index must be null. let it throw null pointer in this case.
@ -1425,7 +1427,7 @@ public class ChainedBuffer implements Buffer {
* Initialize specified DataBuffer which corresponds to the chain index.
* @param chainBufferIndex chain buffer index
* @param buf newly allocated database buffer
* @throws IOException
* @throws IOException thrown if an IO error occurs
*/
private void initializeAllocatedBuffer(int chainBufferIndex, DataBuffer buf)
throws IOException {
@ -1455,7 +1457,7 @@ public class ChainedBuffer implements Buffer {
* Add a new data buffer as an indexed buffer.
* @param index buffer index.
* @param buf new data buffer.
* @throws IOException
* @throws IOException thrown if an IO error occurs
*/
private void addBuffer(int index, DataBuffer buf) throws IOException {
buf.putByte(NODE_TYPE_OFFSET, NodeMgr.CHAINED_BUFFER_DATA_NODE);

View file

@ -1,239 +0,0 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import ghidra.util.LongIterator;
import java.io.IOException;
import java.util.NoSuchElementException;
/**
* <code>DBFieldMap</code> provides a database-backed map of non-unique Field values to long values.
*/
public class DBFieldMap {
private static final Class<?>[] fieldClasses = {
};
private static final String[] fieldNames = {
};
private static final int BUFFER_SIZE = 16 * 1024;
private DBHandle dbh;
private Schema schema;
private Table indexTable;
private Class<? extends Field> fieldClass;
/**
* Construct a new map.
* A temporary database is used to provide storage for the map.
* @param fieldClass specifies class of Field values to be stored in this map.
* @param cacheSizeMB size of data cache in MBytes.
*/
public DBFieldMap(Class<? extends Field> fieldClass, int cacheSizeMB) {
if (!Field.class.isAssignableFrom(fieldClass)) {
throw new IllegalArgumentException("Field class expected");
}
this.fieldClass = fieldClass;
int indexFieldType;
try {
indexFieldType = Field.INDEX_TYPE_FLAG |
fieldClass.newInstance().getFieldType();
} catch (Exception e) {
throw new IllegalArgumentException("Bad Field class: " + e.getMessage());
}
Field indexKeyField = IndexField.getIndexField((byte)indexFieldType);
schema = new Schema(0, indexKeyField.getClass(), "MapKey", fieldClasses, fieldNames);
boolean success = false;
try {
dbh = new DBHandle(BUFFER_SIZE, cacheSizeMB * 1024 * 1024);
long txId = dbh.startTransaction();
indexTable = dbh.createTable("DBFieldMap", schema);
dbh.endTransaction(txId, true);
success = true;
}
catch (IOException e) {
throw new RuntimeException(e);
}
finally {
if (!success && dbh != null) {
dbh.close();
dbh = null;
}
}
}
/**
* Dispose all resources associated with this map.
* This method should be invoked when the map is no longer needed.
*/
public void dispose() {
if (dbh != null) {
dbh.close();
dbh = null;
}
}
/*
* @see java.lang.Object#finalize()
*/
@Override
protected void finalize() throws Throwable {
dispose();
}
/**
* Add the specified value pair to this map.
* If the entry already exists, this method has no affect.
* @param fieldValue
* @param longValue
*/
public void addEntry(Field fieldValue, long longValue) {
if (!fieldClass.isInstance(fieldValue)) {
throw new IllegalArgumentException("Instance of " + fieldClass.getName() + " expected");
}
IndexField indexField = IndexField.getIndexField(fieldValue, longValue);
Record rec = schema.createRecord(indexField);
try {
long txId = dbh.startTransaction();
indexTable.putRecord(rec);
dbh.endTransaction(txId, true);
} catch (IOException e) {
throw new RuntimeException(e);
} finally {
}
}
/**
* Delete the specified value pair from this map.
* @param fieldValue
* @param longValue
* @return true if entry exists and was deleted
*/
public boolean deleteEntry(Field fieldValue, long longValue) {
if (!fieldClass.isInstance(fieldValue)) {
throw new IllegalArgumentException("Instance of " + fieldClass.getName() + " expected");
}
IndexField indexField = IndexField.getIndexField(fieldValue, longValue);
try {
long txId = dbh.startTransaction();
boolean success = indexTable.deleteRecord(indexField);
dbh.endTransaction(txId, true);
return success;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/**
* Determine if the specified value pair exists within this map.
* (This method provided for test purposes).
* @param fieldValue
* @param longValue
* @return
*/
boolean hasEntry(Field fieldValue, long longValue) {
if (!fieldClass.isInstance(fieldValue)) {
throw new IllegalArgumentException("Instance of " + fieldClass.getName() + " expected");
}
IndexField indexField = IndexField.getIndexField(fieldValue, longValue);
try {
return indexTable.hasRecord(indexField);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public LongIterator iterator() {
return new MapLongIterator();
}
private class MapLongIterator implements LongIterator {
DBFieldIterator indexIterator;
MapLongIterator() {
try {
indexIterator = indexTable.fieldKeyIterator();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/*
* @see ghidra.util.LongIterator#hasNext()
*/
public boolean hasNext() {
try {
return indexIterator.hasNext();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/*
* @see ghidra.util.LongIterator#next()
*/
public long next() {
try {
IndexField indexField = (IndexField) indexIterator.next();
if (indexField == null) {
throw new NoSuchElementException();
}
return indexField.getPrimaryKey();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/*
* @see ghidra.util.LongIterator#hasPrevious()
*/
public boolean hasPrevious() {
try {
return indexIterator.hasPrevious();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/*
* @see ghidra.util.LongIterator#previous()
*/
public long previous() {
try {
IndexField indexField = (IndexField) indexIterator.previous();
if (indexField == null) {
throw new NoSuchElementException();
}
return indexField.getPrimaryKey();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
}

View file

@ -17,11 +17,24 @@ package db;
import java.io.IOException;
import db.buffers.DataBuffer;
/**
* <code>Field</code> is an abstract data wrapper for use with Records.
* Note that when comparing two Field instances both must be of the same
* class.
*/
public abstract class Field implements Comparable<Field> {
public static final Field[] EMPTY_ARRAY = new Field[0];
/**
* 8-bit Field Type Encoding (PPPPFFFF)
* where:
* FFFF - normal/indexed field type
* PPPP - indexed table primary key type (1000b indicates LegacyIndexField)
*/
/**
* Field type for ByteField
* @see db.ByteField
@ -65,19 +78,56 @@ public abstract class Field implements Comparable<Field> {
static final byte BOOLEAN_TYPE = 6;
/**
* Field type flag mask used to isolate flag bits
* Field type for 10-byte binary FixedField(10)
* @see db.FixedField
*/
static final byte TYPE_FLAG_MASK = (byte) 0xC0;
static final byte FIXED_10_TYPE = 7;
/**
* Field base type mask used to isolate base type
* Legacy Index Primary Key Field type for LongField
* which was previously a boolean indicator for an index
* field with assumed long primary key.
* (see {@link LegacyIndexField})
*/
static final byte BASE_TYPE_MASK = (byte) 0x3F;
static final byte LEGACY_INDEX_LONG_TYPE = 8;
/**
* Field type flag bit shared by all Index type fields
* Field base type mask
*/
static final byte INDEX_TYPE_FLAG = (byte) 0x80;
static final byte FIELD_TYPE_MASK = (byte) 0x0F;
/**
* Field index primary key type mask
*/
static final byte INDEX_PRIMARY_KEY_TYPE_MASK = (byte) ~FIELD_TYPE_MASK;
/**
* Index Primary Key Field Type Shift
*/
static final int INDEX_FIELD_TYPE_SHIFT = 4;
private final boolean immutable;
/**
* Abstract Field Constructor for a mutable instance
*/
Field() {
immutable = false;
}
/**
* Abstract Field Constructor
* @param immutable true if field value is immutable
*/
Field(boolean immutable) {
this.immutable = immutable;
}
void checkImmutable() {
if (immutable) {
throw new IllegalFieldAccessException("immutable field instance");
}
}
/**
* Get field as a long value.
@ -191,10 +241,11 @@ public abstract class Field implements Comparable<Field> {
* Set data from binary byte array.
* All variable-length fields must implement this method.
* @param bytes field data
* @throws IllegalFieldAccessException if error occurs while reading bytes
* into field which will generally be caused by the incorrect number of
* bytes provided to a fixed-length field.
*/
public void setBinaryData(byte[] bytes) {
throw new IllegalFieldAccessException();
}
abstract public void setBinaryData(byte[] bytes);
/**
* Get field as a String value.
@ -219,10 +270,10 @@ public abstract class Field implements Comparable<Field> {
/**
* Truncate a variable length field to the specified length.
* If current length is shorterm, this method has no affect.
* @param length
* @param length truncated length
*/
void truncate(int length) {
throw new IllegalFieldAccessException();
throw new UnsupportedOperationException("Field may not be truncated");
}
/**
@ -233,22 +284,31 @@ public abstract class Field implements Comparable<Field> {
}
/**
* Create new instance of this field type.
* @param fieldValue initial field value.
* @return long
* Determine if specified field is same type as this field
* @param field a Field instance
* @return true if field is same type as this field
*/
public abstract Field newField(Field fieldValue);
public boolean isSameType(Field field) {
return field != null && field.getClass() == getClass();
}
/**
* Create new instance of this field with the same value.
* @return new field instance with same value
*/
public abstract Field copyField();
/**
* Create new instance of this field type.
* @return long
* @return new field instance with undefined initial value
*/
public abstract Field newField();
/**
* Return Field instance type as an integer value
* Return Field instance type as an integer value.
* @return encoded field type
*/
protected abstract byte getFieldType();
abstract byte getFieldType();
/**
* Write the field to buf at the specified offset. When writing variable length
@ -292,40 +352,73 @@ public abstract class Field implements Comparable<Field> {
*/
abstract int length();
/*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public abstract boolean equals(Object obj);
@Override
public abstract int hashCode();
/**
* Get field value as a formatted string
* @return field value string
*/
public abstract String getValueAsString();
/**
* Get minimum field value.
*
* Supported for fixed-length fields only.
* @return minimum value
* @throws UnsupportedOperationException if field is not fixed-length
*/
abstract Field getMinValue();
/**
* Get maximum field value.
*
* Supported for fixed-length fields only.
* @return maximum value
* @throws UnsupportedOperationException if field is not fixed-length
*/
abstract Field getMaxValue();
/**
* Performs a fast in-place comparison of this field value with another
* field value stored within the specified buffer at the the specified offset.
* @param buffer data buffer
* @param offset field value offset within buffer
* @return comparison value, zero if equal, -1 if this field has a value
* less than the stored field, or +1 if this field has a value greater than
* the stored field located at keyIndex.
*/
abstract int compareTo(DataBuffer buffer, int offset);
/**
* Get the field associated with the specified type value.
* @param fieldType encoded Field type
* @return Field
* @param fieldType field type index
* @return Field field instance which corresponds to the specified fieldType
* @throws UnsupportedFieldException if unsupported fieldType specified
*/
static Field getField(byte fieldType) throws UnsupportedFieldException {
if ((fieldType & INDEX_TYPE_FLAG) == 0) {
switch (fieldType & BASE_TYPE_MASK) {
if ((fieldType & INDEX_PRIMARY_KEY_TYPE_MASK) == 0) {
switch (fieldType & FIELD_TYPE_MASK) {
case LONG_TYPE:
return new LongField();
return LongField.INSTANCE;
case INT_TYPE:
return new IntField();
return IntField.INSTANCE;
case STRING_TYPE:
return new StringField();
return StringField.INSTANCE;
case SHORT_TYPE:
return new ShortField();
return ShortField.INSTANCE;
case BYTE_TYPE:
return new ByteField();
return ByteField.INSTANCE;
case BOOLEAN_TYPE:
return new BooleanField();
return BooleanField.INSTANCE;
case BINARY_OBJ_TYPE:
return new BinaryField();
return BinaryField.INSTANCE;
case FIXED_10_TYPE:
return FixedField10.INSTANCE;
}
}
else {
@ -340,4 +433,53 @@ public abstract class Field implements Comparable<Field> {
}
}
/**
* Get the type index value of the FixedField type which corresponds
* to the specified fixed-length;
* @param fixedLength fixed length
* @return FixedLength field type index
*/
static byte getFixedType(int fixedLength) {
if (fixedLength == 10) {
return FIXED_10_TYPE;
}
throw new IllegalArgumentException(
"Unsupported fixed-length binary type size: " + fixedLength);
}
/**
* Get a fixed-length field of the specified size
* @param size fixed-field length (supported sizes: 1, 4, 8, 10)
* @return fixed field instance
* @throws IllegalArgumentException if unsupported fixed field length
*/
static Field getFixedField(int size) {
switch (size) {
case 1:
return new ByteField();
case 4:
return new IntField();
case 8:
return new LongField();
case 10:
return new FixedField10();
}
throw new IllegalArgumentException("Unsupported fixed-field length: " + size);
}
/**
* Determine if a specified field instance may be indexed
* @param field field to be checked
* @return true if field can be indexed
*/
public static boolean canIndex(Field field) {
if (field == null) {
return false;
}
if (field instanceof IndexField) {
return false;
}
return !field.isSameType(BooleanField.INSTANCE) && !field.isSameType(ByteField.INSTANCE);
}
}

View file

@ -19,21 +19,27 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.NoSuchElementException;
/**
* <code>FieldIndexTable</code> provides a simplified index table whoose key is
* a fixed or variable length {@link IndexField} which consists of a concatenation of
* the index field value and associated primary table key.
*/
public class FieldIndexTable extends IndexTable {
private static final Class<?>[] fieldClasses = {};
private static final Field[] fields = {};
private static final String[] fieldNames = {};
private final Schema indexSchema;
private final int indexColumn;
private final IndexField indexKeyType;
/**
* Construct a new secondary index which is based upon a specific field within the
* primary table specified by name.
* @param primaryTable primary table.
* @param colIndex identifies the indexed column within the primary table.
* @throws IOException thrown if an IO error occurs
* Construct a new secondary index which is based upon a specific field column within the
* primary table.
* @param primaryTable primary table
* @param colIndex field column index
* @throws IOException thrown if IO error occurs
*/
FieldIndexTable(Table primaryTable, int colIndex) throws IOException {
this(primaryTable, primaryTable.getDBHandle().getMasterTable().createTableRecord(
@ -49,27 +55,35 @@ public class FieldIndexTable extends IndexTable {
*/
FieldIndexTable(Table primaryTable, TableRecord indexTableRecord) throws IOException {
super(primaryTable, indexTableRecord);
this.indexSchema = indexTable.getSchema();
this.indexColumn = indexTableRecord.getIndexedColumn();
indexKeyType = (IndexField) indexTable.getSchema().getKeyFieldType();
}
private static Schema getIndexTableSchema(Table primaryTable, int colIndex) {
byte fieldType = primaryTable.getSchema().getField(colIndex).getFieldType();
IndexField indexKeyField = IndexField.getIndexField(fieldType);
return new Schema(0, indexKeyField.getClass(), "IndexKey", fieldClasses, fieldNames);
}
/*
* @see ghidra.framework.store.db.IndexTable#findPrimaryKeys(ghidra.framework.store.db.Field)
/**
* Generate index table schema for specified primaryTable and index column
* @param primaryTable primary table
* @param colIndex index column
* @return index table schema
*/
private static Schema getIndexTableSchema(Table primaryTable, int colIndex) {
Schema primarySchema = primaryTable.getSchema();
Field indexedField = primarySchema.getField(colIndex);
Field primaryKeyType = primarySchema.getKeyFieldType();
IndexField indexKeyField = new IndexField(indexedField, primaryKeyType);
return new Schema(0, indexKeyField, "IndexKey", fields, fieldNames);
}
@Override
long[] findPrimaryKeys(Field indexValue) throws IOException {
IndexField indexField = IndexField.getIndexField(indexValue, Long.MIN_VALUE);
Field[] findPrimaryKeys(Field indexValue) throws IOException {
IndexField indexField =
indexKeyType.newIndexField(indexValue, getPrimaryTableKeyType().getMinValue());
DBFieldIterator iter = indexTable.fieldKeyIterator(indexField);
ArrayList<IndexField> list = new ArrayList<>(20);
while (iter.hasNext()) {
IndexField f = (IndexField) iter.next();
if (!f.hasSameIndex(indexField)) {
if (!f.hasSameIndexValue(indexField)) {
break;
}
if (indexField.usesTruncatedFieldValue()) {
@ -82,7 +96,7 @@ public class FieldIndexTable extends IndexTable {
}
list.add(f);
}
long[] keys = new long[list.size()];
Field[] keys = new Field[list.size()];
for (int i = 0; i < keys.length; i++) {
IndexField f = list.get(i);
keys[i] = f.getPrimaryKey();
@ -90,55 +104,37 @@ public class FieldIndexTable extends IndexTable {
return keys;
}
/*
* @see ghidra.framework.store.db.IndexTable#getKeyCount(ghidra.framework.store.db.Field)
*/
@Override
int getKeyCount(Field indexValue) throws IOException {
return findPrimaryKeys(indexValue).length;
}
/*
* @see ghidra.framework.store.db.IndexTable#addEntry(ghidra.framework.store.db.Record)
*/
@Override
void addEntry(Record record) throws IOException {
Field indexedField = record.getField(colIndex);
IndexField f = IndexField.getIndexField(indexedField, record.getKey());
Record rec = indexSchema.createRecord(f);
IndexField f = indexKeyType.newIndexField(indexedField, record.getKeyField());
Record rec = indexTable.getSchema().createRecord(f);
indexTable.putRecord(rec);
}
/*
* @see ghidra.framework.store.db.IndexTable#deleteEntry(ghidra.framework.store.db.Record)
*/
@Override
void deleteEntry(Record record) throws IOException {
Field indexedField = record.getField(colIndex);
IndexField f = IndexField.getIndexField(indexedField, record.getKey());
IndexField f = indexKeyType.newIndexField(indexedField, record.getKeyField());
indexTable.deleteRecord(f);
}
/*
* @see ghidra.framework.store.db.IndexTable#indexIterator()
*/
@Override
DBFieldIterator indexIterator() throws IOException {
return new IndexFieldIterator();
}
/*
* @see ghidra.framework.store.db.IndexTable#indexIterator(ghidra.framework.store.db.Field, ghidra.framework.store.db.Field, boolean)
*/
@Override
DBFieldIterator indexIterator(Field minField, Field maxField, boolean before)
throws IOException {
return new IndexFieldIterator(minField, maxField, before);
}
/*
* @see db.IndexTable#indexIterator(db.Field, db.Field, db.Field, boolean)
*/
@Override
DBFieldIterator indexIterator(Field minField, Field maxField, Field startField, boolean before)
throws IOException {
@ -161,19 +157,19 @@ public class FieldIndexTable extends IndexTable {
/**
* Construct an index field iterator starting with the minimum index value.
* @throws IOException an IO error occurred
*/
IndexFieldIterator() throws IOException {
this(null, null, true);
}
/**
* Construct an index field iterator. The iterator is positioned at index
* value identified by startValue.
* Construct an index field iterator.
* @param minValue minimum index value or null if no minimum
* @param maxValue maximum index value or null if no maximum
* @param before if true initial position is before minValue, else position
* after maxValue
* @throws IOException
* @throws IOException an IO error occurred
*/
IndexFieldIterator(Field minValue, Field maxValue, boolean before) throws IOException {
@ -182,8 +178,13 @@ public class FieldIndexTable extends IndexTable {
"Due to potential truncation issues, operation not permitted on variable length fields");
}
min = minValue != null ? IndexField.getIndexField(minValue, Long.MIN_VALUE) : null;
max = maxValue != null ? IndexField.getIndexField(maxValue, Long.MAX_VALUE) : null;
Field primaryKeyType = getPrimaryTableKeyType();
min = minValue != null
? indexKeyType.newIndexField(minValue, primaryKeyType.getMinValue())
: null;
max = maxValue != null
? indexKeyType.newIndexField(maxValue, primaryKeyType.getMaxValue())
: null;
IndexField start = null;
if (before && minValue != null) {
@ -209,13 +210,16 @@ public class FieldIndexTable extends IndexTable {
}
/**
* @param minField
* @param maxField
* @param startField
* @param before
* @throws IOException
* Construct an index field iterator. The iterator is positioned at index
* value identified by startValue.
* @param minValue minimum index value or null if no minimum
* @param maxValue maximum index value or null if no maximum
* @param startValue initial index value position
* @param before if true initial position is before minValue, else position
* after maxValue
* @throws IOException an IO error occurred
*/
public IndexFieldIterator(Field minValue, Field maxValue, Field startValue, boolean before)
IndexFieldIterator(Field minValue, Field maxValue, Field startValue, boolean before)
throws IOException {
if (primaryTable.getSchema().getField(indexColumn).isVariableLength()) {
@ -226,17 +230,23 @@ public class FieldIndexTable extends IndexTable {
if (startValue == null) {
throw new IllegalArgumentException("starting index value required");
}
min = minValue != null ? IndexField.getIndexField(minValue, Long.MIN_VALUE) : null;
max = maxValue != null ? IndexField.getIndexField(maxValue, Long.MAX_VALUE) : null;
IndexField start =
IndexField.getIndexField(startValue, before ? Long.MIN_VALUE : Long.MAX_VALUE);
Field primaryKeyType = getPrimaryTableKeyType();
min = minValue != null
? indexKeyType.newIndexField(minValue, primaryKeyType.getMinValue())
: null;
max = maxValue != null
? indexKeyType.newIndexField(maxValue, primaryKeyType.getMaxValue())
: null;
IndexField start = indexKeyType.newIndexField(startValue,
before ? primaryKeyType.getMinValue() : primaryKeyType.getMaxValue());
indexIterator = indexTable.fieldKeyIterator(min, max, start);
if (indexIterator.hasNext()) {
IndexField f = (IndexField) indexIterator.next();
if (before || !f.getIndexField().equals(startValue)) {
if (before || !f.getIndexedField().equals(startValue)) {
indexIterator.previous();
}
}
@ -250,11 +260,12 @@ public class FieldIndexTable extends IndexTable {
hasPrev = false; // TODO ???
indexKey = (IndexField) indexIterator.next();
int skipCnt = 0;
while (indexKey != null && indexKey.hasSameIndex(lastKey)) {
while (indexKey != null && indexKey.hasSameIndexValue(lastKey)) {
if (++skipCnt > 10) {
// Reinit iterator to skip large number of same index value
indexIterator = indexTable.fieldKeyIterator(min, max,
IndexField.getIndexField(indexKey.getIndexField(), Long.MAX_VALUE));
indexIterator =
indexTable.fieldKeyIterator(min, max, indexKeyType.newIndexField(
indexKey.getIndexedField(), getPrimaryTableKeyType().getMaxValue()));
skipCnt = 0;
}
indexKey = (IndexField) indexIterator.next();
@ -276,11 +287,12 @@ public class FieldIndexTable extends IndexTable {
hasNext = false; // TODO ???
indexKey = (IndexField) indexIterator.previous();
int skipCnt = 0;
while (indexKey != null && indexKey.hasSameIndex(lastKey)) {
while (indexKey != null && indexKey.hasSameIndexValue(lastKey)) {
if (++skipCnt > 10) {
// Reinit iterator to skip large number of same index value
indexIterator = indexTable.fieldKeyIterator(min, max,
IndexField.getIndexField(indexKey.getIndexField(), Long.MIN_VALUE));
indexIterator =
indexTable.fieldKeyIterator(min, max, indexKeyType.newIndexField(
indexKey.getIndexedField(), getPrimaryTableKeyType().getMinValue()));
skipCnt = 0;
}
indexKey = (IndexField) indexIterator.previous();
@ -300,8 +312,7 @@ public class FieldIndexTable extends IndexTable {
hasNext = false;
hasPrev = true;
lastKey = indexKey;
Field f = indexKey.getIndexField();
return f.newField(f);
return indexKey.getIndexedField();
}
return null;
}
@ -312,8 +323,7 @@ public class FieldIndexTable extends IndexTable {
hasNext = true;
hasPrev = false;
lastKey = indexKey;
Field f = indexKey.getIndexField();
return f.newField(f);
return indexKey.getIndexedField();
}
return null;
}
@ -329,8 +339,8 @@ public class FieldIndexTable extends IndexTable {
return false;
}
synchronized (db) {
long[] keys = findPrimaryKeys(lastKey.getIndexField());
for (long key : keys) {
Field[] keys = findPrimaryKeys(lastKey.getIndexedField());
for (Field key : keys) {
primaryTable.deleteRecord(key);
}
lastKey = null;
@ -339,16 +349,14 @@ public class FieldIndexTable extends IndexTable {
}
}
/* (non-Javadoc)
* @see ghidra.framework.store.db.IndexTable#hasRecord(ghidra.framework.store.db.Field)
*/
@Override
boolean hasRecord(Field field) throws IOException {
IndexField indexField = IndexField.getIndexField(field, Long.MIN_VALUE);
IndexField indexField =
indexKeyType.newIndexField(field, getPrimaryTableKeyType().getMinValue());
DBFieldIterator iter = indexTable.fieldKeyIterator(indexField);
while (iter.hasNext()) {
IndexField f = (IndexField) iter.next();
if (!f.hasSameIndex(indexField)) {
if (!f.hasSameIndexValue(indexField)) {
return false;
}
if (indexField.usesTruncatedFieldValue()) {
@ -364,109 +372,54 @@ public class FieldIndexTable extends IndexTable {
return false;
}
/**
* Iterate over all primary keys sorted based upon the associated index key.
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
@Override
DBLongIterator keyIterator() throws IOException {
DBFieldIterator keyIterator() throws IOException {
return new PrimaryKeyIterator();
}
/**
* Iterate over all primary keys sorted based upon the associated index key.
* The iterator is initially positioned before the first index buffer whose index key
* is greater than or equal to the specified startField value.
* @param startField index key value which determines initial position of iterator
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
@Override
DBLongIterator keyIteratorBefore(Field startField) throws IOException {
DBFieldIterator keyIteratorBefore(Field startField) throws IOException {
return new PrimaryKeyIterator(startField, false);
}
/**
* Iterate over all primary keys sorted based upon the associated index key.
* The iterator is initially positioned after the index buffer whose index key
* is equal to the specified startField value or immediately before the first
* index buffer whose index key is greater than the specified startField value.
* @param startField index key value which determines initial position of iterator
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
@Override
DBLongIterator keyIteratorAfter(Field startField) throws IOException {
DBFieldIterator keyIteratorAfter(Field startField) throws IOException {
return new PrimaryKeyIterator(startField, true);
}
/**
* Iterate over all primary keys sorted based upon the associated index key.
* The iterator is initially positioned before the primaryKey within the index buffer
* whose index key is equal to the specified startField value or immediately before the first
* index buffer whose index key is greater than the specified startField value.
* @param startField index key value which determines initial position of iterator
* @param primaryKey initial position within index buffer if index key matches startField value.
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
@Override
DBLongIterator keyIteratorBefore(Field startField, long primaryKey) throws IOException {
DBFieldIterator keyIteratorBefore(Field startField, Field primaryKey) throws IOException {
return new PrimaryKeyIterator(null, null, startField, primaryKey, false);
}
/**
* Iterate over all primary keys sorted based upon the associated index key.
* The iterator is initially positioned after the primaryKey within the index buffer
* whose index key is equal to the specified startField value or immediately before the first
* index buffer whose index key is greater than the specified startField value.
* @param startField index key value which determines initial position of iterator
* @param primaryKey initial position within index buffer if index key matches startField value.
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
@Override
DBLongIterator keyIteratorAfter(Field startField, long primaryKey) throws IOException {
DBFieldIterator keyIteratorAfter(Field startField, Field primaryKey) throws IOException {
return new PrimaryKeyIterator(null, null, startField, primaryKey, true);
}
/**
* Iterate over all primary keys sorted based upon the associated index key.
* The iterator is limited to range of index keys of startField through endField, inclusive.
* If atStart is true, the iterator is initially positioned before the first index
* buffer whose index key is greater than or equal to the specified startField value.
* If atStart is false, the iterator is initially positioned after the first index
* buffer whose index key is less than or equal to the specified endField value.
* @param startField minimum index key value
* @param endField maximum index key value
* @param atStart if true, position iterator before start value.
* Otherwise, position iterator after end value.
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
@Override
DBLongIterator keyIterator(Field startField, Field endField, boolean atStart)
DBFieldIterator keyIterator(Field startField, Field endField, boolean atStart)
throws IOException {
return new PrimaryKeyIterator(startField, endField, atStart ? startField : endField,
atStart ? Long.MIN_VALUE : Long.MAX_VALUE, !atStart);
atStart ? getPrimaryTableKeyType().getMinValue()
: getPrimaryTableKeyType().getMaxValue(),
!atStart);
}
/**
* @see db.IndexTable#keyIterator(db.Field, db.Field, db.Field, boolean)
*/
@Override
DBLongIterator keyIterator(Field minField, Field maxField, Field startField, boolean before)
DBFieldIterator keyIterator(Field minField, Field maxField, Field startField, boolean before)
throws IOException {
return new PrimaryKeyIterator(minField, maxField, startField,
before ? Long.MIN_VALUE : Long.MAX_VALUE, !before);
before ? getPrimaryTableKeyType().getMinValue()
: getPrimaryTableKeyType().getMaxValue(),
!before);
}
/**
* Iterates over primary keys which correspond to index field values within a specified range.
* NOTE: Primary keys corresponding to index fields which have been truncated may be returned out of order.
*/
private class PrimaryKeyIterator implements DBLongIterator {
private class PrimaryKeyIterator implements DBFieldIterator {
private IndexField min;
private IndexField max;
@ -479,6 +432,7 @@ public class FieldIndexTable extends IndexTable {
/**
* Construct a key iterator starting with the minimum secondary key.
* @throws IOException thrown if IO error occurs
*/
PrimaryKeyIterator() throws IOException {
indexIterator = indexTable.fieldKeyIterator();
@ -490,9 +444,12 @@ public class FieldIndexTable extends IndexTable {
* @param startValue indexed field value.
* @param after if true the iterator is positioned immediately after
* the last occurance of the specified startValue position.
* @throws IOException thrown if IO error occurs
*/
PrimaryKeyIterator(Field startValue, boolean after) throws IOException {
this(null, null, startValue, after ? Long.MAX_VALUE : Long.MIN_VALUE, after);
this(null, null, startValue, after ? getPrimaryTableKeyType().getMaxValue()
: getPrimaryTableKeyType().getMinValue(),
after);
}
/**
@ -505,13 +462,18 @@ public class FieldIndexTable extends IndexTable {
* @param after if true iterator is positioned immediately after
* the startValue/primaryKey,
* otherwise immediately before.
* @throws IOException
* @throws IOException thrown if IO error occurs
*/
PrimaryKeyIterator(Field minValue, Field maxValue, Field startValue, long primaryKey,
PrimaryKeyIterator(Field minValue, Field maxValue, Field startValue, Field primaryKey,
boolean after) throws IOException {
min = minValue != null ? IndexField.getIndexField(minValue, Long.MIN_VALUE) : null;
max = maxValue != null ? IndexField.getIndexField(maxValue, Long.MAX_VALUE) : null;
Field primaryKeyType = getPrimaryTableKeyType();
min = minValue != null
? indexKeyType.newIndexField(minValue, primaryKeyType.getMinValue())
: null;
max = maxValue != null
? indexKeyType.newIndexField(maxValue, primaryKeyType.getMaxValue())
: null;
IndexField start = null;
if (after && startValue == null && maxValue == null) {
@ -522,7 +484,7 @@ public class FieldIndexTable extends IndexTable {
}
else {
start =
startValue != null ? IndexField.getIndexField(startValue, primaryKey) : null;
startValue != null ? indexKeyType.newIndexField(startValue, primaryKey) : null;
indexIterator = indexTable.fieldKeyIterator(min, max, start);
if (indexIterator.hasNext()) {
Field f = indexIterator.next();
@ -540,18 +502,18 @@ public class FieldIndexTable extends IndexTable {
* @return true if field value corresponding to f is outside the min/max range.
* It is assumed that the underlying table iterator will not return index values
* out of range which do not have the same truncated index value.
* @throws IOException
* @throws IOException thrown if IO error occurs
*/
private boolean indexValueOutOfRange(IndexField f) throws IOException {
Field val = null;
if (min != null && min.usesTruncatedFieldValue() && min.hasSameIndex(f)) {
if (min != null && min.usesTruncatedFieldValue() && min.hasSameIndexValue(f)) {
Record rec = primaryTable.getRecord(f.getPrimaryKey());
val = rec.getField(indexColumn);
if (val.compareTo(min.getNonTruncatedIndexField()) < 0) {
return true;
}
}
if (max != null && max.usesTruncatedFieldValue() && max.hasSameIndex(f)) {
if (max != null && max.usesTruncatedFieldValue() && max.hasSameIndexValue(f)) {
if (val == null) {
Record rec = primaryTable.getRecord(f.getPrimaryKey());
val = rec.getField(indexColumn);
@ -563,9 +525,6 @@ public class FieldIndexTable extends IndexTable {
return false;
}
/* (non-Javadoc)
* @see ghidra.framework.store.db.DBLongIterator#hasNext()
*/
@Override
public boolean hasNext() throws IOException {
if (hasNext) {
@ -582,9 +541,6 @@ public class FieldIndexTable extends IndexTable {
return hasNext;
}
/* (non-Javadoc)
* @see ghidra.framework.store.db.DBLongIterator#hasPrevious()
*/
@Override
public boolean hasPrevious() throws IOException {
if (hasPrev) {
@ -601,11 +557,8 @@ public class FieldIndexTable extends IndexTable {
return hasPrev;
}
/* (non-Javadoc)
* @see ghidra.framework.store.db.DBLongIterator#next()
*/
@Override
public long next() throws IOException {
public Field next() throws IOException {
if (hasNext()) {
lastKey = key;
hasNext = false;
@ -614,11 +567,8 @@ public class FieldIndexTable extends IndexTable {
throw new NoSuchElementException();
}
/* (non-Javadoc)
* @see ghidra.framework.store.db.DBLongIterator#previous()
*/
@Override
public long previous() throws IOException {
public Field previous() throws IOException {
if (hasPrevious()) {
lastKey = key;
hasPrev = false;
@ -627,13 +577,10 @@ public class FieldIndexTable extends IndexTable {
throw new NoSuchElementException();
}
/* (non-Javadoc)
* @see ghidra.framework.store.db.DBLongIterator#delete()
*/
@Override
public boolean delete() throws IOException {
if (lastKey != null) {
long primaryKey = lastKey.getPrimaryKey();
Field primaryKey = lastKey.getPrimaryKey();
lastKey = null;
return primaryTable.deleteRecord(primaryKey);
}

View file

@ -0,0 +1,35 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
/**
 * <code>FieldKeyInteriorNode</code> defines a common interface for {@link FieldKeyNode}
 * implementations which are also an {@link InteriorNode}.
 */
public interface FieldKeyInteriorNode extends InteriorNode, FieldKeyNode {

	/**
	 * Callback method for when a child node's leftmost key changes.  The parent
	 * uses this to keep its own key entry for the child consistent.
	 * NOTE(review): the exact bookkeeping performed is implementation-specific —
	 * confirm against implementing classes.
	 * @param oldKey previous leftmost key.
	 * @param newKey new leftmost key.
	 * @param childNode child node containing oldKey (null if not a VarKeyNode)
	 * @throws IOException if IO error occurs
	 */
	void keyChanged(Field oldKey, Field newKey, FieldKeyNode childNode) throws IOException;
}

View file

@ -0,0 +1,65 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
/**
 * <code>FieldKeyNode</code> defines a common interface for {@link BTreeNode}
 * implementations which utilize a {@link Field} key.
 */
interface FieldKeyNode extends BTreeNode {

	// NOTE: interface members are implicitly public and abstract; the redundant
	// modifiers present in the original declarations have been dropped (JLS 9.4).

	/**
	 * @return the parent node or null if this is the root
	 */
	@Override
	FieldKeyInteriorNode getParent();

	/**
	 * Get the leaf node which contains the specified key.
	 * @param key key value
	 * @return leaf node
	 * @throws IOException thrown if an IO error occurs
	 */
	FieldKeyRecordNode getLeafNode(Field key) throws IOException;

	/**
	 * Get the left-most leaf node within the tree.
	 * @return left-most leaf node.
	 * @throws IOException thrown if IO error occurs
	 */
	FieldKeyRecordNode getLeftmostLeafNode() throws IOException;

	/**
	 * Get the right-most leaf node within the tree.
	 * @return right-most leaf node.
	 * @throws IOException thrown if IO error occurs
	 */
	FieldKeyRecordNode getRightmostLeafNode() throws IOException;

	/**
	 * Performs a fast in-place key comparison of the specified key
	 * value with a key stored within this node at the specified keyIndex.
	 * @param k key value to be compared
	 * @param keyIndex key index to another key within this node's buffer
	 * @return comparison value, zero if equal, -1 if k has a value less than
	 * the stored key, or +1 if k has a value greater than the stored key located
	 * at keyIndex.
	 */
	int compareKeyField(Field k, int keyIndex);
}

View file

@ -0,0 +1,145 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
/**
 * <code>FieldKeyRecordNode</code> defines a common interface for {@link FieldKeyNode}
 * implementations which are also a {@link RecordNode} (i.e., leaf node).
 */
interface FieldKeyRecordNode extends RecordNode, FieldKeyNode {

	/**
	 * Get the record located at the specified index.
	 * @param schema record data schema
	 * @param index key index
	 * @return Record
	 * @throws IOException thrown if IO error occurs
	 */
	Record getRecord(Schema schema, int index) throws IOException;

	/**
	 * Insert or Update a record.
	 * @param record data record to be inserted or updated (keyed by a {@link Field} key)
	 * @param table table which will be notified when record is inserted or updated.
	 * @return root node which may have changed.
	 * @throws IOException thrown if IO error occurs
	 */
	FieldKeyNode putRecord(Record record, Table table) throws IOException;

	/**
	 * Remove the record identified by index.
	 * This will never be the last record within the node.
	 * @param index record index
	 * @throws IOException thrown if IO error occurs
	 */
	void remove(int index) throws IOException;

	/**
	 * Determine if this record node has a right sibling.
	 * @return true if right sibling exists
	 * @throws IOException if IO error occurs
	 */
	boolean hasNextLeaf() throws IOException;

	/**
	 * Get this leaf node's right sibling
	 * @return this leaf node's right sibling or null if right sibling does not exist.
	 * @throws IOException if an IO error occurs
	 */
	FieldKeyRecordNode getNextLeaf() throws IOException;

	/**
	 * Determine if this record node has a left sibling.
	 * @return true if left sibling exists
	 * @throws IOException if IO error occurs
	 */
	boolean hasPreviousLeaf() throws IOException;

	/**
	 * Get this leaf node's left sibling
	 * @return this leaf node's left sibling or null if left sibling does not exist.
	 * @throws IOException if an IO error occurs
	 */
	FieldKeyRecordNode getPreviousLeaf() throws IOException;

	/**
	 * Remove this leaf from the tree.
	 * @return root node which may have changed.
	 * @throws IOException thrown if IO error occurs
	 */
	FieldKeyNode removeLeaf() throws IOException;

	/**
	 * Delete the record identified by the specified key.
	 * @param key record key
	 * @param table table which will be notified when record is deleted.
	 * @return root node which may have changed.
	 * @throws IOException thrown if IO error occurs
	 */
	FieldKeyNode deleteRecord(Field key, Table table) throws IOException;

	/**
	 * Get the record with the minimum key value which is greater than or equal
	 * to the specified key.
	 * @param key search key
	 * @param schema record data schema
	 * @return Record requested or null if record not found.
	 * @throws IOException thrown if IO error occurs
	 */
	Record getRecordAtOrAfter(Field key, Schema schema) throws IOException;

	/**
	 * Get the record with the maximum key value which is less than or equal
	 * to the specified key.
	 * @param key search key
	 * @param schema record data schema
	 * @return Record requested or null if record not found.
	 * @throws IOException thrown if IO error occurs
	 */
	Record getRecordAtOrBefore(Field key, Schema schema) throws IOException;

	/**
	 * Get the record with the minimum key value which is greater than
	 * the specified key.
	 * @param key search key
	 * @param schema record data schema
	 * @return Record requested or null if record not found.
	 * @throws IOException thrown if IO error occurs
	 */
	Record getRecordAfter(Field key, Schema schema) throws IOException;

	/**
	 * Get the record with the maximum key value which is less than
	 * the specified key.
	 * @param key search key
	 * @param schema record data schema
	 * @return Record requested or null if record not found.
	 * @throws IOException thrown if IO error occurs
	 */
	Record getRecordBefore(Field key, Schema schema) throws IOException;

	/**
	 * Get the record identified by the specified key.
	 * @param key search key
	 * @param schema record data schema
	 * @return Record requested or null if record not found.
	 * @throws IOException thrown if IO error occurs
	 */
	Record getRecord(Field key, Schema schema) throws IOException;
}

View file

@ -0,0 +1,55 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
/**
 * <code>FixedField</code> provides an abstract implementation of a fixed-length
 * binary field.  Unlike a variable-length {@link BinaryField}, a fixed-length
 * field's stored size never changes, so truncation is not supported.
 */
public abstract class FixedField extends BinaryField {

	/**
	 * Construct a fixed-length field
	 * @param data initial value
	 * @param immutable true if field value is immutable
	 */
	FixedField(byte[] data, boolean immutable) {
		super(data, immutable);
	}

	@Override
	public final boolean isVariableLength() {
		return false;
	}

	@Override
	void truncate(int length) {
		// Truncation only applies to variable-length fields
		throw new UnsupportedOperationException("Field may not be truncated");
	}

	@Override
	public abstract FixedField copyField();

	@Override
	public abstract FixedField newField();

	@Override
	abstract FixedField getMinValue();

	@Override
	abstract FixedField getMaxValue();
}

View file

@ -0,0 +1,215 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
import db.buffers.DataBuffer;
import generic.util.UnsignedDataUtils;
import ghidra.util.BigEndianDataConverter;
/**
 * <code>FixedField10</code> is a 10-byte fixed-length binary field.
 * Values compare as unsigned big-endian quantities.
 */
public class FixedField10 extends FixedField {

	/**
	 * Minimum 10-byte fixed-length field value (all bits clear)
	 */
	public static final FixedField10 MIN_VALUE = new FixedField10(0L, (short) 0, true);

	/**
	 * Maximum 10-byte fixed-length field value (all bits set; comparisons are unsigned)
	 */
	public static final FixedField10 MAX_VALUE = new FixedField10(-1L, (short) -1, true);

	/**
	 * Instance intended for defining a {@link Table} {@link Schema}
	 */
	@SuppressWarnings("hiding")
	public static final FixedField10 INSTANCE = MIN_VALUE;

	// This implementation uses both a data byte array and short+long variables
	// for data storage.  While the short+long is always available, the data
	// byte array is only set when needed or supplied during construction.
	// The use of the short+long is done to speed-up comparison with other
	// FixedField10 instances or directly from a DataBuffer.
	private short lo2; // low-order 2 bytes (big-endian)
	private long hi8; // high-order 8 bytes (big-endian)

	/**
	 * Construct a 10-byte fixed-length field with an initial value of 0.
	 */
	public FixedField10() {
		super(null, false);
	}

	/**
	 * Construct a 10-byte fixed-length field with an initial value of data.
	 * @param data initial 10-byte binary value
	 * @throws IllegalArgumentException thrown if data is not 10-bytes in length
	 */
	public FixedField10(byte[] data) {
		this(data, false);
	}

	/**
	 * Construct a 10-byte fixed-length binary field with an initial value of data.
	 * @param data initial 10-byte binary value
	 * @param immutable true if field value is immutable
	 * @throws IllegalArgumentException thrown if data is not 10-bytes in length
	 */
	public FixedField10(byte[] data, boolean immutable) {
		super(null, immutable);
		setBinaryData(data);
	}

	/**
	 * Internal constructor from the split hi/lo representation.
	 * The data byte array remains null and is built lazily (see {@link #getBinaryData()}).
	 * @param hi8 high-order 8 bytes
	 * @param lo2 low-order 2 bytes
	 * @param immutable true if field value is immutable
	 */
	FixedField10(long hi8, short lo2, boolean immutable) {
		super(null, immutable);
		this.hi8 = hi8;
		this.lo2 = lo2;
	}

	@Override
	public int compareTo(Field o) {
		if (!(o instanceof FixedField10)) {
			throw new UnsupportedOperationException("may only compare similar Field types");
		}
		FixedField10 f = (FixedField10) o;
		// Unsigned comparison, high-order bytes first
		if (hi8 != f.hi8) {
			return UnsignedDataUtils.unsignedLessThan(hi8, f.hi8) ? -1 : 1;
		}
		if (lo2 != f.lo2) {
			return UnsignedDataUtils.unsignedLessThan(lo2, f.lo2) ? -1 : 1;
		}
		return 0;
	}

	@Override
	int compareTo(DataBuffer buffer, int offset) {
		// Compare directly against the stored form without materializing a Field
		long otherHi8 = buffer.getLong(offset);
		if (hi8 != otherHi8) {
			return UnsignedDataUtils.unsignedLessThan(hi8, otherHi8) ? -1 : 1;
		}
		short otherLo2 = buffer.getShort(offset + 8);
		if (lo2 != otherLo2) {
			return UnsignedDataUtils.unsignedLessThan(lo2, otherLo2) ? -1 : 1;
		}
		return 0;
	}

	@Override
	public FixedField copyField() {
		return new FixedField10(hi8, lo2, false);
	}

	@Override
	public FixedField newField() {
		return new FixedField10();
	}

	@Override
	FixedField getMinValue() {
		return MIN_VALUE;
	}

	@Override
	FixedField getMaxValue() {
		return MAX_VALUE;
	}

	@Override
	public byte[] getBinaryData() {
		// Lazily build and cache the byte-array form from hi8/lo2.
		// NOTE(review): returns the internal (cached) array without copying —
		// callers must not modify it.
		if (data != null) {
			return data;
		}
		data = new byte[10];
		BigEndianDataConverter.INSTANCE.putLong(data, 0, hi8);
		BigEndianDataConverter.INSTANCE.putShort(data, 8, lo2);
		return data;
	}

	@Override
	public void setBinaryData(byte[] data) {
		if (data.length != 10) {
			throw new IllegalArgumentException("Invalid FixedField10 length: " + data.length);
		}
		// Store the supplied array reference and keep hi8/lo2 in sync for fast compares
		this.data = data;
		hi8 = BigEndianDataConverter.INSTANCE.getLong(data, 0);
		lo2 = BigEndianDataConverter.INSTANCE.getShort(data, 8);
	}

	@Override
	byte getFieldType() {
		return FIXED_10_TYPE;
	}

	@Override
	int write(Buffer buf, int offset) throws IOException {
		if (data != null) {
			return buf.put(offset, data);
		}
		offset = buf.putLong(offset, hi8);
		return buf.putShort(offset, lo2);
	}

	@Override
	int read(Buffer buf, int offset) throws IOException {
		checkImmutable();
		data = null; // be lazy
		hi8 = buf.getLong(offset);
		lo2 = buf.getShort(offset + 8);
		return offset + 10;
	}

	@Override
	int readLength(Buffer buf, int offset) throws IOException {
		return 10;
	}

	@Override
	int length() {
		return 10;
	}

	@Override
	public int hashCode() {
		final int prime = 31;
		int result = (int) (hi8 ^ (hi8 >>> 32));
		result = prime * result + lo2;
		return result;
	}

	@Override
	public boolean equals(Object obj) {
		if (this == obj) {
			return true;
		}
		// Null-safe: equals(null) must return false per the Object.equals contract
		// (the original dereferenced obj before checking for null)
		if (obj == null || getClass() != obj.getClass()) {
			return false;
		}
		FixedField10 other = (FixedField10) obj;
		return hi8 == other.hi8 && lo2 == other.lo2;
	}

	@Override
	public String getValueAsString() {
		return "{" + BinaryField.getValueAsString(getBinaryData()) + "}";
	}
}

View file

@ -1,328 +0,0 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
import java.util.NoSuchElementException;
/**
 * The <code>FixedIndexTable</code> provides a secondary index on a fixed-length table column
 * (e.g., IntField, LongField, etc.). For each unique secondary index value, an IndexBuffer is
 * stored within an underlying index Table record. The secondary index value is used as the long
 * key to access this record. Within a single IndexBuffer is stored all primary keys which
 * correspond to an index value.
 */
class FixedIndexTable extends IndexTable {

	private static final Class<?>[] fieldClasses = { BinaryField.class, // index data
	};
	private static final String[] fieldNames = { "IndexBuffer" };

	// Schema of the underlying index table: each record is keyed by the secondary
	// index value (as a long) and stores one packed IndexBuffer of primary keys.
	private static Schema indexSchema = new Schema(0, "IndexKey", fieldClasses, fieldNames);

	/**
	 * Construct a new secondary index which is based upon a field within the
	 * primary table specified by name.
	 * @param primaryTable primary table.
	 * @param colIndex identifies the indexed column within the primary table.
	 * @throws IOException thrown if an IO error occurs
	 */
	FixedIndexTable(Table primaryTable, int colIndex) throws IOException {
		this(primaryTable, primaryTable.getDBHandle().getMasterTable().createTableRecord(
			primaryTable.getName(), indexSchema, colIndex));
	}

	/**
	 * Construct a new or existing secondary index. An existing index must have
	 * its root ID specified within the tableRecord.
	 * @param primaryTable primary table.
	 * @param indexTableRecord specifies the index parameters.
	 * @throws IOException thrown if an IO error occurs
	 */
	FixedIndexTable(Table primaryTable, TableRecord indexTableRecord) throws IOException {
		super(primaryTable, indexTableRecord);
	}

	/**
	 * Find all primary keys which correspond to the specified indexed field
	 * value.
	 * @param indexValue the field value to search for.
	 * @return list of primary keys
	 * @throws IOException thrown if an IO error occurs
	 */
	@Override
	long[] findPrimaryKeys(Field indexValue) throws IOException {
		if (!indexValue.getClass().equals(fieldType.getClass())) {
			throw new IllegalArgumentException("Incorrect indexed field type");
		}
		// The index-table record key is the secondary index value itself
		Record indexRecord = indexTable.getRecord(indexValue.getLongValue());
		if (indexRecord == null) {
			return emptyKeyArray;
		}
		IndexBuffer indexBuffer = new IndexBuffer(indexValue, indexRecord.getBinaryData(0));
		return indexBuffer.getPrimaryKeys();
	}

	/**
	 * Get the number of primary keys which correspond to the specified indexed field
	 * value.
	 * @param indexValue the field value to search for.
	 * @return key count
	 * @throws IOException thrown if an IO error occurs
	 */
	@Override
	int getKeyCount(Field indexValue) throws IOException {
		if (!indexValue.getClass().equals(fieldType.getClass())) {
			throw new IllegalArgumentException("Incorrect indexed field type");
		}
		Record indexRecord = indexTable.getRecord(indexValue.getLongValue());
		if (indexRecord == null) {
			return 0;
		}
		IndexBuffer indexBuffer = new IndexBuffer(indexValue, indexRecord.getBinaryData(0));
		return indexBuffer.keyCount;
	}

	/*
	 * @see ghidra.framework.store.db.IndexTable#addEntry(ghidra.framework.store.db.Record)
	 */
	@Override
	void addEntry(Record record) throws IOException {
		Field indexField = record.getField(colIndex);
		long secondaryKey = indexField.getLongValue();
		Record indexRecord = indexTable.getRecord(secondaryKey);
		if (indexRecord == null) {
			// First primary key for this index value - create new index record
			indexRecord = indexSchema.createRecord(secondaryKey);
		}
		IndexBuffer indexBuffer = new IndexBuffer(indexField, indexRecord.getBinaryData(0));
		indexBuffer.addEntry(record.getKey());
		indexRecord.setBinaryData(0, indexBuffer.getData());
		indexTable.putRecord(indexRecord);
	}

	/*
	 * @see ghidra.framework.store.db.IndexTable#deleteEntry(ghidra.framework.store.db.Record)
	 */
	@Override
	void deleteEntry(Record record) throws IOException {
		Field indexField = record.getField(colIndex);
		long secondaryKey = indexField.getLongValue();
		Record indexRecord = indexTable.getRecord(secondaryKey);
		if (indexRecord != null) {
			IndexBuffer indexBuffer = new IndexBuffer(indexField, indexRecord.getBinaryData(0));
			indexBuffer.deleteEntry(record.getKey());
			byte[] data = indexBuffer.getData();
			if (data == null) {
				// Last primary key removed - discard the now-empty index record
				indexTable.deleteRecord(secondaryKey);
			}
			else {
				indexRecord.setBinaryData(0, data);
				indexTable.putRecord(indexRecord);
			}
		}
	}

	/**
	 * Get the index buffer associated with the specified index key
	 * @param indexKey index key
	 * @return index buffer or null if not found
	 * @throws IOException thrown if IO error occurs
	 */
	private IndexBuffer getIndexBuffer(Field indexKey) throws IOException {
		Record indexRec = indexTable.getRecord(indexKey.getLongValue());
		return indexRec != null ? new IndexBuffer(indexKey, indexRec.getBinaryData(0)) : null;
	}

	/*
	 * @see ghidra.framework.store.db.IndexTable#indexIterator()
	 */
	@Override
	DBFieldIterator indexIterator() throws IOException {
		return new IndexLongIterator();
	}

	/*
	 * @see ghidra.framework.store.db.IndexTable#indexIterator(ghidra.framework.store.db.Field, ghidra.framework.store.db.Field, boolean)
	 */
	@Override
	DBFieldIterator indexIterator(Field minField, Field maxField, boolean atMin)
			throws IOException {
		// Null bounds map to the full long range
		long min = minField != null ? minField.getLongValue() : Long.MIN_VALUE;
		long max = maxField != null ? maxField.getLongValue() : Long.MAX_VALUE;
		return new IndexLongIterator(min, max, atMin);
	}

	/*
	 * @see db.IndexTable#indexIterator(db.Field, db.Field, db.Field, boolean)
	 */
	@Override
	DBFieldIterator indexIterator(Field minField, Field maxField, Field startField, boolean before)
			throws IOException {
		if (startField == null) {
			throw new IllegalArgumentException("starting index value required");
		}
		long min = minField != null ? minField.getLongValue() : Long.MIN_VALUE;
		long max = maxField != null ? maxField.getLongValue() : Long.MAX_VALUE;
		return new IndexLongIterator(min, max, startField.getLongValue(), before);
	}

	/**
	 * Iterates over index field values within a specified range.
	 * Positioning state is tracked via the hasNext/hasPrev flags; keyField
	 * caches the most recently fetched index value as a Field instance.
	 */
	class IndexLongIterator implements DBFieldIterator {

		private Field lastKey;
		private Field keyField;
		private DBLongIterator indexIterator;
		private boolean hasNext = false;
		private boolean hasPrev = false;

		/**
		 * Construct an index field iterator starting with the minimum index value.
		 * @throws IOException thrown if IO error occurs
		 */
		IndexLongIterator() throws IOException {
			indexIterator = indexTable.longKeyIterator();
		}

		/**
		 * Construct an index field iterator over a bounded key range.
		 * @param minValue minimum index value
		 * @param maxValue maximum index value
		 * @param atMin if true initial position is before minValue, else position
		 * is after maxValue
		 * @throws IOException thrown if IO error occurs
		 */
		IndexLongIterator(long minValue, long maxValue, boolean atMin) throws IOException {
			long start = atMin ? minValue : maxValue;
			indexIterator = indexTable.longKeyIterator(minValue, maxValue, start);
			if (indexIterator.hasNext()) {
				// Consume the start key, then step back when positioned at the
				// minimum so the first next() returns it.
				indexIterator.next();
				if (atMin) {
					indexIterator.previous();
				}
			}
		}

		/**
		 * Construct an index field iterator positioned at a specific start value.
		 * @param minValue minimum index value
		 * @param maxValue maximum index value
		 * @param start initial iterator position
		 * @param before if true position before start value, else after
		 * @throws IOException thrown if IO error occurs
		 */
		public IndexLongIterator(long minValue, long maxValue, long start, boolean before)
				throws IOException {
			indexIterator = indexTable.longKeyIterator(minValue, maxValue, start);
			if (indexIterator.hasNext()) {
				long val = indexIterator.next();
				// Step back if caller wants to be positioned before start, or if
				// start itself is absent (val is the next larger key).
				if (before || val != start) {
					indexIterator.previous();
				}
			}
		}

		@Override
		public boolean hasNext() throws IOException {
			if (hasNext) {
				return true;
			}
			try {
				// Advance eagerly and cache the fetched key as a Field
				long key = indexIterator.next();
				keyField = fieldType.newField();
				keyField.setLongValue(key);
				hasNext = true;
				hasPrev = false;
			}
			catch (NoSuchElementException e) {
				return false;
			}
			return true;
		}

		@Override
		public boolean hasPrevious() throws IOException {
			if (hasPrev) {
				return true;
			}
			try {
				long key = indexIterator.previous();
				keyField = fieldType.newField();
				keyField.setLongValue(key);
				hasNext = false;
				hasPrev = true;
			}
			catch (NoSuchElementException e) {
				return false;
			}
			return true;
		}

		@Override
		public Field next() throws IOException {
			if (hasNext || hasNext()) {
				hasNext = false;
				hasPrev = true;
				lastKey = keyField;
				return keyField;
			}
			return null;
		}

		@Override
		public Field previous() throws IOException {
			if (hasPrev || hasPrevious()) {
				hasNext = true;
				hasPrev = false;
				lastKey = keyField;
				return keyField;
			}
			return null;
		}

		/**
		 * Delete all primary records which have the current
		 * index value (lastKey).
		 * @see db.DBFieldIterator#delete()
		 */
		@Override
		public boolean delete() throws IOException {
			if (lastKey == null) {
				return false;
			}
			synchronized (db) {
				IndexBuffer indexBuf = getIndexBuffer(lastKey);
				if (indexBuf != null) {
					long[] keys = indexBuf.getPrimaryKeys();
					for (long key : keys) {
						primaryTable.deleteRecord(key);
					}
					// The following does not actually delete the index record since it
					// should already have been removed with the removal of all associated
					// primary records. Invoking this method allows the iterator to
					// recover from the index table change.
					indexIterator.delete();
				}
				lastKey = null;
				return true;
			}
		}
	}
}

View file

@ -0,0 +1,199 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
import db.buffers.DataBuffer;
import ghidra.util.exception.AssertException;
/**
 * <code>FixedKeyFixedRecNode</code> is an implementation of a BTree leaf node
 * which utilizes fixed-length key values and stores fixed-length records.
 * <p>
 * This type of node has the following layout within a single DataBuffer
 * (field size in bytes, where 'L' is the fixed length of the fixed-length
 * key as specified by key type in associated Schema):
 * <pre>
 * | NodeType(1) | KeyCount(4) | PrevLeafId(4) | NextLeafId(4) | Key0(L) | Rec0 | ...
 *
 * | KeyN(L) | RecN |
 * </pre>
 */
class FixedKeyFixedRecNode extends FixedKeyRecordNode {

	private static final int HEADER_SIZE = RECORD_LEAF_HEADER_SIZE;
	private static final int ENTRY_BASE_OFFSET = HEADER_SIZE;
	private static final int[] EMPTY_ID_LIST = new int[0];

	// entrySize = keySize + recordLength; entries are stored contiguously
	// starting at ENTRY_BASE_OFFSET, key immediately followed by record data.
	private int entrySize;
	private int recordLength;

	/**
	 * Construct an existing fixed-length key fixed-length record leaf node.
	 * @param nodeMgr table node manager instance
	 * @param buf node buffer
	 * @throws IOException if IO error occurs
	 */
	FixedKeyFixedRecNode(NodeMgr nodeMgr, DataBuffer buf) throws IOException {
		super(nodeMgr, buf);
		this.recordLength = nodeMgr.getTableSchema().getFixedLength();
		entrySize = keySize + recordLength;
	}

	/**
	 * Construct a new fixed-length key fixed-length record leaf node.
	 * @param nodeMgr table node manager instance
	 * @param prevLeafId node buffer id for previous leaf ( &lt; 0: no leaf)
	 * @param nextLeafId node buffer id for next leaf ( &lt; 0 : no leaf)
	 * @throws IOException if IO error occurs
	 */
	FixedKeyFixedRecNode(NodeMgr nodeMgr, int prevLeafId, int nextLeafId) throws IOException {
		super(nodeMgr, NodeMgr.FIXEDKEY_FIXED_REC_NODE, prevLeafId, nextLeafId);
		this.recordLength = nodeMgr.getTableSchema().getFixedLength();
		entrySize = keySize + recordLength;
	}

	@Override
	FixedKeyRecordNode createNewLeaf(int prevLeafId, int nextLeafId) throws IOException {
		return new FixedKeyFixedRecNode(nodeMgr, prevLeafId, nextLeafId);
	}

	@Override
	public int getKeyOffset(int index) {
		// Key is stored at the start of each fixed-size entry
		return ENTRY_BASE_OFFSET + (index * entrySize);
	}

	/**
	 * Get the record offset within the buffer.
	 * Note: same as the key offset - the record data immediately follows the
	 * key within the entry, and callers add keySize where needed.
	 * @param index key index
	 * @return record offset
	 */
	@Override
	public int getRecordOffset(int index) {
		return ENTRY_BASE_OFFSET + (index * entrySize);
	}

	/**
	 * Shift all records by one starting with index to the end.
	 * @param index the smaller key index (0 &lt;= index)
	 * @param rightShift shift right by one record if true, else shift left by
	 * one record.
	 */
	private void shiftRecords(int index, boolean rightShift) {

		// No movement needed for appended record
		if (index == keyCount)
			return;

		// Determine block to be moved
		int start = getRecordOffset(index);
		int end = getRecordOffset(keyCount);
		int len = end - start;

		// Move record data
		int offset = start + (rightShift ? entrySize : -entrySize);
		buffer.move(start, offset, len);
	}

	@Override
	public void remove(int index) {
		if (index < 0 || index >= keyCount)
			throw new AssertException();
		// Close the gap left by the removed entry, then shrink the count
		shiftRecords(index + 1, false);
		setKeyCount(keyCount - 1);
	}

	@Override
	boolean insertRecord(int index, Record record) throws IOException {

		if (keyCount == ((buffer.length() - HEADER_SIZE) / entrySize))
			return false; // insufficient space for record storage

		// Make room for new record
		shiftRecords(index, true);

		// Store new record: key first, record data immediately after
		int offset = getRecordOffset(index);
		record.getKeyField().write(buffer, offset);
		record.write(buffer, offset + keySize);
		setKeyCount(keyCount + 1);
		return true;
	}

	@Override
	FixedKeyNode updateRecord(int index, Record record) throws IOException {
		// Fixed-length records are updated in place; tree shape never changes
		int offset = getRecordOffset(index) + keySize;
		record.write(buffer, offset);
		return getRoot();
	}

	@Override
	public Record getRecord(Field key, Schema schema) throws IOException {
		int index = getKeyIndex(key);
		if (index < 0)
			return null;
		Record record = schema.createRecord(key);
		record.read(buffer, getRecordOffset(index) + keySize);
		return record;
	}

	@Override
	public Record getRecord(Schema schema, int index) throws IOException {
		Field key = getKeyField(index);
		Record record = schema.createRecord(key);
		record.read(buffer, getRecordOffset(index) + keySize);
		return record;
	}

	@Override
	void splitData(FixedKeyRecordNode newRightLeaf) {

		FixedKeyFixedRecNode rightNode = (FixedKeyFixedRecNode) newRightLeaf;

		// Upper half of the entries move to the new right sibling
		int splitIndex = keyCount / 2;
		int count = keyCount - splitIndex;
		int start = getRecordOffset(splitIndex); // start of block to be moved
		int end = getRecordOffset(keyCount); // end of block to be moved
		int splitLen = end - start; // length of block to be moved

		// Copy data to new leaf node
		rightNode.buffer.copy(ENTRY_BASE_OFFSET, buffer, start, splitLen);

		// Adjust key counts
		setKeyCount(keyCount - count);
		rightNode.setKeyCount(count);
	}

	@Override
	public void delete() throws IOException {
		nodeMgr.deleteNode(this);
	}

	@Override
	public int[] getBufferReferences() {
		// Leaf entries are stored inline; no child buffers are referenced
		return EMPTY_ID_LIST;
	}
}

View file

@ -0,0 +1,610 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
import db.buffers.DataBuffer;
import ghidra.util.Msg;
import ghidra.util.exception.AssertException;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
/**
 * <code>FixedKeyInteriorNode</code> stores a BTree node for use as an interior
 * node when searching for Table records within the database. This type of node
 * has the following layout within a single DataBuffer (field size in bytes,
 * where 'L' is the fixed length of the fixed-length key as specified by
 * key type in associated Schema):
 * <pre>
 * | NodeType(1) | KeyCount(4) | Key0(L) | ID0(4) | ... | KeyN(L) | IDN(4) |
 * </pre>
 * Each stored key is the leftmost (minimum) key of the corresponding child node.
 */
class FixedKeyInteriorNode extends FixedKeyNode implements FieldKeyInteriorNode {

	private static final int BASE = FIXEDKEY_NODE_HEADER_SIZE;

	private static final int ID_SIZE = 4; // int

	// Derived from the buffer size: entrySize = keySize + ID_SIZE
	private final int maxKeyCount;
	private final int entrySize;

	/**
	 * Construct an existing fixed-length key interior node.
	 * @param nodeMgr table node manager instance
	 * @param buf node buffer
	 * @throws IOException thrown if IO error occurs
	 */
	FixedKeyInteriorNode(NodeMgr nodeMgr, DataBuffer buf) throws IOException {
		super(nodeMgr, buf);
		entrySize = keySize + ID_SIZE;
		maxKeyCount = (buffer.length() - BASE) / entrySize;
	}

	/**
	 * Construct a new fixed-length key interior node with two child nodes.
	 * @param nodeMgr table node manager.
	 * @param keyType key Field type
	 * @param key1 left child node left-most key
	 * @param id1 left child node buffer ID
	 * @param key2 right child node left-most key
	 * @param id2 right child node buffer ID
	 * @throws IOException thrown if IO error occurs
	 */
	FixedKeyInteriorNode(NodeMgr nodeMgr, Field keyType, byte[] key1, int id1, byte[] key2, int id2)
			throws IOException {
		super(nodeMgr, NodeMgr.FIXEDKEY_INTERIOR_NODE);
		if (keySize != key1.length || keySize != key2.length) {
			throw new IllegalArgumentException("mismatched fixed-length key sizes");
		}
		entrySize = keySize + ID_SIZE;
		maxKeyCount = (buffer.length() - BASE) / entrySize;
		setKeyCount(2);

		// Store key and node ids
		putEntry(0, key1, id1);
		putEntry(1, key2, id2);
	}

	/**
	 * Construct a new empty fixed-length key interior node.
	 * Node must be initialized with a minimum of two keys.
	 * @param nodeMgr table node manager.
	 * @param keyType key Field type
	 * @throws IOException thrown if IO error occurs
	 */
	private FixedKeyInteriorNode(NodeMgr nodeMgr, Field keyType) throws IOException {
		super(nodeMgr, NodeMgr.FIXEDKEY_INTERIOR_NODE);
		entrySize = keySize + ID_SIZE;
		maxKeyCount = (buffer.length() - BASE) / entrySize;
	}

	/**
	 * Log a consistency error for this node.
	 * @param tableName table name used in the log message
	 * @param msg error description
	 * @param t optional cause (may be null)
	 */
	void logConsistencyError(String tableName, String msg, Throwable t) {
		Msg.debug(this, "Consistency Error (" + tableName + "): " + msg);
		Msg.debug(this, " parent.key[0]=" + BinaryField.getValueAsString(getKey(0)) +
			" bufferID=" + getBufferId());
		if (t != null) {
			Msg.error(this, "Consistency Error (" + tableName + ")", t);
		}
	}

	@Override
	public boolean isConsistent(String tableName, TaskMonitor monitor)
			throws IOException, CancelledException {
		boolean consistent = true;
		Field lastMinKey = null;
		Field lastMaxKey = null;
		for (int i = 0; i < keyCount; i++) {

			// Compare each key entry with the previous entries key-range
			Field key = getKeyField(i);
			if (lastMinKey != null && key.compareTo(lastMinKey) <= 0) {
				consistent = false;
				logConsistencyError(tableName,
					"child[" + i + "].minKey <= child[" + (i - 1) + "].minKey", null);
				Msg.debug(this, " child[" + i + "].minKey = " + key.getValueAsString() +
					" bufferID=" + getBufferId(i));
				Msg.debug(this, " child[" + (i - 1) + "].minKey = " +
					lastMinKey.getValueAsString() + " bufferID=" + getBufferId(i - 1));
			}
			else if (lastMaxKey != null && key.compareTo(lastMaxKey) <= 0) {
				consistent = false;
				logConsistencyError(tableName,
					"child[" + i + "].minKey <= child[" + (i - 1) + "].maxKey", null);
				Msg.debug(this, " child[" + i + "].minKey = " + key.getValueAsString() +
					" bufferID=" + getBufferId(i));
				Msg.debug(this, " child[" + (i - 1) + "].maxKey = " +
					lastMaxKey.getValueAsString() + " bufferID=" + getBufferId(i - 1));
			}

			lastMinKey = key;

			FixedKeyNode node = null;
			try {
				try {
					node = nodeMgr.getFixedKeyNode(getBufferId(i));
					node.parent = this;
				}
				catch (IOException e) {
					logConsistencyError(tableName, "failed to fetch child node: " + e.getMessage(),
						e);
				}
				catch (RuntimeException e) {
					logConsistencyError(tableName, "failed to fetch child node: " + e.getMessage(),
						e);
				}

				if (node == null) {
					consistent = false;
					lastMaxKey = key; // for lack of a better solution
					continue; // skip child
				}

				lastMaxKey = node.getKeyField(node.getKeyCount() - 1);

				// Verify key matchup between parent and child
				Field childKey0 = node.getKeyField(0);
				if (!key.equals(childKey0)) {
					consistent = false;
					logConsistencyError(tableName,
						"parent key entry mismatch with child[" + i + "].minKey", null);
					// NOTE(review): bufferID below uses getBufferId(i - 1); the
					// mismatched child is index i, so i looks intended - verify.
					Msg.debug(this, " child[" + i + "].minKey = " + childKey0.getValueAsString() +
						" bufferID=" + getBufferId(i - 1));
					Msg.debug(this, " parent key entry = " + key.getValueAsString());
				}

				consistent &= node.isConsistent(tableName, monitor);
				monitor.checkCanceled();
			}
			finally {
				if (node != null) {
					// Release nodes as we go - this is not the norm!
					nodeMgr.releaseReadOnlyNode(node.getBufferId());
				}
			}
		}
		monitor.checkCanceled();
		return consistent;
	}

	/**
	 * Perform a binary search to locate the specified key and derive an index
	 * into the Buffer ID storage. This method is intended to locate the child
	 * node which contains the specified key. The returned index corresponds
	 * to a child's stored buffer/node ID and may correspond to another interior
	 * node or a leaf record node. Each stored key within this interior node
	 * effectively identifies the maximum key contained within the corresponding
	 * child node.
	 * @param key key to search for
	 * @return int buffer ID index of child node. An existing positive index
	 * value will always be returned.
	 */
	int getIdIndex(Field key) {

		// min starts at 1 so that a key smaller than all stored keys still
		// resolves to child index 0 (max never drops below 0)
		int min = 1;
		int max = keyCount - 1;

		while (min <= max) {
			int i = (min + max) / 2;
			int c = compareKeyField(key, i);
			if (c == 0) {
				return i;
			}
			else if (c > 0) {
				min = i + 1;
			}
			else {
				max = i - 1;
			}
		}
		return max;
	}

	@Override
	public int getKeyIndex(Field key) {

		int min = 0;
		int max = keyCount - 1;

		while (min <= max) {
			int i = (min + max) / 2;
			int rc = compareKeyField(key, i);
			if (rc == 0) {
				return i;
			}
			else if (rc > 0) {
				min = i + 1;
			}
			else {
				max = i - 1;
			}
		}
		// Standard binary-search encoding of the insertion point
		return -(min + 1);
	}

	@Override
	byte[] getKey(int index) {
		byte[] key = new byte[keySize];
		buffer.get(BASE + (index * entrySize), key);
		return key;
	}

	@Override
	public int compareKeyField(Field k, int keyIndex) {
		// Compare directly against the serialized key bytes in the buffer
		return k.compareTo(buffer, BASE + (keyIndex * entrySize));
	}

	/**
	 * Store a key at the specified index
	 * @param index key index
	 * @param key key value
	 */
	private void putKey(int index, byte[] key) {
		buffer.put(BASE + (index * entrySize), key);
	}

	/**
	 * Get the child node buffer ID associated with the specified key index
	 * @param index child key index
	 * @return child node buffer ID
	 */
	private int getBufferId(int index) {
		return buffer.getInt(BASE + (index * entrySize) + keySize);
	}

	/**
	 * Store the child node entry (key and buffer ID) associated with the specified key index.
	 * The entry at index is overwritten. Since each entry is a fixed length, movement of
	 * existing entries is not necessary.
	 * @param index child key index
	 * @param key child node key
	 * @param bufferId child node buffer ID
	 */
	private void putEntry(int index, byte[] key, int bufferId) {
		int offset = BASE + (index * entrySize);
		buffer.put(offset, key);
		buffer.putInt(offset + keySize, bufferId);
	}

	/**
	 * Insert the child node entry (key and buffer ID) associated with the specified key index.
	 * All entries at and after index are shifted right to make space for new entry.
	 * The node key count is adjusted to reflect the addition of a child.
	 * @param index child key index
	 * @param key child node key
	 * @param bufferId child node buffer ID
	 */
	private void insertEntry(int index, byte[] key, int bufferId) {
		int start = BASE + (index * entrySize);
		int end = BASE + (keyCount * entrySize);
		buffer.move(start, start + entrySize, end - start);
		buffer.put(start, key);
		buffer.putInt(start + keySize, bufferId);
		setKeyCount(keyCount + 1);
	}

	/**
	 * Delete the child node entry (key and buffer ID) associated with the specified key index.
	 * All entries after index are shifted left.
	 * The node key count is adjusted to reflect the removal of a child.
	 * @param index child key index
	 */
	private void deleteEntry(int index) {

		// An interior node must always retain at least two children
		if (keyCount < 3 || index >= keyCount)
			throw new AssertException();

		++index;
		if (index < keyCount) {
			int start = BASE + (index * entrySize);
			int end = BASE + (keyCount * entrySize);
			buffer.move(start, start - entrySize, end - start);
		}
		setKeyCount(keyCount - 1);
	}

	/**
	 * Callback method for when a child node's leftmost key changes.
	 * @param oldKey previous leftmost key.
	 * @param newKeyData new leftmost key.
	 */
	void keyChanged(Field oldKey, byte[] newKeyData) {
		int index = getKeyIndex(oldKey);
		if (index < 0) {
			throw new AssertException();
		}
		// Update key
		putKey(index, newKeyData);
		if (index == 0 && parent != null) {
			// Leftmost key of this node changed - propagate upward
			parent.keyChanged(oldKey, newKeyData);
		}
	}

	@Override
	public void keyChanged(Field oldKey, Field newKey, FieldKeyNode childNode) throws IOException {
		keyChanged(oldKey, newKey.getBinaryData());
	}

	/**
	 * Insert a new node into this node.
	 * @param id id of new node
	 * @param key leftmost key associated with new node.
	 * @return root node.
	 * @throws IOException thrown if an IO error occurs
	 */
	FixedKeyNode insert(int id, Field key) throws IOException {

		// Split this node if full
		if (keyCount == maxKeyCount) {
			return split(key, id);
		}

		// Insert key into this node
		int index = -(getKeyIndex(key) + 1);
		if (index < 0 || id == 0)
			throw new AssertException();
		byte[] keyData = key.getBinaryData();
		insertEntry(index, keyData, id);

		if (index == 0 && parent != null) {
			// New leftmost key - notify parent of the change
			parent.keyChanged(getKeyField(1), keyData);
		}

		return getRoot();
	}

	/**
	 * Split this interior node and insert new child entry (key and buffer ID).
	 * Assumes 3 or more child keys exist in this node.
	 * @param newKey new child key
	 * @param newId new child node's buffer ID
	 * @return root node.
	 * @throws IOException thrown if IO error occurs
	 */
	private FixedKeyNode split(Field newKey, int newId) throws IOException {

		// Create new interior node
		FixedKeyInteriorNode newNode = new FixedKeyInteriorNode(nodeMgr, keyType);

		moveKeysRight(this, newNode, keyCount / 2);

		// Insert new key/id into whichever half now owns its range
		Field rightKey = newNode.getKeyField(0);
		if (newKey.compareTo(rightKey) < 0) {
			insert(newId, newKey);
		}
		else {
			newNode.insert(newId, newKey);
		}

		if (parent != null) {
			// Ask parent to insert new node and return root
			return parent.insert(newNode.getBufferId(), rightKey);
		}

		// New parent node becomes root
		return new FixedKeyInteriorNode(nodeMgr, keyType, getKey(0), buffer.getId(),
			rightKey.getBinaryData(), newNode.getBufferId());
	}

	@Override
	public FixedKeyRecordNode getLeafNode(Field key) throws IOException {
		FixedKeyNode node = nodeMgr.getFixedKeyNode(getBufferId(getIdIndex(key)));
		node.parent = this;
		return (FixedKeyRecordNode) node.getLeafNode(key);
	}

	@Override
	public FieldKeyRecordNode getLeftmostLeafNode() throws IOException {
		FixedKeyNode node = nodeMgr.getFixedKeyNode(getBufferId(0));
		return node.getLeftmostLeafNode();
	}

	@Override
	public FieldKeyRecordNode getRightmostLeafNode() throws IOException {
		FixedKeyNode node = nodeMgr.getFixedKeyNode(getBufferId(keyCount - 1));
		return node.getRightmostLeafNode();
	}

	/**
	 * Callback method allowing child node to remove itself from parent.
	 * Rebalancing of the tree is performed if the interior node falls
	 * below the half-full point.
	 * @param key child node key
	 * @return root node
	 * @throws IOException thrown if IO error occurs
	 */
	FixedKeyNode deleteChild(Field key) throws IOException {

		int index = getKeyIndex(key);
		if (index < 0)
			throw new AssertException();

		// Handle elimination of this node (only possible at root)
		if (keyCount == 2) {
			if (parent != null)
				throw new AssertException();
			// Remaining child becomes the new root
			FixedKeyNode rootNode = nodeMgr.getFixedKeyNode(getBufferId(1 - index));
			rootNode.parent = null;
			nodeMgr.deleteNode(this);
			return rootNode;
		}

		// Delete child entry
		deleteEntry(index);
		if (index == 0 && parent != null) {
			parent.keyChanged(key, getKey(0));
		}

		return (parent != null) ? parent.balanceChild(this) : this;
	}

	/**
	 * Callback method allowing a child interior node to request balancing of its
	 * content with its sibling nodes. Balancing is only done if the specified node
	 * is half-full or less.
	 * @param node child interior node
	 * @return root node
	 * @throws IOException thrown if IO error occurs
	 */
	private FixedKeyNode balanceChild(FixedKeyInteriorNode node) throws IOException {

		// Do nothing if node more than half full
		if (node.keyCount > maxKeyCount / 2) {
			return getRoot();
		}

		// balance with right sibling except if node corresponds to the right-most
		// key within this interior node - in that case balance with left sibling.
		int index = getIdIndex(node.getKeyField(0));
		if (index == (keyCount - 1)) {
			return balanceChild(
				(FixedKeyInteriorNode) nodeMgr.getFixedKeyNode(getBufferId(index - 1)), node);
		}
		return balanceChild(node,
			(FixedKeyInteriorNode) nodeMgr.getFixedKeyNode(getBufferId(index + 1)));
	}

	/**
	 * Balance the entries contained within two adjacent child interior nodes.
	 * One of the two nodes must be half-full or less.
	 * This could result in the removal of a child node if entries will fit within
	 * one node.
	 * @param leftNode left child interior node
	 * @param rightNode right child interior node
	 * @return new root
	 * @throws IOException thrown if an IO error occurs
	 */
	private FixedKeyNode balanceChild(FixedKeyInteriorNode leftNode, FixedKeyInteriorNode rightNode)
			throws IOException {

		Field rightKey = rightNode.getKeyField(0);
		int leftKeyCount = leftNode.keyCount;
		int rightKeyCount = rightNode.keyCount;
		int newLeftKeyCount = leftKeyCount + rightKeyCount;

		// Can right keys fit within left node
		if (newLeftKeyCount <= maxKeyCount) {
			// Right node is eliminated and all entries stored in left node
			moveKeysLeft(leftNode, rightNode, rightKeyCount);
			nodeMgr.deleteNode(rightNode);
			return deleteChild(rightKey);
		}

		// Otherwise redistribute entries evenly between the two nodes
		newLeftKeyCount = newLeftKeyCount / 2;
		if (newLeftKeyCount < leftKeyCount) {
			moveKeysRight(leftNode, rightNode, leftKeyCount - newLeftKeyCount);
		}
		else if (newLeftKeyCount > leftKeyCount) {
			moveKeysLeft(leftNode, rightNode, newLeftKeyCount - leftKeyCount);
		}
		this.keyChanged(rightKey, rightNode.getKey(0));

		return getRoot();
	}

	/**
	 * Move some (not all) of the entries from the left node into the right node.
	 * @param leftNode source of entries (loses count entries from its right end)
	 * @param rightNode destination (gains count entries at its left end)
	 * @param count number of entries to move
	 */
	private static void moveKeysRight(FixedKeyInteriorNode leftNode, FixedKeyInteriorNode rightNode,
			int count) {
		if (leftNode.keySize != rightNode.keySize) {
			throw new IllegalArgumentException("mismatched fixed key sizes");
		}
		int leftKeyCount = leftNode.keyCount;
		int rightKeyCount = rightNode.keyCount;
		int leftOffset = BASE + ((leftKeyCount - count) * leftNode.entrySize);
		int len = count * leftNode.entrySize;
		// Open a gap at the start of the right node, then copy in the entries
		rightNode.buffer.move(BASE, BASE + len, rightKeyCount * leftNode.entrySize);
		rightNode.buffer.copy(BASE, leftNode.buffer, leftOffset, len);
		leftNode.setKeyCount(leftKeyCount - count);
		rightNode.setKeyCount(rightKeyCount + count);
	}

	/**
	 * Move some or all of the entries from the right node into the left node.
	 * If all keys are moved, the caller is responsible for deleting the right
	 * node.
	 * @param leftNode destination (gains count entries at its right end)
	 * @param rightNode source of entries (loses count entries from its left end)
	 * @param count number of entries to move
	 */
	private static void moveKeysLeft(FixedKeyInteriorNode leftNode, FixedKeyInteriorNode rightNode,
			int count) {
		if (leftNode.keySize != rightNode.keySize) {
			throw new IllegalArgumentException("mismatched fixed key sizes");
		}
		int leftKeyCount = leftNode.keyCount;
		int rightKeyCount = rightNode.keyCount;
		int leftOffset = BASE + (leftKeyCount * leftNode.entrySize);
		int len = count * leftNode.entrySize;
		leftNode.buffer.copy(leftOffset, rightNode.buffer, BASE, len);
		leftNode.setKeyCount(leftKeyCount + count);
		if (count < rightKeyCount) {
			// Only need to update right node if partial move
			rightKeyCount -= count;
			rightNode.buffer.move(BASE + len, BASE, rightKeyCount * leftNode.entrySize);
			rightNode.setKeyCount(rightKeyCount);
		}
	}

	@Override
	public void delete() throws IOException {

		// Delete all child nodes
		for (int index = 0; index < keyCount; index++) {
			nodeMgr.getFixedKeyNode(getBufferId(index)).delete();
		}

		// Remove this node
		nodeMgr.deleteNode(this);
	}

	@Override
	public int[] getBufferReferences() {
		int[] ids = new int[keyCount];
		for (int i = 0; i < keyCount; i++) {
			ids[i] = getBufferId(i);
		}
		return ids;
	}

	/**
	 * Determine if the specified key resolves to the leftmost child of the
	 * entire subtree rooted at this node's topmost ancestor.
	 * @param key key to check
	 * @return true if key is leftmost
	 */
	boolean isLeftmostKey(Field key) {
		if (getIdIndex(key) == 0) {
			if (parent != null) {
				return parent.isLeftmostKey(key);
			}
			return true;
		}
		return false;
	}

	/**
	 * Determine if the specified key resolves to the rightmost child of the
	 * entire subtree rooted at this node's topmost ancestor.
	 * @param key key to check
	 * @return true if key is rightmost
	 */
	boolean isRightmostKey(Field key) {
		if (getIdIndex(key) == (keyCount - 1)) {
			if (parent != null) {
				return parent.isRightmostKey(getKeyField(0));
			}
			return true;
		}
		return false;
	}
}

View file

@ -0,0 +1,136 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
import db.buffers.DataBuffer;
import ghidra.util.exception.AssertException;
/**
 * <code>FixedKeyNode</code> is an abstract implementation of a BTree node
 * which utilizes fixed-length key values.
 * <pre>
 *   | NodeType(1) | KeyCount(4) | ...
 * </pre>
 */
abstract class FixedKeyNode implements FieldKeyNode {

	private static final int KEY_COUNT_SIZE = 4;
	private static final int KEY_COUNT_OFFSET = NodeMgr.NODE_HEADER_SIZE;

	static final int FIXEDKEY_NODE_HEADER_SIZE = NodeMgr.NODE_HEADER_SIZE + KEY_COUNT_SIZE;

	protected final Field keyType;
	protected final int keySize;

	protected NodeMgr nodeMgr;
	protected DataBuffer buffer;
	protected FixedKeyInteriorNode parent;
	protected int keyCount;

	/**
	 * Construct an existing fixed-length key node.
	 * @param nodeMgr table node manager instance
	 * @param buf node buffer
	 * @throws IOException thrown if IO error occurs
	 */
	FixedKeyNode(NodeMgr nodeMgr, DataBuffer buf) throws IOException {
		this.nodeMgr = nodeMgr;
		buffer = buf;
		keyType = checkedKeyType(nodeMgr);
		keySize = keyType.length();
		// Key count persists within the buffer header
		keyCount = buffer.getInt(KEY_COUNT_OFFSET);
		nodeMgr.addNode(this);
	}

	/**
	 * Construct a new fixed-length key node.
	 * @param nodeMgr table node manager.
	 * @param nodeType node type
	 * @throws IOException thrown if IO error occurs
	 */
	FixedKeyNode(NodeMgr nodeMgr, byte nodeType) throws IOException {
		this.nodeMgr = nodeMgr;
		buffer = nodeMgr.getBufferMgr().createBuffer();
		NodeMgr.setNodeType(buffer, nodeType);
		keyType = checkedKeyType(nodeMgr);
		keySize = keyType.length();
		setKeyCount(0); // also records the zero count in the buffer header
		nodeMgr.addNode(this);
	}

	/**
	 * Verify that the table schema supports fixed-key nodes and return its key type.
	 * @param nodeMgr table node manager
	 * @return fixed-length key field type declared by the table schema
	 */
	private static Field checkedKeyType(NodeMgr nodeMgr) {
		Schema schema = nodeMgr.getTableSchema();
		if (!schema.useFixedKeyNodes()) {
			throw new AssertException("unsupported schema");
		}
		return schema.getKeyFieldType();
	}

	@Override
	public FixedKeyInteriorNode getParent() {
		return parent;
	}

	@Override
	public int getBufferId() {
		return buffer.getId();
	}

	@Override
	public DataBuffer getBuffer() {
		return buffer;
	}

	/**
	 * Get the root for this node.  If setParent has not been invoked, this node
	 * is assumed to be the root.
	 * @return root node
	 */
	FixedKeyNode getRoot() {
		return parent == null ? this : parent.getRoot();
	}

	@Override
	public int getKeyCount() {
		return keyCount;
	}

	@Override
	public void setKeyCount(int cnt) {
		keyCount = cnt;
		buffer.putInt(KEY_COUNT_OFFSET, keyCount);
	}

	/**
	 * Get the key value at a specific index.
	 * @param index key index
	 * @return key value
	 */
	abstract byte[] getKey(int index);

	@Override
	public final Field getKeyField(int index) {
		Field field = keyType.newField();
		field.setBinaryData(getKey(index));
		return field;
	}
}

View file

@ -0,0 +1,508 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
import db.buffers.DataBuffer;
import ghidra.util.Msg;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
/**
 * <code>FixedKeyRecordNode</code> is an abstract implementation of a BTree leaf node
 * which utilizes fixed-length binary key values and stores records.
 * <p>
 * This type of node has the following partial layout within a single DataBuffer
 * (field size in bytes):
 * <pre>
 *   | NodeType(1) | KeyCount(4) | PrevLeafId(4) | NextLeafId(4) ...
 * </pre>
 */
abstract class FixedKeyRecordNode extends FixedKeyNode implements FieldKeyRecordNode {

	private static final int ID_SIZE = 4;

	// Sibling leaf links stored immediately after the common fixed-key node header
	private static final int PREV_LEAF_ID_OFFSET = FIXEDKEY_NODE_HEADER_SIZE;
	private static final int NEXT_LEAF_ID_OFFSET = PREV_LEAF_ID_OFFSET + ID_SIZE;

	static final int RECORD_LEAF_HEADER_SIZE = FIXEDKEY_NODE_HEADER_SIZE + (2 * ID_SIZE);

	/**
	 * Construct an existing fixed-length key record leaf node.
	 * @param nodeMgr table node manager instance
	 * @param buf node buffer
	 * @throws IOException thrown if an IO error occurs
	 */
	FixedKeyRecordNode(NodeMgr nodeMgr, DataBuffer buf) throws IOException {
		super(nodeMgr, buf);
	}

	/**
	 * Construct a new fixed-length key record leaf node.
	 * @param nodeMgr table node manager instance
	 * @param nodeType node type
	 * @param prevLeafId node buffer id for previous leaf - left sibling ( &lt; 0: no leaf)
	 * @param nextLeafId node buffer id for next leaf - right sibling ( &lt; 0 : no leaf)
	 * @throws IOException thrown if an IO error occurs
	 */
	FixedKeyRecordNode(NodeMgr nodeMgr, byte nodeType, int prevLeafId, int nextLeafId)
			throws IOException {
		super(nodeMgr, nodeType);

		// Initialize header
		buffer.putInt(PREV_LEAF_ID_OFFSET, prevLeafId);
		buffer.putInt(NEXT_LEAF_ID_OFFSET, nextLeafId);
	}

	/**
	 * Log a consistency-check failure for this leaf, identifying it by buffer ID
	 * and its first key.
	 * @param tableName name of table being checked
	 * @param msg failure description
	 * @param t optional cause (may be null)
	 */
	void logConsistencyError(String tableName, String msg, Throwable t) {
		Msg.debug(this, "Consistency Error (" + tableName + "): " + msg);
		Msg.debug(this,
			" bufferID=" + getBufferId() + " key[0]=" + BinaryField.getValueAsString(getKey(0)));
		if (t != null) {
			Msg.error(this, "Consistency Error (" + tableName + ")", t);
		}
	}

	@Override
	public boolean isConsistent(String tableName, TaskMonitor monitor)
			throws IOException, CancelledException {
		boolean consistent = true;
		Field prevKey = null;
		for (int i = 0; i < keyCount; i++) {
			// Compare each key entry with the previous key - keys must be strictly increasing
			Field key = getKeyField(i);
			if (prevKey != null && key.compareTo(prevKey) <= 0) {
				consistent = false;
				logConsistencyError(tableName, "key[" + i + "] <= key[" + (i - 1) + "]", null);
				Msg.debug(this, " key[" + i + "].minKey = " + key.getValueAsString());
				Msg.debug(this, " key[" + (i - 1) + "].minKey = " + prevKey.getValueAsString());
			}
			prevKey = key;
		}

		// Validate sibling links against this leaf's position within the tree
		Field key0 = getKeyField(0);
		if ((parent == null || parent.isLeftmostKey(key0)) && getPreviousLeaf() != null) {
			consistent = false;
			logConsistencyError(tableName, "previous-leaf should not exist", null);
		}

		FixedKeyRecordNode node = getNextLeaf();
		if (node != null) {
			if (parent == null || parent.isRightmostKey(key0)) {
				consistent = false;
				logConsistencyError(tableName, "next-leaf should not exist", null);
			}
			else {
				// Next leaf must link back to this leaf
				FixedKeyRecordNode me = node.getPreviousLeaf();
				if (me != this) {
					consistent = false;
					logConsistencyError(tableName, "next-leaf is not linked to this leaf", null);
				}
			}
		}
		else if (parent != null && !parent.isRightmostKey(key0)) {
			consistent = false;
			logConsistencyError(tableName, "this leaf is not linked to next-leaf", null);
		}
		return consistent;
	}

	@Override
	byte[] getKey(int index) {
		byte[] key = new byte[keySize];
		buffer.get(getKeyOffset(index), key);
		return key;
	}

	@Override
	public int compareKeyField(Field k, int keyIndex) {
		// Compares directly against the key bytes within the buffer (no Field allocation)
		return k.compareTo(buffer, getKeyOffset(keyIndex));
	}

	/**
	 * Get the key offset within the node's data buffer
	 * @param index key/record index
	 * @return positive record offset within buffer
	 */
	@Override
	public abstract int getKeyOffset(int index);

	@Override
	public FixedKeyRecordNode getLeafNode(Field key) throws IOException {
		// A leaf is its own leaf node for any key
		return this;
	}

	@Override
	public FixedKeyRecordNode getLeftmostLeafNode() throws IOException {
		FixedKeyRecordNode leaf = getPreviousLeaf();
		return leaf != null ? leaf.getLeftmostLeafNode() : this;
	}

	@Override
	public FixedKeyRecordNode getRightmostLeafNode() throws IOException {
		FixedKeyRecordNode leaf = getNextLeaf();
		return leaf != null ? leaf.getRightmostLeafNode() : this;
	}

	@Override
	public boolean hasNextLeaf() throws IOException {
		int nextLeafId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
		return (nextLeafId >= 0);
	}

	@Override
	public FixedKeyRecordNode getNextLeaf() throws IOException {
		FixedKeyRecordNode leaf = null;
		int nextLeafId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
		if (nextLeafId >= 0) {
			leaf = (FixedKeyRecordNode) nodeMgr.getFixedKeyNode(nextLeafId);
		}
		return leaf;
	}

	@Override
	public boolean hasPreviousLeaf() throws IOException {
		int prevLeafId = buffer.getInt(PREV_LEAF_ID_OFFSET);
		return (prevLeafId >= 0);
	}

	@Override
	public FixedKeyRecordNode getPreviousLeaf() throws IOException {
		FixedKeyRecordNode leaf = null;
		int prevLeafId = buffer.getInt(PREV_LEAF_ID_OFFSET);
		if (prevLeafId >= 0) {
			leaf = (FixedKeyRecordNode) nodeMgr.getFixedKeyNode(prevLeafId);
		}
		return leaf;
	}

	@Override
	public int getKeyIndex(Field key) {
		// Binary search; a negative result encodes the insertion point as -(insertIndex + 1)
		int min = 0;
		int max = keyCount - 1;

		while (min <= max) {
			int i = (min + max) / 2;
			int rc = compareKeyField(key, i);
			if (rc == 0) {
				return i;
			}
			else if (rc > 0) {
				min = i + 1;
			}
			else {
				max = i - 1;
			}
		}
		return -(min + 1);
	}

	/**
	 * Split this leaf node in half and update tree.
	 * When a split is performed, the next operation must be performed
	 * from the root node since the tree may have been restructured.
	 * @return root node which may have changed.
	 * @throws IOException thrown if an IO error occurs
	 */
	FixedKeyNode split() throws IOException {

		// Create new leaf
		int oldSiblingId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
		FixedKeyRecordNode newLeaf = createNewLeaf(buffer.getId(), oldSiblingId);
		DataBuffer newBuf = newLeaf.buffer;
		int newBufId = newBuf.getId();
		buffer.putInt(NEXT_LEAF_ID_OFFSET, newBufId);

		// Link old right sibling (if any) back to the new leaf
		if (oldSiblingId >= 0) {
			FixedKeyRecordNode leaf = (FixedKeyRecordNode) nodeMgr.getFixedKeyNode(oldSiblingId);
			leaf.buffer.putInt(PREV_LEAF_ID_OFFSET, newBufId);
		}

		// Split node creating two balanced leaves
		splitData(newLeaf);

		if (parent != null) {
			// Ask parent to insert new node and return root
			return parent.insert(newBufId, newLeaf.getKeyField(0));
		}

		// New parent node becomes root
		return new FixedKeyInteriorNode(nodeMgr, keyType, getKey(0), buffer.getId(),
			newLeaf.getKey(0), newBufId);
	}

	/**
	 * Append a leaf which contains one or more keys and update tree.  Leaf is inserted
	 * as the new right sibling of this leaf.
	 * @param leaf new right sibling leaf (must be same node type as this leaf)
	 * @return root node which may have changed.
	 * @throws IOException thrown if an IO error occurs
	 */
	FixedKeyNode appendLeaf(FixedKeyRecordNode leaf) throws IOException {

		// Create new leaf and link
		leaf.buffer.putInt(PREV_LEAF_ID_OFFSET, buffer.getId());
		int rightLeafBufId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
		leaf.buffer.putInt(NEXT_LEAF_ID_OFFSET, rightLeafBufId);

		// Adjust this node
		int newBufId = leaf.buffer.getId();
		buffer.putInt(NEXT_LEAF_ID_OFFSET, newBufId);

		// Adjust old right node if present
		if (rightLeafBufId >= 0) {
			FixedKeyNode rightLeaf = nodeMgr.getFixedKeyNode(rightLeafBufId);
			rightLeaf.buffer.putInt(PREV_LEAF_ID_OFFSET, newBufId);
		}

		if (parent != null) {
			// Ask parent to insert new node and return root - leaf parent is unknown
			return parent.insert(newBufId, leaf.getKeyField(0));
		}

		// New parent node becomes root
		return new FixedKeyInteriorNode(nodeMgr, keyType, getKey(0), buffer.getId(), leaf.getKey(0),
			newBufId);
	}

	/**
	 * Remove this leaf from the tree.
	 * @return root node which may have changed.
	 * @throws IOException thrown if IO error occurs
	 */
	@Override
	public FixedKeyNode removeLeaf() throws IOException {

		// Capture leftmost key before deletion - needed for parent notification below
		Field key = getKeyField(0);

		// Unlink this leaf from its siblings
		int prevBufferId = buffer.getInt(PREV_LEAF_ID_OFFSET);
		int nextBufferId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
		if (prevBufferId >= 0) {
			FixedKeyRecordNode prevNode =
				(FixedKeyRecordNode) nodeMgr.getFixedKeyNode(prevBufferId);
			prevNode.getBuffer().putInt(NEXT_LEAF_ID_OFFSET, nextBufferId);
		}
		if (nextBufferId >= 0) {
			FixedKeyRecordNode nextNode =
				(FixedKeyRecordNode) nodeMgr.getFixedKeyNode(nextBufferId);
			nextNode.getBuffer().putInt(PREV_LEAF_ID_OFFSET, prevBufferId);
		}

		nodeMgr.deleteNode(this);
		if (parent == null) {
			return null;
		}
		return parent.deleteChild(key);
	}

	/**
	 * Split the contents of this leaf node; placing the right half of the records into the
	 * empty leaf node provided.
	 * @param newRightLeaf empty right sibling leaf
	 */
	abstract void splitData(FixedKeyRecordNode newRightLeaf);

	/**
	 * Create a new leaf and add to the node manager.
	 * The new leaf's parent is unknown.
	 * @param prevNodeId node buffer id for previous leaf - left sibling ( &lt; 0: no leaf)
	 * @param nextNodeId node buffer id for next leaf - right sibling ( &lt; 0 : no leaf)
	 * @return new leaf node.
	 * @throws IOException thrown if IO error occurs
	 */
	abstract FixedKeyRecordNode createNewLeaf(int prevNodeId, int nextNodeId) throws IOException;

	@Override
	public FixedKeyNode putRecord(Record record, Table table) throws IOException {

		Field key = record.getKeyField();
		int index = getKeyIndex(key);

		// Handle record update case
		if (index >= 0) {
			if (table != null) {
				table.updatedRecord(getRecord(table.getSchema(), index), record);
			}
			FixedKeyNode newRoot = updateRecord(index, record);
			return newRoot;
		}

		// Handle new record - see if we have room in this leaf
		index = -index - 1; // decode insertion point
		if (insertRecord(index, record)) {
			if (index == 0 && parent != null) {
				// Leftmost key changed - notify parent
				parent.keyChanged(getKeyField(1), key, null);
			}
			if (table != null) {
				table.insertedRecord(record);
			}
			return getRoot();
		}

		// Special Case - append new leaf to right
		if (index == keyCount) {
			FixedKeyNode newRoot = appendNewLeaf(record);
			if (table != null) {
				table.insertedRecord(record);
			}
			return newRoot;
		}

		// Split leaf and complete insertion
		FixedKeyRecordNode leaf = (FixedKeyRecordNode) split().getLeafNode(key);
		return leaf.putRecord(record, table);
	}

	/**
	 * Append a new leaf and insert the specified record.
	 * @param record data record to be inserted
	 * @return root node which may have changed.
	 * @throws IOException thrown if IO error occurs
	 */
	FixedKeyNode appendNewLeaf(Record record) throws IOException {
		FixedKeyRecordNode newLeaf = createNewLeaf(-1, -1);
		newLeaf.insertRecord(0, record);
		return appendLeaf(newLeaf);
	}

	@Override
	public FieldKeyNode deleteRecord(Field key, Table table) throws IOException {

		// Handle non-existent key - do nothing
		int index = getKeyIndex(key);
		if (index < 0) {
			return getRoot();
		}

		if (table != null) {
			table.deletedRecord(getRecord(table.getSchema(), index));
		}

		// Handle removal of last record in node
		if (keyCount == 1) {
			FixedKeyNode newRoot = removeLeaf();
			return newRoot;
		}

		// Remove record within this node
		remove(index);

		// Notify parent of leftmost key change
		if (index == 0 && parent != null) {
			parent.keyChanged(key, getKey(0));
		}

		return getRoot();
	}

	/**
	 * Inserts the record at the given index if there is sufficient space in
	 * the buffer.
	 * @param index insertion index
	 * @param record record to be inserted
	 * @return true if the record was successfully inserted.
	 * @throws IOException thrown if IO error occurs
	 */
	abstract boolean insertRecord(int index, Record record) throws IOException;

	/**
	 * Updates the record at the given index.
	 * @param index record index
	 * @param record new record
	 * @return root node which may have changed.
	 * @throws IOException thrown if IO error occurs
	 */
	abstract FixedKeyNode updateRecord(int index, Record record) throws IOException;

	@Override
	public db.Record getRecordBefore(Field key, Schema schema) throws IOException {
		int index = getKeyIndex(key);
		if (index < 0) {
			index = -index - 2; // insertion point minus one
		}
		else {
			--index;
		}
		if (index < 0) {
			// NOTE: despite the name, this local holds the PREVIOUS sibling leaf
			FixedKeyRecordNode nextLeaf = getPreviousLeaf();
			return nextLeaf != null ? nextLeaf.getRecord(schema, nextLeaf.keyCount - 1) : null;
		}
		return getRecord(schema, index);
	}

	@Override
	public db.Record getRecordAfter(Field key, Schema schema) throws IOException {
		int index = getKeyIndex(key);
		if (index < 0) {
			index = -(index + 1); // insertion point
		}
		else {
			++index;
		}
		if (index == keyCount) {
			FixedKeyRecordNode nextLeaf = getNextLeaf();
			return nextLeaf != null ? nextLeaf.getRecord(schema, 0) : null;
		}
		return getRecord(schema, index);
	}

	@Override
	public Record getRecordAtOrBefore(Field key, Schema schema) throws IOException {
		int index = getKeyIndex(key);
		if (index < 0) {
			index = -index - 2; // exact match absent: use preceding entry
		}
		if (index < 0) {
			// NOTE: despite the name, this local holds the PREVIOUS sibling leaf
			FixedKeyRecordNode nextLeaf = getPreviousLeaf();
			return nextLeaf != null ? nextLeaf.getRecord(schema, nextLeaf.keyCount - 1) : null;
		}
		return getRecord(schema, index);
	}

	@Override
	public Record getRecordAtOrAfter(Field key, Schema schema) throws IOException {
		int index = getKeyIndex(key);
		if (index < 0) {
			index = -(index + 1); // exact match absent: use following entry
		}
		if (index == keyCount) {
			FixedKeyRecordNode nextLeaf = getNextLeaf();
			return nextLeaf != null ? nextLeaf.getRecord(schema, 0) : null;
		}
		return getRecord(schema, index);
	}

	/**
	 * Create a new record node with no siblings attached.  The concrete leaf type
	 * depends upon whether the schema stores variable-length records.
	 * @param nodeMgr table node manager instance
	 * @return new record leaf node
	 * @throws IOException thrown if IO error occurs
	 */
	static FixedKeyRecordNode createRecordNode(NodeMgr nodeMgr) throws IOException {
		Schema schema = nodeMgr.getTableSchema();
		FixedKeyRecordNode node = null;
		if (schema.isVariableLength()) {
			node = new FixedKeyVarRecNode(nodeMgr, -1, -1);
		}
		else {
			node = new FixedKeyFixedRecNode(nodeMgr, -1, -1);
		}
		return node;
	}
}

View file

@ -0,0 +1,454 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
import db.buffers.DataBuffer;
import ghidra.util.datastruct.IntArrayList;
import ghidra.util.exception.AssertException;
/**
 * <code>FixedKeyVarRecNode</code> is an implementation of a BTree leaf node
 * which utilizes fixed-length key values and stores variable-length records.
 * <p>
 * This type of node has the following layout within a single DataBuffer
 * (field size in bytes, where 'L' is the fixed length of the fixed-length
 * key as specified by key type in associated Schema):
 * <pre>
 *   | NodeType(1) | KeyCount(4) | PrevLeafId(4) | NextLeafId(4) | Key0(L) | RecOffset0(4) | IndFlag0(1) |...
 *
 *   | KeyN(L) | RecOffsetN(4) | IndFlagN(1) |...&lt;FreeSpace&gt;... | RecN |... | Rec1 |
 * </pre>
 * IndFlag - if not zero the record has been stored within a chained DBBuffer
 * whose 4-byte integer buffer ID has been stored within this leaf at the record offset.
 * <p>
 * Key entries grow upward from the header while record data grows downward from
 * the end of the buffer; record data offsets decrease as the key index increases.
 */
class FixedKeyVarRecNode extends FixedKeyRecordNode {

	private static final int HEADER_SIZE = RECORD_LEAF_HEADER_SIZE;

	private static final int OFFSET_SIZE = 4;
	private static final int INDIRECT_OPTION_SIZE = 1;

	private static final int KEY_BASE_OFFSET = HEADER_SIZE;

	// Per-entry layout: key bytes, 4-byte record data offset, 1-byte indirect flag
	private final int entrySize;
	private final int dataOffsetBaseOffset;
	private final int indirectOptionBaseOffset;

	/**
	 * Construct an existing fixed-length key variable-length record leaf node.
	 * @param nodeMgr table node manager instance
	 * @param buf node buffer
	 * @throws IOException if IO error occurs
	 */
	FixedKeyVarRecNode(NodeMgr nodeMgr, DataBuffer buf) throws IOException {
		super(nodeMgr, buf);
		entrySize = keySize + OFFSET_SIZE + INDIRECT_OPTION_SIZE;
		dataOffsetBaseOffset = KEY_BASE_OFFSET + keySize;
		indirectOptionBaseOffset = dataOffsetBaseOffset + OFFSET_SIZE;
	}

	/**
	 * Construct a new fixed-length key variable-length record leaf node.
	 * @param nodeMgr table node manager instance
	 * @param prevLeafId node buffer id for previous leaf ( &lt; 0: no leaf)
	 * @param nextLeafId node buffer id for next leaf ( &lt; 0 : no leaf)
	 * @throws IOException if IO error occurs
	 */
	FixedKeyVarRecNode(NodeMgr nodeMgr, int prevLeafId, int nextLeafId) throws IOException {
		super(nodeMgr, NodeMgr.FIXEDKEY_VAR_REC_NODE, prevLeafId, nextLeafId);
		entrySize = keySize + OFFSET_SIZE + INDIRECT_OPTION_SIZE;
		dataOffsetBaseOffset = KEY_BASE_OFFSET + keySize;
		indirectOptionBaseOffset = dataOffsetBaseOffset + OFFSET_SIZE;
	}

	@Override
	FixedKeyRecordNode createNewLeaf(int prevLeafId, int nextLeafId) throws IOException {
		return new FixedKeyVarRecNode(nodeMgr, prevLeafId, nextLeafId);
	}

	@Override
	public int getKeyOffset(int index) {
		return KEY_BASE_OFFSET + (index * entrySize);
	}

	/**
	 * Get the record offset within the buffer
	 * @param index key index
	 * @return record offset
	 */
	public int getRecordDataOffset(int index) {
		return buffer.getInt(dataOffsetBaseOffset + (index * entrySize));
	}

	/**
	 * Store the record offset within the buffer for the specified key index
	 * @param index key index
	 * @param offset record offset
	 */
	private void putRecordDataOffset(int index, int offset) {
		buffer.putInt(dataOffsetBaseOffset + (index * entrySize), offset);
	}

	/**
	 * Determine if a record is utilizing a chained DBBuffer for data storage
	 * @param index key index
	 * @return true if indirect storage is used for record, else false
	 */
	private boolean hasIndirectStorage(int index) {
		return buffer.getByte(indirectOptionBaseOffset + (index * entrySize)) != 0;
	}

	/**
	 * Set the indirect storage flag associated with a record
	 * @param index key index
	 * @param state indirect storage used (true) or not used (false)
	 */
	private void enableIndirectStorage(int index, boolean state) {
		buffer.putByte(indirectOptionBaseOffset + (index * entrySize), state ? (byte) 1 : (byte) 0);
	}

	/**
	 * @return unused free space within node
	 */
	private int getFreeSpace() {
		// Gap between the end of the key entry table and the start of record data
		return (keyCount == 0 ? buffer.length() : getRecordDataOffset(keyCount - 1)) -
			(keyCount * entrySize) - RECORD_LEAF_HEADER_SIZE;
	}

	/**
	 * Get the length of a stored record.
	 * @param index index associated with record.
	 * @return stored record length
	 */
	private int getRecordLength(int index) {
		// Record 0 extends to the end of the buffer; otherwise to the previous record's start
		if (index == 0) {
			return buffer.length() - getRecordDataOffset(0);
		}
		return getRecordDataOffset(index - 1) - getRecordDataOffset(index);
	}

	/**
	 * Get the length of a stored record.  Optimized if record offset
	 * already known.
	 * @param index index associated with record.
	 * @param offset record offset
	 * @return stored record length
	 */
	private int getRecordLength(int index, int offset) {
		if (index == 0) {
			return buffer.length() - offset;
		}
		return getRecordDataOffset(index - 1) - offset;
	}

	/**
	 * Move all record data, starting with index, by the specified offset amount.
	 * If the node contains 5 records, an index of 3 would shift the record data
	 * for indexes 3 and 4 left by the specified offset amount.  This is used to
	 * make space for a new or updated record.
	 * @param index the smaller key/record index (0 &lt;= index1)
	 * @param offset movement offset in bytes (negative moves data toward the header)
	 * @return insertion offset immediately following moved block.
	 */
	private int moveRecords(int index, int offset) {

		int lastIndex = keyCount - 1;

		// No movement needed for appended record
		if (index == keyCount) {
			if (index == 0) {
				return buffer.length() + offset;
			}
			return getRecordDataOffset(lastIndex) + offset;
		}

		// Determine block to be moved
		int start = getRecordDataOffset(lastIndex);
		int end = (index == 0) ? buffer.length() : getRecordDataOffset(index - 1);
		int len = end - start;

		// Move record data
		buffer.move(start, start + offset, len);

		// Adjust stored offsets
		for (int i = index; i < keyCount; i++) {
			putRecordDataOffset(i, getRecordDataOffset(i) + offset);
		}
		return end + offset;
	}

	@Override
	public Record getRecord(Schema schema, int index) throws IOException {
		Field key = getKeyField(index);
		Record record = schema.createRecord(key);
		if (hasIndirectStorage(index)) {
			// Record data lives in a chained buffer; leaf stores only its buffer ID
			int bufId = buffer.getInt(getRecordDataOffset(index));
			ChainedBuffer chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(), bufId);
			record.read(chainedBuffer, 0);
		}
		else {
			record.read(buffer, getRecordDataOffset(index));
		}
		return record;
	}

	@Override
	public int getRecordOffset(int index) throws IOException {
		// Negated chained-buffer ID signals indirect storage to the caller
		if (hasIndirectStorage(index)) {
			return -buffer.getInt(getRecordDataOffset(index));
		}
		return getRecordDataOffset(index);
	}

	@Override
	public Record getRecord(Field key, Schema schema) throws IOException {
		int index = getKeyIndex(key);
		if (index < 0)
			return null;
		return getRecord(schema, index);
	}

	/**
	 * Find the index which represents the halfway point within the record data.
	 * @return key index.
	 */
	private int getSplitIndex() {

		// Midpoint between start of record data and end of buffer
		int halfway = ((keyCount == 0 ? buffer.length() : getRecordDataOffset(keyCount - 1)) +
			buffer.length()) / 2;

		// Binary search; record offsets decrease as index increases
		int min = 1;
		int max = keyCount - 1;

		while (min < max) {
			int i = (min + max) / 2;
			int offset = getRecordDataOffset(i);
			if (offset == halfway) {
				return i;
			}
			else if (offset < halfway) {
				max = i - 1;
			}
			else {
				min = i + 1;
			}
		}
		return min;
	}

	@Override
	void splitData(FixedKeyRecordNode newRightLeaf) {

		FixedKeyVarRecNode rightNode = (FixedKeyVarRecNode) newRightLeaf;

		int splitIndex = getSplitIndex();
		int count = keyCount - splitIndex;
		int start = getRecordDataOffset(keyCount - 1);	// start of block to be moved
		int end = getRecordDataOffset(splitIndex - 1);	// end of block to be moved
		int splitLen = end - start;				// length of block to be moved
		int rightOffset = buffer.length() - splitLen;	// data offset within new leaf node

		// Copy data to new leaf node
		DataBuffer newBuf = rightNode.buffer;
		newBuf.copy(rightOffset, buffer, start, splitLen);
		newBuf.copy(KEY_BASE_OFFSET, buffer, KEY_BASE_OFFSET + (splitIndex * entrySize),
			count * entrySize);

		// Fix record offsets in new leaf node
		int offsetCorrection = buffer.length() - end;
		for (int i = 0; i < count; i++) {
			rightNode.putRecordDataOffset(i, rightNode.getRecordDataOffset(i) + offsetCorrection);
		}

		// Adjust key counts
		setKeyCount(keyCount - count);
		rightNode.setKeyCount(count);
	}

	@Override
	FixedKeyNode updateRecord(int index, Record record) throws IOException {

		int offset = getRecordDataOffset(index);
		int oldLen = getRecordLength(index, offset);
		int len = record.length();

		// Check for use of indirect chained record node(s)
		int maxRecordLength = ((buffer.length() - HEADER_SIZE) >> 2) - entrySize; // min 4 records per node
		boolean wasIndirect = hasIndirectStorage(index);
		boolean useIndirect = (len > maxRecordLength);

		if (useIndirect) {
			// Store record in chained buffers
			len = 4; // only the chained buffer ID is stored within this leaf
			ChainedBuffer chainedBuffer = null;
			if (wasIndirect) {
				chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(), buffer.getInt(offset));
				chainedBuffer.setSize(record.length(), false);
			}
			else {
				chainedBuffer = new ChainedBuffer(record.length(), nodeMgr.getBufferMgr());
				buffer.putInt(offset + oldLen - 4, chainedBuffer.getId()); // assumes old len is always > 4
				enableIndirectStorage(index, true);
			}
			record.write(chainedBuffer, 0);
		}
		else if (wasIndirect) {
			// Record shrank below the indirect threshold - discard chained storage
			removeChainedBuffer(buffer.getInt(offset));
			enableIndirectStorage(index, false);
		}

		// See if updated record will fit in current buffer
		if (useIndirect || len <= (getFreeSpace() + oldLen)) {

			// Overwrite record data - move other data if needed
			int dataShift = oldLen - len;
			if (dataShift != 0) {
				offset = moveRecords(index + 1, dataShift);
				putRecordDataOffset(index, offset);
			}
			if (!useIndirect) {
				record.write(buffer, offset);
			}
			return getRoot();
		}

		// Insufficient room for updated record - remove and re-add
		Field key = record.getKeyField();
		FixedKeyRecordNode leaf = (FixedKeyRecordNode) deleteRecord(key, null).getLeafNode(key);
		return leaf.putRecord(record, null);
	}

	/**
	 * Insert the specified record at the specified key index.
	 * Existing data may be shifted within the buffer to make room for
	 * the new record.  Parent must be notified if this changes the leftmost
	 * key.
	 * @param index insertion index for stored key
	 * @param record record to be inserted
	 * @throws IOException thrown if an IO error occurs
	 */
	@Override
	boolean insertRecord(int index, Record record) throws IOException {

		// Check for use of indirect chained record node(s)
		int len = record.length();
		int maxRecordLength = ((buffer.length() - HEADER_SIZE) >> 2) - entrySize; // min 4 records per node
		boolean useIndirect = (len > maxRecordLength);
		if (useIndirect) {
			len = 4; // only the chained buffer ID is stored within this leaf
		}

		if ((len + entrySize) > getFreeSpace())
			return false;  // insufficient space for record storage

		// Make room for new record
		int offset = moveRecords(index, -len);

		// Make room for new key/offset entry
		int start = KEY_BASE_OFFSET + (index * entrySize);
		len = (keyCount - index) * entrySize;
		buffer.move(start, start + entrySize, len);

		// Store new record key/offset
		buffer.put(start, record.getKeyField().getBinaryData());
		buffer.putInt(start + keySize, offset);
		setKeyCount(keyCount + 1);

		// Store record data
		if (useIndirect) {
			ChainedBuffer chainedBuffer =
				new ChainedBuffer(record.length(), nodeMgr.getBufferMgr());
			buffer.putInt(offset, chainedBuffer.getId());
			record.write(chainedBuffer, 0);
		}
		else {
			record.write(buffer, offset);
		}
		enableIndirectStorage(index, useIndirect);

		return true;
	}

	@Override
	public void remove(int index) throws IOException {

		if (index < 0 || index >= keyCount)
			throw new AssertException();

		// Release chained storage if present
		if (hasIndirectStorage(index)) {
			removeChainedBuffer(buffer.getInt(getRecordDataOffset(index)));
			enableIndirectStorage(index, false);
		}

		// Close the gap left by the removed record data
		int len = getRecordLength(index);
		moveRecords(index + 1, len);

		// Collapse the key entry table over the removed entry
		int start = KEY_BASE_OFFSET + ((index + 1) * entrySize);
		len = (keyCount - index - 1) * entrySize;
		buffer.move(start, start - entrySize, len);
		setKeyCount(keyCount - 1);
	}

	@Override
	public FixedKeyNode removeLeaf() throws IOException {

		// Remove all chained buffers associated with this leaf
		for (int index = 0; index < keyCount; ++index) {
			if (hasIndirectStorage(index)) {
				removeChainedBuffer(buffer.getInt(getRecordDataOffset(index)));
			}
		}
		return super.removeLeaf();
	}

	/**
	 * Remove a chained buffer.
	 * @param bufferId chained buffer ID
	 * @throws IOException thrown if an IO error occurs
	 */
	private void removeChainedBuffer(int bufferId) throws IOException {
		ChainedBuffer chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(), bufferId);
		chainedBuffer.delete();
	}

	@Override
	public void delete() throws IOException {

		// Remove all chained buffers associated with this node.
		for (int index = 0; index < keyCount; index++) {
			if (hasIndirectStorage(index)) {
				int offset = getRecordDataOffset(index);
				int bufferId = buffer.getInt(offset);
				removeChainedBuffer(bufferId);
				buffer.putInt(offset, -1); // clear stale chained-buffer reference
			}
		}

		// Remove this node
		nodeMgr.deleteNode(this);
	}

	@Override
	public int[] getBufferReferences() {
		// Only chained (indirect) record buffers are referenced by this leaf
		IntArrayList idList = new IntArrayList();
		for (int i = 0; i < keyCount; i++) {
			if (hasIndirectStorage(i)) {
				int offset = getRecordDataOffset(i);
				idList.add(buffer.getInt(offset));
			}
		}
		return idList.toArray();
	}
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,11 +15,10 @@
*/
package db;
import ghidra.util.exception.AssertException;
import java.io.IOException;
import db.buffers.DataBuffer;
import ghidra.util.exception.AssertException;
/**
* <code>FixedRecNode</code> is an implementation of a BTree leaf node
@ -37,75 +35,68 @@ import db.buffers.DataBuffer;
class FixedRecNode extends LongKeyRecordNode {
private static final int HEADER_SIZE = RECORD_LEAF_HEADER_SIZE;
private static final int ENTRY_BASE_OFFSET = HEADER_SIZE;
private static final int KEY_SIZE = 8;
private static final int[] EMPTY_ID_LIST = new int[0];
private int entrySize;
private int recordLength;
/**
* Construct an existing long-key fixed-length record leaf node.
* @param nodeMgr table node manager instance
* @param buf node buffer
* @param recordLength fixed record length
*/
FixedRecNode(NodeMgr nodeMgr, DataBuffer buf, int recordLength) {
super(nodeMgr, buf);
this.recordLength = recordLength;
entrySize = KEY_SIZE + recordLength;
}
/**
* Construct a new long-key fixed-length record leaf node.
* @param nodeMgr table node manager instance
* @param recordLength fixed record length
* @param prevLeafId node buffer id for previous leaf ( &lt; 0: no leaf)
* @param nextLeafId node buffer id for next leaf ( &lt; 0 : no leaf)
* @throws IOException
* @throws IOException thrown if IO error occurs
*/
FixedRecNode(NodeMgr nodeMgr, int recordLength, int prevLeafId, int nextLeafId) throws IOException {
FixedRecNode(NodeMgr nodeMgr, int recordLength, int prevLeafId, int nextLeafId)
throws IOException {
super(nodeMgr, NodeMgr.LONGKEY_FIXED_REC_NODE, prevLeafId, nextLeafId);
this.recordLength = recordLength;
entrySize = KEY_SIZE + recordLength;
}
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#createNewLeaf(int, int)
*/
@Override
LongKeyRecordNode createNewLeaf(int prevLeafId, int nextLeafId) throws IOException {
LongKeyRecordNode createNewLeaf(int prevLeafId, int nextLeafId) throws IOException {
return new FixedRecNode(nodeMgr, recordLength, prevLeafId, nextLeafId);
}
/*
* @see ghidra.framework.store.db.LongKeyNode#getKey(int)
*/
@Override
long getKey(int index) {
return buffer.getLong(ENTRY_BASE_OFFSET + (index * entrySize));
long getKey(int index) {
return buffer.getLong(getKeyOffset(index));
}
// /**
// * Store a key at the specified index
// * @param index key index
// * @param key key value
// */
// private void putKey(int index, long key) {
// buffer.putLong(ENTRY_BASE_OFFSET + (index * entrySize), key);
// }
@Override
public int getKeyOffset(int index) {
return ENTRY_BASE_OFFSET + (index * entrySize);
}
/**
* Get the record offset within the buffer
* @param index key index
* @return record offset
*/
private int getRecordOffset(int index) {
@Override
public int getRecordOffset(int index) {
return ENTRY_BASE_OFFSET + (index * entrySize);
}
/**
* Shift all records by one starting with index to the end.
* @param index the smaller key index (0 &lt;= index)
@ -113,49 +104,41 @@ class FixedRecNode extends LongKeyRecordNode {
* one record.
*/
private void shiftRecords(int index, boolean rightShift) {
// No movement needed for appended record
if (index == keyCount)
return;
// Determine block to be moved
int start = getRecordOffset(index);
int end = getRecordOffset(keyCount);
int len = end - start;
// Move record data
int offset = start + (rightShift ? entrySize : -entrySize);
buffer.move(start, offset, len);
}
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#remove(int)
*/
@Override
void remove(int index) {
if (index < 0 || index >= keyCount)
throw new AssertException();
@Override
public void remove(int index) {
if (index < 0 || index >= keyCount)
throw new AssertException();
shiftRecords(index + 1, false);
setKeyCount(keyCount - 1);
setKeyCount(keyCount - 1);
}
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#insertRecord(int, ghidra.framework.store.db.Record)
*/
@Override
boolean insertRecord(int index, Record record) throws IOException {
// Check for use of indirect chained record node(s)
// int len = record.length();
boolean insertRecord(int index, Record record) throws IOException {
if (keyCount == ((buffer.length() - HEADER_SIZE) / entrySize))
if (keyCount == ((buffer.length() - HEADER_SIZE) / entrySize)) {
return false; // insufficient space for record storage
}
// Make room for new record
shiftRecords(index, true);
// Store new record
int offset = getRecordOffset(index);
buffer.putLong(offset, record.getKey());
@ -165,21 +148,15 @@ throw new AssertException();
return true;
}
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#updateRecord(int, ghidra.framework.store.db.Record)
*/
@Override
LongKeyNode updateRecord(int index, Record record) throws IOException {
LongKeyNode updateRecord(int index, Record record) throws IOException {
int offset = getRecordOffset(index) + KEY_SIZE;
record.write(buffer, offset);
return getRoot();
}
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#getRecord(long, ghidra.framework.store.db.Schema)
*/
@Override
Record getRecord(long key, Schema schema) throws IOException {
Record getRecord(long key, Schema schema) throws IOException {
int index = getKeyIndex(key);
if (index < 0)
return null;
@ -188,50 +165,39 @@ throw new AssertException();
return record;
}
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#getRecord(ghidra.framework.store.db.Schema, int)
*/
@Override
Record getRecord(Schema schema, int index) throws IOException {
public Record getRecord(Schema schema, int index) throws IOException {
long key = getKey(index);
Record record = schema.createRecord(key);
record.read(buffer, getRecordOffset(index) + KEY_SIZE);
return record;
return record;
}
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#splitData(ghidra.framework.store.db.LongKeyRecordNode)
*/
@Override
void splitData(LongKeyRecordNode newRightLeaf) {
void splitData(LongKeyRecordNode newRightLeaf) {
FixedRecNode rightNode = (FixedRecNode) newRightLeaf;
int splitIndex = keyCount / 2;
int count = keyCount - splitIndex;
int start = getRecordOffset(splitIndex); // start of block to be moved
int end = getRecordOffset(keyCount); // end of block to be moved
int splitLen = end - start; // length of block to be moved
// Copy data to new leaf node
rightNode.buffer.copy(ENTRY_BASE_OFFSET, buffer, start, splitLen);
rightNode.buffer.copy(ENTRY_BASE_OFFSET, buffer, start, splitLen);
// Adjust key counts
setKeyCount(keyCount - count);
rightNode.setKeyCount(count);
}
/*
* @see ghidra.framework.store.db.LongKeyNode#delete()
*/
@Override
public void delete() throws IOException {
public void delete() throws IOException {
nodeMgr.deleteNode(this);
}
/*
* @see ghidra.framework.store.db.BTreeNode#getBufferReferences()
*/
@Override
public int[] getBufferReferences() {
return EMPTY_ID_LIST;
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -27,4 +26,13 @@ public class IllegalFieldAccessException extends RuntimeException {
IllegalFieldAccessException() {
super("Illegal field access");
}
/**
* Construct an illegal field access exception
* with a specific message
*/
IllegalFieldAccessException(String msg) {
super(msg);
}
}

View file

@ -1,237 +0,0 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
import db.buffers.DataBuffer;
/**
* <code>IndexBuffer</code> stores index data for a common index key
* within a data buffer. The index data has the following layout (field size in
* bytes):
* <pre>
* | FieldType(1) | KeyCount(4) | PrimeKey1(8) | ... | PrimeKeyN(8) |
* </pre>
* This type of index buffer is used to store primary keys associated with a
* single secondary key. The association to a specific secondary key
* is handled by the <code>IndexTable</code>. The primary keys are maintained
within the buffer in an ascending sorted order.
*/
class IndexBuffer {
private static final int FIELD_TYPE_SIZE = 1;
private static final int KEY_COUNT_SIZE = 4;
private static final int FIELD_TYPE_OFFSET = 0;
private static final int KEY_COUNT_OFFSET = FIELD_TYPE_OFFSET + FIELD_TYPE_SIZE;
static final int INDEX_HEADER_SIZE = FIELD_TYPE_SIZE + KEY_COUNT_SIZE;
static final int PRIMARY_KEY_SIZE = 8;
Field indexKey;
int keyCount;
IndexDataBuffer dataBuffer;
/**
* Construct a new index buffer.
* @param indexKey associated index key
* @param data existing index buffer data from storage or null for an
* empty index buffer.
* @throws IOException thrown if IO error occurs
*/
IndexBuffer(Field indexKey, byte[] data) throws IOException {
this.indexKey = indexKey;
if (data == null) {
dataBuffer = new IndexDataBuffer(INDEX_HEADER_SIZE);
dataBuffer.putByte(FIELD_TYPE_OFFSET, indexKey.getFieldType());
dataBuffer.putInt(KEY_COUNT_OFFSET, 0);
}
else {
if (data[FIELD_TYPE_OFFSET] != indexKey.getFieldType())
throw new IOException("Invalid index data");
dataBuffer = new IndexDataBuffer(data);
}
keyCount = dataBuffer.getInt(KEY_COUNT_OFFSET);
}
/**
* Get the associated index key
* @return index key
*/
Field getIndexKey() {
return indexKey;
}
/**
* Set the stored primary key count
* @param cnt primary key count
*/
private void setKeyCount(int cnt) {
keyCount = cnt;
dataBuffer.putInt(KEY_COUNT_OFFSET, keyCount);
}
/**
* Provides data buffer manipulation for the index data
*/
class IndexDataBuffer extends DataBuffer {
/**
* Construct an index data buffer.
* @see db.buffers.DataBuffer#DataBuffer(byte[])
*/
IndexDataBuffer(byte[] data) {
super(data);
}
/**
* Construct an index data buffer.
* @see db.buffers.DataBuffer#DataBuffer(int)
*/
IndexDataBuffer(int size) {
super(size);
}
/**
* Get the storage array associated with this buffer.
* @return byte storage array.
*/
@Override
protected byte[] getData() {
return data;
}
/**
* Get the storage array associated with this buffer.
* @return byte storage array.
*/
@Override
protected void setData(byte[] data) {
this.data = data;
}
}
/**
* Get the index buffer data.
* @return index data or null if index data is empty.
*/
byte[] getData() {
byte[] data = dataBuffer.getData();
if (data.length <= INDEX_HEADER_SIZE)
return null;
return data;
}
/**
* Get the primary key associated with the specified entry index.
* This method does not perform any bounds checking on the index value.
* @param index index entry index.
* @return primary key associated with entry.
*/
long getPrimaryKey(int index) {
return dataBuffer.getLong(INDEX_HEADER_SIZE + (index * PRIMARY_KEY_SIZE));
}
/**
* Get the secondary key index within the buffer.
* @param primaryKey primary key
* @return key index if found, else -(key index + 1) indicates insertion
* point.
*/
int getIndex(long primaryKey) {
return getKeyIndex(primaryKey);
}
/**
* Perform a binary search to locate the specified primary key.
* @param primaryKey primary key
* @return key index if found, else -(key index + 1) indicates insertion
* point.
*/
private int getKeyIndex(long primaryKey) {
int min = 0;
int max = keyCount - 1;
while (min <= max) {
int i = (min + max)/2;
long k = getPrimaryKey(i);
if (k == primaryKey) {
return i;
}
else if (k < primaryKey) {
min = i + 1;
}
else {
max = i - 1;
}
}
return -(min+1);
}
/**
* Add a new primary key to this index buffer.
* @param primaryKey primary key
*/
void addEntry(long primaryKey) {
int index = getKeyIndex(primaryKey);
if (index < 0) {
index = -index-1;
IndexDataBuffer newDataBuffer = new IndexDataBuffer(dataBuffer.length() + PRIMARY_KEY_SIZE);
int len = INDEX_HEADER_SIZE + (index * PRIMARY_KEY_SIZE);
newDataBuffer.copy(0, dataBuffer, 0, len);
newDataBuffer.copy(len + PRIMARY_KEY_SIZE, dataBuffer, len, dataBuffer.length() - len);
newDataBuffer.putLong(len, primaryKey);
dataBuffer = newDataBuffer;
setKeyCount(keyCount + 1);
}
}
/**
* Delete the specified index entry from this index buffer.
* @param primaryKey primary key
*/
void deleteEntry(long primaryKey) {
int index = getKeyIndex(primaryKey);
if (index >= 0) {
IndexDataBuffer newDataBuffer = new IndexDataBuffer(dataBuffer.length() - PRIMARY_KEY_SIZE);
int len = INDEX_HEADER_SIZE + (index * PRIMARY_KEY_SIZE);
newDataBuffer.copy(0, dataBuffer, 0, len);
newDataBuffer.copy(len, dataBuffer, len + PRIMARY_KEY_SIZE, dataBuffer.length() - len - PRIMARY_KEY_SIZE);
dataBuffer = newDataBuffer;
setKeyCount(keyCount - 1);
}
}
/**
* Get the list of primary keys contained within this index buffer.
* @return long[] list of primary keys
* @throws IOException thrown if IO error occurs
*/
long[] getPrimaryKeys() {
long[] keys = new long[keyCount];
for (int i = 0; i < keyCount; i++) {
keys[i] = getPrimaryKey(i);
}
return keys;
}
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,486 +15,276 @@
*/
package db;
import ghidra.util.exception.AssertException;
import java.io.IOException;
abstract class IndexField extends Field {
import db.buffers.DataBuffer;
import ghidra.util.exception.AssertException;
private static final int MAX_INDEX_FIELD_LENGTH = 64;
/**
* <code>IndexField</code> provides an index table primary key {@link Field}
* implementation which wraps both the index field value (fixed or variable length)
* and its corresponding primary key (fixed or variable length).
*/
class IndexField extends Field {
private long primaryKey;
private Field nonTruncatedIndexField;
private Field indexField;
static final int MAX_INDEX_FIELD_LENGTH = 64;
private Field primaryKey;
private Field nonTruncatedIndexedField;
private Field indexedField;
private boolean isTruncated = false;
/**
* Construct a new index field with an initial value of null.
* Construct an index field with an initial value.
* @param indexedField indexed field value
* @param primaryKey primary key value
*/
IndexField(Field newIndexField) {
indexField = newIndexField;
nonTruncatedIndexField = newIndexField;
IndexField(Field indexedField, Field primaryKey) {
if (primaryKey.isVariableLength()) {
throw new IllegalArgumentException("variable length primaryKey not supported");
}
this.primaryKey = primaryKey.copyField();
this.nonTruncatedIndexedField = indexedField;
this.indexedField = indexedField;
if (indexedField.isVariableLength() && indexedField.length() >= MAX_INDEX_FIELD_LENGTH) {
// Ensure that we do not exceed the maximum allowed index key length
// and conserve space when indexing very long values
this.indexedField = indexedField.copyField();
this.indexedField.truncate(MAX_INDEX_FIELD_LENGTH);
isTruncated = true;
}
}
/**
* Construct an index field with an initial value.
* Get the indexed field value. If the original value exceeded
* {@link #MAX_INDEX_FIELD_LENGTH} in length the returned value will
* be truncated.
* @return indexed field value
*/
IndexField(Field value, long primaryKey) {
this.nonTruncatedIndexField = value;
indexField = value.newField(value);
if (indexField.isVariableLength() && indexField.length() >= MAX_INDEX_FIELD_LENGTH) {
// Ensure that we do not exceed the maximum allowed index key length
// and conserve space when indexing very long values
indexField.truncate(MAX_INDEX_FIELD_LENGTH);
isTruncated = true;
}
this.primaryKey = primaryKey;
}
Field getIndexField() {
return indexField;
Field getIndexedField() {
return indexedField;
}
/**
* Get the non-truncated index field value.
* @return non-truncated index field value.
* @deprecated this method serves no real purpose since the non-truncated
* indexed field value is not retained within the index table.
*/
@Deprecated
Field getNonTruncatedIndexField() {
return nonTruncatedIndexField;
return nonTruncatedIndexedField;
}
/**
* Determine if the index field value has been truncated from its original
* value.
* @return true if truncated else false
* @deprecated this method serves no real purpose since the truncation
* status is not retained within the index table.
*/
@Deprecated
boolean usesTruncatedFieldValue() {
return isTruncated;
}
long getPrimaryKey() {
Field getPrimaryKey() {
return primaryKey;
}
/*
* @see ghidra.framework.store.db.Field#length()
*/
@Override
int length() {
return indexField.length() + 8;
return indexedField.length() + primaryKey.length();
}
/*
* @see ghidra.framework.store.db.Field#write(ghidra.framework.store.Buffer, int)
*/
@Override
int write(Buffer buf, int offset) throws IOException {
offset = indexField.write(buf, offset);
return buf.putLong(offset, primaryKey);
offset = indexedField.write(buf, offset);
return primaryKey.write(buf, offset);
}
/*
* @see ghidra.framework.store.db.Field#read(ghidra.framework.store.Buffer, int)
*/
@Override
int read(Buffer buf, int offset) throws IOException {
offset = indexField.read(buf, offset);
primaryKey = buf.getLong(offset);
return offset + 8;
offset = indexedField.read(buf, offset);
return primaryKey.read(buf, offset);
}
/*
* @see ghidra.framework.store.db.Field#readLength(ghidra.framework.store.Buffer, int)
*/
@Override
int readLength(Buffer buf, int offset) throws IOException {
return indexField.readLength(buf, offset) + 8;
return indexedField.readLength(buf, offset) + primaryKey.length();
}
/*
* @see ghidra.framework.store.db.Field#isVariableLength()
*/
@Override
public boolean isVariableLength() {
return true;
return indexedField.isVariableLength();
}
/*
* @see ghidra.framework.store.db.Field#getFieldType()
*/
@Override
protected abstract byte getFieldType();
public IndexField copyField() {
return new IndexField(indexedField.copyField(), primaryKey.copyField());
}
abstract String getFieldTypeString();
@Override
public IndexField newField() {
return new IndexField(indexedField.newField(), primaryKey.newField());
}
/*
* @see java.lang.Object#toString()
/**
* Construct a new {@link IndexField} instance for the given indexValue and
* associated primary key. These fields are verified against this instance to
* ensure that they are of the correct type.
* @param indexValue column field value to be indexed
* @param key primary key associated with indexValue
* @return new IndexField instance
*/
IndexField newIndexField(Field indexValue, Field key) {
if (!indexValue.isSameType(indexedField) || !primaryKey.isSameType(getPrimaryKey())) {
throw new IllegalArgumentException("incorrect index value or key type");
}
return new IndexField(indexValue, key);
}
@Override
final IndexField getMinValue() {
throw new UnsupportedOperationException();
}
@Override
final IndexField getMaxValue() {
throw new UnsupportedOperationException();
}
@Override
byte getFieldType() {
return getIndexFieldType(indexedField, primaryKey);
}
@Override
public String toString() {
return getFieldTypeString() + ": " + indexField;
return indexedField + "/" + primaryKey;
}
@Override
public String getValueAsString() {
return indexField.getValueAsString() + " + " + Long.toHexString(primaryKey);
return indexedField.getValueAsString() + " / " + primaryKey.getValueAsString();
}
boolean hasSameIndex(IndexField field) {
boolean hasSameIndexValue(IndexField field) {
if (field == null) {
return false;
}
if (indexField == null) {
return field.indexField == null;
if (indexedField == null) {
return field.indexedField == null;
}
return indexField.equals(field.indexField);
return indexedField.equals(field.indexedField);
}
/*
* @see ghidra.framework.store.db.Field#getBinaryData()
*/
@Override
public byte[] getBinaryData() {
byte[] indexBytes = indexField.getBinaryData();
int len = indexBytes.length;
byte[] bytes = new byte[len + 8];
System.arraycopy(indexBytes, 0, bytes, 0, len);
bytes[len] = (byte) (primaryKey >> 56);
bytes[++len] = (byte) (primaryKey >> 48);
bytes[++len] = (byte) (primaryKey >> 40);
bytes[++len] = (byte) (primaryKey >> 32);
bytes[++len] = (byte) (primaryKey >> 24);
bytes[++len] = (byte) (primaryKey >> 16);
bytes[++len] = (byte) (primaryKey >> 8);
bytes[++len] = (byte) primaryKey;
byte[] indexBytes = indexedField.getBinaryData();
byte[] primaryKeyBytes = primaryKey.getBinaryData();
int len = indexBytes.length + primaryKeyBytes.length;
byte[] bytes = new byte[len];
System.arraycopy(indexBytes, 0, bytes, 0, indexBytes.length);
System.arraycopy(primaryKeyBytes, 0, bytes, indexBytes.length, primaryKeyBytes.length);
return bytes;
}
/*
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override
public void setBinaryData(byte[] bytes) {
if (isVariableLength()) {
throw new IllegalFieldAccessException("Unsupported for variable length IndexField");
}
if (bytes.length != length()) {
throw new IllegalFieldAccessException();
}
BinaryDataBuffer buffer = new BinaryDataBuffer(bytes);
try {
read(buffer, 0);
}
catch (IOException e) {
throw new IllegalFieldAccessException();
}
}
@Override
public int compareTo(Field o) {
IndexField f = (IndexField) o;
int result = indexField.compareTo(f.indexField);
int result = indexedField.compareTo(f.indexedField);
if (result != 0) {
return result;
}
if (primaryKey == f.primaryKey) {
return 0;
}
else if (primaryKey < f.primaryKey) {
return -1;
}
return 1;
return primaryKey.compareTo(f.primaryKey);
}
@Override
int compareTo(DataBuffer buffer, int offset) {
int result = indexedField.compareTo(buffer, offset);
if (result != 0) {
return result;
}
try {
int indexedFieldLen = indexedField.readLength(buffer, offset);
return primaryKey.compareTo(buffer, offset + indexedFieldLen);
}
catch (IOException e) {
throw new AssertException(e); // DataBuffer does not throw IOException
}
}
@Override
public boolean isSameType(Field field) {
if (!(field instanceof IndexField)) {
return false;
}
IndexField otherField = (IndexField) field;
return indexedField.isSameType(otherField.indexedField) &&
primaryKey.isSameType(otherField.primaryKey);
}
/*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (!getClass().isInstance(obj))
if (obj == null || obj.getClass() != getClass()) {
return false;
}
IndexField f = (IndexField) obj;
return primaryKey == f.primaryKey && indexField.equals(f.indexField);
return primaryKey.equals(f.primaryKey) && indexedField.equals(f.indexedField);
}
/*
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
return (int) primaryKey;
return (indexedField.hashCode() * 31) + primaryKey.hashCode();
}
static byte getIndexFieldType(Field indexedFieldType, Field primaryKeyFieldType) {
if (primaryKeyFieldType instanceof IndexField) {
throw new IllegalArgumentException();
}
if (indexedFieldType instanceof IndexField) {
throw new IllegalArgumentException();
}
return (byte) ((primaryKeyFieldType.getFieldType() << INDEX_FIELD_TYPE_SHIFT) |
indexedFieldType.getFieldType());
}
/**
* Get the field associated with the specified type value.
* @param fieldType
* @return Field
* Get the index field associated with the specified encoded field type.
* @param fieldType field type
* @return IndexField
* @throws UnsupportedFieldException if unsupported fieldType specified
*/
static IndexField getIndexField(byte fieldType) {
switch (fieldType & BASE_TYPE_MASK) {
case LONG_TYPE:
return new LongIndexField();
case INT_TYPE:
return new IntIndexField();
case STRING_TYPE:
return new StringIndexField();
case SHORT_TYPE:
return new ShortIndexField();
case BYTE_TYPE:
return new ByteIndexField();
case BOOLEAN_TYPE:
return new BooleanIndexField();
case BINARY_OBJ_TYPE:
return new BinaryIndexField();
static IndexField getIndexField(byte fieldType) throws UnsupportedFieldException {
Field indexedField = Field.getField((byte) (fieldType & FIELD_TYPE_MASK));
byte primaryKeyFeldType = (byte) (fieldType >> INDEX_FIELD_TYPE_SHIFT & FIELD_TYPE_MASK);
if (primaryKeyFeldType == LEGACY_INDEX_LONG_TYPE) {
return new LegacyIndexField(indexedField);
}
throw new AssertException();
Field primaryKeyType = Field.getField(primaryKeyFeldType);
return new IndexField(indexedField, primaryKeyType);
}
static IndexField getIndexField(Field indexedField, long primaryKey) {
switch (indexedField.getFieldType()) {
case LONG_TYPE:
return new LongIndexField((LongField) indexedField, primaryKey);
case INT_TYPE:
return new IntIndexField((IntField) indexedField, primaryKey);
case STRING_TYPE:
return new StringIndexField((StringField) indexedField, primaryKey);
case SHORT_TYPE:
return new ShortIndexField((ShortField) indexedField, primaryKey);
case BYTE_TYPE:
return new ByteIndexField((ByteField) indexedField, primaryKey);
case BOOLEAN_TYPE:
return new BooleanIndexField((BooleanField) indexedField, primaryKey);
case BINARY_OBJ_TYPE:
return new BinaryIndexField((BinaryField) indexedField, primaryKey);
}
throw new AssertException();
}
private static class LongIndexField extends IndexField {
LongIndexField() {
super(new LongField());
}
LongIndexField(LongField indexedField, long primaryKey) {
super(indexedField, primaryKey);
}
@Override
protected byte getFieldType() {
return INDEX_TYPE_FLAG | LONG_TYPE;
}
@Override
String getFieldTypeString() {
return "LongIndexField";
}
@Override
public Field newField(Field fieldValue) {
if (!(fieldValue instanceof LongIndexField)) {
throw new AssertException();
}
LongIndexField f = (LongIndexField) fieldValue;
return new LongIndexField((LongField) f.getIndexField(), f.getPrimaryKey());
}
@Override
public Field newField() {
return new LongIndexField();
}
}
private static class IntIndexField extends IndexField {
IntIndexField() {
super(new IntField());
}
IntIndexField(IntField indexedField, long primaryKey) {
super(indexedField, primaryKey);
}
@Override
protected byte getFieldType() {
return INDEX_TYPE_FLAG | INT_TYPE;
}
@Override
String getFieldTypeString() {
return "IntIndexField";
}
@Override
public Field newField(Field fieldValue) {
if (!(fieldValue instanceof IntIndexField)) {
throw new AssertException();
}
IntIndexField f = (IntIndexField) fieldValue;
return new IntIndexField((IntField) f.getIndexField(), f.getPrimaryKey());
}
@Override
public Field newField() {
return new IntIndexField();
}
}
private static class StringIndexField extends IndexField {
StringIndexField() {
super(new StringField());
}
StringIndexField(StringField indexedField, long primaryKey) {
super(indexedField, primaryKey);
}
@Override
protected byte getFieldType() {
return INDEX_TYPE_FLAG | STRING_TYPE;
}
@Override
String getFieldTypeString() {
return "StringIndexField";
}
@Override
public Field newField(Field fieldValue) {
if (!(fieldValue instanceof StringIndexField)) {
throw new AssertException();
}
StringIndexField f = (StringIndexField) fieldValue;
return new StringIndexField((StringField) f.getIndexField(), f.getPrimaryKey());
}
@Override
public Field newField() {
return new StringIndexField();
}
}
private static class ShortIndexField extends IndexField {
ShortIndexField() {
super(new ShortField());
}
ShortIndexField(ShortField indexedField, long primaryKey) {
super(indexedField, primaryKey);
}
@Override
protected byte getFieldType() {
return INDEX_TYPE_FLAG | SHORT_TYPE;
}
@Override
String getFieldTypeString() {
return "ShortIndexField";
}
@Override
public Field newField(Field fieldValue) {
if (!(fieldValue instanceof ShortIndexField)) {
throw new AssertException();
}
ShortIndexField f = (ShortIndexField) fieldValue;
return new ShortIndexField((ShortField) f.getIndexField(), f.getPrimaryKey());
}
@Override
public Field newField() {
return new ShortIndexField();
}
}
private static class ByteIndexField extends IndexField {
ByteIndexField() {
super(new ByteField());
}
ByteIndexField(ByteField indexedField, long primaryKey) {
super(indexedField, primaryKey);
}
@Override
protected byte getFieldType() {
return INDEX_TYPE_FLAG | BYTE_TYPE;
}
@Override
String getFieldTypeString() {
return "ByteIndexField";
}
@Override
public Field newField(Field fieldValue) {
if (!(fieldValue instanceof ByteIndexField)) {
throw new AssertException();
}
ByteIndexField f = (ByteIndexField) fieldValue;
return new ByteIndexField((ByteField) f.getIndexField(), f.getPrimaryKey());
}
@Override
public Field newField() {
return new ByteIndexField();
}
}
private static class BooleanIndexField extends IndexField {
BooleanIndexField() {
super(new BooleanField());
}
BooleanIndexField(BooleanField indexedField, long primaryKey) {
super(indexedField, primaryKey);
}
@Override
protected byte getFieldType() {
return INDEX_TYPE_FLAG | BOOLEAN_TYPE;
}
@Override
String getFieldTypeString() {
return "BooleanIndexField";
}
@Override
public Field newField(Field fieldValue) {
if (!(fieldValue instanceof BooleanIndexField)) {
throw new AssertException();
}
BooleanIndexField f = (BooleanIndexField) fieldValue;
return new BooleanIndexField((BooleanField) f.getIndexField(), f.getPrimaryKey());
}
@Override
public Field newField() {
return new BooleanIndexField();
}
}
private static class BinaryIndexField extends IndexField {
BinaryIndexField() {
super(new BinaryField());
}
BinaryIndexField(BinaryField indexedField, long primaryKey) {
super(indexedField, primaryKey);
}
@Override
protected byte getFieldType() {
return INDEX_TYPE_FLAG | BINARY_OBJ_TYPE;
}
@Override
String getFieldTypeString() {
return "BinaryIndexField";
}
@Override
public Field newField(Field fieldValue) {
if (!(fieldValue instanceof BinaryIndexField)) {
throw new AssertException();
}
BinaryIndexField f = (BinaryIndexField) fieldValue;
return new BinaryIndexField((BinaryField) f.getIndexField(), f.getPrimaryKey());
}
@Override
public Field newField() {
return new BinaryIndexField();
}
}
}

View file

@ -16,7 +16,6 @@
package db;
import java.io.IOException;
import java.util.NoSuchElementException;
import ghidra.util.exception.AssertException;
import ghidra.util.exception.CancelledException;
@ -29,7 +28,7 @@ import ghidra.util.task.TaskMonitor;
*/
abstract class IndexTable {
protected static final long[] emptyKeyArray = new long[0];
protected static final Field[] emptyKeyArray = Field.EMPTY_ARRAY;
/**
* Database Handle
@ -51,11 +50,6 @@ abstract class IndexTable {
*/
protected Table indexTable;
/**
* Field type associated with indexed column.
*/
protected final Field fieldType;
/**
* Indexed column within primary table schema.
*/
@ -69,15 +63,14 @@ abstract class IndexTable {
* @throws IOException thrown if IO error occurs
*/
IndexTable(Table primaryTable, TableRecord indexTableRecord) throws IOException {
if (!primaryTable.useLongKeys()) {
throw new AssertException("Only long-key tables may be indexed");
if (!primaryTable.useLongKeys() && !primaryTable.useFixedKeys()) {
throw new AssertException("Only fixed-length key tables may be indexed");
}
this.db = primaryTable.getDBHandle();
this.primaryTable = primaryTable;
this.indexTableRecord = indexTableRecord;
this.indexTable = new Table(primaryTable.getDBHandle(), indexTableRecord);
this.colIndex = indexTableRecord.getIndexedColumn();
fieldType = primaryTable.getSchema().getField(indexTableRecord.getIndexedColumn());
primaryTable.addIndex(this);
}
@ -95,14 +88,12 @@ abstract class IndexTable {
throw new AssertException("Table not found: " + name);
}
if (indexTableRecord.getSchema().getKeyFieldType() instanceof IndexField) {
Field keyFieldType = indexTableRecord.getSchema().getKeyFieldType();
if (keyFieldType instanceof IndexField) {
return new FieldIndexTable(primaryTable, indexTableRecord);
}
Field fieldType = primaryTable.getSchema().getField(indexTableRecord.getIndexedColumn());
if (fieldType.isVariableLength()) {
return new VarIndexTable(primaryTable, indexTableRecord);
}
return new FixedIndexTable(primaryTable, indexTableRecord);
throw new AssertException(
"Unexpected index field type: " + keyFieldType.getClass().getName());
}
/**
@ -121,14 +112,23 @@ abstract class IndexTable {
/**
* Check the consistency of this index table.
* @param monitor task monitor
* @return true if consistency check passed, else false
* @throws IOException
* @throws CancelledException
* @throws IOException if IO error occurs
* @throws CancelledException if task cancelled
*/
boolean isConsistent(TaskMonitor monitor) throws IOException, CancelledException {
return indexTable.isConsistent(primaryTable.getSchema().getFieldNames()[colIndex], monitor);
}
/**
* Get the primary table key type
* @return primary table key type
*/
Field getPrimaryTableKeyType() {
return primaryTable.getSchema().getKeyFieldType();
}
/**
* Get the table number associated with the underlying index table.
* @return table number
@ -160,6 +160,7 @@ abstract class IndexTable {
* Determine if there is an occurrence of the specified index key value.
* @param field index key value
* @return true if an index key value equal to field exists.
* @throws IOException if IO error occurs
*/
boolean hasRecord(Field field) throws IOException {
return indexTable.hasRecord(field);
@ -168,16 +169,18 @@ abstract class IndexTable {
/**
* Find all primary keys which correspond to the specified indexed field
* value.
* @param field the field value to search for.
* @param indexValue the field value to search for.
* @return list of primary keys
* @throws IOException if IO error occurs
*/
abstract long[] findPrimaryKeys(Field indexValue) throws IOException;
abstract Field[] findPrimaryKeys(Field indexValue) throws IOException;
/**
* Get the number of primary keys which correspond to the specified indexed field
* value.
* @param field the field value to search for.
* @param indexValue the field value to search for.
* @return key count
* @throws IOException if IO error occurs
*/
abstract int getKeyCount(Field indexValue) throws IOException;
@ -185,19 +188,20 @@ abstract class IndexTable {
* Add an entry to this index. Caller is responsible for ensuring that this
* is not a duplicate entry.
* @param record new record
* @throws IOException
* @throws IOException if IO error occurs
*/
abstract void addEntry(Record record) throws IOException;
/**
* Delete an entry from this index.
* @param record deleted record
* @throws IOException
* @throws IOException if IO error occurs
*/
abstract void deleteEntry(Record record) throws IOException;
/**
* Delete all records within this index table.
* @throws IOException if IO error occurs
*/
void deleteAll() throws IOException {
indexTable.deleteAll();
@ -218,7 +222,7 @@ abstract class IndexTable {
* @param before if true initial position is before minField, else position
* is after endField
* @return index field iterator.
* @throws IOException
* @throws IOException if IO error occurs
*/
abstract DBFieldIterator indexIterator(Field minField, Field maxField, boolean before)
throws IOException;
@ -233,7 +237,7 @@ abstract class IndexTable {
* @param before if true initial position is before startField value, else position
* is after startField value
* @return index field iterator.
* @throws IOException
* @throws IOException if IO error occurs
*/
abstract DBFieldIterator indexIterator(Field minField, Field maxField, Field startField,
boolean before) throws IOException;
@ -243,9 +247,7 @@ abstract class IndexTable {
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
DBLongIterator keyIterator() throws IOException {
return new PrimaryKeyIterator();
}
abstract DBFieldIterator keyIterator() throws IOException;
/**
* Iterate over all primary keys sorted based upon the associated index key.
@ -255,9 +257,7 @@ abstract class IndexTable {
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
DBLongIterator keyIteratorBefore(Field startField) throws IOException {
return new PrimaryKeyIterator(startField, false);
}
abstract DBFieldIterator keyIteratorBefore(Field startField) throws IOException;
/**
* Iterate over all primary keys sorted based upon the associated index key.
@ -268,9 +268,7 @@ abstract class IndexTable {
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
DBLongIterator keyIteratorAfter(Field startField) throws IOException {
return new PrimaryKeyIterator(startField, true);
}
abstract DBFieldIterator keyIteratorAfter(Field startField) throws IOException;
/**
* Iterate over all primary keys sorted based upon the associated index key.
@ -282,9 +280,8 @@ abstract class IndexTable {
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
DBLongIterator keyIteratorBefore(Field startField, long primaryKey) throws IOException {
return new PrimaryKeyIterator(null, null, startField, primaryKey, false);
}
abstract DBFieldIterator keyIteratorBefore(Field startField, Field primaryKey)
throws IOException;
/**
* Iterate over all primary keys sorted based upon the associated index key.
@ -296,9 +293,8 @@ abstract class IndexTable {
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
DBLongIterator keyIteratorAfter(Field startField, long primaryKey) throws IOException {
return new PrimaryKeyIterator(null, null, startField, primaryKey, true);
}
abstract DBFieldIterator keyIteratorAfter(Field startField, Field primaryKey)
throws IOException;
/**
* Iterate over all primary keys sorted based upon the associated index key.
@ -314,17 +310,8 @@ abstract class IndexTable {
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
DBLongIterator keyIterator(Field minField, Field maxField, boolean before) throws IOException {
Field startField = before ? minField : maxField;
if (startField == null && !before) {
}
return new PrimaryKeyIterator(minField, maxField, before ? minField : maxField,
before ? Long.MIN_VALUE : Long.MAX_VALUE, !before);
}
abstract DBFieldIterator keyIterator(Field minField, Field maxField, boolean before)
throws IOException;
/**
* Iterate over all primary keys sorted based upon the associated index key.
@ -337,295 +324,7 @@ abstract class IndexTable {
* @return primary key iterator
* @throws IOException thrown if IO error occurs
*/
DBLongIterator keyIterator(Field minField, Field maxField, Field startField, boolean before)
throws IOException {
return new PrimaryKeyIterator(minField, maxField, startField,
before ? Long.MIN_VALUE : Long.MAX_VALUE, !before);
}
/**
* Iterates over primary keys for a range of index field values.
*/
private class PrimaryKeyIterator implements DBLongIterator {
private RecordIterator indexIterator;
private int expectedModCount;
private int index;
private long indexPrimaryKey;
private IndexBuffer indexBuffer;
private boolean forward = true;
private boolean reverse = true;
private Field lastKey;
private boolean hasPrev = false;
private boolean hasNext = false;
/**
* Construct a key iterator starting with the minimum secondary key.
*/
PrimaryKeyIterator() throws IOException {
expectedModCount = indexTable.modCount;
indexIterator = indexTable.iterator();
}
/**
* Construct a key iterator. The iterator is positioned immediately before
* the key associated with the first occurrence of the startValue.
* @param startValue indexed field value.
* @param after if true the iterator is positioned immediately after
* the last occurrence of the specified startValue position.
*/
PrimaryKeyIterator(Field startValue, boolean after) throws IOException {
this(null, null, startValue, after ? Long.MAX_VALUE : Long.MIN_VALUE, after);
}
/**
* Construct a key iterator. The iterator is positioned immediately before
* or after the key associated with the specified startValue/primaryKey.
* @param minValue minimum index value or null if no minimum
* @param maxValue maximum index value or null if no maximum
* @param startValue starting index value.
* @param primaryKey starting primary key value.
* @param after if true iterator is positioned immediately after
* the startValue/primaryKey,
* otherwise immediately before.
* @throws IOException
*/
PrimaryKeyIterator(Field minValue, Field maxValue, Field startValue, long primaryKey,
boolean after) throws IOException {
expectedModCount = indexTable.modCount;
indexIterator = indexTable.iterator(minValue, maxValue, startValue);
if (hasNext()) {
if (startValue.equals(indexBuffer.getIndexKey())) {
index = indexBuffer.getIndex(primaryKey);
if (index < 0) {
index = -index - 1;
}
else if (after) {
index++;
}
if (index == indexBuffer.keyCount) {
--index;
indexPrimaryKey = indexBuffer.getPrimaryKey(index);
hasNext = false;
hasPrev = true;
}
}
}
}
@Override
public boolean hasNext() throws IOException {
if (hasNext) {
return true;
}
synchronized (db) {
// Handle concurrent modification if necessary
// This is a slightly lazy approach which could miss keys added to the end of an index buffer
if (indexBuffer != null && index < (indexBuffer.keyCount - 1) &&
indexTable.modCount != expectedModCount) {
// refetch index buffer which may have changed
Field indexKey = indexBuffer.getIndexKey();
Record indexRecord;
if (indexKey.isVariableLength()) {
indexRecord = indexTable.getRecord(indexKey);
}
else {
indexRecord = indexTable.getRecord(indexKey.getLongValue());
}
if (indexRecord != null) {
// recover position within index buffer
indexBuffer = new IndexBuffer(indexKey, indexRecord.getBinaryData(0));
index = indexBuffer.getIndex(indexPrimaryKey + 1);
if (index < 0) {
index = -index - 1;
if (index == indexBuffer.keyCount) {
// next must be found in next index buffer below
indexBuffer = null;
}
else {
indexPrimaryKey = indexBuffer.getPrimaryKey(index);
hasNext = true;
}
}
else {
indexPrimaryKey = indexBuffer.getPrimaryKey(index);
hasNext = true;
}
}
else {
// index buffer no longer exists - will need to get next buffer below
indexBuffer = null;
}
hasPrev = false;
}
if (!hasNext) {
// Goto next index buffer
if ((indexBuffer == null || index >= (indexBuffer.keyCount) - 1)) {
// get next index buffer
Record indexRecord = indexIterator.next();
if (indexRecord != null) {
if (!forward) {
indexRecord = indexIterator.next();
forward = true;
}
reverse = false;
if (indexRecord != null) {
indexBuffer =
new IndexBuffer(fieldType.newField(indexRecord.getKeyField()),
indexRecord.getBinaryData(0));
index = 0;
indexPrimaryKey = indexBuffer.getPrimaryKey(index);
hasNext = true;
hasPrev = false;
}
}
}
// Step within current index buffer
else {
++index;
indexPrimaryKey = indexBuffer.getPrimaryKey(index);
hasNext = true;
hasPrev = false;
}
}
expectedModCount = indexTable.modCount;
return hasNext;
}
}
@Override
public boolean hasPrevious() throws IOException {
if (hasPrev) {
return true;
}
synchronized (db) {
// Handle concurrent modification if necessary
// This is a slightly lazy approach which could miss keys added to the beginning of an index buffer
if (indexBuffer != null && index > 0 && indexTable.modCount != expectedModCount) {
// refetch index buffer which may have changed
Field indexKey = indexBuffer.getIndexKey();
Record indexRecord; // refetch index buffer which may have changed
if (indexKey.isVariableLength()) {
indexRecord = indexTable.getRecord(indexKey);
}
else {
indexRecord = indexTable.getRecord(indexKey.getLongValue());
}
if (indexRecord != null) {
// recover position within index buffer
indexBuffer = new IndexBuffer(indexKey, indexRecord.getBinaryData(0));
index = indexBuffer.getIndex(indexPrimaryKey - 1);
if (index < 0) {
index = -index - 1;
if (index == 0) {
// previous must be found in previous index buffer below
indexBuffer = null;
}
else {
--index;
indexPrimaryKey = indexBuffer.getPrimaryKey(index);
hasPrev = true;
}
}
else {
indexPrimaryKey = indexBuffer.getPrimaryKey(index);
hasPrev = true;
}
}
else {
indexBuffer = null;
}
hasNext = false;
}
if (!hasPrev) {
// Goto previous index buffer
if ((indexBuffer == null || index == 0)) {
// get previous index buffer
Record indexRecord = indexIterator.previous();
if (indexRecord != null) {
if (!reverse) {
indexRecord = indexIterator.previous();
reverse = true;
}
forward = false;
if (indexRecord != null) {
indexBuffer =
new IndexBuffer(fieldType.newField(indexRecord.getKeyField()),
indexRecord.getBinaryData(0));
index = indexBuffer.keyCount - 1;
indexPrimaryKey = indexBuffer.getPrimaryKey(index);
hasNext = false;
hasPrev = true;
}
}
}
// Step within current index buffer
else {
--index;
indexPrimaryKey = indexBuffer.getPrimaryKey(index);
hasNext = false;
hasPrev = true;
}
}
expectedModCount = indexTable.modCount;
return hasPrev;
}
}
@Override
public long next() throws IOException {
if (hasNext || hasNext()) {
long key = indexBuffer.getPrimaryKey(index);
lastKey = new LongField(key);
hasNext = false;
hasPrev = true;
return key;
}
throw new NoSuchElementException();
}
@Override
public long previous() throws IOException {
if (hasPrev || hasPrevious()) {
long key = indexBuffer.getPrimaryKey(index);
lastKey = new LongField(key);
hasNext = true;
hasPrev = false;
return key;
}
throw new NoSuchElementException();
}
/**
* WARNING: This could be slow since the index buffer must be read
* after each record deletion.
* @see db.DBLongIterator#delete()
*/
@Override
public boolean delete() throws IOException {
if (lastKey == null) {
return false;
}
synchronized (db) {
long key = lastKey.getLongValue();
primaryTable.deleteRecord(key);
lastKey = null;
return true;
}
}
}
abstract DBFieldIterator keyIterator(Field minField, Field maxField, Field startField,
boolean before) throws IOException;
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,15 +15,30 @@
*/
package db;
import ghidra.util.exception.AssertException;
import java.io.IOException;
import db.buffers.DataBuffer;
/**
* <code>IntField</code> provides a wrapper for 4-byte signed integer data
* which is read or written to a Record.
*/
public class IntField extends Field {
public final class IntField extends Field {
/**
* Minimum integer field value
*/
public static final IntField MIN_VALUE = new IntField(Integer.MIN_VALUE, true);
/**
* Maximum integer field value
*/
public static final IntField MAX_VALUE = new IntField(Integer.MAX_VALUE, true);
/**
* Instance intended for defining a {@link Table} {@link Schema}
*/
public static final IntField INSTANCE = MIN_VALUE;
private int value;
@ -39,69 +53,57 @@ public class IntField extends Field {
* @param i initial value
*/
public IntField(int i) {
value = i;
this(i, false);
}
/**
* @see db.Field#getIntValue()
* Construct an integer field with an initial value of i.
* @param i initial value
* @param immutable true if field value is immutable
*/
IntField(int i, boolean immutable) {
super(immutable);
value = i;
}
@Override
public int getIntValue() {
return value;
}
/**
* @see db.Field#setIntValue(int)
*/
@Override
public void setIntValue(int value) {
checkImmutable();
this.value = value;
}
/**
* @see db.Field#length()
*/
@Override
int length() {
return 4;
}
/**
* @see db.Field#write(ghidra.framework.store.Buffer, int)
*/
@Override
int write(Buffer buf, int offset) throws IOException {
return buf.putInt(offset, value);
}
/**
* @see db.Field#read(ghidra.framework.store.Buffer, int)
*/
@Override
int read(Buffer buf, int offset) throws IOException {
checkImmutable();
value = buf.getInt(offset);
return offset + 4;
}
/**
* @see db.Field#readLength(ghidra.framework.store.Buffer, int)
*/
@Override
int readLength(Buffer buf, int offset) throws IOException {
return 4;
}
/**
* @see db.Field#getFieldType()
*/
@Override
protected byte getFieldType() {
byte getFieldType() {
return INT_TYPE;
}
/**
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return "IntField: " + Integer.toString(value);
@ -109,12 +111,9 @@ public class IntField extends Field {
@Override
public String getValueAsString() {
return Integer.toHexString(value);
return "0x" + Integer.toHexString(value);
}
/**
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (obj == null || !(obj instanceof IntField))
@ -122,9 +121,6 @@ public class IntField extends Field {
return ((IntField) obj).value == value;
}
/**
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override
public int compareTo(Field o) {
IntField f = (IntField) o;
@ -135,54 +131,64 @@ public class IntField extends Field {
return 1;
}
/**
* @see db.Field#newField(docking.widgets.fieldpanel.Field)
*/
@Override
public Field newField(Field fieldValue) {
if (fieldValue.isVariableLength())
throw new AssertException();
return new IntField((int) fieldValue.getLongValue());
int compareTo(DataBuffer buffer, int offset) {
int otherValue = buffer.getInt(offset);
if (value == otherValue)
return 0;
else if (value < otherValue)
return -1;
return 1;
}
/**
* @see db.Field#newField()
*/
@Override
public Field newField() {
public IntField copyField() {
return new IntField((int) getLongValue());
}
@Override
public IntField newField() {
return new IntField();
}
/**
* @see db.Field#getLongValue()
*/
@Override
public long getLongValue() {
return value;
}
/**
* @see db.Field#setLongValue(long)
*/
@Override
public void setLongValue(long value) {
this.value = (int) value;
setIntValue((int) value);
}
/**
* @see db.Field#getBinaryData()
*/
@Override
public byte[] getBinaryData() {
return new byte[] { (byte) (value >> 24), (byte) (value >> 16), (byte) (value >> 8),
(byte) value };
}
/**
* @see java.lang.Object#hashCode()
*/
@Override
public void setBinaryData(byte[] bytes) {
checkImmutable();
if (bytes.length != 4) {
throw new IllegalFieldAccessException();
}
value = ((bytes[0] & 0xff) << 24) | ((bytes[1] & 0xff) << 16) | ((bytes[2] & 0xff) << 8) |
(bytes[3] & 0xff);
}
@Override
public int hashCode() {
return value;
}
@Override
IntField getMinValue() {
return MIN_VALUE;
}
@Override
IntField getMaxValue() {
return MAX_VALUE;
}
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -14,8 +13,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.feature.vt.api.db;
public class VTAppliedMarkupTableDBAdapterV0 {
package db;
/**
 * Marker interface for {@link Table} interior nodes within the BTree structure.
 * It adds no methods beyond those inherited from {@link BTreeNode}; it exists
 * only to distinguish interior (non-leaf) nodes from record leaf nodes.
 */
public interface InteriorNode extends BTreeNode {
	// marker interface only
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -24,25 +23,26 @@ import java.util.NoSuchElementException;
*/
public class KeyToRecordIterator implements RecordIterator {
private DBLongIterator keyIter;
private DBFieldIterator keyIter;
private Table table;
private DBHandle db;
/**
* Construct a record iterator from a secondary index key iterator.
* @param keyIter key iterator.
*/
public KeyToRecordIterator(Table table, DBLongIterator keyIter) {
public KeyToRecordIterator(Table table, DBFieldIterator keyIter) {
this.table = table;
this.db = table.getDBHandle();
this.keyIter = keyIter;
}
/**
* @see db.RecordIterator#hasNext()
*/
@Override
public boolean hasNext() throws IOException {
synchronized(db) {
synchronized (db) {
return keyIter.hasNext();
}
}
@ -50,8 +50,9 @@ public class KeyToRecordIterator implements RecordIterator {
/**
* @see db.RecordIterator#hasPrevious()
*/
@Override
public boolean hasPrevious() throws IOException {
synchronized(db) {
synchronized (db) {
return keyIter.hasPrevious();
}
}
@ -59,12 +60,14 @@ public class KeyToRecordIterator implements RecordIterator {
/**
* @see db.RecordIterator#next()
*/
@Override
public Record next() throws IOException {
synchronized(db) {
synchronized (db) {
try {
return table.getRecord(keyIter.next());
} catch (NoSuchElementException e) {
return null;
}
catch (NoSuchElementException e) {
return null;
}
}
}
@ -72,12 +75,14 @@ public class KeyToRecordIterator implements RecordIterator {
/**
* @see db.RecordIterator#previous()
*/
@Override
public Record previous() throws IOException {
synchronized(db) {
synchronized (db) {
try {
return table.getRecord(keyIter.previous());
} catch (NoSuchElementException e) {
return null;
}
catch (NoSuchElementException e) {
return null;
}
}
}
@ -85,6 +90,7 @@ public class KeyToRecordIterator implements RecordIterator {
/**
* @see db.RecordIterator#delete()
*/
@Override
public boolean delete() throws IOException {
return keyIter.delete();
}

View file

@ -0,0 +1,61 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
/**
* <code>LegacyIndexField</code> supports legacy index tables where the indexed
* field was a {@link LongField} and improperly employed a variable-length
* index storage scheme when the primary key was a LongField.
*/
class LegacyIndexField extends IndexField {

	/**
	 * Construct a legacy index field for the specified indexed primary table field.
	 * The primary key portion is always a {@link LongField} for legacy index tables.
	 * @param indexField primary table field type being indexed
	 */
	LegacyIndexField(Field indexField) {
		super(indexField, new LongField());
	}

	/**
	 * Construct a legacy index field with a specific index value and primary key.
	 * @param indexField indexed field value
	 * @param primaryKey primary table key (always a {@link LongField} for legacy tables)
	 */
	private LegacyIndexField(Field indexField, LongField primaryKey) {
		super(indexField, primaryKey);
	}

	@Override
	public boolean isVariableLength() {
		// NOTE: while fixed-length IndexFields are possible, this past
		// oversight failed to override this method for fixed-length cases
		// (e.g., indexing fixed-length field with long primary key).
		// To preserve backward compatibility this can not be changed for
		// long primary keys.
		return true;
	}

	@Override
	public boolean equals(Object obj) {
		// Narrow equality to LegacyIndexField instances only; since equality is
		// only narrowed relative to super.equals, the inherited hashCode remains
		// consistent with this implementation.
		return (obj instanceof LegacyIndexField) && super.equals(obj);
	}

	@Override
	LegacyIndexField newIndexField(Field indexValue, Field primaryKey) {
		// Legacy index tables only ever employed LongField primary keys, so
		// reject any mismatched index value type or non-long primary key.
		if (!indexValue.isSameType(getIndexedField()) || !(primaryKey instanceof LongField)) {
			throw new IllegalArgumentException("incorrect index value or key type");
		}
		return new LegacyIndexField(indexValue, (LongField) primaryKey);
	}
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,15 +15,30 @@
*/
package db;
import ghidra.util.exception.AssertException;
import java.io.IOException;
import db.buffers.DataBuffer;
/**
* <code>LongField</code> provides a wrapper for 8-byte signed long data
* which is read or written to a Record.
*/
public class LongField extends Field {
public final class LongField extends Field {
/**
* Minimum long field value
*/
public static final LongField MIN_VALUE = new LongField(Long.MIN_VALUE, true);
/**
* Maximum long field value
*/
public static final LongField MAX_VALUE = new LongField(Long.MAX_VALUE, true);
/**
* Instance intended for defining a {@link Table} {@link Schema}
*/
public static final LongField INSTANCE = MIN_VALUE;
private long value;
@ -39,69 +53,57 @@ public class LongField extends Field {
* @param l initial value
*/
public LongField(long l) {
this(l, false);
}
/**
* Construct a long field with an initial value of l.
* @param l initial value
* @param immutable true if field value is immutable
*/
LongField(long l, boolean immutable) {
super(immutable);
value = l;
}
/*
* @see ghidra.framework.store.db.Field#getLongValue()
*/
@Override
public long getLongValue() {
return value;
}
/*
* @see ghidra.framework.store.db.Field#setLongValue(long)
*/
@Override
public void setLongValue(long value) {
checkImmutable();
this.value = value;
}
/*
* @see ghidra.framework.store.db.Field#length()
*/
@Override
int length() {
return 8;
}
/*
* @see ghidra.framework.store.db.Field#write(ghidra.framework.store.Buffer, int)
*/
@Override
int write(Buffer buf, int offset) throws IOException {
return buf.putLong(offset, value);
}
/*
* @see ghidra.framework.store.db.Field#read(ghidra.framework.store.Buffer, int)
*/
@Override
int read(Buffer buf, int offset) throws IOException {
checkImmutable();
value = buf.getLong(offset);
return offset + 8;
}
/*
* @see ghidra.framework.store.db.Field#readLength(ghidra.framework.store.Buffer, int)
*/
@Override
int readLength(Buffer buf, int offset) throws IOException {
return 8;
}
/*
* @see ghidra.framework.store.db.Field#getFieldType()
*/
@Override
protected byte getFieldType() {
byte getFieldType() {
return LONG_TYPE;
}
/*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return "LongField: " + Long.toString(value);
@ -109,12 +111,9 @@ public class LongField extends Field {
@Override
public String getValueAsString() {
return Long.toHexString(value);
return "0x" + Long.toHexString(value);
}
/*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (obj == null || !(obj instanceof LongField))
@ -122,11 +121,11 @@ public class LongField extends Field {
return ((LongField) obj).value == value;
}
/*
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override
public int compareTo(Field o) {
if (!(o instanceof LongField)) {
throw new UnsupportedOperationException("may only compare similar Field types");
}
LongField f = (LongField) o;
if (value == f.value)
return 0;
@ -135,27 +134,26 @@ public class LongField extends Field {
return 1;
}
/*
* @see ghidra.framework.store.db.Field#newField(ghidra.framework.store.db.Field)
*/
@Override
public Field newField(Field fieldValue) {
if (fieldValue.isVariableLength())
throw new AssertException();
return new LongField(fieldValue.getLongValue());
int compareTo(DataBuffer buffer, int offset) {
long otherValue = buffer.getLong(offset);
if (value == otherValue)
return 0;
else if (value < otherValue)
return -1;
return 1;
}
/*
* @see ghidra.framework.store.db.Field#newField()
*/
@Override
public Field newField() {
public LongField copyField() {
return new LongField(getLongValue());
}
@Override
public LongField newField() {
return new LongField();
}
/*
* @see ghidra.framework.store.db.Field#getBinaryData()
*/
@Override
public byte[] getBinaryData() {
return new byte[] { (byte) (value >> 56), (byte) (value >> 48), (byte) (value >> 40),
@ -163,12 +161,31 @@ public class LongField extends Field {
(byte) value };
}
/*
* @see java.lang.Object#hashCode()
*/
@Override
public void setBinaryData(byte[] bytes) {
checkImmutable();
if (bytes.length != 8) {
throw new IllegalFieldAccessException();
}
value = (((long) bytes[0] & 0xff) << 56) | (((long) bytes[1] & 0xff) << 48) |
(((long) bytes[2] & 0xff) << 40) | (((long) bytes[3] & 0xff) << 32) |
(((long) bytes[4] & 0xff) << 24) | (((long) bytes[5] & 0xff) << 16) |
(((long) bytes[6] & 0xff) << 8) | ((long) bytes[7] & 0xff);
}
@Override
public int hashCode() {
return (int) (value ^ (value >>> 32));
}
@Override
LongField getMinValue() {
return MIN_VALUE;
}
@Override
LongField getMaxValue() {
return MAX_VALUE;
}
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,15 +15,14 @@
*/
package db;
import java.io.IOException;
import db.buffers.DataBuffer;
import ghidra.util.Msg;
import ghidra.util.exception.AssertException;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
import java.io.IOException;
import db.buffers.DataBuffer;
/**
* <code>LongKeyInteriorNode</code> stores a BTree node for use as an interior
* node when searching for Table records within the database. This type of node
@ -33,7 +31,7 @@ import db.buffers.DataBuffer;
* | NodeType(1) | KeyCount(4) | Key0(8) | ID0(4) | ... | KeyN(8) | IDN(4) |
* </pre>
*/
class LongKeyInteriorNode extends LongKeyNode {
class LongKeyInteriorNode extends LongKeyNode implements InteriorNode {
private static final int BASE = LONGKEY_NODE_HEADER_SIZE;
@ -63,7 +61,8 @@ class LongKeyInteriorNode extends LongKeyNode {
* @param id2 right child node buffer ID
* @throws IOException thrown if IO error occurs
*/
LongKeyInteriorNode(NodeMgr nodeMgr, long key1, int id1, long key2, int id2) throws IOException {
LongKeyInteriorNode(NodeMgr nodeMgr, long key1, int id1, long key2, int id2)
throws IOException {
super(nodeMgr, NodeMgr.LONGKEY_INTERIOR_NODE);
maxKeyCount = (buffer.length() - BASE) / ENTRY_SIZE;
setKeyCount(2);
@ -73,6 +72,11 @@ class LongKeyInteriorNode extends LongKeyNode {
putEntry(1, key2, id2);
}
@Override
public LongKeyInteriorNode getParent() {
return parent;
}
/**
* Construct a new empty long-key interior node.
* Node must be initialized with a minimum of two keys.
@ -86,16 +90,16 @@ class LongKeyInteriorNode extends LongKeyNode {
void logConsistencyError(String tableName, String msg, Throwable t) {
Msg.debug(this, "Consistency Error (" + tableName + "): " + msg);
Msg.debug(this, " parent.key[0]=" + Long.toHexString(getKey(0)) + " bufferID=" +
getBufferId());
Msg.debug(this,
" parent.key[0]=" + Long.toHexString(getKey(0)) + " bufferID=" + getBufferId());
if (t != null) {
Msg.error(this, "Consistency Error (" + tableName + ")", t);
}
}
@Override
public boolean isConsistent(String tableName, TaskMonitor monitor) throws IOException,
CancelledException {
public boolean isConsistent(String tableName, TaskMonitor monitor)
throws IOException, CancelledException {
boolean consistent = true;
long lastMinKey = 0;
long lastMaxKey = 0;
@ -106,23 +110,21 @@ class LongKeyInteriorNode extends LongKeyNode {
if (i != 0) {
if (key <= lastMinKey) {
consistent = false;
logConsistencyError(tableName, "child[" + i + "].minKey <= child[" + (i - 1) +
"].minKey", null);
logConsistencyError(tableName,
"child[" + i + "].minKey <= child[" + (i - 1) + "].minKey", null);
Msg.debug(this, " child[" + i + "].minKey = 0x" + Long.toHexString(key) +
" bufferID=" + getBufferId(i));
Msg.debug(this,
" child[" + (i - 1) + "].minKey = 0x" + Long.toHexString(lastMinKey) +
" bufferID=" + getBufferId(i - 1));
Msg.debug(this, " child[" + (i - 1) + "].minKey = 0x" +
Long.toHexString(lastMinKey) + " bufferID=" + getBufferId(i - 1));
}
else if (key <= lastMaxKey) {
consistent = false;
logConsistencyError(tableName, "child[" + i + "].minKey <= child[" + (i - 1) +
"].maxKey", null);
logConsistencyError(tableName,
"child[" + i + "].minKey <= child[" + (i - 1) + "].maxKey", null);
Msg.debug(this, " child[" + i + "].minKey = 0x" + Long.toHexString(key) +
" bufferID=" + getBufferId(i));
Msg.debug(this,
" child[" + (i - 1) + "].maxKey = 0x" + Long.toHexString(lastMaxKey) +
" bufferID=" + getBufferId(i - 1));
Msg.debug(this, " child[" + (i - 1) + "].maxKey = 0x" +
Long.toHexString(lastMaxKey) + " bufferID=" + getBufferId(i - 1));
}
}
@ -155,8 +157,8 @@ class LongKeyInteriorNode extends LongKeyNode {
long childKey0 = node.getKey(0);
if (key != childKey0) {
consistent = false;
logConsistencyError(tableName, "parent key entry mismatch with child[" + i +
"].minKey", null);
logConsistencyError(tableName,
"parent key entry mismatch with child[" + i + "].minKey", null);
Msg.debug(this, " child[" + i + "].minKey = 0x" + Long.toHexString(childKey0) +
" bufferID=" + getBufferId(i - 1));
Msg.debug(this, " parent key entry = 0x" + Long.toHexString(key));
@ -178,9 +180,15 @@ class LongKeyInteriorNode extends LongKeyNode {
/**
* Perform a binary search to locate the specified key and derive an index
* into the Buffer ID storage.
* @param key
* @return int buffer ID index.
* into the Buffer ID storage. This method is intended to locate the child
* node which contains the specified key. The returned index corresponds
* to a child's stored buffer/node ID and may correspond to another interior
* node or a leaf record node. Each stored key within this interior node
* effectively identifies the maximum key contained within the corresponding
* child node.
* @param key key to search for
* @return int buffer ID index of child node. An existing positive index
* value will always be returned.
*/
int getIdIndex(long key) {
@ -203,10 +211,18 @@ class LongKeyInteriorNode extends LongKeyNode {
return max;
}
@Override
public int getKeyIndex(Field key) throws IOException {
return getKeyIndex(key.getLongValue());
}
/**
* Perform a binary search to locate the specified key and derive an index
* into the Buffer ID storage.
* @param key
* into the Buffer ID storage. This method is intended to find the insertion
* index or exact match for a child key. A negative value will be returned
* when an exact match is not found and may be transformed into an
* insertion index (insertIndex = -returnedIndex-1).
* @param key key to search for
* @return int buffer ID index.
*/
private int getKeyIndex(long key) {
@ -230,9 +246,6 @@ class LongKeyInteriorNode extends LongKeyNode {
return -(min + 1);
}
/*
* @see ghidra.framework.store.db.LongKeyNode#getKey(int)
*/
@Override
long getKey(int index) {
return buffer.getLong(BASE + (index * ENTRY_SIZE));
@ -329,10 +342,11 @@ class LongKeyInteriorNode extends LongKeyNode {
if (index < 0) {
throw new AssertException();
}
// Update key
putKey(index, newKey);
if (index == 0 && parent != null) {
parent.keyChanged(oldKey, newKey);
}
putKey(index, newKey);
}
/**
@ -395,9 +409,6 @@ class LongKeyInteriorNode extends LongKeyNode {
newNode.getBufferId());
}
/*
* @see ghidra.framework.store.db.LongKeyNode#getLeafNode(long)
*/
@Override
LongKeyRecordNode getLeafNode(long key) throws IOException {
LongKeyNode node = nodeMgr.getLongKeyNode(getBufferId(getIdIndex(key)));
@ -544,9 +555,6 @@ class LongKeyInteriorNode extends LongKeyNode {
}
}
/*
* @see ghidra.framework.store.db.LongKeyNode#delete()
*/
@Override
public void delete() throws IOException {
@ -559,9 +567,7 @@ class LongKeyInteriorNode extends LongKeyNode {
nodeMgr.deleteNode(this);
}
/*
* @see ghidra.framework.store.db.BTreeNode#getBufferReferences()
*/
@Override
public int[] getBufferReferences() {
int[] ids = new int[keyCount];
for (int i = 0; i < keyCount; i++) {

View file

@ -101,6 +101,11 @@ abstract class LongKeyNode implements BTreeNode {
*/
abstract long getKey(int index);
@Override
public final Field getKeyField(int index) throws IOException {
return new LongField(getKey(index));
}
/**
* Get the leaf node which contains the specified key.
* @param key key value

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,28 +15,32 @@
*/
package db;
import java.io.IOException;
import db.buffers.DataBuffer;
import ghidra.util.Msg;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
import java.io.IOException;
import db.buffers.DataBuffer;
/**
* <code>LongKeyRecordNode</code> is an abstract implementation of a BTree leaf node
* which utilizes long key values and stores records.
* <p>
* This type of node has the following partial layout within a single DataBuffer
* (field size in bytes):
* <pre>
* | NodeType(1) | KeyCount(4) | PrevLeafId(4) | NextLeafId(4) | ...
* </pre>
*/
abstract class LongKeyRecordNode extends LongKeyNode {
abstract class LongKeyRecordNode extends LongKeyNode implements RecordNode {
private static final int ID_SIZE = 4;
private static final int PREV_LEAF_ID_OFFSET = LONGKEY_NODE_HEADER_SIZE;
private static final int NEXT_LEAF_ID_OFFSET = PREV_LEAF_ID_OFFSET + ID_SIZE;
static final int RECORD_LEAF_HEADER_SIZE = LONGKEY_NODE_HEADER_SIZE + 2*ID_SIZE;
static final int RECORD_LEAF_HEADER_SIZE = LONGKEY_NODE_HEADER_SIZE + 2 * ID_SIZE;
/**
* Construct an existing long-key record leaf node.
* @param nodeMgr table node manager instance
@ -46,7 +49,7 @@ abstract class LongKeyRecordNode extends LongKeyNode {
LongKeyRecordNode(NodeMgr nodeMgr, DataBuffer buf) {
super(nodeMgr, buf);
}
/**
* Construct a new long-key record leaf node.
* @param nodeMgr table node manager instance
@ -55,14 +58,20 @@ abstract class LongKeyRecordNode extends LongKeyNode {
* @param nextLeafId node buffer id for next leaf - right sibling ( &lt; 0 : no leaf)
* @throws IOException thrown if an IO error occurs
*/
LongKeyRecordNode(NodeMgr nodeMgr, byte nodeType, int prevLeafId, int nextLeafId) throws IOException {
LongKeyRecordNode(NodeMgr nodeMgr, byte nodeType, int prevLeafId, int nextLeafId)
throws IOException {
super(nodeMgr, nodeType);
// Initialize header
buffer.putInt(PREV_LEAF_ID_OFFSET, prevLeafId);
buffer.putInt(NEXT_LEAF_ID_OFFSET, nextLeafId);
}
@Override
public LongKeyInteriorNode getParent() {
return parent;
}
void logConsistencyError(String tableName, String msg, Throwable t) {
Msg.debug(this, "Consistency Error (" + tableName + "): " + msg);
Msg.debug(this, " bufferID=" + getBufferId() + " key[0]=0x" + Long.toHexString(getKey(0)));
@ -70,9 +79,10 @@ abstract class LongKeyRecordNode extends LongKeyNode {
Msg.error(this, "Consistency Error (" + tableName + ")", t);
}
}
@Override
public boolean isConsistent(String tableName, TaskMonitor monitor) throws IOException, CancelledException {
public boolean isConsistent(String tableName, TaskMonitor monitor)
throws IOException, CancelledException {
boolean consistent = true;
long prevKey = 0;
for (int i = 0; i < keyCount; i++) {
@ -81,14 +91,15 @@ abstract class LongKeyRecordNode extends LongKeyNode {
if (i != 0) {
if (key <= prevKey) {
consistent = false;
logConsistencyError(tableName, "key[" + i + "] <= key[" + (i-1) + "]", null);
logConsistencyError(tableName, "key[" + i + "] <= key[" + (i - 1) + "]", null);
Msg.debug(this, " key[" + i + "].minKey = 0x" + Long.toHexString(key));
Msg.debug(this, " key[" + (i-1) + "].minKey = 0x" + Long.toHexString(prevKey));
Msg.debug(this,
" key[" + (i - 1) + "].minKey = 0x" + Long.toHexString(prevKey));
}
}
prevKey = key;
}
if ((parent == null || parent.isLeftmostKey(getKey(0))) && getPreviousLeaf() != null) {
consistent = false;
logConsistencyError(tableName, "previous-leaf should not exist", null);
@ -112,18 +123,15 @@ abstract class LongKeyRecordNode extends LongKeyNode {
consistent = false;
logConsistencyError(tableName, "this leaf is not linked to next-leaf", null);
}
return consistent;
}
/*
* @see ghidra.framework.store.db.LongKeyNode#getLeafNode(long)
*/
@Override
LongKeyRecordNode getLeafNode(long key) throws IOException {
LongKeyRecordNode getLeafNode(long key) throws IOException {
return this;
}
/**
* Get this leaf node's right sibling
* @return this leaf node's right sibling or null if right sibling does not exist.
@ -135,9 +143,9 @@ abstract class LongKeyRecordNode extends LongKeyNode {
if (nextLeafId >= 0) {
leaf = (LongKeyRecordNode) nodeMgr.getLongKeyNode(nextLeafId);
}
return leaf;
return leaf;
}
/**
* Get this leaf node's left sibling
* @return this leaf node's left sibling or null if left sibling does not exist.
@ -149,9 +157,9 @@ abstract class LongKeyRecordNode extends LongKeyNode {
if (nextLeafId >= 0) {
leaf = (LongKeyRecordNode) nodeMgr.getLongKeyNode(nextLeafId);
}
return leaf;
return leaf;
}
/**
* Perform a binary search to locate the specified key.
* @param key key value
@ -159,12 +167,12 @@ abstract class LongKeyRecordNode extends LongKeyNode {
* point.
*/
int getKeyIndex(long key) {
int min = 0;
int max = keyCount - 1;
while (min <= max) {
int i = (min + max)/2;
int i = (min + max) / 2;
long k = getKey(i);
if (k == key) {
return i;
@ -176,9 +184,14 @@ abstract class LongKeyRecordNode extends LongKeyNode {
max = i - 1;
}
}
return -(min+1);
return -(min + 1);
}
@Override
public int getKeyIndex(Field key) throws IOException {
return getKeyIndex(key.getLongValue());
}
/**
* Split this leaf node in half and update tree.
* When a split is performed, the next operation must be performed
@ -187,7 +200,7 @@ abstract class LongKeyRecordNode extends LongKeyNode {
* @throws IOException thrown if an IO error occurs
*/
LongKeyNode split() throws IOException {
// Create new leaf
int oldSiblingId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
LongKeyRecordNode newLeaf = createNewLeaf(buffer.getId(), oldSiblingId);
@ -199,59 +212,61 @@ abstract class LongKeyRecordNode extends LongKeyNode {
LongKeyRecordNode leaf = (LongKeyRecordNode) nodeMgr.getLongKeyNode(oldSiblingId);
leaf.buffer.putInt(PREV_LEAF_ID_OFFSET, newBufId);
}
// Split node creating two balanced leaves
splitData(newLeaf);
if (parent != null) {
// Ask parent to insert new node and return root
return parent.insert(newBufId, newLeaf.getKey(0));
}
// New parent node becomes root
return new LongKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), newLeaf.getKey(0), newBufId);
return new LongKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), newLeaf.getKey(0),
newBufId);
}
/**
* Append a leaf which contains one or more keys and update tree. Leaf is inserted
* as the new right sibling of this leaf.
* @param newLeaf new right sibling leaf (must be same node type as this leaf)
* @param leaf new right sibling leaf (must be same node type as this leaf)
* @return root node which may have changed.
* @throws IOException thrown if an IO error occurs
*/
LongKeyNode appendLeaf(LongKeyRecordNode leaf) throws IOException {
// Create new leaf and link
leaf.buffer.putInt(PREV_LEAF_ID_OFFSET, buffer.getId());
int rightLeafBufId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
leaf.buffer.putInt(NEXT_LEAF_ID_OFFSET, rightLeafBufId);
// Adjust this node
int newBufId = leaf.buffer.getId();
buffer.putInt(NEXT_LEAF_ID_OFFSET, newBufId);
// Adjust old right node if present
if (rightLeafBufId >= 0) {
LongKeyNode rightLeaf = nodeMgr.getLongKeyNode(rightLeafBufId);
rightLeaf.buffer.putInt(PREV_LEAF_ID_OFFSET, newBufId);
}
if (parent != null) {
// Ask parent to insert new node and return root - leaf parent is unknown
return parent.insert(newBufId, leaf.getKey(0));
}
// New parent node becomes root
return new LongKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), leaf.getKey(0), newBufId);
return new LongKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), leaf.getKey(0),
newBufId);
}
/**
* Remove this leaf from the tree.
* @return root node which may have changed.
* @throws IOException thrown if IO error occurs
*/
LongKeyNode removeLeaf() throws IOException {
long key = getKey(0);
int prevBufferId = buffer.getInt(PREV_LEAF_ID_OFFSET);
int nextBufferId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
@ -263,11 +278,11 @@ abstract class LongKeyRecordNode extends LongKeyNode {
LongKeyRecordNode nextNode = (LongKeyRecordNode) nodeMgr.getLongKeyNode(nextBufferId);
nextNode.getBuffer().putInt(PREV_LEAF_ID_OFFSET, prevBufferId);
}
nodeMgr.deleteNode(this);
if (parent == null) {
return null;
}
}
return parent.deleteChild(key);
}
@ -277,12 +292,12 @@ abstract class LongKeyRecordNode extends LongKeyNode {
* @param newRightLeaf empty right sibling leaf
*/
abstract void splitData(LongKeyRecordNode newRightLeaf);
/**
* Create a new leaf and add to the node manager.
* The new leaf's parent is unknown.
* @param prevLeafId node buffer id for previous leaf - left sibling ( &lt; 0: no leaf)
* @param nextLeafId node buffer id for next leaf - right sibling ( &lt; 0 : no leaf)
* @param prevNodeId node buffer id for previous leaf - left sibling ( &lt; 0: no leaf)
* @param nextNodeId node buffer id for next leaf - right sibling ( &lt; 0 : no leaf)
* @return new leaf node.
* @throws IOException thrown if IO error occurs
*/
@ -296,10 +311,10 @@ abstract class LongKeyRecordNode extends LongKeyNode {
* @throws IOException thrown if IO error occurs
*/
LongKeyNode putRecord(Record record, Table table) throws IOException {
long key = record.getKey();
int index = getKeyIndex(key);
// Handle record update case
if (index >= 0) {
if (table != null) {
@ -308,19 +323,19 @@ abstract class LongKeyRecordNode extends LongKeyNode {
LongKeyNode newRoot = updateRecord(index, record);
return newRoot;
}
// Handle new record - see if we have room in this leaf
index = -index-1;
index = -index - 1;
if (insertRecord(index, record)) {
if (index == 0 && parent != null) {
parent.keyChanged(getKey(1), key);
parent.keyChanged(getKey(1), key);
}
if (table != null) {
table.insertedRecord(record);
}
return getRoot();
}
// Special Case - append new leaf to right
if (index == keyCount) {
LongKeyNode newRoot = appendNewLeaf(record);
@ -334,19 +349,19 @@ abstract class LongKeyRecordNode extends LongKeyNode {
LongKeyRecordNode leaf = split().getLeafNode(key);
return leaf.putRecord(record, table);
}
/**
* Append a new leaf and insert the specified record.
* @param record data record with long key
* @return root node which may have changed.
* @throws IOException thrown if IO error occurs
*/
*/
LongKeyNode appendNewLeaf(Record record) throws IOException {
LongKeyRecordNode newLeaf = createNewLeaf(-1, -1);
newLeaf.insertRecord(0, record);
return appendLeaf(newLeaf);
}
/**
* Delete the record identified by the specified key.
* @param key record key
@ -361,7 +376,7 @@ abstract class LongKeyRecordNode extends LongKeyNode {
if (index < 0) {
return getRoot();
}
if (table != null) {
table.deletedRecord(getRecord(table.getSchema(), index));
}
@ -374,15 +389,15 @@ abstract class LongKeyRecordNode extends LongKeyNode {
// Remove record within this node
remove(index);
// Notify parent of leftmost key change
if (index == 0 && parent != null) {
parent.keyChanged(key, getKey(0));
parent.keyChanged(key, getKey(0));
}
return getRoot();
}
/**
* Remove the record identified by index.
* This will never be the last record within the node.
@ -391,7 +406,6 @@ abstract class LongKeyRecordNode extends LongKeyNode {
*/
abstract void remove(int index) throws IOException;
/**
* Inserts the record at the given index if there is sufficient space in
* the buffer.
@ -401,7 +415,7 @@ abstract class LongKeyRecordNode extends LongKeyNode {
* @throws IOException thrown if IO error occurs
*/
abstract boolean insertRecord(int index, Record record) throws IOException;
/**
* Updates the record at the given index.
* @param index record index
@ -419,16 +433,16 @@ abstract class LongKeyRecordNode extends LongKeyNode {
* @throws IOException thrown if IO error occurs
*/
abstract Record getRecord(long key, Schema schema) throws IOException;
/**
* Get the record located at the specified index.
* @param schema record data schema
* @param keyIndex key index
* @param index key index
* @return Record
* @throws IOException thrown if IO error occurs
*/
abstract Record getRecord(Schema schema, int index) throws IOException;
/**
 * Get the first record whose key is less than the specified key.
* @param key record key
@ -439,7 +453,7 @@ abstract class LongKeyRecordNode extends LongKeyNode {
Record getRecordBefore(long key, Schema schema) throws IOException {
int index = getKeyIndex(key);
if (index < 0) {
index = -index-2;
index = -index - 2;
}
else {
--index;
@ -448,9 +462,9 @@ abstract class LongKeyRecordNode extends LongKeyNode {
LongKeyRecordNode nextLeaf = getPreviousLeaf();
return nextLeaf != null ? nextLeaf.getRecord(schema, nextLeaf.keyCount - 1) : null;
}
return getRecord(schema, index);
return getRecord(schema, index);
}
/**
 * Get the first record whose key is greater than the specified key.
* @param key record key
@ -461,7 +475,7 @@ abstract class LongKeyRecordNode extends LongKeyNode {
Record getRecordAfter(long key, Schema schema) throws IOException {
int index = getKeyIndex(key);
if (index < 0) {
index = -(index+1);
index = -(index + 1);
}
else {
++index;
@ -472,7 +486,7 @@ abstract class LongKeyRecordNode extends LongKeyNode {
}
return getRecord(schema, index);
}
/**
 * Get the first record whose key is less than or equal to the specified
* key.
@ -484,15 +498,15 @@ abstract class LongKeyRecordNode extends LongKeyNode {
Record getRecordAtOrBefore(long key, Schema schema) throws IOException {
int index = getKeyIndex(key);
if (index < 0) {
index = -index-2;
index = -index - 2;
}
if (index < 0) {
LongKeyRecordNode nextLeaf = getPreviousLeaf();
return nextLeaf != null ? nextLeaf.getRecord(schema, nextLeaf.keyCount - 1) : null;
}
return getRecord(schema, index);
return getRecord(schema, index);
}
/**
 * Get the first record whose key is greater than or equal to the specified
* key.
@ -504,19 +518,19 @@ abstract class LongKeyRecordNode extends LongKeyNode {
Record getRecordAtOrAfter(long key, Schema schema) throws IOException {
int index = getKeyIndex(key);
if (index < 0) {
index = -(index+1);
index = -(index + 1);
}
if (index == keyCount) {
LongKeyRecordNode nextLeaf = getNextLeaf();
return nextLeaf != null ? nextLeaf.getRecord(schema, 0) : null;
}
return getRecord(schema, index);
return getRecord(schema, index);
}
/**
* Create a new record node with no siblings attached.
* @param nodeMgr table node manager instance
* @param fixedRecordLength length of fixed-length record, 0 = variable length
* @param schema record schema
* @return new record leaf node
* @throws IOException thrown if IO error occurs
*/
@ -528,7 +542,7 @@ abstract class LongKeyRecordNode extends LongKeyNode {
else {
node = new FixedRecNode(nodeMgr, schema.getFixedLength(), -1, -1);
}
return node;
return node;
}
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,7 +15,6 @@
*/
package db;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
@ -28,45 +26,48 @@ import java.util.Arrays;
* object associated with the database.
*/
class MasterTable {
private TableRecord masterRecord;
private DBHandle dbh;
private DBParms dbParms;
private Table table;
// List of table records sorted by tablenum
TableRecord[] tableRecords;
private long nextTableNum = 0;
/**
* Construct an existing master table.
* @param db database handle
* @param dbh database handle
* @throws IOException database IO error
*/
MasterTable(DBHandle db) throws IOException {
this.dbParms = db.getDBParms();
masterRecord = new TableRecord(0, "MASTER",
TableRecord.getTableRecordSchema(), -1);
MasterTable(DBHandle dbh) throws IOException {
this.dbh = dbh;
this.dbParms = dbh.getDBParms();
masterRecord = new TableRecord(0, "MASTER", TableRecord.getTableRecordSchema(), -1);
try {
masterRecord.setRootBufferId(dbParms.get(DBParms.MASTER_TABLE_ROOT_BUFFER_ID_PARM));
} catch (ArrayIndexOutOfBoundsException e) {
throw new IOException("Corrupt database parameters");
}
table = new Table(db, masterRecord);
catch (ArrayIndexOutOfBoundsException e) {
throw new IOException("Corrupt database parameters", e);
}
table = new Table(dbh, masterRecord);
ArrayList<TableRecord> trList = new ArrayList<TableRecord>();
RecordIterator it = table.iterator();
while(it.hasNext()) {
trList.add(new TableRecord(it.next()));
while (it.hasNext()) {
trList.add(new TableRecord(dbh, it.next()));
}
tableRecords = new TableRecord[trList.size()];
trList.toArray(tableRecords);
if (tableRecords.length > 0) {
nextTableNum = tableRecords[tableRecords.length-1].getTableNum() + 1;
nextTableNum = tableRecords[tableRecords.length - 1].getTableNum() + 1;
}
}
/**
* Create a new table record and add to master table.
* If this is an index table the name corresponds to the table which is
@ -78,34 +79,37 @@ class MasterTable {
* @param tableSchema table schema
* @param indexedColumn primary table index key column, or -1 for primary table
* @return new table record
* @throws IOException database IO error
*/
TableRecord createTableRecord(String name, Schema tableSchema, int indexedColumn) throws IOException {
TableRecord createTableRecord(String name, Schema tableSchema, int indexedColumn)
throws IOException {
// Create new table record
TableRecord tableRecord = new TableRecord(nextTableNum++, name, tableSchema, indexedColumn);
table.putRecord(tableRecord.getRecord());
// Update master root which may have changed
dbParms.set(DBParms.MASTER_TABLE_ROOT_BUFFER_ID_PARM, masterRecord.getRootBufferId());
// Update tableRecord list
TableRecord[] newList = new TableRecord[tableRecords.length+1];
TableRecord[] newList = new TableRecord[tableRecords.length + 1];
System.arraycopy(tableRecords, 0, newList, 0, tableRecords.length);
newList[tableRecords.length] = tableRecord;
Arrays.sort(newList);
tableRecords = newList;
return tableRecord;
}
/**
* Remove the master table record associated with the specified table name.
* This method may only be invoked while a database transaction
* is in progress.
* @param id table name
* @param tableNum table number (key within master table)
* @throws IOException database IO error
*/
void deleteTableRecord(long tableNum) throws IOException {
// Locate tableRecord to be deleted
for (int i = 0; i < tableRecords.length; i++) {
if (tableRecords[i].getTableNum() == tableNum) {
@ -113,21 +117,22 @@ class MasterTable {
throw new IOException("Can not delete non-empty table");
table.deleteRecord(tableNum);
tableRecords[i].invalidate();
// Update master root which may have changed
dbParms.set(DBParms.MASTER_TABLE_ROOT_BUFFER_ID_PARM, masterRecord.getRootBufferId());
dbParms.set(DBParms.MASTER_TABLE_ROOT_BUFFER_ID_PARM,
masterRecord.getRootBufferId());
// Update tableRecord list
TableRecord[] newList = new TableRecord[tableRecords.length-1];
TableRecord[] newList = new TableRecord[tableRecords.length - 1];
System.arraycopy(tableRecords, 0, newList, 0, i);
System.arraycopy(tableRecords, i+1, newList, i, tableRecords.length-i-1);
System.arraycopy(tableRecords, i + 1, newList, i, tableRecords.length - i - 1);
tableRecords = newList;
return;
}
}
throw new IOException("Table not found");
}
/**
* Get a list of all tables defined within this master table.
* Records are returned in the list ordered by their table number key.
@ -136,59 +141,61 @@ class MasterTable {
TableRecord[] getTableRecords() {
return tableRecords;
}
/**
* Refresh table data from the master table.
* Records are returned in the list ordered by their table number key.
* @return the update list of master table records.
* @throws IOException database IO error
*/
TableRecord[] refreshTableRecords() throws IOException {
try {
int masterRootId = dbParms.get(DBParms.MASTER_TABLE_ROOT_BUFFER_ID_PARM);
if (masterRecord.getRootBufferId() != masterRootId) {
masterRecord.setRootBufferId(masterRootId);
table.tableRecordChanged();
}
} catch (ArrayIndexOutOfBoundsException e) {
throw new IOException("Corrupt database parameters");
}
catch (ArrayIndexOutOfBoundsException e) {
throw new IOException("Corrupt database parameters", e);
}
ArrayList<TableRecord> trList = new ArrayList<TableRecord>();
int ix = 0;
int oldTableCnt = tableRecords.length;
RecordIterator it = table.iterator();
while (it.hasNext()) {
Record rec = it.next();
long tablenum = rec.getKey();
while (ix < tableRecords.length && tablenum > tableRecords[ix].getTableNum()) {
tableRecords[ix++].invalidate(); // table no longer exists
}
if (ix == oldTableCnt || tablenum < tableRecords[ix].getTableNum()) {
trList.add(new TableRecord(rec)); // new table
}
trList.add(new TableRecord(dbh, rec)); // new table
}
else if (tablenum == tableRecords[ix].getTableNum()) {
tableRecords[ix].setRecord(rec);
tableRecords[ix].setRecord(dbh, rec);
trList.add(tableRecords[ix++]); // update existing table
}
}
}
while (ix < tableRecords.length) {
tableRecords[ix++].invalidate(); // table no longer exists
}
tableRecords = trList.toArray(new TableRecord[trList.size()]);
return tableRecords;
}
/**
* Flush all unsaved table changes to the underlying buffer mgr.
* This method may only be invoked while a database transaction
* is in progress.
* @throws IOException
* @throws IOException database IO error
*/
void flush() throws IOException {
for (int i = 0; i < tableRecords.length; i++) {
@ -201,8 +208,8 @@ class MasterTable {
/**
* Change the name of a table and its associated indexes.
* @param oldName
* @param newName
* @param oldName old table name
 * @param newName new table name
*/
void changeTableName(String oldName, String newName) {
for (int i = 0; i < tableRecords.length; i++) {
@ -211,6 +218,5 @@ class MasterTable {
}
}
}
}
}

View file

@ -15,13 +15,12 @@
*/
package db;
import ghidra.util.datastruct.IntObjectHashtable;
import ghidra.util.exception.AssertException;
import java.io.IOException;
import java.util.HashMap;
import db.buffers.BufferMgr;
import db.buffers.DataBuffer;
import ghidra.util.exception.AssertException;
/**
* The <code>NodeMgr</code> manages all database nodes associated with
@ -30,6 +29,25 @@ import db.buffers.DataBuffer;
* buffer allocations, retrievals and releases as required. The NodeMgr
* also performs hard caching of all buffers until the releaseNodes
* method is invoked.
*
* Legacy Issues (prior to Ghidra 9.2):
* <ul>
* <li>Legacy {@link Table} implementation incorrectly employed {@link VarKeyNode}
* storage with primitive fixed-length primary keys other than {@link LongField}
* (e.g., {@link ByteField}). With improved support for fixed-length keys
 * legacy data poses a backward compatibility issue. This has been
* addressed through the use of a hack whereby a {@link Schema} is forced to
* treat the primary key as variable length
* (see {@link Schema#forceUseOfVariableLengthKeyNodes()}. The detection
* for this rare condition is provided by {@link TableRecord} during
* schema instantiation.</li>
*
* <li>Legacy {@link Table} implementation incorrectly employed variable
* length storage when both primary key and indexed fields were
* LongField types. This issue has been addressed by treating the
* {@link Field#LEGACY_INDEX_LONG_TYPE} (0x8) as variable-length (see
* implementation {@link LegacyIndexField}).</li>
* </ul>
*/
class NodeMgr {
@ -70,6 +88,24 @@ class NodeMgr {
*/
static final byte VARKEY_REC_NODE = 4;
/**
* Node type for fixed-length key interior tree nodes
* @see db.FixedKeyInteriorNode
*/
static final byte FIXEDKEY_INTERIOR_NODE = 5;
/**
* Node type for fixed-length key variable-length record leaf nodes
* @see db.FixedKeyVarRecNode
*/
static final byte FIXEDKEY_VAR_REC_NODE = 6;
/**
* Node type for fixed-length key fixed-length record leaf nodes
* @see db.FixedKeyFixedRecNode
*/
static final byte FIXEDKEY_FIXED_REC_NODE = 7;
/**
* Node type for chained buffer index nodes
* @see db.DBBuffer
@ -84,21 +120,21 @@ class NodeMgr {
private BufferMgr bufferMgr;
private Schema schema;
private String tableName;
private int leafRecordCnt = 0;
private IntObjectHashtable<BTreeNode> nodeTable = new IntObjectHashtable<BTreeNode>(10);
// private ArrayList<BTreeNode> nodeList = new ArrayList<BTreeNode>(10);
private HashMap<Integer, BTreeNode> nodeTable = new HashMap<>();
/**
* Construct a node manager for a specific table.
* @param table associated table
* @param bufferMgr buffer manager.
* @param schema table schema (required for Table use)
*/
NodeMgr(BufferMgr bufferMgr, Schema schema) {
NodeMgr(Table table, BufferMgr bufferMgr) {
this.bufferMgr = bufferMgr;
this.schema = schema;
this.schema = table.getSchema();
this.tableName = table.getName();
}
/**
@ -109,21 +145,36 @@ class NodeMgr {
return bufferMgr;
}
/**
* Get the table schema associated with this node manager
* @return table schema
*/
Schema getTableSchema() {
return schema;
}
/**
* Get the table name associated with this node manager
* @return table name
*/
String getTableName() {
return tableName;
}
/**
* Release all nodes held by this node manager.
* This method must be invoked before a database transaction can be committed.
* @return the change in record count (+/-)
* @throws IOException if IO error occurs on database
*/
int releaseNodes() throws IOException {
int[] bufferIds = nodeTable.getKeys();
for (int bufferId : bufferIds) {
BTreeNode node = nodeTable.get(bufferId);
if (node instanceof LongKeyRecordNode || node instanceof VarKeyRecordNode) {
for (BTreeNode node : nodeTable.values()) {
if (node instanceof RecordNode) {
leafRecordCnt -= node.getKeyCount();
}
bufferMgr.releaseBuffer(node.getBuffer());
}
nodeTable.removeAll();
nodeTable = new HashMap<>();
int result = -leafRecordCnt;
leafRecordCnt = 0;
return result;
@ -133,8 +184,8 @@ class NodeMgr {
* Release a specific read-only buffer node.
* WARNING! This method may only be used to release read-only buffers,
* if a release buffer has been modified an IOException will be thrown.
* @param bufferId
* @throws IOException
* @param bufferId buffer ID
* @throws IOException if IO error occurs on database
*/
void releaseReadOnlyNode(int bufferId) throws IOException {
BTreeNode node = nodeTable.get(bufferId);
@ -142,7 +193,7 @@ class NodeMgr {
// There is a possible leafRecordCount error if buffer is released multiple times
throw new IOException("Releasing modified buffer node as read-only");
}
if (node instanceof LongKeyRecordNode || node instanceof VarKeyRecordNode) {
if (node instanceof RecordNode) {
leafRecordCnt -= node.getKeyCount();
}
bufferMgr.releaseBuffer(node.getBuffer());
@ -170,11 +221,32 @@ class NodeMgr {
bufferMgr.deleteBuffer(bufferId);
}
/**
* Perform a test of the specified buffer to determine if it is
* a VarKeyNode type. It is important that the specified buffer
* not be in use.
* @param bufferMgr buffer manager
* @param bufferId buffer ID
* @return true if node found and is a VarKeyNode type
* @throws IOException thrown if an IO error occurs
*/
static boolean isVarKeyNode(BufferMgr bufferMgr, int bufferId) throws IOException {
DataBuffer buf = bufferMgr.getBuffer(bufferId);
try {
int nodeType = getNodeType(buf);
return nodeType == VARKEY_REC_NODE || nodeType == VARKEY_INTERIOR_NODE;
}
finally {
bufferMgr.releaseBuffer(buf);
}
}
/**
* Get a LongKeyNode object for a specified buffer
* @param bufferId buffer ID
* @return LongKeyNode instance
* @throws ClassCastException if node type is incorrect.
* @throws IOException if IO error occurs on database
*/
LongKeyNode getLongKeyNode(int bufferId) throws IOException {
LongKeyNode node = (LongKeyNode) nodeTable.get(bufferId);
@ -197,8 +269,44 @@ class NodeMgr {
node = new LongKeyInteriorNode(this, buf);
break;
default:
throw new AssertException("Unexpected Node Type (" + nodeType +
") found, expecting LongKeyNode");
bufferMgr.releaseBuffer(buf);
throw new AssertException(
"Unexpected Node Type (" + nodeType + ") found, expecting LongKeyNode");
}
return node;
}
/**
* Get a FixedKeyNode object for a specified buffer
* @param bufferId buffer ID
 * @return FixedKeyNode instance
* @throws ClassCastException if node type is incorrect.
* @throws IOException if IO error occurs on database
*/
FixedKeyNode getFixedKeyNode(int bufferId) throws IOException {
FixedKeyNode node = (FixedKeyNode) nodeTable.get(bufferId);
if (node != null) {
return node;
}
DataBuffer buf = bufferMgr.getBuffer(bufferId);
int nodeType = getNodeType(buf);
switch (nodeType) {
case FIXEDKEY_VAR_REC_NODE:
node = new FixedKeyVarRecNode(this, buf);
leafRecordCnt += node.keyCount;
break;
case FIXEDKEY_FIXED_REC_NODE:
node = new FixedKeyFixedRecNode(this, buf);
leafRecordCnt += node.keyCount;
break;
case FIXEDKEY_INTERIOR_NODE:
node = new FixedKeyInteriorNode(this, buf);
break;
default:
bufferMgr.releaseBuffer(buf);
throw new IOException(
"Unexpected Node Type (" + nodeType + ") found, expecting FixedKeyNode");
}
return node;
}
@ -208,6 +316,7 @@ class NodeMgr {
* @param bufferId buffer ID
* @return VarKeyNode instance
* @throws ClassCastException if node type is incorrect.
* @throws IOException if IO error occurs on database
*/
VarKeyNode getVarKeyNode(int bufferId) throws IOException {
VarKeyNode node = (VarKeyNode) nodeTable.get(bufferId);
@ -226,8 +335,9 @@ class NodeMgr {
node = new VarKeyInteriorNode(this, buf);
break;
default:
throw new AssertException("Unexpected Node Type (" + nodeType +
") found, expecting VarKeyNode");
bufferMgr.releaseBuffer(buf);
throw new AssertException(
"Unexpected Node Type (" + nodeType + ") found, expecting VarKeyNode");
}
return node;
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,10 +15,10 @@
*/
package db;
import ghidra.util.ObjectStorage;
import java.util.ArrayList;
import ghidra.util.ObjectStorage;
/**
* <code>ObjectStorageAdapterDB</code> provides an ObjectStorage
* implementation for use by Saveable objects. This allows Saveable objects
@ -29,17 +28,17 @@ import java.util.ArrayList;
* using a suitable schema.
*/
public class ObjectStorageAdapterDB implements ObjectStorage {
private ArrayList<Field> fieldList = new ArrayList<Field>();
private int col = 0;
private boolean readOnly = false;
/**
* Construct an empty writable storage adapter.
*/
public ObjectStorageAdapterDB() {
}
/**
* Construct a read-only storage adapter from an
* existing record.
@ -53,81 +52,63 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
}
}
/*
* @see ghidra.util.ObjectStorage#putInt(int)
*/
@Override
public void putInt(int value) {
if (readOnly)
throw new IllegalStateException();
fieldList.add(new IntField(value));
}
/*
* @see ghidra.util.ObjectStorage#putByte(byte)
*/
@Override
public void putByte(byte value) {
if (readOnly)
throw new IllegalStateException();
fieldList.add(new ByteField(value));
}
/*
* @see ghidra.util.ObjectStorage#putShort(short)
*/
@Override
public void putShort(short value) {
if (readOnly)
throw new IllegalStateException();
fieldList.add(new ShortField(value));
}
/*
* @see ghidra.util.ObjectStorage#putLong(long)
*/
@Override
public void putLong(long value) {
if (readOnly)
throw new IllegalStateException();
fieldList.add(new LongField(value));
}
/*
* @see ghidra.util.ObjectStorage#putString(java.lang.String)
*/
@Override
public void putString(String value) {
if (readOnly)
throw new IllegalStateException();
fieldList.add(new StringField(value));
}
/*
* @see ghidra.util.ObjectStorage#putBoolean(boolean)
*/
@Override
public void putBoolean(boolean value) {
if (readOnly)
throw new IllegalStateException();
fieldList.add(new BooleanField(value));
}
/*
* @see ghidra.util.ObjectStorage#putFloat(float)
*/
@Override
public void putFloat(float value) {
if (readOnly)
throw new IllegalStateException();
fieldList.add(new BinaryCodedField(value));
}
/*
* @see ghidra.util.ObjectStorage#putDouble(double)
*/
@Override
public void putDouble(double value) {
if (readOnly)
throw new IllegalStateException();
fieldList.add(new BinaryCodedField(value));
}
/*
* @see ghidra.util.ObjectStorage#getInt()
*/
@Override
public int getInt() {
try {
return fieldList.get(col++).getIntValue();
@ -137,9 +118,7 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
}
}
/*
* @see ghidra.util.ObjectStorage#getByte()
*/
@Override
public byte getByte() {
try {
return fieldList.get(col++).getByteValue();
@ -149,9 +128,7 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
}
}
/*
* @see ghidra.util.ObjectStorage#getShort()
*/
@Override
public short getShort() {
try {
return fieldList.get(col++).getShortValue();
@ -161,9 +138,7 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
}
}
/*
* @see ghidra.util.ObjectStorage#getLong()
*/
@Override
public long getLong() {
try {
return fieldList.get(col++).getLongValue();
@ -173,9 +148,7 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
}
}
/*
* @see ghidra.util.ObjectStorage#getBoolean()
*/
@Override
public boolean getBoolean() {
try {
return fieldList.get(col++).getBooleanValue();
@ -185,9 +158,7 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
}
}
/*
* @see ghidra.util.ObjectStorage#getString()
*/
@Override
public String getString() {
try {
return fieldList.get(col++).getString();
@ -197,12 +168,10 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
}
}
/*
* @see ghidra.util.ObjectStorage#getFloat()
*/
@Override
public float getFloat() {
try {
BinaryCodedField codedField = new BinaryCodedField((BinaryField)fieldList.get(col++));
BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++));
return codedField.getFloatValue();
}
catch (IndexOutOfBoundsException e) {
@ -210,12 +179,10 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
}
}
/*
* @see ghidra.util.ObjectStorage#getDouble()
*/
@Override
public double getDouble() {
try {
BinaryCodedField codedField = new BinaryCodedField((BinaryField)fieldList.get(col++));
BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++));
return codedField.getDoubleValue();
}
catch (IndexOutOfBoundsException e) {
@ -223,75 +190,59 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
}
}
/*
* @see ghidra.util.ObjectStorage#putInts(int[])
*/
@Override
public void putInts(int[] value) {
if (readOnly)
throw new IllegalStateException();
fieldList.add(new BinaryCodedField(value));
}
/*
* @see ghidra.util.ObjectStorage#putBytes(byte[])
*/
@Override
public void putBytes(byte[] value) {
if (readOnly)
throw new IllegalStateException();
fieldList.add(new BinaryCodedField(value));
}
/*
* @see ghidra.util.ObjectStorage#putShorts(short[])
*/
@Override
public void putShorts(short[] value) {
if (readOnly)
throw new IllegalStateException();
fieldList.add(new BinaryCodedField(value));
}
/*
* @see ghidra.util.ObjectStorage#putLongs(long[])
*/
@Override
public void putLongs(long[] value) {
if (readOnly)
throw new IllegalStateException();
fieldList.add(new BinaryCodedField(value));
}
/*
* @see ghidra.util.ObjectStorage#putFloats(float[])
*/
@Override
public void putFloats(float[] value) {
if (readOnly)
throw new IllegalStateException();
fieldList.add(new BinaryCodedField(value));
}
/*
* @see ghidra.util.ObjectStorage#putDoubles(double[])
*/
@Override
public void putDoubles(double[] value) {
if (readOnly)
throw new IllegalStateException();
fieldList.add(new BinaryCodedField(value));
}
/*
* @see ghidra.util.ObjectStorage#putStrings(java.lang.String[])
*/
@Override
public void putStrings(String[] value) {
if (readOnly)
throw new IllegalStateException();
fieldList.add(new BinaryCodedField(value));
}
/*
* @see ghidra.util.ObjectStorage#getInts()
*/
@Override
public int[] getInts() {
try {
BinaryCodedField codedField = new BinaryCodedField((BinaryField)fieldList.get(col++));
BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++));
return codedField.getIntArray();
}
catch (IndexOutOfBoundsException e) {
@ -299,12 +250,10 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
}
}
/*
* @see ghidra.util.ObjectStorage#getBytes()
*/
@Override
public byte[] getBytes() {
try {
BinaryCodedField codedField = new BinaryCodedField((BinaryField)fieldList.get(col++));
BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++));
return codedField.getByteArray();
}
catch (IndexOutOfBoundsException e) {
@ -312,12 +261,10 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
}
}
/*
* @see ghidra.util.ObjectStorage#getShorts()
*/
@Override
public short[] getShorts() {
try {
BinaryCodedField codedField = new BinaryCodedField((BinaryField)fieldList.get(col++));
BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++));
return codedField.getShortArray();
}
catch (IndexOutOfBoundsException e) {
@ -325,12 +272,10 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
}
}
/*
* @see ghidra.util.ObjectStorage#getLongs()
*/
@Override
public long[] getLongs() {
try {
BinaryCodedField codedField = new BinaryCodedField((BinaryField)fieldList.get(col++));
BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++));
return codedField.getLongArray();
}
catch (IndexOutOfBoundsException e) {
@ -338,12 +283,10 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
}
}
/*
* @see ghidra.util.ObjectStorage#getFloats()
*/
@Override
public float[] getFloats() {
try {
BinaryCodedField codedField = new BinaryCodedField((BinaryField)fieldList.get(col++));
BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++));
return codedField.getFloatArray();
}
catch (IndexOutOfBoundsException e) {
@ -351,12 +294,10 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
}
}
/*
* @see ghidra.util.ObjectStorage#getDoubles()
*/
@Override
public double[] getDoubles() {
try {
BinaryCodedField codedField = new BinaryCodedField((BinaryField)fieldList.get(col++));
BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++));
return codedField.getDoubleArray();
}
catch (IndexOutOfBoundsException e) {
@ -364,12 +305,10 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
}
}
/*
* @see ghidra.util.ObjectStorage#getStrings()
*/
@Override
public String[] getStrings() {
try {
BinaryCodedField codedField = new BinaryCodedField((BinaryField)fieldList.get(col++));
BinaryCodedField codedField = new BinaryCodedField((BinaryField) fieldList.get(col++));
return codedField.getStringArray();
}
catch (IndexOutOfBoundsException e) {
@ -383,13 +322,13 @@ public class ObjectStorageAdapterDB implements ObjectStorage {
* @return Schema
*/
public Schema getSchema(int version) {
Class<?>[] fieldClasses = new Class<?>[fieldList.size()];
String[] fieldNames = new String[fieldClasses.length];
for (int i = 0; i < fieldClasses.length; i++) {
fieldClasses[i] = fieldList.get(i).getClass();
Field[] fields = new Field[fieldList.size()];
String[] fieldNames = new String[fields.length];
for (int i = 0; i < fields.length; i++) {
fields[i] = fieldList.get(i).newField();
fieldNames[i] = Integer.toString(i);
}
return new Schema(version, "key", fieldClasses, fieldNames);
return new Schema(version, "key", fields, fieldNames);
}
/**

View file

@ -29,15 +29,15 @@ import ghidra.util.exception.AssertException;
*
*/
public class Record implements Comparable<Record> {
private Field key;
private Field[] fieldValues;
private boolean dirty = false;
private int length = -1;
private boolean isVariableLength;
/**
* Construct a new record.
* The schema is derived from the field values supplied.
@ -48,7 +48,7 @@ public class Record implements Comparable<Record> {
this.key = key;
this.fieldValues = fieldValues;
}
/**
* Set the primary key associated with this record.
* @param key primary key
@ -56,9 +56,9 @@ public class Record implements Comparable<Record> {
public void setKey(long key) {
if (!(this.key instanceof LongField))
throw new AssertException();
this.key = new LongField(key);
this.key = new LongField(key);
}
/**
* Set the primary key associated with this record.
* @param key primary key
@ -66,9 +66,9 @@ public class Record implements Comparable<Record> {
public void setKey(Field key) {
if (!this.key.getClass().equals(key.getClass()))
throw new AssertException();
this.key = key;
this.key = key;
}
/**
* Get the record primary key.
* @return primary key as long value.
@ -76,7 +76,7 @@ public class Record implements Comparable<Record> {
public long getKey() {
return key.getLongValue();
}
/**
* Get the record primary key as a Field object.
* @return primary key as a field object.
@ -84,7 +84,7 @@ public class Record implements Comparable<Record> {
public Field getKeyField() {
return key;
}
/**
* Determine if this record's schema is the same as another record's
* schema. This check factors column count and column field types only.
@ -107,17 +107,19 @@ public class Record implements Comparable<Record> {
/**
* Determine if this record's schema is compatible with the specified schema.
* This check factors column count and column field types only.
* @param schema
* @param schema other schema
* @return true if records schemas are the same
*/
public boolean hasSameSchema(Schema schema) {
if (fieldValues.length != schema.getFieldCount()) {
return false;
}
Class<?>[] schemaFieldClasses = schema.getFieldClasses();
if (!key.isSameType(schema.getKeyFieldType())) {
return false;
}
Field[] otherFields = schema.getFields();
for (int i = 0; i < fieldValues.length; i++) {
if (!fieldValues[i].getClass().equals(schemaFieldClasses[i])) {
if (!fieldValues[i].isSameType(otherFields[i])) {
return false;
}
}
@ -131,7 +133,7 @@ public class Record implements Comparable<Record> {
public int getColumnCount() {
return fieldValues.length;
}
/**
* Get a copy of the specified field value.
* @param columnIndex
@ -139,9 +141,9 @@ public class Record implements Comparable<Record> {
*/
public Field getFieldValue(int columnIndex) {
Field f = fieldValues[columnIndex];
return f.newField(f);
return f.copyField();
}
/**
* Set the field value for the specified field.
* @param colIndex field index
@ -153,7 +155,7 @@ public class Record implements Comparable<Record> {
}
fieldValues[colIndex] = value;
}
/**
* Get the specified field. The object returned must not be
* modified.
@ -163,7 +165,7 @@ public class Record implements Comparable<Record> {
Field getField(int columnIndex) {
return fieldValues[columnIndex];
}
/**
* Get all fields. The objects returned must not be
* modified.
@ -172,7 +174,7 @@ public class Record implements Comparable<Record> {
Field[] getFields() {
return fieldValues;
}
/**
* Determine if the specified field equals the field associated with the
* specified columnIndex.
@ -183,7 +185,7 @@ public class Record implements Comparable<Record> {
public boolean fieldEquals(int columnIndex, Field field) {
return fieldValues[columnIndex].equals(field);
}
/**
* Compare two field values.
* @param columnIndex the field index within this record
@ -195,22 +197,22 @@ public class Record implements Comparable<Record> {
public int compareFieldTo(int columnIndex, Field value) {
return fieldValues[columnIndex].compareTo(value);
}
/**
* Obtain a copy of this record object.
* @return Record
*/
public Record copy() {
Field newKey = key.newField(key);
Field newKey = key.copyField();
Field[] fields = new Field[fieldValues.length];
for (int i = 0; i < fields.length; i++) {
Field f = fieldValues[i];
fields[i] = f.newField(f);
fields[i] = f.copyField();
}
return new Record(newKey, fields);
}
/**
* Get the stored record length.
* This method is used to determine the space required to store the data
@ -228,7 +230,7 @@ public class Record implements Comparable<Record> {
}
return length;
}
/**
* Get the long value for the specified field.
* @param colIndex field index
@ -238,7 +240,7 @@ public class Record implements Comparable<Record> {
public long getLongValue(int colIndex) {
return fieldValues[colIndex].getLongValue();
}
/**
* Set the long value for the specified field.
* @param colIndex field index
@ -249,7 +251,7 @@ public class Record implements Comparable<Record> {
dirty = true;
fieldValues[colIndex].setLongValue(value);
}
/**
* Get the integer value for the specified field.
* @param colIndex field index
@ -270,7 +272,7 @@ public class Record implements Comparable<Record> {
dirty = true;
fieldValues[colIndex].setIntValue(value);
}
/**
* Get the short value for the specified field.
* @param colIndex field index
@ -280,7 +282,7 @@ public class Record implements Comparable<Record> {
public short getShortValue(int colIndex) {
return fieldValues[colIndex].getShortValue();
}
/**
* Set the short value for the specified field.
* @param colIndex field index
@ -291,7 +293,7 @@ public class Record implements Comparable<Record> {
dirty = true;
fieldValues[colIndex].setShortValue(value);
}
/**
* Get the byte value for the specified field.
* @param colIndex field index
@ -301,7 +303,7 @@ public class Record implements Comparable<Record> {
public byte getByteValue(int colIndex) {
return fieldValues[colIndex].getByteValue();
}
/**
* Set the byte value for the specified field.
* @param colIndex field index
@ -322,7 +324,7 @@ public class Record implements Comparable<Record> {
public boolean getBooleanValue(int colIndex) {
return fieldValues[colIndex].getBooleanValue();
}
/**
* Set the boolean value for the specified field.
* @param colIndex field index
@ -333,7 +335,7 @@ public class Record implements Comparable<Record> {
dirty = true;
fieldValues[colIndex].setBooleanValue(value);
}
/**
* Get the binary data array for the specified field.
* @param colIndex field index
@ -343,19 +345,20 @@ public class Record implements Comparable<Record> {
public byte[] getBinaryData(int colIndex) {
return fieldValues[colIndex].getBinaryData();
}
/**
* Set the binary data array for the specified field.
* @param colIndex field index
* @param bytes field value
* @throws IllegalFieldAccessException if field does not support binary data access
* or incorrect number of bytes provided
*/
public void setBinaryData(int colIndex, byte[] bytes) {
dirty = true;
length = -1;
fieldValues[colIndex].setBinaryData(bytes);
}
/**
* Get the string value for the specified field.
* @param colIndex field index
@ -365,7 +368,7 @@ public class Record implements Comparable<Record> {
public String getString(int colIndex) {
return fieldValues[colIndex].getString();
}
/**
* Set the string value for the specified field.
* @param colIndex field index
@ -390,7 +393,7 @@ public class Record implements Comparable<Record> {
}
dirty = false;
}
/**
* Read the record field data from the specified buffer and offset
* @param buf data buffer
@ -403,7 +406,7 @@ public class Record implements Comparable<Record> {
}
dirty = false;
}
/**
* Determine if data fields have been modified since the last write
* occurred.
@ -412,23 +415,24 @@ public class Record implements Comparable<Record> {
public boolean isDirty() {
return dirty;
}
@Override
public int hashCode() {
return key.hashCode();
}
/**
* Compare the content of two Records for equality.
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
public boolean equals(Object obj) {
if (!(obj instanceof Record))
return false;
Record rec = (Record) obj;
return key.equals(rec.key) && Arrays.equals(fieldValues, rec.fieldValues);
}
/**
* Compares the key associated with this record with the
* key of another record (obj).
@ -438,5 +442,10 @@ public class Record implements Comparable<Record> {
public int compareTo(Record otherRec) {
return key.compareTo(otherRec.key);
}
@Override
public String toString() {
return "{key:" + key + "}";
}
}

View file

@ -0,0 +1,42 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
/**
* {@link Table} record leaf nodes within the BTree structure.
*/
public interface RecordNode extends BTreeNode {
/**
* Get the record offset within the node's data buffer
* @param index key/record index
* @return positive record offset within buffer, or a negative bufferID for
* indirect record storage in a dedicated buffer
* @throws IOException if IO error occurs
*/
int getRecordOffset(int index) throws IOException;
/**
* Get the key offset within the node's data buffer
* @param index key/record index
* @return positive record offset within buffer
* @throws IOException if IO error occurs
*/
int getKeyOffset(int index) throws IOException;
}

View file

@ -33,42 +33,42 @@ public class Schema {
private Field keyType;
private String keyName;
private Class<?>[] fieldClasses;
private Field[] fields;
private String[] fieldNames;
private boolean isVariableLength;
private int fixedLength;
private boolean forceUseVariableLengthKeyNodes;
/**
* Construct a new Schema.
* @param version
* @param keyFieldClass Field class associated with primary key. If the
* class is LongField, the long key methods on Table must be used. Specifying any
* other Field class requires the use of the Field key methods on Table.
* @param keyName
* @param fieldClasses
* @param fieldNames
* @param version schema version
* @param keyField field associated with primary key (representative instance)
* @param keyName primary key name
* @param fields array of column fields (representative instances)
* @param fieldNames array of column field names
* @throws IllegalArgumentException invalid parameters
*/
public Schema(int version, Class<? extends Field> keyFieldClass, String keyName,
Class<?>[] fieldClasses, String[] fieldNames) {
public Schema(int version, Field keyField, String keyName, Field[] fields,
String[] fieldNames) {
this.version = version;
this.keyType = getField(keyFieldClass);
this.keyType = keyField;
this.keyName = keyName;
this.fieldClasses = new Class<?>[fieldClasses.length];
this.fields = fields;
this.fieldNames = fieldNames;
if (fieldClasses.length != fieldNames.length)
throw new IllegalArgumentException();
if (fields.length != fieldNames.length)
throw new IllegalArgumentException("fieldNames and fields lengths differ");
isVariableLength = false;
fixedLength = 0;
for (int i = 0; i < fieldClasses.length; i++) {
this.fieldClasses[i] = fieldClasses[i];
Field field = getField(fieldClasses[i]);
for (int colIndex = 0; colIndex < fields.length; colIndex++) {
Field field = fields[colIndex];
if (field.isVariableLength()) {
isVariableLength = true;
}
fixedLength += field.length();
if (fieldNames[i].indexOf(NAME_SEPARATOR) >= 0)
throw new IllegalArgumentException();
if (fieldNames[colIndex].indexOf(NAME_SEPARATOR) >= 0)
throw new IllegalArgumentException("field names may not contain ';'");
}
if (isVariableLength) {
fixedLength = 0;
@ -76,46 +76,96 @@ public class Schema {
}
/**
* Construct a new Schema which uses a long key. The Field key methods on Table
* should not be used.
* @param version
* @param keyName
* @param fieldClasses
* @param fieldNames
* Construct a new Schema which uses a long key.
* @param version schema version
* @param keyName primary key name
* @param fields array of column fields (representative instances)
* @param fieldNames array of column field names
* @throws IllegalArgumentException invalid parameters
*/
public Schema(int version, String keyName, Field[] fields, String[] fieldNames) {
this(version, LongField.INSTANCE, keyName, fields, fieldNames);
}
/**
* Construct a new Schema.
* @param version schema version
* @param keyClass field class associated with primary key
* @param keyName primary key name
* @param fieldClasses array of column field classes
* @param fieldNames array of column field names
* @throws IllegalArgumentException invalid parameters
*/
public Schema(int version, Class<?> keyClass, String keyName, Class<?>[] fieldClasses,
String[] fieldNames) {
this(version, getField(keyClass), keyName, getFields(fieldClasses), fieldNames);
}
/**
* Construct a new Schema which uses a long key.
* @param version schema version
* @param keyName primary key name
* @param fieldClasses array of column field classes
* @param fieldNames array of column field names
* @throws IllegalArgumentException invalid parameters
*/
public Schema(int version, String keyName, Class<?>[] fieldClasses, String[] fieldNames) {
this(version, LongField.class, keyName, fieldClasses, fieldNames);
this(version, LongField.INSTANCE, keyName, getFields(fieldClasses), fieldNames);
}
/**
* Construct a new Schema with the given number of columns
* @param version
* @param fieldTypes
* @param version schema version
* @param encodedKeyFieldType key field type
* @param encodedFieldTypes encoded field types array.
* @param packedFieldNames packed list of field names separated by ';'.
* The first field name corresponds to the key name.
* @throws UnsupportedFieldException if unsupported fieldType specified
*/
Schema(int version, byte keyFieldType, byte[] fieldTypes, String packedFieldNames)
Schema(int version, byte encodedKeyFieldType, byte[] encodedFieldTypes, String packedFieldNames)
throws UnsupportedFieldException {
this.version = version;
this.keyType = Field.getField(keyFieldType);
this.keyType = Field.getField(encodedKeyFieldType);
parseNames(packedFieldNames);
if (fieldTypes.length != fieldNames.length)
throw new IllegalArgumentException();
this.fieldClasses = new Class[fieldTypes.length];
isVariableLength = false;
fixedLength = 0;
for (int i = 0; i < fieldTypes.length; i++) {
Field field = Field.getField(fieldTypes[i]);
fieldClasses[i] = field.getClass();
if (field.isVariableLength()) {
fields = new Field[encodedFieldTypes.length];
for (int i = 0; i < encodedFieldTypes.length; i++) {
byte b = encodedFieldTypes[i];
Field f = Field.getField(b);
fields[i] = f;
if (f.isVariableLength()) {
isVariableLength = true;
}
fixedLength += field.length();
fixedLength += f.length();
}
if (isVariableLength) {
fixedLength = 0;
}
if (fieldNames.length != encodedFieldTypes.length) {
throw new IllegalArgumentException("fieldNames and column types differ in length");
}
}
private static Field getField(Class<?> fieldClass) {
if (!Field.class.isAssignableFrom(fieldClass) || fieldClass == Field.class ||
IndexField.class.isAssignableFrom(fieldClass)) {
throw new IllegalArgumentException("Invalid Field class: " + fieldClass.getName());
}
try {
return (Field) fieldClass.getConstructor().newInstance();
}
catch (Exception e) {
throw new RuntimeException("Failed to construct: " + fieldClass.getName(), e);
}
}
private static Field[] getFields(Class<?>[] fieldClasses) {
Field[] fields = new Field[fieldClasses.length];
for (int i = 0; i < fieldClasses.length; i++) {
fields[i] = getField(fieldClasses[i]);
}
return fields;
}
/**
@ -123,22 +173,43 @@ public class Schema {
* @return true if LongKeyNode's can be used to store records produced with this schema.
*/
boolean useLongKeyNodes() {
return keyType instanceof LongField;
return !forceUseVariableLengthKeyNodes && keyType instanceof LongField;
}
/**
* Get the key Field class
* @return key Field classes
* Determine if this schema uses VarKeyNode's within a table.
* @return true if VarKeyNode's are used to store records produced with this schema.
*/
public Class<? extends Field> getKeyFieldClass() {
return keyType.getClass();
boolean useVariableKeyNodes() {
return forceUseVariableLengthKeyNodes || keyType.isVariableLength();
}
/**
* Determine if this schema can use FixedKeyNode's within a table.
* @return true if FixedKeyNode's can be used to store records produced with this schema.
*/
boolean useFixedKeyNodes() {
return !useVariableKeyNodes() && !useLongKeyNodes();
}
/**
* Force use of variable-length key nodes.
* <br>
* This method provides a work-around for legacy schemas which
* employ primitive fixed-length keys other than LongField
* and improperly employ a variable-length-key storage schema.
* Although rare, this may be necessary to ensure backward compatibility
* with legacy DB storage (example ByteField key employed by old table).
*/
void forceUseOfVariableLengthKeyNodes() {
forceUseVariableLengthKeyNodes = true;
}
/**
* Get the Field type for the key.
* @return key Field type
*/
Field getKeyFieldType() {
public Field getKeyFieldType() {
return keyType;
}
@ -155,8 +226,8 @@ public class Schema {
* The returned list is ordered consistent with the schema definition.
* @return data Field classes
*/
public Class<?>[] getFieldClasses() {
return fieldClasses;
public Field[] getFields() {
return fields;
}
/**
@ -173,7 +244,7 @@ public class Schema {
* @return data Field count
*/
public int getFieldCount() {
return fieldClasses.length;
return fields.length;
}
/**
@ -207,16 +278,21 @@ public class Schema {
return buf.toString();
}
byte getEncodedKeyFieldType() {
return keyType.getFieldType();
}
/**
* Get the schema field types as a byte array.
* @return byte[] field type list
* Get the schema field types as an encoded byte array.
* @return byte[] field type list as an encoded byte array.
*/
byte[] getFieldTypes() {
byte[] fieldTypes = new byte[fieldClasses.length];
for (int i = 0; i < fieldClasses.length; i++) {
fieldTypes[i] = getField(fieldClasses[i]).getFieldType();
byte[] getEncodedFieldTypes() {
byte[] encodedFieldTypes = new byte[fields.length];
for (int colIndex = 0; colIndex < fields.length; colIndex++) {
encodedFieldTypes[colIndex] = fields[colIndex].getFieldType();
}
return fieldTypes;
return encodedFieldTypes;
}
/**
@ -245,8 +321,8 @@ public class Schema {
/**
* Create an empty record for the specified key.
* @param key
* @return Record
* @param key long key
* @return new record
*/
public Record createRecord(long key) {
return createRecord(new LongField(key));
@ -254,21 +330,20 @@ public class Schema {
/**
* Create an empty record for the specified key.
* @param key
* @param key record key field
* @return new record
*/
public Record createRecord(Field key) {
if (!getKeyFieldClass().equals(key.getClass())) {
throw new IllegalArgumentException(
"expected key field type of " + keyType.getClass().getSimpleName());
if (!keyType.isSameType(key)) {
throw new IllegalArgumentException("key differs from schema key type");
}
Field[] fieldValues = new Field[fieldClasses.length];
for (int i = 0; i < fieldClasses.length; i++) {
Field[] fieldValues = new Field[fields.length];
for (int colIndex = 0; colIndex < fields.length; colIndex++) {
try {
fieldValues[i] = (Field) fieldClasses[i].newInstance();
fieldValues[colIndex] = fields[colIndex].newField();
}
catch (Exception e) {
throw new AssertException();
throw new AssertException(e);
}
}
return new Record(key, fieldValues);
@ -281,48 +356,13 @@ public class Schema {
*/
Field getField(int colIndex) {
try {
return (Field) fieldClasses[colIndex].newInstance();
return fields[colIndex].newField();
}
catch (Exception e) {
throw new AssertException(e.getMessage());
}
}
/**
* Get a new instance of a data Field object for the specified Field class.
* @param fieldClass Field implementation class
* @return new Field object suitable for data reading/writing.
*/
private Field getField(Class<?> fieldClass) {
try {
return (Field) fieldClass.newInstance();
}
catch (Exception e) {
throw new AssertException(e.getMessage());
}
}
/**
* Compare two schemas for equality.
* Field names are ignored in this comparison.
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (!(obj instanceof Schema))
return false;
Schema otherSchema = (Schema) obj;
if (version != otherSchema.version ||
!keyType.getClass().equals(otherSchema.keyType.getClass()) ||
fieldClasses.length != otherSchema.fieldClasses.length)
return false;
for (int i = 0; i < fieldClasses.length; i++) {
if (!fieldClasses[i].getClass().equals(otherSchema.fieldClasses[i].getClass()))
return false;
}
return true;
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder();
@ -334,7 +374,7 @@ public class Schema {
buf.append("\n");
buf.append(fieldNames[i]);
buf.append("(");
buf.append(fieldClasses[i].getSimpleName());
buf.append(fields[i].getClass().getSimpleName());
buf.append(")");
}
return buf.toString();

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,15 +15,30 @@
*/
package db;
import ghidra.util.exception.AssertException;
import java.io.IOException;
import db.buffers.DataBuffer;
/**
* <code>ShortField</code> provides a wrapper for 2-byte signed short data
* which is read or written to a Record.
*/
public class ShortField extends Field {
public final class ShortField extends Field {
/**
* Minimum short field value
*/
public static final ShortField MIN_VALUE = new ShortField(Short.MIN_VALUE, true);
/**
* Maximum short field value
*/
public static final ShortField MAX_VALUE = new ShortField(Short.MAX_VALUE, true);
/**
* Instance intended for defining a {@link Table} {@link Schema}
*/
public static final ShortField INSTANCE = MIN_VALUE;
private short value;
@ -39,69 +53,57 @@ public class ShortField extends Field {
* @param s initial value
*/
public ShortField(short s) {
this(s, false);
}
/**
* Construct a short field with an initial value of s.
* @param s initial value
* @param immutable true if field value is immutable
*/
ShortField(short s, boolean immutable) {
super(immutable);
value = s;
}
/*
* @see ghidra.framework.store.db.Field#getShortValue()
*/
@Override
public short getShortValue() {
return value;
}
/*
* @see ghidra.framework.store.db.Field#setShortValue(short)
*/
@Override
public void setShortValue(short value) {
checkImmutable();
this.value = value;
}
/*
* @see ghidra.framework.store.db.Field#length()
*/
@Override
int length() {
return 2;
}
/*
* @see ghidra.framework.store.db.Field#write(ghidra.framework.store.Buffer, int)
*/
@Override
int write(Buffer buf, int offset) throws IOException {
return buf.putShort(offset, value);
}
/*
* @see ghidra.framework.store.db.Field#read(ghidra.framework.store.Buffer, int)
*/
@Override
int read(Buffer buf, int offset) throws IOException {
checkImmutable();
value = buf.getShort(offset);
return offset + 2;
}
/*
* @see ghidra.framework.store.db.Field#readLength(ghidra.framework.store.Buffer, int)
*/
@Override
int readLength(Buffer buf, int offset) throws IOException {
return 2;
}
/*
* @see ghidra.framework.store.db.Field#getFieldType()
*/
@Override
protected byte getFieldType() {
byte getFieldType() {
return SHORT_TYPE;
}
/*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return "ShortField: " + Short.toString(value);
@ -109,12 +111,9 @@ public class ShortField extends Field {
@Override
public String getValueAsString() {
return Integer.toHexString(value);
return "0x" + Integer.toHexString(value);
}
/*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (obj == null || !(obj instanceof ShortField))
@ -122,9 +121,6 @@ public class ShortField extends Field {
return ((ShortField) obj).value == value;
}
/*
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override
public int compareTo(Field o) {
ShortField f = (ShortField) o;
@ -135,54 +131,63 @@ public class ShortField extends Field {
return 1;
}
/*
* @see ghidra.framework.store.db.Field#newField(ghidra.framework.store.db.Field)
*/
@Override
public Field newField(Field fieldValue) {
if (fieldValue.isVariableLength())
throw new AssertException();
return new ShortField((short) fieldValue.getLongValue());
int compareTo(DataBuffer buffer, int offset) {
short otherValue = buffer.getShort(offset);
if (value == otherValue)
return 0;
else if (value < otherValue)
return -1;
return 1;
}
/*
* @see ghidra.framework.store.db.Field#newField()
*/
@Override
public Field newField() {
public ShortField copyField() {
return new ShortField((short) getLongValue());
}
@Override
public ShortField newField() {
return new ShortField();
}
/*
* @see ghidra.framework.store.db.Field#getLongValue()
*/
@Override
public long getLongValue() {
return value;
}
/*
* @see ghidra.framework.store.db.Field#setLongValue(long)
*/
@Override
public void setLongValue(long value) {
this.value = (short) value;
setShortValue((short) value);
}
/*
* @see ghidra.framework.store.db.Field#getBinaryData()
*/
@Override
public byte[] getBinaryData() {
return new byte[] { (byte) (value >> 8), (byte) value };
}
/*
* @see java.lang.Object#hashCode()
*/
@Override
public void setBinaryData(byte[] bytes) {
checkImmutable();
if (bytes.length != 2) {
throw new IllegalFieldAccessException();
}
value = (short) (((bytes[0] & 0xff) << 8) | (bytes[1] & 0xff));
}
@Override
public int hashCode() {
return value;
}
@Override
ShortField getMinValue() {
return MIN_VALUE;
}
@Override
ShortField getMaxValue() {
return MAX_VALUE;
}
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,16 +15,22 @@
*/
package db;
import ghidra.util.exception.AssertException;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import db.buffers.DataBuffer;
import ghidra.util.exception.AssertException;
/**
* <code>StringField</code> provides a wrapper for variable length String data which is read or
* written to a Record. Strings are always encoded as UTF-8.
*/
public class StringField extends Field {
public final class StringField extends Field {
/**
* Instance intended for defining a {@link Table} {@link Schema}
*/
public static final StringField INSTANCE = new StringField(null, true);
private static String ENCODING = "UTF-8";
@ -40,45 +45,48 @@ public class StringField extends Field {
/**
* Construct a String field with an initial value of s.
* @param s initial value
* @param str initial string value or null
*/
public StringField(String s) {
setString(s);
public StringField(String str) {
this(str, false);
}
/*
* @see ghidra.framework.store.db.Field#getString()
/**
* Construct a String field with an initial value of s.
* @param str initial string value or null
* @param immutable true if field value is immutable
*/
StringField(String str, boolean immutable) {
super(immutable);
doSetString(str);
}
@Override
public String getString() {
return str;
}
/*
* @see ghidra.framework.store.db.Field#setString(java.lang.String)
*/
@Override
public void setString(String str) {
checkImmutable();
doSetString(str);
}
private void doSetString(String str) {
this.str = str;
try {
bytes = (str != null ? str.getBytes(ENCODING) : null);
}
catch (UnsupportedEncodingException e) {
throw new AssertException();
throw new AssertException(e);
}
}
/*
* @see ghidra.framework.store.db.Field#length()
*/
@Override
int length() {
return (bytes == null) ? 4 : (bytes.length + 4);
}
/*
* @see ghidra.framework.store.db.Field#write(ghidra.framework.store.Buffer, int)
*/
@Override
int write(Buffer buf, int offset) throws IOException {
if (bytes == null) {
@ -88,11 +96,9 @@ public class StringField extends Field {
return buf.put(offset, bytes);
}
/*
* @see ghidra.framework.store.db.Field#read(ghidra.framework.store.Buffer, int)
*/
@Override
int read(Buffer buf, int offset) throws IOException {
checkImmutable();
int len = buf.getInt(offset);
offset += 4;
if (len < 0) {
@ -107,34 +113,22 @@ public class StringField extends Field {
return offset;
}
/*
* @see ghidra.framework.store.db.Field#readLength(ghidra.framework.store.Buffer, int)
*/
@Override
int readLength(Buffer buf, int offset) throws IOException {
int len = buf.getInt(offset);
return (len < 0 ? 0 : len) + 4;
}
/*
* @see ghidra.framework.store.db.Field#isVariableLength()
*/
@Override
public boolean isVariableLength() {
return true;
}
/*
* @see ghidra.framework.store.db.Field#getFieldType()
*/
@Override
protected byte getFieldType() {
byte getFieldType() {
return STRING_TYPE;
}
/*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
return "StringField: " + str;
@ -168,9 +162,6 @@ public class StringField extends Field {
// return value;
// }
/*
* @see java.lang.Object#equals(java.lang.Object)
*/
@Override
public boolean equals(Object obj) {
if (obj == null || !(obj instanceof StringField))
@ -182,19 +173,14 @@ public class StringField extends Field {
return str.equals(f.str);
}
/*
* @see ghidra.framework.store.db.Field#getBinaryData()
*/
@Override
public byte[] getBinaryData() {
return bytes;
}
/*
* @see ghidra.framework.store.db.Field#setBinaryData(byte[])
*/
@Override
public void setBinaryData(byte[] bytes) {
checkImmutable();
if (bytes == null) {
str = null;
}
@ -204,14 +190,11 @@ public class StringField extends Field {
str = new String(bytes, ENCODING);
}
catch (UnsupportedEncodingException e) {
throw new AssertException();
throw new AssertException(e);
}
}
}
/*
* @see ghidra.framework.store.db.Field#truncate(int)
*/
@Override
void truncate(int length) {
int maxLen = length - 4;
@ -220,9 +203,6 @@ public class StringField extends Field {
}
}
/*
* @see java.lang.Comparable#compareTo(java.lang.Object)
*/
@Override
public int compareTo(Field o) {
StringField f = (StringField) o;
@ -237,36 +217,41 @@ public class StringField extends Field {
return str.compareTo(f.str);
}
/*
* @see ghidra.framework.store.db.Field#newField(ghidra.framework.store.db.Field)
*/
@Override
public Field newField(Field fieldValue) {
if (fieldValue instanceof StringField) {
return new StringField(fieldValue.getString());
}
int compareTo(DataBuffer buffer, int offset) {
StringField f = new StringField();
try {
return new StringField(new String(fieldValue.getBinaryData(), ENCODING));
f.read(buffer, offset);
}
catch (UnsupportedEncodingException e) {
catch (IOException e) {
throw new AssertException(e); // DataBuffer does not throw IOException
}
throw new AssertException();
return compareTo(f);
}
/*
* @see ghidra.framework.store.db.Field#newField()
*/
@Override
public Field newField() {
public StringField copyField() {
return new StringField(str);
}
@Override
public StringField newField() {
return new StringField();
}
/*
* @see java.lang.Object#hashCode()
*/
@Override
public int hashCode() {
return str.hashCode();
}
@Override
StringField getMinValue() {
throw new UnsupportedOperationException();
}
@Override
StringField getMaxValue() {
throw new UnsupportedOperationException();
}
}

File diff suppressed because it is too large Load diff

View file

@ -15,6 +15,8 @@
*/
package db;
import java.io.IOException;
import db.Field.UnsupportedFieldException;
/**
@ -33,24 +35,27 @@ class TableRecord implements Comparable<TableRecord> {
private static final int MAX_KEY_COLUMN = 7;
private static final int RECORD_COUNT_COLUMN = 8;
private static Class<?>[] fieldClasses = { StringField.class, // name of table
IntField.class, // Schema version
IntField.class, // Root buffer ID (first buffer)
ByteField.class, // Key field type
BinaryField.class, // Schema field types
StringField.class, // Schema key/field names
IntField.class, // indexing column (-1 = primary)
LongField.class, // max primary key value ever used
IntField.class // number of records
//@formatter:off
private static Field[] fields = {
StringField.INSTANCE, // name of table
IntField.INSTANCE, // Schema version
IntField.INSTANCE, // Root buffer ID (first buffer)
ByteField.INSTANCE, // Key field type
BinaryField.INSTANCE, // Schema field types
StringField.INSTANCE, // Schema key/field names
IntField.INSTANCE, // indexing column (-1 = primary)
LongField.INSTANCE, // max primary key value ever used
IntField.INSTANCE // number of records
};
//@formatter:on
private static String[] tableRecordFieldNames = { "TableName", "SchemaVersion", "RootBufferId",
"KeyType", "FieldTypes", "FieldNames", "IndexColumn", "MaxKey", "RecordCount" };
private static Schema schema = new Schema(0, "TableNum", fieldClasses, tableRecordFieldNames);
private static Schema schema = new Schema(0, "TableNum", fields, tableRecordFieldNames);
private Record record;
private Schema tableSchema;
private Table table;
/**
@ -61,10 +66,11 @@ class TableRecord implements Comparable<TableRecord> {
* @param indexedColumn primary table index key column, or -1 for primary table
*/
TableRecord(long tableNum, String name, Schema tableSchema, int indexedColumn) {
this.tableSchema = tableSchema;
record = schema.createRecord(tableNum);
record.setString(NAME_COLUMN, name);
record.setByteValue(KEY_TYPE_COLUMN, tableSchema.getKeyFieldType().getFieldType());
record.setBinaryData(FIELD_TYPES_COLUMN, tableSchema.getFieldTypes());
record.setByteValue(KEY_TYPE_COLUMN, tableSchema.getEncodedKeyFieldType());
record.setBinaryData(FIELD_TYPES_COLUMN, tableSchema.getEncodedFieldTypes());
record.setString(FIELD_NAMES_COLUMN, tableSchema.getPackedFieldNames());
record.setIntValue(VERSION_COLUMN, tableSchema.getVersion());
record.setIntValue(COLUMN_INDEXED_COLUMN, indexedColumn);
@ -75,9 +81,13 @@ class TableRecord implements Comparable<TableRecord> {
/**
* Construct an existing master table storage record.
* @param dbh database handle
* @param record master table storage record.
* @throws UnsupportedFieldException stored schema contains unsupported field
* @throws IOException if IO error occurs
*/
TableRecord(Record record) {
TableRecord(DBHandle dbh, Record record) throws IOException {
this.tableSchema = parseSchema(dbh, record);
this.record = record;
}
@ -100,9 +110,13 @@ class TableRecord implements Comparable<TableRecord> {
/**
* Set the storage record for this instance.
* Data is refreshed from the record provided.
* @param dbh database handle
* @param record master table storage record.
* @throws UnsupportedFieldException stored schema contains unsupported field
* @throws IOException if IO error occurs
*/
void setRecord(Record record) {
void setRecord(DBHandle dbh, Record record) throws IOException {
this.tableSchema = parseSchema(dbh, record);
this.record = record;
if (table != null) {
table.tableRecordChanged();
@ -120,6 +134,7 @@ class TableRecord implements Comparable<TableRecord> {
table = null;
}
this.record = null;
this.tableSchema = null;
}
/**
@ -140,20 +155,62 @@ class TableRecord implements Comparable<TableRecord> {
/**
* Set the table name
* @param name
* @param name table name
*/
void setName(String name) {
record.setString(NAME_COLUMN, name);
}
/**
*
* @param dbh database handle
* @param record record which defines table schema
* @return table schema
* @throws UnsupportedFieldException stored schema contains unsupported field
* @throws IOException if IO error occurs
*/
private static Schema parseSchema(DBHandle dbh, Record record) throws IOException {
Schema tableSchema =
new Schema(record.getIntValue(VERSION_COLUMN), record.getByteValue(KEY_TYPE_COLUMN),
record.getBinaryData(FIELD_TYPES_COLUMN), record.getString(FIELD_NAMES_COLUMN));
forceUseOfVariableLengthKeyNodesIfNeeded(dbh, tableSchema,
record.getIntValue(BUFFER_ID_COLUMN));
return tableSchema;
}
/**
* Determine if legacy schema should be forced to use {@link VarKeyNode}
* table storage for compatibility. Root buffer node for applicable
* primitive fixed-length key types will be checked.
* @param dbh database handle
* @param tableSchema table schema to be checked
* @param rootBufferId table root buffer ID
* @throws IOException if IO error occurs
*/
private static void forceUseOfVariableLengthKeyNodesIfNeeded(DBHandle dbh, Schema tableSchema,
int rootBufferId) throws IOException {
if (rootBufferId < 0) {
return;
}
Field keyType = tableSchema.getKeyFieldType();
if (keyType.isVariableLength()) {
return;
}
if (keyType instanceof LongField || keyType instanceof IndexField ||
keyType instanceof FixedField) {
return;
}
if (NodeMgr.isVarKeyNode(dbh.getBufferMgr(), rootBufferId)) {
tableSchema.forceUseOfVariableLengthKeyNodes();
}
}
/**
* Get the table schema
* @return table schema
* @throws UnsupportedFieldException if unsupported schema field encountered
*/
Schema getSchema() throws UnsupportedFieldException {
return new Schema(record.getIntValue(VERSION_COLUMN), record.getByteValue(KEY_TYPE_COLUMN),
record.getBinaryData(FIELD_TYPES_COLUMN), record.getString(FIELD_NAMES_COLUMN));
Schema getSchema() {
return tableSchema;
}
/**

View file

@ -1,323 +0,0 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import java.io.IOException;
/**
* The <code>VarIndexTable</code> provides a secondary index on a variable-length table column
* (e.g., StringField). For each unique secondary index value, an IndexBuffer is
* stored within an underlying index table record. The secondary index value is used as the long
* key to access this record. Within a single IndexBuffer is stored all primary keys which
* correspond to an index value.
*/
class VarIndexTable extends IndexTable {
private static final Class<?>[] fieldClasses = { BinaryField.class, // index data
};
private static final String[] fieldNames = { "IndexBuffer" };
private Schema indexSchema;
/**
* Construct a new secondary index which is based upon a field within the
* primary table specified by name.
* @param primaryTable primary table.
* @param colIndex identifies the indexed column within the primary table.
* @throws IOException thrown if an IO error occurs
*/
VarIndexTable(Table primaryTable, int colIndex) throws IOException {
this(primaryTable,
primaryTable.getDBHandle().getMasterTable().createTableRecord(primaryTable.getName(),
new Schema(0, primaryTable.getSchema().getField(colIndex).getClass(), "IndexKey",
fieldClasses, fieldNames),
colIndex));
}
/**
* Construct a new or existing secondary index. An existing index must have
* its root ID specified within the tableRecord.
* @param primaryTable primary table.
* @param indexTableRecord specifies the index parameters.
* @throws IOException thrown if an IO error occurs
*/
VarIndexTable(Table primaryTable, TableRecord indexTableRecord) throws IOException {
super(primaryTable, indexTableRecord);
this.indexSchema = indexTable.getSchema();
}
/**
* Find all primary keys which correspond to the specified indexed field
* value.
* @param indexValue the field value to search for.
* @return list of primary keys
* @throws IOException thrown if an IO error occurs
*/
@Override
long[] findPrimaryKeys(Field indexValue) throws IOException {
if (!indexValue.getClass().equals(fieldType.getClass())) {
throw new IllegalArgumentException("Incorrect indexed field type");
}
Record indexRecord = indexTable.getRecord(indexValue);
if (indexRecord == null) {
return emptyKeyArray;
}
IndexBuffer indexBuffer = new IndexBuffer(indexValue, indexRecord.getBinaryData(0));
return indexBuffer.getPrimaryKeys();
}
/**
* Get the number of primary keys which correspond to the specified indexed field
* value.
* @param indexValue the field value to search for.
* @return key count
* @throws IOException thrown if an IO error occurs
*/
@Override
int getKeyCount(Field indexValue) throws IOException {
if (!indexValue.getClass().equals(fieldType.getClass())) {
throw new IllegalArgumentException("Incorrect indexed field type");
}
Record indexRecord = indexTable.getRecord(indexValue);
if (indexRecord == null) {
return 0;
}
IndexBuffer indexBuffer = new IndexBuffer(indexValue, indexRecord.getBinaryData(0));
return indexBuffer.keyCount;
}
/*
* @see ghidra.framework.store.db.IndexTable#addEntry(ghidra.framework.store.db.Record)
*/
@Override
void addEntry(Record record) throws IOException {
Field indexField = record.getField(colIndex);
Record indexRecord = indexTable.getRecord(indexField);
if (indexRecord == null) {
indexRecord = indexSchema.createRecord(indexField);
}
IndexBuffer indexBuffer = new IndexBuffer(indexField, indexRecord.getBinaryData(0));
indexBuffer.addEntry(record.getKey());
indexRecord.setBinaryData(0, indexBuffer.getData());
indexTable.putRecord(indexRecord);
}
/*
* @see ghidra.framework.store.db.IndexTable#deleteEntry(ghidra.framework.store.db.Record)
*/
@Override
void deleteEntry(Record record) throws IOException {
Field indexField = record.getField(colIndex);
Record indexRecord = indexTable.getRecord(indexField);
if (indexRecord != null) {
IndexBuffer indexBuffer = new IndexBuffer(indexField, indexRecord.getBinaryData(0));
indexBuffer.deleteEntry(record.getKey());
byte[] data = indexBuffer.getData();
if (data == null) {
indexTable.deleteRecord(indexField);
}
else {
indexRecord.setBinaryData(0, data);
indexTable.putRecord(indexRecord);
}
}
}
/**
* Get the index buffer associated with the specified index key
* @param indexKey index key
* @return index buffer or null if not found
* @throws IOException thrown if IO error occurs
*/
private IndexBuffer getIndexBuffer(Field indexKey) throws IOException {
Record indexRec = indexTable.getRecord(indexKey);
return indexRec != null ? new IndexBuffer(indexKey, indexRec.getBinaryData(0)) : null;
}
/*
* @see ghidra.framework.store.db.IndexTable#indexIterator()
*/
@Override
DBFieldIterator indexIterator() throws IOException {
return new IndexVarFieldIterator();
}
/*
* @see ghidra.framework.store.db.IndexTable#indexIterator(ghidra.framework.store.db.Field, ghidra.framework.store.db.Field, boolean)
*/
@Override
DBFieldIterator indexIterator(Field minField, Field maxField, boolean before)
throws IOException {
return new IndexVarFieldIterator(minField, maxField, before);
}
/*
* @see db.IndexTable#indexIterator(db.Field, db.Field, db.Field, boolean)
*/
@Override
DBFieldIterator indexIterator(Field minField, Field maxField, Field startField, boolean before)
throws IOException {
return new IndexVarFieldIterator(minField, maxField, startField, before);
}
/**
* Iterates over index field values within a specified range.
*/
class IndexVarFieldIterator implements DBFieldIterator {
private Field lastKey;
private Field keyField;
private DBFieldIterator indexIterator;
private boolean hasNext = false;
private boolean hasPrev = false;
/**
* Construct an index field iterator starting with the minimum index value.
*/
IndexVarFieldIterator() throws IOException {
this(null, null, true);
}
/**
* Construct an index field iterator. The iterator is positioned at index
* value identified by startValue.
* @param minValue minimum index value. Null corresponds to minimum indexed value.
* @param maxValue maximum index value. Null corresponds to maximum indexed value.
* @param before if true initial position is before minValue, else position
* is after maxValue.
* @throws IOException
*/
IndexVarFieldIterator(Field minValue, Field maxValue, boolean before) throws IOException {
indexIterator = indexTable.fieldKeyIterator(minValue, maxValue, before);
if (indexIterator.hasNext()) {
indexIterator.next();
if (before) {
indexIterator.previous();
}
}
}
/**
* Construct an index field iterator. The iterator is positioned at index
* value identified by startValue.
* @param minValue minimum index value. Null corresponds to minimum indexed value.
* @param maxValue maximum index value. Null corresponds to maximum indexed value.
* @param startValue identify initial position by value
* @param before if true initial position is before minValue, else position
* is after maxValue.
* @throws IOException
*/
IndexVarFieldIterator(Field minValue, Field maxValue, Field startValue, boolean before)
throws IOException {
if (startValue == null) {
throw new IllegalArgumentException("starting index value required");
}
indexIterator = indexTable.fieldKeyIterator(minValue, maxValue, startValue);
if (indexIterator.hasNext()) {
Field f = indexIterator.next();
if (before || !f.equals(startValue)) {
indexIterator.previous();
}
}
}
@Override
public boolean hasNext() throws IOException {
if (hasNext) {
return true;
}
Field key = indexIterator.next();
if (key == null) {
return false;
}
keyField = key;
hasNext = true;
hasPrev = false;
return true;
}
@Override
public boolean hasPrevious() throws IOException {
if (hasPrev) {
return true;
}
Field key = indexIterator.previous();
if (key == null) {
return false;
}
keyField = key;
hasNext = false;
hasPrev = true;
return true;
}
@Override
public Field next() throws IOException {
if (hasNext || hasNext()) {
hasNext = false;
hasPrev = true;
lastKey = keyField;
return keyField;
}
return null;
}
@Override
public Field previous() throws IOException {
if (hasPrev || hasPrevious()) {
hasNext = true;
hasPrev = false;
lastKey = keyField;
return keyField;
}
return null;
}
/**
* Delete all primary records which have the current
* index value (lastKey).
* @see db.DBFieldIterator#delete()
*/
@Override
public boolean delete() throws IOException {
if (lastKey == null) {
return false;
}
synchronized (db) {
IndexBuffer indexBuf = getIndexBuffer(lastKey);
if (indexBuf != null) {
long[] keys = indexBuf.getPrimaryKeys();
for (long key : keys) {
primaryTable.deleteRecord(key);
}
// The following does not actually delete the index record since it
// should already have been removed with the removal of all associated
// primary records. Invoking this method allows the iterator to
// recover from the index table change.
// indexIterator.delete();
}
lastKey = null;
return true;
}
}
}
}

View file

@ -29,9 +29,9 @@ import ghidra.util.task.TaskMonitor;
* has the following layout within a single DataBuffer (field size in bytes):
*
* | NodeType(1) | KeyType(1) | KeyCount(4) | KeyOffset0(4) | ID0(4) | ... | KeyOffsetN(4) | IDN(4) |
* ...<FreeSpace>... | KeyN | ... | Key0 |
* ...&lt;FreeSpace&gt;... | KeyN | ... | Key0 |
*/
class VarKeyInteriorNode extends VarKeyNode {
class VarKeyInteriorNode extends VarKeyNode implements FieldKeyInteriorNode {
private static final int BASE = VARKEY_NODE_HEADER_SIZE;
@ -83,7 +83,7 @@ class VarKeyInteriorNode extends VarKeyNode {
void logConsistencyError(String tableName, String msg, Throwable t) throws IOException {
Msg.debug(this, "Consistency Error (" + tableName + "): " + msg);
Msg.debug(this, " parent.key[0]=" + getKey(0) + " bufferID=" + getBufferId());
Msg.debug(this, " parent.key[0]=" + getKeyField(0) + " bufferID=" + getBufferId());
if (t != null) {
Msg.error(this, "Consistency Error (" + tableName + ")", t);
}
@ -98,26 +98,24 @@ class VarKeyInteriorNode extends VarKeyNode {
for (int i = 0; i < keyCount; i++) {
// Compare each key entry with the previous entries key-range
Field key = getKey(i);
if (i != 0) {
if (key.compareTo(lastMinKey) <= 0) {
consistent = false;
logConsistencyError(tableName,
"child[" + i + "].minKey <= child[" + (i - 1) + "].minKey", null);
Msg.debug(this,
" child[" + i + "].minKey = " + key + " bufferID=" + getBufferId(i));
Msg.debug(this, " child[" + (i - 1) + "].minKey = " + lastMinKey +
" bufferID=" + getBufferId(i - 1));
}
else if (key.compareTo(lastMaxKey) <= 0) {
consistent = false;
logConsistencyError(tableName,
"child[" + i + "].minKey <= child[" + (i - 1) + "].maxKey", null);
Msg.debug(this,
" child[" + i + "].minKey = " + key + " bufferID=" + getBufferId(i));
Msg.debug(this, " child[" + (i - 1) + "].maxKey = " + lastMaxKey +
" bufferID=" + getBufferId(i - 1));
}
Field key = getKeyField(i);
if (lastMinKey != null && key.compareTo(lastMinKey) <= 0) {
consistent = false;
logConsistencyError(tableName,
"child[" + i + "].minKey <= child[" + (i - 1) + "].minKey", null);
Msg.debug(this,
" child[" + i + "].minKey = " + key + " bufferID=" + getBufferId(i));
Msg.debug(this, " child[" + (i - 1) + "].minKey = " + lastMinKey + " bufferID=" +
getBufferId(i - 1));
}
else if (lastMaxKey != null && key.compareTo(lastMaxKey) <= 0) {
consistent = false;
logConsistencyError(tableName,
"child[" + i + "].minKey <= child[" + (i - 1) + "].maxKey", null);
Msg.debug(this,
" child[" + i + "].minKey = " + key + " bufferID=" + getBufferId(i));
Msg.debug(this, " child[" + (i - 1) + "].maxKey = " + lastMaxKey + " bufferID=" +
getBufferId(i - 1));
}
lastMinKey = key;
@ -143,10 +141,10 @@ class VarKeyInteriorNode extends VarKeyNode {
continue; // skip child
}
lastMaxKey = node.getKey(node.getKeyCount() - 1);
lastMaxKey = node.getKeyField(node.getKeyCount() - 1);
// Verify key match-up between parent and child
Field childKey0 = node.getKey(0);
Field childKey0 = node.getKeyField(0);
if (!key.equals(childKey0)) {
consistent = false;
logConsistencyError(tableName,
@ -182,10 +180,16 @@ class VarKeyInteriorNode extends VarKeyNode {
/**
* Perform a binary search to locate the specified key and derive an index
* into the Buffer ID storage. This method is used to identify the child
* node which contains the specified record key.
* @param key
* @return int buffer ID index.
* into the Buffer ID storage. This method is intended to locate the child
* node which contains the specified key. The returned index corresponds
* to a child's stored buffer/node ID and may correspond to another interior
* node or a leaf record node. Each stored key within this interior node
* effectively identifies the maximum key contained within the corresponding
* child node.
* @param key key to search for
* @return int buffer ID index of child node. An existing positive index
* value will always be returned.
* @throws IOException if IO error occurs
*/
int getIdIndex(Field key) throws IOException {
@ -194,12 +198,11 @@ class VarKeyInteriorNode extends VarKeyNode {
while (min <= max) {
int i = (min + max) / 2;
Field k = getKey(i);
int rc = k.compareTo(key);
int rc = compareKeyField(key, i);
if (rc == 0) {
return i;
}
else if (rc < 0) {
else if (rc > 0) {
min = i + 1;
}
else {
@ -209,26 +212,19 @@ class VarKeyInteriorNode extends VarKeyNode {
return max;
}
/**
* Perform a binary search to locate the specified key and derive an index
* into the Buffer ID storage. This method is intended to find the insertion
* index or exact match for a child key.
* @param key
* @return int buffer ID index.
*/
private int getKeyIndex(Field key) throws IOException {
@Override
public int getKeyIndex(Field key) throws IOException {
int min = 0;
int max = keyCount - 1;
while (min <= max) {
int i = (min + max) / 2;
Field k = getKey(i);
int rc = k.compareTo(key);
int rc = compareKeyField(key, i);
if (rc == 0) {
return i;
}
else if (rc < 0) {
else if (rc > 0) {
min = i + 1;
}
else {
@ -271,7 +267,8 @@ class VarKeyInteriorNode extends VarKeyNode {
* @param index key index
* @return record key offset
*/
private int getKeyOffset(int index) {
@Override
public int getKeyOffset(int index) {
return buffer.getInt(BASE + (index * ENTRY_SIZE));
}
@ -284,11 +281,8 @@ class VarKeyInteriorNode extends VarKeyNode {
buffer.putInt(BASE + (index * ENTRY_SIZE), offset);
}
/*
* @see ghidra.framework.store.db.VarKeyNode#getKey(int)
*/
@Override
Field getKey(int index) throws IOException {
public Field getKeyField(int index) throws IOException {
Field key = keyType.newField();
key.read(buffer, buffer.getInt(BASE + (index * ENTRY_SIZE)));
return key;
@ -313,15 +307,6 @@ class VarKeyInteriorNode extends VarKeyNode {
return buffer.getInt(BASE + (index * ENTRY_SIZE) + KEY_OFFSET_SIZE);
}
// /**
// * Store the child node buffer ID associated with the specified key index
// * @param index child key index
// * @param id child node buffer ID
// */
// private void putBufferId(int index, int id) {
// buffer.putInt(BASE + (index * ENTRY_SIZE) + KEY_OFFSET_SIZE, id);
// }
/**
* @return unused free space within node
*/
@ -432,8 +417,11 @@ class VarKeyInteriorNode extends VarKeyNode {
* Callback method for when a child node's leftmost key changes.
* @param oldKey previous leftmost key.
* @param newKey new leftmost key.
* @param node child node containing oldKey
* @throws IOException if IO error occurs
*/
void keyChanged(Field oldKey, Field newKey, VarKeyNode node) throws IOException {
@Override
public void keyChanged(Field oldKey, Field newKey, FieldKeyNode node) throws IOException {
int index = getKeyIndex(oldKey);
if (index < 0) {
@ -443,7 +431,7 @@ class VarKeyInteriorNode extends VarKeyNode {
int lenChange = newKey.length() - oldKey.length();
if (lenChange > 0 && lenChange > getFreeSpace()) {
// Split node if updated key won't fit
split(index, oldKey, newKey, node);
split(index, oldKey, newKey, (VarKeyNode) node);
}
else {
@ -461,7 +449,8 @@ class VarKeyInteriorNode extends VarKeyNode {
* @param oldIndex index of key to be updated
* @param oldKey old key value stored at oldIndex
* @param newKey new key value
* @throws IOException thrown if IO error occurs
* @param node child node containing oldKey
* @throws IOException if IO error occurs
*/
private void split(int oldIndex, Field oldKey, Field newKey, VarKeyNode node)
throws IOException {
@ -497,7 +486,7 @@ class VarKeyInteriorNode extends VarKeyNode {
parent.insert(newNode);
if (newNode.parent != parent) {
// Fix my parent
if (parent.getKeyIndex(getKey(0)) < 0) {
if (parent.getKeyIndex(getKeyField(0)) < 0) {
parent = newNode.parent;
}
}
@ -505,8 +494,8 @@ class VarKeyInteriorNode extends VarKeyNode {
}
// New parent node becomes root
parent = new VarKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), newNode.getKey(0),
newNode.getBufferId());
parent = new VarKeyInteriorNode(nodeMgr, getKeyField(0), buffer.getId(),
newNode.getKeyField(0), newNode.getBufferId());
newNode.parent = parent;
}
@ -518,7 +507,7 @@ class VarKeyInteriorNode extends VarKeyNode {
*/
VarKeyNode insert(VarKeyNode node) throws IOException {
Field key = node.getKey(0);
Field key = node.getKeyField(0);
int id = node.getBufferId();
// Split this node if full
@ -536,6 +525,7 @@ class VarKeyInteriorNode extends VarKeyNode {
* @param key leftmost key associated with new node.
* @param node child node which corresponds to the id and key.
* @return root node.
* @throws IOException thrown if an IO error occurs
*/
VarKeyNode insert(int id, Field key, VarKeyNode node) throws IOException {
@ -549,7 +539,7 @@ class VarKeyInteriorNode extends VarKeyNode {
node.parent = this;
if (index == 0 && parent != null) {
parent.keyChanged(getKey(1), key, this);
parent.keyChanged(getKeyField(1), key, this);
}
return getRoot();
@ -568,7 +558,6 @@ class VarKeyInteriorNode extends VarKeyNode {
// Create new interior node
VarKeyInteriorNode newNode = new VarKeyInteriorNode(nodeMgr, keyType);
// DataBuffer newBuf = newNode.buffer;
int halfway =
((keyCount == 0 ? buffer.length() : getKeyOffset(keyCount - 1)) + buffer.length()) / 2;
@ -576,7 +565,7 @@ class VarKeyInteriorNode extends VarKeyNode {
moveKeysRight(this, newNode, keyCount - getOffsetIndex(halfway));
// Insert new key/id
Field rightKey = newNode.getKey(0);
Field rightKey = newNode.getKeyField(0);
if (newKey.compareTo(rightKey) < 0) {
insert(newId, newKey, node);
}
@ -588,7 +577,7 @@ class VarKeyInteriorNode extends VarKeyNode {
VarKeyNode rootNode = parent.insert(newNode);
if (newNode.parent != parent) {
// Fix my parent
if (parent.getKeyIndex(getKey(0)) < 0) {
if (parent.getKeyIndex(getKeyField(0)) < 0) {
parent = newNode.parent;
}
}
@ -596,33 +585,27 @@ class VarKeyInteriorNode extends VarKeyNode {
}
// New parent node becomes root
parent = new VarKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), rightKey,
parent = new VarKeyInteriorNode(nodeMgr, getKeyField(0), buffer.getId(), rightKey,
newNode.getBufferId());
newNode.parent = parent;
return parent;
}
/*
* @see ghidra.framework.store.db.VarKeyNode#getLeafNode(long)
*/
@Override
VarKeyRecordNode getLeafNode(Field key) throws IOException {
public VarKeyRecordNode getLeafNode(Field key) throws IOException {
VarKeyNode node = nodeMgr.getVarKeyNode(getBufferId(getIdIndex(key)));
node.parent = this;
return node.getLeafNode(key);
}
/*
* @see ghidra.framework.store.db.VarKeyNode#getLeftmostLeafNode()
*/
@Override
VarKeyRecordNode getLeftmostLeafNode() throws IOException {
public VarKeyRecordNode getLeftmostLeafNode() throws IOException {
VarKeyNode node = nodeMgr.getVarKeyNode(getBufferId(0));
return node.getLeftmostLeafNode();
}
@Override
VarKeyRecordNode getRightmostLeafNode() throws IOException {
public VarKeyRecordNode getRightmostLeafNode() throws IOException {
VarKeyNode node = nodeMgr.getVarKeyNode(getBufferId(keyCount - 1));
return node.getRightmostLeafNode();
}
@ -654,7 +637,7 @@ class VarKeyInteriorNode extends VarKeyNode {
// Delete child entry
deleteEntry(index);
if (index == 0 && parent != null) {
parent.keyChanged(key, getKey(0), this);
parent.keyChanged(key, getKeyField(0), this);
}
return (parent != null) ? parent.balanceChild(this) : this;
@ -676,7 +659,7 @@ class VarKeyInteriorNode extends VarKeyNode {
// balance with right sibling except if node corresponds to the right-most
// key within this interior node - in that case balance with left sibling.
int index = getIdIndex(node.getKey(0));
int index = getIdIndex(node.getKeyField(0));
if (index == (keyCount - 1)) {
return balanceChild((VarKeyInteriorNode) nodeMgr.getVarKeyNode(getBufferId(index - 1)),
node);
@ -700,14 +683,11 @@ class VarKeyInteriorNode extends VarKeyNode {
int leftKeyCount = leftNode.keyCount;
int rightKeyCount = rightNode.keyCount;
// if (leftKeyCount == rightKeyCount) {
// return getRoot();
// }
int len = buffer.length();
int leftKeySpace = len - leftNode.getKeyOffset(leftKeyCount - 1);
int rightKeySpace = len - rightNode.getKeyOffset(rightKeyCount - 1);
Field rightKey = rightNode.getKey(0);
Field rightKey = rightNode.getKeyField(0);
// Can right keys fit within left node
if ((rightKeySpace + (rightKeyCount * ENTRY_SIZE)) <= (len - BASE - leftKeySpace -
@ -731,7 +711,7 @@ class VarKeyInteriorNode extends VarKeyNode {
balanced = moveKeysLeft(leftNode, rightNode, rightKeyCount - index - 1);
}
if (balanced) {
this.keyChanged(rightKey, rightNode.getKey(0), rightNode);
this.keyChanged(rightKey, rightNode.getKeyField(0), rightNode);
}
return getRoot();
}
@ -775,16 +755,6 @@ class VarKeyInteriorNode extends VarKeyNode {
return true;
}
//private static void checkKeyOffsets(VarKeyInteriorNode node) {
// for (int i = 0; i < node.keyCount; i++) {
// int length = node.buffer.getInt(node.getKeyOffset(i));
//
// if (length < -1 || length > 40) {
// throw new ArrayIndexOutOfBoundsException();
// }
// }
//}
/**
* Move some or all of the keys from the right node into the left node.
* If all keys are moved, the caller is responsible for deleting the right
@ -802,9 +772,6 @@ class VarKeyInteriorNode extends VarKeyNode {
int rightOffset = rightNode.getKeyOffset(count - 1);
int len = rightNode.buffer.length() - rightOffset;
int leftOffset = leftNode.getKeyOffset(leftKeyCount - 1) - len;
//if ((len + (ENTRY_SIZE * count)) > leftNode.getFreeSpace()) {
// throw new ArrayIndexOutOfBoundsException();
//}
// Move key data to left node
leftNode.buffer.copy(leftOffset, rightNode.buffer, rightOffset, len);
@ -831,9 +798,6 @@ class VarKeyInteriorNode extends VarKeyNode {
return true;
}
/*
* @see ghidra.framework.store.db.VarKeyNode#delete()
*/
@Override
public void delete() throws IOException {
@ -846,9 +810,6 @@ class VarKeyInteriorNode extends VarKeyNode {
nodeMgr.deleteNode(this);
}
/*
* @see ghidra.framework.store.db.BTreeNode#getBufferReferences()
*/
@Override
public int[] getBufferReferences() {
int[] ids = new int[keyCount];
@ -871,7 +832,7 @@ class VarKeyInteriorNode extends VarKeyNode {
public boolean isRightmostKey(Field key) throws IOException {
if (getIdIndex(key) == (keyCount - 1)) {
if (parent != null) {
return parent.isRightmostKey(getKey(0));
return parent.isRightmostKey(getKeyField(0));
}
return true;
}

View file

@ -22,8 +22,11 @@ import db.buffers.DataBuffer;
/**
* <code>VarKeyNode</code> is an abstract implementation of a BTree node
* which utilizes variable-length Field key values.
* <pre>
* | NodeType(1) | KeyType(1) | KeyCount(4) | ...
* </pre>
*/
abstract class VarKeyNode implements BTreeNode {
abstract class VarKeyNode implements FieldKeyNode {
private static final int KEY_TYPE_SIZE = 1;
private static final int KEY_COUNT_SIZE = 4;
@ -62,7 +65,7 @@ abstract class VarKeyNode implements BTreeNode {
* @param nodeMgr table node manager.
* @param nodeType node type
* @param keyType key Field type
* @throws IOException thrown if IO error occurs
* @throws IOException if IO error occurs
*/
VarKeyNode(NodeMgr nodeMgr, byte nodeType, Field keyType) throws IOException {
this.nodeMgr = nodeMgr;
@ -75,6 +78,11 @@ abstract class VarKeyNode implements BTreeNode {
nodeMgr.addNode(this);
}
@Override
public VarKeyInteriorNode getParent() {
return parent;
}
@Override
public int getBufferId() {
return buffer.getId();
@ -91,8 +99,9 @@ abstract class VarKeyNode implements BTreeNode {
* @return TableNode
*/
VarKeyNode getRoot() {
if (parent != null)
if (parent != null) {
return parent.getRoot();
}
return this;
}
@ -107,13 +116,26 @@ abstract class VarKeyNode implements BTreeNode {
buffer.putInt(KEY_COUNT_OFFSET, keyCount);
}
@Override
public int compareKeyField(Field k, int keyIndex) {
return k.compareTo(buffer, getKeyOffset(keyIndex));
}
/**
* Get the key offset within the buffer
* @param index key index
* @return record key offset
*/
public abstract int getKeyOffset(int index);
/**
* Get the key value at a specific index.
* @param index key index
* @return key value
* @throws IOException thrown if an IO error occurs
*/
abstract Field getKey(int index) throws IOException;
@Override
public abstract Field getKeyField(int index) throws IOException;
/**
* Get the leaf node which contains the specified key.
@ -121,20 +143,23 @@ abstract class VarKeyNode implements BTreeNode {
* @return leaf node
* @throws IOException thrown if an IO error occurs
*/
abstract VarKeyRecordNode getLeafNode(Field key) throws IOException;
@Override
public abstract VarKeyRecordNode getLeafNode(Field key) throws IOException;
/**
* Get the left-most leaf node within the tree.
* @return left-most leaf node.
* @throws IOException thrown if IO error occurs
*/
abstract VarKeyRecordNode getLeftmostLeafNode() throws IOException;
@Override
public abstract VarKeyRecordNode getLeftmostLeafNode() throws IOException;
/**
* Get the right-most leaf node within the tree.
* @return right-most leaf node.
* @throws IOException thrown if IO error occurs
*/
abstract VarKeyRecordNode getRightmostLeafNode() throws IOException;
@Override
public abstract VarKeyRecordNode getRightmostLeafNode() throws IOException;
}

View file

@ -25,19 +25,19 @@ import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
/**
* <code>LongKeyRecordNode</code> is an implementation of a BTree leaf node
* <code>VarKeyRecordNode</code> is an implementation of a BTree leaf node
* which utilizes variable-length key values and stores variable-length records.
* This type of node has the following layout within a single DataBuffer
* (field size in bytes):
* <pre>
* | NodeType(1) | KeyType(1) | KeyCount(4) | PrevLeafId(4) | NextLeafId(4) | KeyOffset0(4) | IndFlag0(1) |...
*
* | KeyOffsetN(4) | IndFlagN(1) |...<FreeSpace>... | KeyN | RecN |... | Key0 | Rec0 |
* | KeyOffsetN(4) | IndFlagN(1) |...&lt;FreeSpace&gt;... | KeyN | RecN |... | Key0 | Rec0 |
* </pre>
* IndFlag - if not zero the record has been stored within a chained DBBuffer
* whose 4-byte integer buffer ID has been stored within this leaf at the record offset.
*/
class VarKeyRecordNode extends VarKeyNode {
class VarKeyRecordNode extends VarKeyNode implements FieldKeyRecordNode {
private static final int ID_SIZE = 4;
@ -94,7 +94,7 @@ class VarKeyRecordNode extends VarKeyNode {
void logConsistencyError(String tableName, String msg, Throwable t) throws IOException {
Msg.debug(this, "Consistency Error (" + tableName + "): " + msg);
Msg.debug(this, " bufferID=" + getBufferId() + " key[0]=" + getKey(0));
Msg.debug(this, " bufferID=" + getBufferId() + " key[0]=" + getKeyField(0));
if (t != null) {
Msg.error(this, "Consistency Error (" + tableName + ")", t);
}
@ -107,7 +107,7 @@ class VarKeyRecordNode extends VarKeyNode {
Field prevKey = null;
for (int i = 0; i < keyCount; i++) {
// Compare each key entry with the previous key
Field key = getKey(i);
Field key = getKeyField(i);
if (i != 0) {
if (key.compareTo(prevKey) <= 0) {
consistent = false;
@ -119,14 +119,14 @@ class VarKeyRecordNode extends VarKeyNode {
prevKey = key;
}
if ((parent == null || parent.isLeftmostKey(getKey(0))) && getPreviousLeaf() != null) {
if ((parent == null || parent.isLeftmostKey(getKeyField(0))) && getPreviousLeaf() != null) {
consistent = false;
logConsistencyError(tableName, "previous-leaf should not exist", null);
}
VarKeyRecordNode node = getNextLeaf();
if (node != null) {
if (parent == null || parent.isRightmostKey(getKey(0))) {
if (parent == null || parent.isRightmostKey(getKeyField(0))) {
consistent = false;
logConsistencyError(tableName, "next-leaf should not exist", null);
}
@ -138,7 +138,7 @@ class VarKeyRecordNode extends VarKeyNode {
}
}
}
else if (parent != null && !parent.isRightmostKey(getKey(0))) {
else if (parent != null && !parent.isRightmostKey(getKeyField(0))) {
consistent = false;
logConsistencyError(tableName, "this leaf is not linked to next-leaf", null);
}
@ -146,35 +146,36 @@ class VarKeyRecordNode extends VarKeyNode {
return consistent;
}
/*
* @see ghidra.framework.store.db.VarKeyNode#getLeafNode(long)
*/
@Override
VarKeyRecordNode getLeafNode(Field key) throws IOException {
public VarKeyRecordNode getLeafNode(Field key) throws IOException {
return this;
}
/*
* @see ghidra.framework.store.db2.VarKeyNode#getLeftmostLeafNode()
*/
@Override
VarKeyRecordNode getLeftmostLeafNode() throws IOException {
public VarKeyRecordNode getLeftmostLeafNode() throws IOException {
VarKeyRecordNode leaf = getPreviousLeaf();
return leaf != null ? leaf.getLeftmostLeafNode() : this;
}
@Override
VarKeyRecordNode getRightmostLeafNode() throws IOException {
public VarKeyRecordNode getRightmostLeafNode() throws IOException {
VarKeyRecordNode leaf = getNextLeaf();
return leaf != null ? leaf.getRightmostLeafNode() : this;
}
@Override
public boolean hasNextLeaf() throws IOException {
int nextLeafId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
return (nextLeafId >= 0);
}
/**
* Get this leaf node's right sibling
* @return this leaf node's right sibling or null if right sibling does not exist.
* @throws IOException thrown if an IO error occurs
*/
VarKeyRecordNode getNextLeaf() throws IOException {
@Override
public VarKeyRecordNode getNextLeaf() throws IOException {
VarKeyRecordNode leaf = null;
int nextLeafId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
if (nextLeafId >= 0) {
@ -183,40 +184,40 @@ class VarKeyRecordNode extends VarKeyNode {
return leaf;
}
@Override
public boolean hasPreviousLeaf() throws IOException {
int prevLeafId = buffer.getInt(PREV_LEAF_ID_OFFSET);
return (prevLeafId >= 0);
}
/**
* Get this leaf node's left sibling
* @return this leaf node's left sibling or null if left sibling does not exist.
* @throws IOException thrown if an IO error occurs
* @throws IOException if an IO error occurs
*/
VarKeyRecordNode getPreviousLeaf() throws IOException {
@Override
public VarKeyRecordNode getPreviousLeaf() throws IOException {
VarKeyRecordNode leaf = null;
int nextLeafId = buffer.getInt(PREV_LEAF_ID_OFFSET);
if (nextLeafId >= 0) {
leaf = (VarKeyRecordNode) nodeMgr.getVarKeyNode(nextLeafId);
int prevLeafId = buffer.getInt(PREV_LEAF_ID_OFFSET);
if (prevLeafId >= 0) {
leaf = (VarKeyRecordNode) nodeMgr.getVarKeyNode(prevLeafId);
}
return leaf;
}
/**
* Perform a binary search to locate the specified key and derive an index
* into the Buffer ID storage.
* @param key
* @return int buffer ID index.
* @throws IOException thrown if an IO error occurs
*/
int getKeyIndex(Field key) throws IOException {
@Override
public int getKeyIndex(Field key) throws IOException {
int min = 0;
int max = keyCount - 1;
while (min <= max) {
int i = (min + max) / 2;
Field k = getKey(i);
int rc = k.compareTo(key);
int rc = compareKeyField(key, i);
if (rc == 0) {
return i;
}
else if (rc < 0) {
else if (rc > 0) {
min = i + 1;
}
else {
@ -256,14 +257,14 @@ class VarKeyRecordNode extends VarKeyNode {
}
// New parent node becomes root
return new VarKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), newLeaf.getKey(0),
newBufId);
return new VarKeyInteriorNode(nodeMgr, getKeyField(0), buffer.getId(),
newLeaf.getKeyField(0), newBufId);
}
/**
* Append a leaf which contains one or more keys and update tree. Leaf is inserted
* as the new right sibling of this leaf.
* @param newLeaf new right sibling leaf (must be same node type as this leaf)
* @param leaf new right sibling leaf (must be same node type as this leaf)
* @return root node which may have changed.
* @throws IOException thrown if an IO error occurs
*/
@ -290,17 +291,12 @@ class VarKeyRecordNode extends VarKeyNode {
}
// New parent node becomes root
return new VarKeyInteriorNode(nodeMgr, getKey(0), buffer.getId(), leaf.getKey(0), newBufId);
return new VarKeyInteriorNode(nodeMgr, getKeyField(0), buffer.getId(), leaf.getKeyField(0),
newBufId);
}
/**
* Insert or Update a record.
* @param record data record with long key
* @param table table which will be notified when record is inserted or updated.
* @return root node which may have changed.
* @throws IOException thrown if IO error occurs
*/
VarKeyNode putRecord(Record record, Table table) throws IOException {
@Override
public VarKeyNode putRecord(Record record, Table table) throws IOException {
Field key = record.getKeyField();
int index = getKeyIndex(key);
@ -318,7 +314,7 @@ class VarKeyRecordNode extends VarKeyNode {
index = -index - 1;
if (insertRecord(index, record)) {
if (index == 0 && parent != null) {
parent.keyChanged(getKey(1), key, this);
parent.keyChanged(getKeyField(1), key, this);
}
if (table != null) {
table.insertedRecord(record);
@ -359,7 +355,8 @@ class VarKeyRecordNode extends VarKeyNode {
* @return root node which may have changed.
* @throws IOException thrown if IO error occurs
*/
VarKeyNode deleteRecord(Field key, Table table) throws IOException {
@Override
public VarKeyNode deleteRecord(Field key, Table table) throws IOException {
// Handle non-existent key - do nothing
int index = getKeyIndex(key);
@ -382,20 +379,14 @@ class VarKeyRecordNode extends VarKeyNode {
// Notify parent of leftmost key change
if (index == 0 && parent != null) {
parent.keyChanged(key, getKey(0), this);
parent.keyChanged(key, getKeyField(0), this);
}
return getRoot();
}
/**
* Get the first record whoose key is less than the specified key.
* @param key record key
* @param schema record data schema
* @return Record requested or null if record not found.
* @throws IOException thrown if IO error occurs
*/
Record getRecordBefore(Field key, Schema schema) throws IOException {
@Override
public Record getRecordBefore(Field key, Schema schema) throws IOException {
int index = getKeyIndex(key);
if (index < 0) {
index = -index - 2;
@ -410,14 +401,8 @@ class VarKeyRecordNode extends VarKeyNode {
return getRecord(schema, index);
}
/**
* Get the first record whoose key is greater than the specified key.
* @param key record key
* @param schema record data schema
* @return Record requested or null if record not found.
* @throws IOException thrown if IO error occurs
*/
Record getRecordAfter(Field key, Schema schema) throws IOException {
@Override
public Record getRecordAfter(Field key, Schema schema) throws IOException {
int index = getKeyIndex(key);
if (index < 0) {
index = -(index + 1);
@ -432,15 +417,8 @@ class VarKeyRecordNode extends VarKeyNode {
return getRecord(schema, index);
}
/**
* Get the first record whoose key is less than or equal to the specified
* key.
* @param key record key
* @param schema record data schema
* @return Record requested or null if record not found.
* @throws IOException thrown if IO error occurs
*/
Record getRecordAtOrBefore(Field key, Schema schema) throws IOException {
@Override
public Record getRecordAtOrBefore(Field key, Schema schema) throws IOException {
int index = getKeyIndex(key);
if (index < 0) {
index = -index - 2;
@ -452,15 +430,8 @@ class VarKeyRecordNode extends VarKeyNode {
return getRecord(schema, index);
}
/**
* Get the first record whoose key is greater than or equal to the specified
* key.
* @param key record key
* @param schema record data schema
* @return Record requested or null if record not found.
* @throws IOException thrown if IO error occurs
*/
Record getRecordAtOrAfter(Field key, Schema schema) throws IOException {
@Override
public Record getRecordAtOrAfter(Field key, Schema schema) throws IOException {
int index = getKeyIndex(key);
if (index < 0) {
index = -(index + 1);
@ -484,23 +455,25 @@ class VarKeyRecordNode extends VarKeyNode {
return new VarKeyRecordNode(nodeMgr, prevLeafId, nextLeafId, keyType);
}
/*
* @see ghidra.framework.store.db.VarKeyNode#getKey(int)
*/
@Override
Field getKey(int index) throws IOException {
public Field getKeyField(int index) throws IOException {
Field key = keyType.newField();
key.read(buffer, buffer.getInt(HEADER_SIZE + (index * ENTRY_SIZE)));
key.read(buffer, getKeyOffset(index));
return key;
}
@Override
public int getKeyOffset(int index) {
return buffer.getInt(HEADER_SIZE + (index * ENTRY_SIZE));
}
/**
* Get the record data offset within the buffer
* @param index key index
* @return record data offset
*/
private int getRecordDataOffset(int index) throws IOException {
int offset = buffer.getInt(HEADER_SIZE + (index * ENTRY_SIZE));
int offset = getKeyOffset(index);
return offset + keyType.readLength(buffer, offset);
}
@ -552,13 +525,13 @@ class VarKeyRecordNode extends VarKeyNode {
/**
* Get the length of a stored record with key.
* @param keyIndex key index associated with record.
* @param index key index associated with record.
*/
private int getFullRecordLength(int keyIndex) {
if (keyIndex == 0) {
private int getFullRecordLength(int index) {
if (index == 0) {
return buffer.length() - getRecordKeyOffset(0);
}
return getRecordKeyOffset(keyIndex - 1) - getRecordKeyOffset(keyIndex);
return getRecordKeyOffset(index - 1) - getRecordKeyOffset(index);
}
/**
@ -600,8 +573,9 @@ class VarKeyRecordNode extends VarKeyNode {
* @param index key index
* @return Record
*/
Record getRecord(Schema schema, int index) throws IOException {
Field key = getKey(index);
@Override
public Record getRecord(Schema schema, int index) throws IOException {
Field key = getKeyField(index);
Record record = schema.createRecord(key);
if (hasIndirectStorage(index)) {
int bufId = buffer.getInt(getRecordDataOffset(index));
@ -614,14 +588,16 @@ class VarKeyRecordNode extends VarKeyNode {
return record;
}
/**
* Get the record identified by the specified key.
* @param key record key
* @param schema record data schema
* @return Record requested or null if record not found.
* @throws IOException thrown if IO error occurs
*/
Record getRecord(Field key, Schema schema) throws IOException {
@Override
public int getRecordOffset(int index) throws IOException {
if (hasIndirectStorage(index)) {
return -buffer.getInt(getRecordDataOffset(index));
}
return getRecordDataOffset(index);
}
@Override
public Record getRecord(Field key, Schema schema) throws IOException {
int index = getKeyIndex(key);
if (index < 0)
return null;
@ -658,7 +634,7 @@ class VarKeyRecordNode extends VarKeyNode {
/**
* Split the contents of this leaf node; placing the right half of the records into the
* empty leaf node provided.
* @param newRightLeaf empty right sibling leaf
* @param rightNode empty right sibling leaf
*/
private void splitData(VarKeyRecordNode rightNode) {
@ -752,12 +728,12 @@ class VarKeyRecordNode extends VarKeyNode {
/**
* Inserts the record at the given index if there is sufficient space in
* the buffer.
* @param keyIndex insertion index
* @param index insertion index
* @param record record to be inserted
* @return true if the record was successfully inserted.
* @throws IOException thrown if IO error occurs
*/
private boolean insertRecord(int keyIndex, Record record) throws IOException {
private boolean insertRecord(int index, Record record) throws IOException {
Field key = record.getKeyField();
int keyLen = key.length();
@ -776,11 +752,11 @@ class VarKeyRecordNode extends VarKeyNode {
return false; // insufficient space for record storage
// Make room for new record
int offset = moveRecords(keyIndex, -(len + keyLen));
int offset = moveRecords(index, -(len + keyLen));
// Make room for new key/offset entry
int start = HEADER_SIZE + (keyIndex * ENTRY_SIZE);
len = (keyCount - keyIndex) * ENTRY_SIZE;
int start = HEADER_SIZE + (index * ENTRY_SIZE);
len = (keyCount - index) * ENTRY_SIZE;
buffer.move(start, start + ENTRY_SIZE, len);
// Store new record key/offset
@ -798,7 +774,7 @@ class VarKeyRecordNode extends VarKeyNode {
else {
record.write(buffer, offset + keyLen);
}
enableIndirectStorage(keyIndex, useIndirect);
enableIndirectStorage(index, useIndirect);
return true;
}
@ -809,7 +785,8 @@ class VarKeyRecordNode extends VarKeyNode {
* @param index record index
* @throws IOException thrown if IO error occurs
*/
void remove(int index) throws IOException {
@Override
public void remove(int index) throws IOException {
if (index < 0 || index >= keyCount)
throw new AssertException();
@ -833,7 +810,8 @@ class VarKeyRecordNode extends VarKeyNode {
* @return root node which may have changed.
* @throws IOException thrown if IO error occurs
*/
VarKeyNode removeLeaf() throws IOException {
@Override
public VarKeyNode removeLeaf() throws IOException {
// Remove all chained buffers associated with this leaf
for (int index = 0; index < keyCount; ++index) {
@ -842,7 +820,7 @@ class VarKeyRecordNode extends VarKeyNode {
}
}
Field key = getKey(0);
Field key = getKeyField(0);
int prevBufferId = buffer.getInt(PREV_LEAF_ID_OFFSET);
int nextBufferId = buffer.getInt(NEXT_LEAF_ID_OFFSET);
if (prevBufferId >= 0) {
@ -870,9 +848,6 @@ class VarKeyRecordNode extends VarKeyNode {
chainedBuffer.delete();
}
/*
* @see ghidra.framework.store.db.VarKeyNode#delete()
*/
@Override
public void delete() throws IOException {
@ -890,9 +865,6 @@ class VarKeyRecordNode extends VarKeyNode {
nodeMgr.deleteNode(this);
}
/*
* @see ghidra.framework.store.db.BTreeNode#getBufferReferences()
*/
@Override
public int[] getBufferReferences() {
IntArrayList idList = new IntArrayList();
@ -903,6 +875,7 @@ class VarKeyRecordNode extends VarKeyNode {
idList.add(buffer.getInt(offset));
}
catch (IOException e) {
// ignore
}
}
}

View file

@ -15,12 +15,11 @@
*/
package db;
import ghidra.util.datastruct.IntArrayList;
import ghidra.util.exception.AssertException;
import java.io.IOException;
import db.buffers.DataBuffer;
import ghidra.util.datastruct.IntArrayList;
import ghidra.util.exception.AssertException;
/**
* <code>VarRecNode</code> is an implementation of a BTree leaf node
@ -37,19 +36,19 @@ import db.buffers.DataBuffer;
* whose 4-byte integer buffer ID has been stored within this leaf at the record offset.
*/
class VarRecNode extends LongKeyRecordNode {
private static final int HEADER_SIZE = RECORD_LEAF_HEADER_SIZE;
private static final int KEY_SIZE = 8;
private static final int OFFSET_SIZE = 4;
private static final int INDIRECT_OPTION_SIZE = 1;
private static final int ENTRY_SIZE = KEY_SIZE + OFFSET_SIZE + INDIRECT_OPTION_SIZE;
private static final int KEY_BASE_OFFSET = HEADER_SIZE;
private static final int DATA_OFFSET_BASE_OFFSET = KEY_BASE_OFFSET + KEY_SIZE;
private static final int IND_OPTION_BASE_OFFSET = DATA_OFFSET_BASE_OFFSET + OFFSET_SIZE;
/**
* Construct an existing long-key variable-length record leaf node.
* @param nodeMgr table node manager instance
@ -58,7 +57,7 @@ class VarRecNode extends LongKeyRecordNode {
VarRecNode(NodeMgr nodeMgr, DataBuffer buf) {
super(nodeMgr, buf);
}
/**
* Construct a new long-key variable-length record leaf node.
* @param nodeMgr table node manager instance
@ -70,37 +69,27 @@ class VarRecNode extends LongKeyRecordNode {
super(nodeMgr, NodeMgr.LONGKEY_VAR_REC_NODE, prevLeafId, nextLeafId);
}
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#createNewLeaf()
*/
@Override
LongKeyRecordNode createNewLeaf(int prevLeafId, int nextLeafId) throws IOException {
LongKeyRecordNode createNewLeaf(int prevLeafId, int nextLeafId) throws IOException {
return new VarRecNode(nodeMgr, prevLeafId, nextLeafId);
}
/*
* @see ghidra.framework.store.db.LongKeyNode#getKey(int)
*/
@Override
long getKey(int index) {
return buffer.getLong(KEY_BASE_OFFSET + (index * ENTRY_SIZE));
long getKey(int index) {
return buffer.getLong(getKeyOffset(index));
}
// /**
// * Store a key at the specified index
// * @param index key index
// * @param key key value
// */
// private void putKey(int index, long key) {
// buffer.putLong(KEY_BASE_OFFSET + (index * ENTRY_SIZE), key);
// }
@Override
public int getKeyOffset(int index) {
return KEY_BASE_OFFSET + (index * ENTRY_SIZE);
}
/**
* Get the record offset within the buffer
* @param index key index
* @return record offset
*/
private int getRecordOffset(int index) {
int getRecordDataOffset(int index) {
return buffer.getInt(DATA_OFFSET_BASE_OFFSET + (index * ENTRY_SIZE));
}
@ -109,10 +98,10 @@ class VarRecNode extends LongKeyRecordNode {
* @param index key index
* @param offset record offset
*/
private void putRecordOffset(int index, int offset) {
private void putRecordDataOffset(int index, int offset) {
buffer.putInt(DATA_OFFSET_BASE_OFFSET + (index * ENTRY_SIZE), offset);
}
/**
* Determine if a record is utilizing a chained DBBuffer for data storage
* @param index key index
@ -128,42 +117,41 @@ class VarRecNode extends LongKeyRecordNode {
* @param state indirect storage used (true) or not used (false)
*/
private void enableIndirectStorage(int index, boolean state) {
buffer.putByte(IND_OPTION_BASE_OFFSET + (index * ENTRY_SIZE),
state ? (byte)1 : (byte)0);
buffer.putByte(IND_OPTION_BASE_OFFSET + (index * ENTRY_SIZE), state ? (byte) 1 : (byte) 0);
}
/**
* @return unused free space within node
*/
private int getFreeSpace() {
return (keyCount == 0 ? buffer.length() : getRecordOffset(keyCount - 1))
- (keyCount * ENTRY_SIZE) - RECORD_LEAF_HEADER_SIZE;
return (keyCount == 0 ? buffer.length() : getRecordDataOffset(keyCount - 1)) -
(keyCount * ENTRY_SIZE) - RECORD_LEAF_HEADER_SIZE;
}
/**
* Get the length of a stored record.
* @param keyIndex key index associated with record.
* @param index index associated with record.
*/
private int getRecordLength(int keyIndex) {
if (keyIndex == 0) {
return buffer.length() - getRecordOffset(0);
private int getRecordLength(int index) {
if (index == 0) {
return buffer.length() - getRecordDataOffset(0);
}
return getRecordOffset(keyIndex - 1) - getRecordOffset(keyIndex);
return getRecordDataOffset(index - 1) - getRecordDataOffset(index);
}
/**
* Get the length of a stored record. Optimized if record offset
* already known.
* @param keyIndex key index associated with record.
* @param index index associated with record.
* @param offset record offset
*/
private int getRecordLength(int keyIndex, int offset) {
if (keyIndex == 0) {
private int getRecordLength(int index, int offset) {
if (index == 0) {
return buffer.length() - offset;
}
return getRecordOffset(keyIndex - 1) - offset;
return getRecordDataOffset(index - 1) - offset;
}
/**
* Move all record data, starting with index, by the specified offset amount.
* If the node contains 5 records, an index of 3 would shift the record data
@ -174,76 +162,77 @@ class VarRecNode extends LongKeyRecordNode {
* @return insertion offset immediately following moved block.
*/
private int moveRecords(int index, int offset) {
int lastIndex = keyCount - 1;
// No movement needed for appended record
if (index == keyCount) {
if (index == 0) {
return buffer.length() + offset;
return buffer.length() + offset;
}
return getRecordOffset(lastIndex) + offset;
return getRecordDataOffset(lastIndex) + offset;
}
// Determine block to be moved
int start = getRecordOffset(lastIndex);
int end = (index == 0) ? buffer.length() : getRecordOffset(index - 1);
int start = getRecordDataOffset(lastIndex);
int end = (index == 0) ? buffer.length() : getRecordDataOffset(index - 1);
int len = end - start;
// Move record data
buffer.move(start, start + offset, len);
// Adjust stored offsets
for (int i = index; i < keyCount; i++) {
putRecordOffset(i, getRecordOffset(i) + offset);
putRecordDataOffset(i, getRecordDataOffset(i) + offset);
}
return end + offset;
}
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#getRecord(ghidra.framework.store.db.Schema, int)
*/
@Override
Record getRecord(Schema schema, int index) throws IOException {
public Record getRecord(Schema schema, int index) throws IOException {
long key = getKey(index);
Record record = schema.createRecord(key);
if (hasIndirectStorage(index)) {
int bufId = buffer.getInt(getRecordOffset(index));
ChainedBuffer chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(),
bufId);
int bufId = buffer.getInt(getRecordDataOffset(index));
ChainedBuffer chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(), bufId);
record.read(chainedBuffer, 0);
}
else {
record.read(buffer, getRecordOffset(index));
record.read(buffer, getRecordDataOffset(index));
}
return record;
return record;
}
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#getRecord(long, ghidra.framework.store.db.Schema)
*/
@Override
Record getRecord(long key, Schema schema) throws IOException {
public int getRecordOffset(int index) throws IOException {
if (hasIndirectStorage(index)) {
return -buffer.getInt(getRecordDataOffset(index));
}
return getRecordDataOffset(index);
}
@Override
Record getRecord(long key, Schema schema) throws IOException {
int index = getKeyIndex(key);
if (index < 0)
return null;
return getRecord(schema, index);
}
/**
* Find the index which represents the halfway point within the record data.
* @return key index.
*/
private int getSplitIndex() {
int halfway = ((keyCount == 0 ? buffer.length() : getRecordOffset(keyCount - 1))
+ buffer.length()) / 2;
int halfway = ((keyCount == 0 ? buffer.length() : getRecordDataOffset(keyCount - 1)) +
buffer.length()) / 2;
int min = 1;
int max = keyCount - 1;
while (min < max) {
int i = (min + max)/2;
int offset = getRecordOffset(i);
int i = (min + max) / 2;
int offset = getRecordDataOffset(i);
if (offset == halfway) {
return i;
}
@ -257,59 +246,53 @@ class VarRecNode extends LongKeyRecordNode {
return min;
}
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#splitData(ghidra.framework.store.db.LongKeyRecordNode)
*/
@Override
void splitData(LongKeyRecordNode newRightLeaf) {
void splitData(LongKeyRecordNode newRightLeaf) {
VarRecNode rightNode = (VarRecNode) newRightLeaf;
int splitIndex = getSplitIndex();
int count = keyCount - splitIndex;
int start = getRecordOffset(keyCount - 1); // start of block to be moved
int end = getRecordOffset(splitIndex - 1); // end of block to be moved
int start = getRecordDataOffset(keyCount - 1); // start of block to be moved
int end = getRecordDataOffset(splitIndex - 1); // end of block to be moved
int splitLen = end - start; // length of block to be moved
int rightOffset = buffer.length() - splitLen; // data offset within new leaf node
// Copy data to new leaf node
DataBuffer newBuf = rightNode.buffer;
newBuf.copy(rightOffset, buffer, start, splitLen);
newBuf.copy(KEY_BASE_OFFSET, buffer, KEY_BASE_OFFSET + (splitIndex * ENTRY_SIZE), count * ENTRY_SIZE);
newBuf.copy(rightOffset, buffer, start, splitLen);
newBuf.copy(KEY_BASE_OFFSET, buffer, KEY_BASE_OFFSET + (splitIndex * ENTRY_SIZE),
count * ENTRY_SIZE);
// Fix record offsets in new leaf node
int offsetCorrection = buffer.length() - end;
for (int i = 0; i < count; i++) {
rightNode.putRecordOffset(i, rightNode.getRecordOffset(i) + offsetCorrection);
rightNode.putRecordDataOffset(i, rightNode.getRecordDataOffset(i) + offsetCorrection);
}
// Adjust key counts
setKeyCount(keyCount - count);
rightNode.setKeyCount(count);
}
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#updateRecord(int, ghidra.framework.store.db.Record)
*/
@Override
LongKeyNode updateRecord(int index, Record record) throws IOException {
int offset = getRecordOffset(index);
LongKeyNode updateRecord(int index, Record record) throws IOException {
int offset = getRecordDataOffset(index);
int oldLen = getRecordLength(index, offset);
int len = record.length();
// Check for use of indirect chained record node(s)
int maxRecordLength = ((buffer.length() - HEADER_SIZE) >> 2) - ENTRY_SIZE; // min 4 records per node
boolean wasIndirect = hasIndirectStorage(index);
boolean useIndirect = (len > maxRecordLength);
if (useIndirect) {
// Store record in chained buffers
len = 4;
ChainedBuffer chainedBuffer = null;
if (wasIndirect) {
chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(),
buffer.getInt(offset));
chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(), buffer.getInt(offset));
chainedBuffer.setSize(record.length(), false);
}
else {
@ -323,15 +306,15 @@ class VarRecNode extends LongKeyRecordNode {
removeChainedBuffer(buffer.getInt(offset));
enableIndirectStorage(index, false);
}
// See if updated record will fit in current buffer
if (useIndirect || len <= (getFreeSpace() + oldLen)) {
// Overwrite record data - move other data if needed
int dataShift = oldLen - len;
if (dataShift != 0) {
offset = moveRecords(index + 1, dataShift);
putRecordOffset(index, offset);
putRecordDataOffset(index, offset);
}
if (!useIndirect) {
record.write(buffer, offset);
@ -340,22 +323,13 @@ class VarRecNode extends LongKeyRecordNode {
}
// Insufficient room for updated record - remove and re-add
long key = record.getKey();
long key = record.getKey();
LongKeyRecordNode leaf = deleteRecord(key, null).getLeafNode(key);
return leaf.putRecord(record, null);
}
/**
* Insert the specified record at the specified key index.
* Existing data may be shifted within the buffer to make room for
* the new record. Parent must be notified if this changes the leftmost
* key.
* @param keyIndex
* @param record
* @throws IOException
*/
@Override
boolean insertRecord(int keyIndex, Record record) throws IOException {
boolean insertRecord(int index, Record record) throws IOException {
// Check for use of indirect chained record node(s)
int len = record.length();
@ -364,77 +338,74 @@ class VarRecNode extends LongKeyRecordNode {
if (useIndirect) {
len = 4;
}
if ((len + ENTRY_SIZE) > getFreeSpace())
return false; // insufficient space for record storage
// Make room for new record
int offset = moveRecords(keyIndex, -len);
int offset = moveRecords(index, -len);
// Make room for new key/offset entry
int start = KEY_BASE_OFFSET + (keyIndex * ENTRY_SIZE);
len = (keyCount - keyIndex) * ENTRY_SIZE;
int start = KEY_BASE_OFFSET + (index * ENTRY_SIZE);
len = (keyCount - index) * ENTRY_SIZE;
buffer.move(start, start + ENTRY_SIZE, len);
// Store new record key/offset
buffer.putLong(start, record.getKey());
buffer.putInt(start + KEY_SIZE, offset);
setKeyCount(keyCount + 1);
// Store record data
if (useIndirect) {
ChainedBuffer chainedBuffer = new ChainedBuffer(record.length(), nodeMgr.getBufferMgr());
ChainedBuffer chainedBuffer =
new ChainedBuffer(record.length(), nodeMgr.getBufferMgr());
buffer.putInt(offset, chainedBuffer.getId());
record.write(chainedBuffer, 0);
}
else {
record.write(buffer, offset);
}
enableIndirectStorage(keyIndex, useIndirect);
enableIndirectStorage(index, useIndirect);
return true;
}
/*
* @see ghidra.framework.store.db.LongKeyRecordNode#remove(int)
*/
@Override
void remove(int index) throws IOException {
public void remove(int index) throws IOException {
if (index < 0 || index >= keyCount)
throw new AssertException();
if (index < 0 || index >= keyCount)
throw new AssertException();
if (hasIndirectStorage(index)) {
removeChainedBuffer(buffer.getInt(getRecordOffset(index)));
removeChainedBuffer(buffer.getInt(getRecordDataOffset(index)));
enableIndirectStorage(index, false);
}
int len = getRecordLength(index);
moveRecords(index + 1, len);
int start = KEY_BASE_OFFSET + ((index+1) * ENTRY_SIZE);
int start = KEY_BASE_OFFSET + ((index + 1) * ENTRY_SIZE);
len = (keyCount - index - 1) * ENTRY_SIZE;
buffer.move(start, start - ENTRY_SIZE, len);
setKeyCount(keyCount-1);
setKeyCount(keyCount - 1);
}
/**
* Removes this leaf and all associated chained buffers.
* @see db.LongKeyRecordNode#removeLeaf()
*/
@Override
LongKeyNode removeLeaf() throws IOException {
public LongKeyNode removeLeaf() throws IOException {
// Remove all chained buffers associated with this leaf
for (int index = 0; index < keyCount; ++index) {
if (hasIndirectStorage(index)) {
removeChainedBuffer(buffer.getInt(getRecordOffset(index)));
removeChainedBuffer(buffer.getInt(getRecordDataOffset(index)));
}
}
return super.removeLeaf();
}
/**
* Remove a chained buffer.
* @param bufferId chained buffer ID
@ -443,35 +414,30 @@ throw new AssertException();
ChainedBuffer chainedBuffer = new ChainedBuffer(nodeMgr.getBufferMgr(), bufferId);
chainedBuffer.delete();
}
/*
* @see ghidra.framework.store.db.LongKeyNode#delete()
*/
@Override
public void delete() throws IOException {
public void delete() throws IOException {
// Remove all chained buffers associated with this node.
for (int index = 0; index < keyCount; index++) {
if (hasIndirectStorage(index)) {
int offset = getRecordOffset(index);
int offset = getRecordDataOffset(index);
int bufferId = buffer.getInt(offset);
removeChainedBuffer(bufferId);
buffer.putInt(offset, -1);
}
}
// Remove this node
nodeMgr.deleteNode(this);
}
/*
* @see ghidra.framework.store.db.BTreeNode#getBufferReferences()
*/
@Override
public int[] getBufferReferences() {
IntArrayList idList = new IntArrayList();
for (int i = 0; i < keyCount; i++) {
if (hasIndirectStorage(i)) {
int offset = getRecordOffset(i);
int offset = getRecordDataOffset(i);
idList.add(buffer.getInt(offset));
}
}

View file

@ -28,7 +28,6 @@ import ghidra.util.SystemUtilities;
import ghidra.util.datastruct.ObjectArray;
import ghidra.util.exception.*;
import ghidra.util.task.TaskMonitor;
import ghidra.util.task.TaskMonitorAdapter;
/**
* <code>BufferMgr</code> provides low-level buffer management and caching.
@ -176,7 +175,7 @@ public class BufferMgr {
* @param sourceFile buffer file
* @throws IOException if source or cache file access error occurs
*/
public BufferMgr(BufferFile sourceFile) throws FileNotFoundException, IOException {
public BufferMgr(BufferFile sourceFile) throws IOException {
this(sourceFile, DEFAULT_BUFFER_SIZE, DEFAULT_CACHE_SIZE, DEFAULT_CHECKPOINT_COUNT);
}
@ -188,8 +187,7 @@ public class BufferMgr {
* @param maxUndos maximum number of checkpoints retained for undo (Minimum=1).
* @throws IOException if source or cache file access error occurs
*/
public BufferMgr(BufferFile sourceFile, long approxCacheSize, int maxUndos)
throws FileNotFoundException, IOException {
public BufferMgr(BufferFile sourceFile, long approxCacheSize, int maxUndos) throws IOException {
this(sourceFile, 0, approxCacheSize, maxUndos);
}
@ -202,9 +200,9 @@ public class BufferMgr {
* @param maxUndos maximum number of checkpoints retained for undo (Minimum=1).
* @throws IOException if source or cache file access error occurs
*/
private BufferMgr(BufferFile sourceFile, int requestedbufferSize, long approxCacheSize,
private BufferMgr(BufferFile sourceFile, int requestedBufferSize, long approxCacheSize,
int maxUndos) throws FileNotFoundException, IOException {
bufferSize = requestedbufferSize;
bufferSize = requestedBufferSize;
if (sourceFile != null) {
this.sourceFile = sourceFile;
int cnt = sourceFile.getIndexCount();
@ -362,6 +360,9 @@ public class BufferMgr {
/**
* Get file parameter
* @param name parameter name/key
* @return parameter value
* @throws NoSuchElementException if parameter not found
*/
int getParameter(String name) throws NoSuchElementException {
return cacheFile.getParameter(name);
@ -369,8 +370,8 @@ public class BufferMgr {
/**
* Set file parameter
* @param name
* @param value
* @param name parameter name/key
* @param value parameter value
*/
void setParameter(String name, int value) {
cacheFile.setParameter(name, value);
@ -391,7 +392,8 @@ public class BufferMgr {
* buffer file.
* This method should be called when this buffer manager instance
* is no longer needed.
* @param keepRecoveryData
* @param keepRecoveryData true if existing snapshot recovery files
* should not be deleted.
*/
public void dispose(boolean keepRecoveryData) {
@ -626,7 +628,6 @@ public class BufferMgr {
/**
* Remove a buffer node from memory cache.
* @param node buffer node
* @return buffer object, or null if buffer node was not cached
*/
private void removeFromCache(BufferNode node) {
if (node.buffer != null) {
@ -1010,9 +1011,11 @@ public class BufferMgr {
}
/**
* Return buffer.
* Release buffer back to buffer manager.
* After invoking this method, the buffer object should not
* be used and all references should be dropped.
* @param buf data buffer
* @throws IOException if IO error occurs
*/
public void releaseBuffer(DataBuffer buf) throws IOException {
@ -1031,9 +1034,9 @@ public class BufferMgr {
/**
* Handle exception which indicates a potential corruption of the BufferMgr state
* @param exception
* @param errorText
* @throws IOException
* @param exception exception
* @param errorText associated error text
* @throws IOException exception thrown if instance of IOException
*/
private void handleCorruptionException(Exception exception, String errorText)
throws IOException {
@ -1182,7 +1185,7 @@ public class BufferMgr {
}
/**
* Returns true if unsaved "buffer" changes exist.
* @return true if unsaved "buffer" changes exist.
* If no changes have been made, or all changes have been
* "undone", false will be returned. Parameter changes
are not considered.
@ -1194,9 +1197,6 @@ public class BufferMgr {
/**
* Create a new checkpoint node list.
* The redo stack will be cleared.
* @param force if true the checkpoint will be performed regardless of
* the lock count.
* @return true if checkpoint successful, or false if buffers are read-only
*/
private void startCheckpoint() {
@ -1235,21 +1235,25 @@ public class BufferMgr {
}
/**
* Returns number of undo-able transactions
* @return number of undo-able transactions
*/
public int getAvailableUndoCount() {
return checkpointHeads.size() - 1;
}
/**
* Returns the number of redo-able transactions
* @return the number of redo-able transactions
*/
public int getAvailableRedoCount() {
return redoCheckpointHeads.size();
}
/**
* Backup to previous checkpoint.
* Backup to previous checkpoint. Method should not be invoked
* when one or more buffers are locked.
* @param redoable true if current checkpoint should be moved to redo stack
* @return true if successful else false
* @throws IOException if IO error occurs
*/
public boolean undo(boolean redoable) throws IOException {
synchronized (snapshotLock) {
@ -1337,7 +1341,9 @@ public class BufferMgr {
}
/**
* Redo next checkpoint.
* Redo next checkpoint. Method should not be invoked
* when one or more buffers are locked.
* @return true if successful else false
*/
public boolean redo() {
synchronized (snapshotLock) {
@ -1414,7 +1420,8 @@ public class BufferMgr {
}
/**
* Returns true if save operation can be performed.
* @return true if save operation can be performed.
* @throws IOException if IO error occurs
*/
public boolean canSave() throws IOException {
if (corruptedState) {
@ -1427,7 +1434,7 @@ public class BufferMgr {
}
/**
* Returns true if buffers have been modified since opening or since
* @return true if buffers have been modified since opening or since
* last snapshot.
*/
public synchronized boolean modifiedSinceSnapshot() {
@ -1440,6 +1447,8 @@ public class BufferMgr {
* made since the last version.
* @param monitor task monitor
* @return true if snapshot successful, false if
* @throws IOException if IO error occurs
* @throws CancelledException if task monitor is cancelled
*/
public boolean takeRecoverySnapshot(DBChangeSet changeSet, TaskMonitor monitor)
throws IOException, CancelledException {
@ -1548,7 +1557,8 @@ public class BufferMgr {
* Returns the recovery changeSet data file for reading or null if one is not available.
The caller must dispose of the returned file before generating any new
* recovery snapshots.
* @throws IOException
* @return recovery change set buffer file
* @throws IOException if IO error occurs
*/
public LocalBufferFile getRecoveryChangeSetFile() throws IOException {
if (recoveryMgr != null) {
@ -1580,6 +1590,9 @@ public class BufferMgr {
* If recovery is cancelled, this buffer manager must be disposed.
* since the underlying state will be corrupt.
* @param monitor task monitor
* @return true if recovery successful else false
* @throws IOException if IO error occurs
* @throws CancelledException if task monitor is cancelled
*/
public boolean recover(TaskMonitor monitor) throws IOException, CancelledException {
synchronized (snapshotLock) {
@ -1600,9 +1613,12 @@ public class BufferMgr {
/**
* Recover data from recovery file
* @param recoveryFile
* @param monitor
* @throws CancelledException
* @param recoveryFile recovery file
* @param recoveryIndex recovery index (0 or 1) which corresponds to
* recoveryFile.
* @param monitor task monitor
* @throws IOException if IO error occurs
* @throws CancelledException if task monitor is cancelled
*/
synchronized void recover(RecoveryFile recoveryFile, int recoveryIndex, TaskMonitor monitor)
throws IOException, CancelledException {
@ -1755,7 +1771,7 @@ public class BufferMgr {
}
if (monitor == null) {
monitor = TaskMonitorAdapter.DUMMY_MONITOR;
monitor = TaskMonitor.DUMMY;
}
boolean oldCancelState = monitor.isCancelEnabled();
@ -1840,7 +1856,7 @@ public class BufferMgr {
}
if (monitor == null) {
monitor = TaskMonitorAdapter.DUMMY_MONITOR;
monitor = TaskMonitor.DUMMY;
}
int indexCnt = indexProvider.getIndexCount();
@ -1871,7 +1887,7 @@ public class BufferMgr {
* Write all changes to the specified outFile
* @param outFile output buffer file
* @param monitor task monitor
* @throws IOException
* @throws IOException if IO error occurs
* @throws CancelledException thrown if task cancelled
*/
private void doSave(BufferFile outFile, TaskMonitor monitor)
@ -1880,7 +1896,7 @@ public class BufferMgr {
int preSaveCnt = outFile.getIndexCount();
if (monitor == null) {
monitor = TaskMonitorAdapter.DUMMY_MONITOR;
monitor = TaskMonitor.DUMMY;
}
monitor.initialize(indexCnt);
monitor.setMessage("Saving file...");

View file

@ -143,34 +143,22 @@ public class DataBuffer implements Buffer, Externalizable {
empty = state;
}
/*
* @see ghidra.framework.store.Buffer#length()
*/
@Override
public int length() {
	// Buffer capacity equals the size of the backing byte array.
	return this.data.length;
}
/*
* @see ghidra.framework.store.Buffer#get(int, byte[], int, int)
*/
@Override
public void get(int offset, byte[] bytes, int dataOffset, int length)
		throws ArrayIndexOutOfBoundsException {
	// Bulk-copy a region of this buffer into the caller-supplied array.
	System.arraycopy(this.data, offset, bytes, dataOffset, length);
}
/*
* @see ghidra.framework.store.Buffer#get(int, byte[])
*/
@Override
public void get(int offset, byte[] bytes) {
	// Fill the entire caller-supplied array from this buffer, starting
	// at the given buffer offset.
	System.arraycopy(this.data, offset, bytes, 0, bytes.length);
}
/*
* @see ghidra.framework.store.Buffer#get(int, int)
*/
@Override
public byte[] get(int offset, int length) throws ArrayIndexOutOfBoundsException {
byte[] bytes = new byte[length];
@ -178,34 +166,22 @@ public class DataBuffer implements Buffer, Externalizable {
return bytes;
}
/*
* @see ghidra.framework.store.Buffer#getByte(int)
*/
@Override
public byte getByte(int offset) {
	// Direct indexed read from the backing array.
	return this.data[offset];
}
/*
* @see ghidra.framework.store.Buffer#getInt(int)
*/
@Override
public int getInt(int offset) {
	// Assemble a big-endian 4-byte value from four consecutive bytes.
	return ((this.data[offset] & 0xff) << 24) |
		((this.data[offset + 1] & 0xff) << 16) |
		((this.data[offset + 2] & 0xff) << 8) |
		(this.data[offset + 3] & 0xff);
}
/*
* @see ghidra.framework.store.Buffer#getShort(int)
*/
@Override
public short getShort(int offset) {
	// Assemble a big-endian 2-byte value from two consecutive bytes.
	int hi = this.data[offset] & 0xff;
	int lo = this.data[offset + 1] & 0xff;
	return (short) ((hi << 8) | lo);
}
/*
* @see ghidra.framework.store.Buffer#getLong(int)
*/
@Override
public long getLong(int offset) {
return (((long) data[offset] & 0xff) << 56) | (((long) data[++offset] & 0xff) << 48) |
@ -214,9 +190,6 @@ public class DataBuffer implements Buffer, Externalizable {
(((long) data[++offset] & 0xff) << 8) | ((long) data[++offset] & 0xff);
}
/*
* @see ghidra.framework.store.Buffer#put(int, byte[], int, int)
*/
@Override
public int put(int offset, byte[] bytes, int dataOffset, int length) {
dirty = true;
@ -224,9 +197,6 @@ public class DataBuffer implements Buffer, Externalizable {
return offset + length;
}
/*
* @see ghidra.framework.store.Buffer#put(int, byte[])
*/
@Override
public int put(int offset, byte[] bytes) {
dirty = true;
@ -234,9 +204,6 @@ public class DataBuffer implements Buffer, Externalizable {
return offset + bytes.length;
}
/*
* @see ghidra.framework.store.Buffer#putByte(int, byte)
*/
@Override
public int putByte(int offset, byte b) {
dirty = true;
@ -244,9 +211,6 @@ public class DataBuffer implements Buffer, Externalizable {
return ++offset;
}
/*
* @see ghidra.framework.store.Buffer#putInt(int, int)
*/
@Override
public int putInt(int offset, int v) {
dirty = true;
@ -257,9 +221,6 @@ public class DataBuffer implements Buffer, Externalizable {
return ++offset;
}
/*
* @see ghidra.framework.store.Buffer#putShort(int, short)
*/
@Override
public int putShort(int offset, short v) {
dirty = true;
@ -268,9 +229,6 @@ public class DataBuffer implements Buffer, Externalizable {
return ++offset;
}
/*
* @see ghidra.framework.store.Buffer#putLong(int, long)
*/
@Override
public int putLong(int offset, long v) {
dirty = true;
@ -380,9 +338,8 @@ public class DataBuffer implements Buffer, Externalizable {
int compressedDataOffset = 0;
while (!deflate.finished() && compressedDataOffset < compressedData.length) {
compressedDataOffset +=
deflate.deflate(compressedData, compressedDataOffset, compressedData.length -
compressedDataOffset, Deflater.SYNC_FLUSH);
compressedDataOffset += deflate.deflate(compressedData, compressedDataOffset,
compressedData.length - compressedDataOffset, Deflater.SYNC_FLUSH);
}
if (!deflate.finished()) {
@ -423,6 +380,30 @@ public class DataBuffer implements Buffer, Externalizable {
}
}
/**
 * Perform an unsigned byte-wise comparison between a region of this
 * buffer and the supplied data array.
 * @param otherData other data to be compared
 * @param offset offset within this buffer
 * @param len length of data within this buffer
 * @return negative, zero or positive unsigned comparison result
 * @throws ArrayIndexOutOfBoundsException if specified region is not
 * contained within this buffer.
 */
public int unsignedCompareTo(byte[] otherData, int offset, int len) {
	// Compare the overlapping prefix byte-by-byte as unsigned values.
	int limit = Math.min(len, otherData.length);
	for (int i = 0; i < limit; i++) {
		int thisByte = this.data[offset + i] & 0xff;
		int thatByte = otherData[i] & 0xff;
		if (thisByte != thatByte) {
			return thisByte - thatByte;
		}
	}
	// All compared bytes matched - the shorter region sorts first.
	return len - otherData.length;
}
/**
* Inflate compressedData into a properly sized data array.
* @param compressedData array containing compressed data

View file

@ -1,140 +0,0 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package db;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.NoSuchElementException;
import org.junit.*;
import generic.test.AbstractGenericTest;
import ghidra.util.LongIterator;
public class DBFieldMapTest extends AbstractGenericTest {

	// Map under test; recreated before every test method.
	private DBFieldMap map;

	public DBFieldMapTest() {
	}

	@Before
	public void setUp() throws Exception {
		map = new DBFieldMap(StringField.class, 1);
	}

	@After
	public void tearDown() throws Exception {
		if (map != null) {
			map.dispose();
		}
	}

	// Populate the map with duplicate-field entries in scrambled order so
	// that sorted iteration (by field value, then key) can be verified.
	private void addEntries() {
		String[] names = { "f3", "f2", "f1", "f2", "f4", "f3" };
		long[] keys = { 5, 3, 1, 2, 6, 4 };
		for (int i = 0; i < names.length; i++) {
			map.addEntry(new StringField(names[i]), keys[i]);
		}
	}

	@Test
	public void testAddEntry() {
		addEntries();
		// Every (field, key) pair added above must be present.
		String[] names = { "f1", "f2", "f2", "f3", "f3", "f4" };
		long[] keys = { 1, 2, 3, 4, 5, 6 };
		for (int i = 0; i < names.length; i++) {
			assertTrue(map.hasEntry(new StringField(names[i]), keys[i]));
		}
	}

	@Test
	public void testDeleteEntry() {
		addEntries();
		map.deleteEntry(new StringField("f2"), 2);
		map.deleteEntry(new StringField("f3"), 4);
		// Deleted pairs are gone; duplicates sharing the same field remain.
		assertTrue(map.hasEntry(new StringField("f1"), 1));
		assertTrue(!map.hasEntry(new StringField("f2"), 2));
		assertTrue(map.hasEntry(new StringField("f2"), 3));
		assertTrue(!map.hasEntry(new StringField("f3"), 4));
		assertTrue(map.hasEntry(new StringField("f3"), 5));
		assertTrue(map.hasEntry(new StringField("f4"), 6));
	}

	@Test
	public void testIterator() {
		addEntries();
		LongIterator iter = map.iterator();

		// Forward iteration yields keys in field-sorted order.
		for (long k = 1; k <= 6; k++) {
			assertEquals(k, iter.next());
		}
		try {
			iter.next();
			Assert.fail();
		}
		catch (NoSuchElementException e) {
			// expected
		}

		// Reverse iteration walks back through the same sequence.
		for (long k = 6; k >= 1; k--) {
			assertEquals(k, iter.previous());
		}
		try {
			iter.previous();
			Assert.fail();
		}
		catch (NoSuchElementException e) {
			// expected
		}

		// Direction changes mid-stream revisit the last-returned key.
		for (long k = 1; k <= 4; k++) {
			assertEquals(k, iter.next());
		}
		for (long k = 4; k >= 1; k--) {
			assertEquals(k, iter.previous());
		}
	}

}

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -15,8 +15,7 @@
*/
package db;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.*;
import java.io.File;
import java.io.IOException;
@ -27,6 +26,7 @@ import org.junit.*;
import db.buffers.*;
import generic.test.AbstractGenericTest;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
import utilities.util.FileUtilities;
public class DBIndexedTableTest extends AbstractGenericTest {
@ -130,7 +130,7 @@ public class DBIndexedTableTest extends AbstractGenericTest {
return recs;
}
private long[] matchingKeys(Record[] recs, int columnIx, Record matchRec) {
private Field[] matchingKeys(Record[] recs, int columnIx, Record matchRec) {
ArrayList<Record> recList = new ArrayList<>();
Field f = matchRec.getField(columnIx);
for (Record rec : recs) {
@ -138,12 +138,12 @@ public class DBIndexedTableTest extends AbstractGenericTest {
recList.add(rec);
}
}
long[] keys = new long[recList.size()];
Field[] keys = new Field[recList.size()];
Iterator<Record> iter = recList.iterator();
int i = 0;
while (iter.hasNext()) {
Record rec = iter.next();
keys[i++] = rec.getKey();
keys[i++] = rec.getKeyField();
}
Arrays.sort(keys);
return keys;
@ -156,15 +156,15 @@ public class DBIndexedTableTest extends AbstractGenericTest {
saveAsAndReopen(dbName);
}
Table table = dbh.getTable(table1Name);
int[] indexedColumns = table.getIndexedColumns();
assertEquals(table.getSchema().getFieldClasses().length, indexedColumns.length);
int step = recordCnt / findCnt;
for (int n = 0; n < indexedColumns.length; n++) {
for (int indexColumn : table.getIndexedColumns()) {
for (int i = 0; i < recordCnt; i += step) {
long[] keys = table.findRecords(recs[i].getField(n), n);
Field[] keys = table.findRecords(recs[i].getField(indexColumn), indexColumn);
Arrays.sort(keys);
assertTrue(Arrays.equals(matchingKeys(recs, n, recs[i]), keys));
assertEquals(keys.length, table.getMatchingRecordCount(recs[i].getField(n), n));
assertTrue(Arrays.equals(matchingKeys(recs, indexColumn, recs[i]), keys));
assertEquals(keys.length,
table.getMatchingRecordCount(recs[i].getField(indexColumn), indexColumn));
}
}
}
@ -181,36 +181,30 @@ public class DBIndexedTableTest extends AbstractGenericTest {
dbh.undo();
dbh.redo();
int[] indexedColumns = table.getIndexedColumns();
assertEquals(table.getSchema().getFieldClasses().length, indexedColumns.length);
int max = indexedColumns.length > 3 ? 3 : indexedColumns.length;
for (int n = 0; n < max; n++) {
long startKey = 1500L;
long minKey = 100L;
long maxKey = 5000L;
DBLongIterator iter = table.longKeyIterator();
assertTrue(!iter.hasPrevious());
assertTrue(!iter.hasNext());
long startKey = 1500L;
long minKey = 100L;
long maxKey = 5000L;
DBLongIterator iter = table.longKeyIterator();
assertTrue(!iter.hasPrevious());
assertTrue(!iter.hasNext());
iter = table.longKeyIterator(startKey);
assertTrue(!iter.hasPrevious());
assertTrue(!iter.hasNext());
iter = table.longKeyIterator(startKey);
assertTrue(!iter.hasPrevious());
assertTrue(!iter.hasNext());
iter = table.longKeyIterator(minKey, maxKey, startKey);
assertTrue(!iter.hasPrevious());
assertTrue(!iter.hasNext());
iter = table.longKeyIterator(minKey, maxKey, startKey);
assertTrue(!iter.hasPrevious());
assertTrue(!iter.hasNext());
startKey = -1L;
iter = table.longKeyIterator(minKey, maxKey, startKey);
assertTrue(!iter.hasPrevious());
assertTrue(!iter.hasNext());
startKey = -1L;
iter = table.longKeyIterator(minKey, maxKey, startKey);
assertTrue(!iter.hasPrevious());
assertTrue(!iter.hasNext());
startKey = 10000L;
iter = table.longKeyIterator(minKey, maxKey, startKey);
assertTrue(!iter.hasPrevious());
assertTrue(!iter.hasNext());
}
startKey = 10000L;
iter = table.longKeyIterator(minKey, maxKey, startKey);
assertTrue(!iter.hasPrevious());
assertTrue(!iter.hasNext());
}
@Test
@ -260,12 +254,15 @@ public class DBIndexedTableTest extends AbstractGenericTest {
// Find string and binary columns
int strColumn = -1;
int binColumn = -1;
Class<?>[] fieldClasses = table.getSchema().getFieldClasses();
for (int i = 0; i < fieldClasses.length; i++) {
if (fieldClasses[i].equals(StringField.class)) {
Field[] fields = table.getSchema().getFields();
for (int i = 0; i < fields.length; i++) {
if (!fields[i].isVariableLength()) {
continue;
}
if (fields[i] instanceof StringField) {
strColumn = i;
}
else if (fieldClasses[i].equals(BinaryField.class)) {
else if (fields[i] instanceof BinaryField) {
binColumn = i;
}
}
@ -302,9 +299,7 @@ public class DBIndexedTableTest extends AbstractGenericTest {
assertEquals(recordCnt, table.getRecordCount());
int[] indexedColumns = table.getIndexedColumns();
assertEquals(table.getSchema().getFieldClasses().length, indexedColumns.length);
for (int colIx : indexedColumns) {
for (int colIx : table.getIndexedColumns()) {
Arrays.sort(recs, new RecColumnComparator(colIx));
@ -319,8 +314,8 @@ public class DBIndexedTableTest extends AbstractGenericTest {
// Forward iteration (start in middle - specify primary key)
recIx = recordCnt / 2;
iter =
table.indexIteratorBefore(colIx, recs[recIx].getField(colIx), recs[recIx].getKey());
iter = table.indexIteratorBefore(colIx, recs[recIx].getField(colIx),
recs[recIx].getKeyField());
while (iter.hasNext()) {
Record rec = iter.next();
assertEquals(recs[recIx++], rec);
@ -329,8 +324,8 @@ public class DBIndexedTableTest extends AbstractGenericTest {
// Reverse iteration (end - specify primary key)
recIx = recordCnt - 1;
iter =
table.indexIteratorAfter(colIx, recs[recIx].getField(colIx), recs[recIx].getKey());
iter = table.indexIteratorAfter(colIx, recs[recIx].getField(colIx),
recs[recIx].getKeyField());
while (iter.hasPrevious()) {
Record rec = iter.previous();
assertEquals(recs[recIx--], rec);
@ -339,8 +334,8 @@ public class DBIndexedTableTest extends AbstractGenericTest {
// Reverse iteration (start in middle - specify primary key)
recIx = recordCnt / 2;
iter =
table.indexIteratorAfter(colIx, recs[recIx].getField(colIx), recs[recIx].getKey());
iter = table.indexIteratorAfter(colIx, recs[recIx].getField(colIx),
recs[recIx].getKeyField());
while (iter.hasPrevious()) {
Record rec = iter.previous();
assertEquals(recs[recIx--], rec);
@ -437,9 +432,7 @@ public class DBIndexedTableTest extends AbstractGenericTest {
assertEquals(recordCnt, table.getRecordCount());
}
int[] indexedColumns = table.getIndexedColumns();
assertEquals(table.getSchema().getFieldClasses().length, indexedColumns.length);
for (int colIx : indexedColumns) {
for (int colIx : table.getIndexedColumns()) {
Arrays.sort(recs, new RecColumnComparator(colIx));
int recIx;
@ -491,7 +484,7 @@ public class DBIndexedTableTest extends AbstractGenericTest {
startIx = 0;
recIx = findStart(recs, startIx, colIx);
iter = table.indexIteratorBefore(colIx, recs[startIx].getField(colIx),
recs[startIx].getKey());
recs[startIx].getKeyField());
while (iter.hasNext()) {
Record rec = iter.next();
assertEquals(recs[recIx++], rec);
@ -502,7 +495,7 @@ public class DBIndexedTableTest extends AbstractGenericTest {
startIx = 0;
recIx = findStart(recs, startIx, colIx);
iter = table.indexIteratorBefore(colIx, recs[startIx].getField(colIx),
recs[startIx].getKey());
recs[startIx].getKeyField());
assertTrue(!iter.hasPrevious());
// Forward iteration (before first)
@ -521,14 +514,14 @@ public class DBIndexedTableTest extends AbstractGenericTest {
// Forward iteration (end - specify primary key)
recIx = recordCnt - 1;
iter =
table.indexIteratorAfter(colIx, recs[recIx].getField(colIx), recs[recIx].getKey());
iter = table.indexIteratorAfter(colIx, recs[recIx].getField(colIx),
recs[recIx].getKeyField());
assertTrue(!iter.hasNext());
// Backward iteration (end - specify primary key)
recIx = recordCnt - 1;
iter =
table.indexIteratorAfter(colIx, recs[recIx].getField(colIx), recs[recIx].getKey());
iter = table.indexIteratorAfter(colIx, recs[recIx].getField(colIx),
recs[recIx].getKeyField());
while (iter.hasPrevious()) {
Record rec = iter.previous();
assertEquals(recs[recIx--], rec);
@ -551,8 +544,8 @@ public class DBIndexedTableTest extends AbstractGenericTest {
// Forward iteration (start in middle - specify primary key)
recIx = recordCnt / 2;
iter =
table.indexIteratorBefore(colIx, recs[recIx].getField(colIx), recs[recIx].getKey());
iter = table.indexIteratorBefore(colIx, recs[recIx].getField(colIx),
recs[recIx].getKeyField());
while (iter.hasNext()) {
Record rec = iter.next();
assertEquals(recs[recIx++], rec);
@ -561,8 +554,8 @@ public class DBIndexedTableTest extends AbstractGenericTest {
// Backward iteration (start in middle - specify primary key)
recIx = recordCnt / 2;
iter =
table.indexIteratorAfter(colIx, recs[recIx].getField(colIx), recs[recIx].getKey());
iter = table.indexIteratorAfter(colIx, recs[recIx].getField(colIx),
recs[recIx].getKeyField());
while (iter.hasPrevious()) {
Record rec = iter.previous();
assertEquals(recs[recIx--], rec);
@ -943,7 +936,7 @@ public class DBIndexedTableTest extends AbstractGenericTest {
* @throws IOException
*/
private void deleteIteratedIndexFields(int recordCnt, int testColIx, long keyIncrement,
int varDataSize) throws IOException {
int varDataSize) throws Exception {
Record[] recs = null;
if (keyIncrement == 0) {
@ -989,6 +982,10 @@ public class DBIndexedTableTest extends AbstractGenericTest {
assertEquals(fieldCnt, cnt);
assertEquals(0, table.getRecordCount());
}
catch (Exception e) {
e.printStackTrace();
throw e;
}
finally {
dbh.deleteTable(table1Name);
dbh.endTransaction(txId, true);
@ -1066,7 +1063,7 @@ public class DBIndexedTableTest extends AbstractGenericTest {
private class RecColumnComparator implements Comparator<Record> {
int columnIx;
final int columnIx;
RecColumnComparator(int columnIx) {
this.columnIx = columnIx;
@ -1160,12 +1157,12 @@ public class DBIndexedTableTest extends AbstractGenericTest {
public void testRecordIteratorExtents() throws IOException {
Record[] recs = null;
recs = createOrderedRecordRange(DBTestUtils.SINGLE_BYTE, 30, 2, 1);
recs = createOrderedRecordRange(DBTestUtils.SINGLE_SHORT, 30, 2, 1);
Table table = dbh.getTable(table1Name);
assertEquals(recs.length, table.getRecordCount());
int[] indexedColumns = table.getIndexedColumns();
assertEquals(table.getSchema().getFieldClasses().length, indexedColumns.length);
assertEquals(1, indexedColumns.length);
// Backward Range iterator
int colIx = 0;
@ -1173,8 +1170,8 @@ public class DBIndexedTableTest extends AbstractGenericTest {
int recIx = recs.length - 1;
// RecordIterator iter = table.indexIterator(colIx, recs[minIx].getField(colIx),
// recs[maxIx].getField(colIx), false);
Field minField = new ByteField(Byte.MIN_VALUE);
Field maxField = new ByteField(Byte.MAX_VALUE);
Field minField = new ShortField(Short.MIN_VALUE);
Field maxField = new ShortField(Short.MAX_VALUE);
RecordIterator iter = table.indexIterator(colIx, minField, maxField, false);
while (iter.hasPrevious()) {
Record rec = iter.previous();
@ -1202,22 +1199,67 @@ public class DBIndexedTableTest extends AbstractGenericTest {
@Test
public void testRecordIteratorDelete() throws IOException {
for (int colIx = 0; colIx < 6; colIx++) {
for (int colIx : DBTestUtils.getIndexedColumns(DBTestUtils.ALL_TYPES)) {
deleteIteratedRecords(ITER_REC_CNT, colIx, 1, 1);
}
for (int colIx = 0; colIx < 6; colIx++) {
for (int colIx : DBTestUtils.getIndexedColumns(DBTestUtils.ALL_TYPES)) {
deleteIteratedRecords(ITER_REC_CNT, colIx, 0, 1);
}
}
@Test
public void testIndexFieldIteratorDelete() throws IOException {
for (int colIx = 0; colIx < 6; colIx++) {
public void testIndexFieldIteratorDelete() throws Exception {
for (int colIx : DBTestUtils.getIndexedColumns(DBTestUtils.ALL_TYPES)) {
deleteIteratedIndexFields(ITER_REC_CNT, colIx, 1, 1);
}
for (int colIx = 0; colIx < 6; colIx++) {
for (int colIx : DBTestUtils.getIndexedColumns(DBTestUtils.ALL_TYPES)) {
deleteIteratedIndexFields(ITER_REC_CNT, colIx, 0, 1);
}
}
@Test
public void testConsistencyAndIndexRebuild() throws IOException {
Record[] recs = createRandomTableRecords(DBTestUtils.ALL_TYPES, ITER_REC_CNT, 10);
long txId = dbh.startTransaction();
try {
assertTrue(dbh.isConsistent(TaskMonitor.DUMMY));
dbh.rebuild(TaskMonitor.DUMMY);
}
catch (CancelledException e) {
fail("unexpected cancel exception");
}
finally {
dbh.endTransaction(txId, true);
}
Table table = dbh.getTable(table1Name);
for (int colIx : table.getIndexedColumns()) {
Arrays.sort(recs, new RecColumnComparator(colIx));
int recIx = 0;
RecordIterator iter = table.indexIterator(colIx);
while (iter.hasNext()) {
Record rec = iter.next();
assertEquals(recs[recIx++], rec);
}
assertEquals(ITER_REC_CNT, recIx);
}
saveAsAndReopen(dbName);
table = dbh.getTable(table1Name);
for (int colIx : table.getIndexedColumns()) {
Arrays.sort(recs, new RecColumnComparator(colIx));
int recIx = 0;
RecordIterator iter = table.indexIterator(colIx);
while (iter.hasNext()) {
Record rec = iter.next();
assertEquals(recs[recIx++], rec);
}
assertEquals(ITER_REC_CNT, recIx);
}
}
}

View file

@ -69,8 +69,8 @@ public class DBLongKeyChainedBufferUseTest extends AbstractGenericTest {
long txId = dbh.startTransaction();
Schema schema = new Schema(0, "Enum ID",
new Class[] { StringField.class, StringField.class, LongField.class, ByteField.class,
ShortField.class, IntField.class },
new Field[] { StringField.INSTANCE, StringField.INSTANCE, LongField.INSTANCE,
ByteField.INSTANCE, ShortField.INSTANCE, IntField.INSTANCE },
new String[] { "str1", "str2", "long", "byte", "short", "int" });
Table table = dbh.createTable("TABLE1", schema);
@ -108,8 +108,8 @@ public class DBLongKeyChainedBufferUseTest extends AbstractGenericTest {
long txId = dbh.startTransaction();
Schema schema = new Schema(0, "Enum ID",
new Class[] { StringField.class, StringField.class, LongField.class, ByteField.class,
ShortField.class, IntField.class },
new Field[] { StringField.INSTANCE, StringField.INSTANCE, LongField.INSTANCE,
ByteField.INSTANCE, ShortField.INSTANCE, IntField.INSTANCE },
new String[] { "str1", "str2", "long", "byte", "short", "int" });
Table table = dbh.createTable("TABLE1", schema);

View file

@ -265,8 +265,7 @@ public class DBTest extends AbstractGenericTest {
for (TableRecord tableRecord : tableRecords) {
if (tableRecord.getIndexedColumn() < 0) {
if (tableCnt > 0) {
Schema schema = lastTable.getSchema();
assertEquals(schema.getFieldClasses().length, indexCnt);
assertEquals(DBTestUtils.getIndexedColumnCount(tableCnt - 1), indexCnt);
}
String name = "TABLE" + tableCnt;
lastTable = dbh.getTable(name);
@ -281,7 +280,9 @@ public class DBTest extends AbstractGenericTest {
if (lastTable == null) {
Assert.fail();
}
assertEquals(indexCnt, tableRecord.getIndexedColumn());
int[] indexedColumns = DBTestUtils.getIndexedColumns(tableCnt - 1);
assertTrue(indexCnt < indexedColumns.length);
assertEquals(indexedColumns[indexCnt], tableRecord.getIndexedColumn());
assertEquals(lastTable.getName(), tableRecord.getName());
assertEquals(Long.MIN_VALUE, tableRecord.getMaxKey());
assertEquals(0, tableRecord.getRecordCount());
@ -290,8 +291,7 @@ public class DBTest extends AbstractGenericTest {
}
}
Schema schema = lastTable.getSchema();
assertEquals(schema.getFieldClasses().length, indexCnt);
assertEquals(DBTestUtils.getIndexedColumnCount(tableCnt - 1), indexCnt);
assertEquals(DBTestUtils.MAX_SCHEMA_TYPE + 1, tableCnt);
}
@ -387,8 +387,7 @@ public class DBTest extends AbstractGenericTest {
for (TableRecord tableRecord : tableRecords) {
if (tableRecord.getIndexedColumn() < 0) {
if (tableCnt > 0) {
Schema schema = lastTable.getSchema();
assertEquals(schema.getFieldClasses().length, indexCnt);
assertEquals(DBTestUtils.getIndexedColumnCount(2 * (tableCnt - 1)), indexCnt);
}
String name = "TABLE" + (2 * tableCnt);
lastTable = dbh.getTable(name);
@ -403,7 +402,9 @@ public class DBTest extends AbstractGenericTest {
if (lastTable == null) {
Assert.fail();
}
assertEquals(indexCnt, tableRecord.getIndexedColumn());
int[] indexedColumns = DBTestUtils.getIndexedColumns(2 * (tableCnt - 1));
assertTrue(indexCnt < indexedColumns.length);
assertEquals(indexedColumns[indexCnt], tableRecord.getIndexedColumn());
assertEquals(lastTable.getName(), tableRecord.getName());
assertEquals(Long.MIN_VALUE, tableRecord.getMaxKey());
assertEquals(0, tableRecord.getRecordCount());
@ -413,7 +414,7 @@ public class DBTest extends AbstractGenericTest {
}
Schema schema = lastTable.getSchema();
assertEquals(schema.getFieldClasses().length, indexCnt);
assertEquals(schema.getFields().length, indexCnt);
assertEquals(totalTableCnt, tableCnt);
}

View file

@ -17,6 +17,8 @@ package db;
import java.io.File;
import java.io.IOException;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Random;
import org.junit.Assert;
@ -31,50 +33,85 @@ public class DBTestUtils {
// Schema Types
static final int EMPTY = 0;
static final int SINGLE_BYTE = 1;
static final int SINGLE_INT = 2;
static final int SINGLE_SHORT = 3;
static final int SINGLE_LONG = 4;
static final int SINGLE_STRING = 5;
static final int SINGLE_BINARY = 6;
static final int ALL_TYPES = 7;
static final int SINGLE_BOOLEAN = 1;
static final int SINGLE_BYTE = 2;
static final int SINGLE_INT = 3;
static final int SINGLE_SHORT = 4;
static final int SINGLE_LONG = 5;
static final int SINGLE_STRING = 6;
static final int SINGLE_BINARY = 7;
static final int SINGLE_FIXED = 8;
static final int ALL_TYPES = 9;
static final int MAX_SCHEMA_TYPE = 7;
static final int MAX_SCHEMA_TYPE = 9;
private static Class<?>[][] schemaFields = { {}, // no columns
{ ByteField.class }, { IntField.class }, { ShortField.class }, { LongField.class },
{ StringField.class }, { BinaryField.class }, { ByteField.class, IntField.class,
ShortField.class, LongField.class, StringField.class, BinaryField.class } };
//@formatter:off
private static final Field[][] schemaFields = {
{}, // no columns
{ BooleanField.INSTANCE },
{ ByteField.INSTANCE },
{ IntField.INSTANCE },
{ ShortField.INSTANCE },
{ LongField.INSTANCE },
{ StringField.INSTANCE },
{ BinaryField.INSTANCE },
{ FixedField10.INSTANCE },
{ BooleanField.INSTANCE, ByteField.INSTANCE, IntField.INSTANCE, ShortField.INSTANCE,
LongField.INSTANCE, StringField.INSTANCE, BinaryField.INSTANCE, FixedField10.INSTANCE } };
//@formatter:on
private static String[][] schemaFieldNames = { {}, // no columns
{ "Byte" }, { "Int" }, { "Short" }, { "Long" }, { "String" }, { "Binary" },
{ "Byte", "Int", "Short", "Long", "String", "Binary" } };
private static final int[][] schemaIndexedColumns =
{ {}, {}, {}, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 0 }, { 2, 3, 4, 5, 6, 7 } };
private static Schema[] longKeySchemas =
{ new Schema(0, "LongKey", schemaFields[0], schemaFieldNames[0]),
new Schema(0, "LongKey", schemaFields[1], schemaFieldNames[1]),
new Schema(0, "LongKey", schemaFields[2], schemaFieldNames[2]),
new Schema(0, "LongKey", schemaFields[3], schemaFieldNames[3]),
new Schema(0, "LongKey", schemaFields[4], schemaFieldNames[4]),
new Schema(0, "LongKey", schemaFields[5], schemaFieldNames[5]),
new Schema(0, "LongKey", schemaFields[6], schemaFieldNames[6]),
new Schema(0, "LongKey", schemaFields[7], schemaFieldNames[7]) };
//@formatter:off
private static final String[][] schemaFieldNames = {
{}, // no columns
{ "Boolean" }, { "Byte" }, { "Int" }, { "Short" }, { "Long" },
{ "String" }, { "Binary" }, { "Fixed" },
{ "Boolean", "Byte", "Int", "Short", "Long", "String", "Binary", "Fixed" }
};
//@formatter:on
private static Field varKeyType = new BinaryField();
private static Class<? extends Field> varKeyClass = varKeyType.getClass();
private static final Schema[] longKeySchemas;
static {
longKeySchemas = new Schema[MAX_SCHEMA_TYPE + 1];
for (int i = 0; i < longKeySchemas.length; i++) {
longKeySchemas[i] = new Schema(0, "LongKey", schemaFields[i], schemaFieldNames[i]);
}
}
private static Schema[] binaryKeySchemas =
{ new Schema(0, varKeyClass, "VarKey", schemaFields[0], schemaFieldNames[0]),
new Schema(0, varKeyClass, "VarKey", schemaFields[1], schemaFieldNames[1]),
new Schema(0, varKeyClass, "VarKey", schemaFields[2], schemaFieldNames[2]),
new Schema(0, varKeyClass, "VarKey", schemaFields[3], schemaFieldNames[3]),
new Schema(0, varKeyClass, "VarKey", schemaFields[4], schemaFieldNames[4]),
new Schema(0, varKeyClass, "VarKey", schemaFields[5], schemaFieldNames[5]),
new Schema(0, varKeyClass, "VarKey", schemaFields[6], schemaFieldNames[6]),
new Schema(0, varKeyClass, "VarKey", schemaFields[7], schemaFieldNames[7]) };
private static final Field fixedKeyType = new FixedField10();
private static final Schema[] fixedKeySchemas;
static {
fixedKeySchemas = new Schema[MAX_SCHEMA_TYPE + 1];
for (int i = 0; i < fixedKeySchemas.length; i++) {
fixedKeySchemas[i] =
new Schema(0, fixedKeyType, "FixedKey", schemaFields[i], schemaFieldNames[i]);
}
}
private static final Field varKeyType = new BinaryField();
private static final Schema[] binaryKeySchemas;
static {
binaryKeySchemas = new Schema[MAX_SCHEMA_TYPE + 1];
for (int i = 0; i < binaryKeySchemas.length; i++) {
binaryKeySchemas[i] =
new Schema(0, varKeyType, "VarKey", schemaFields[i], schemaFieldNames[i]);
}
}
static Random random = new Random(0x123456789L);
	/**
	 * Get the column ordinals which are indexed for the specified schema type.
	 * @param schemaType schema type (one of the static schema type identifiers,
	 *            e.g. {@link #ALL_TYPES})
	 * @return indexed column ordinals for that schema (shared array - do not modify)
	 */
	static int[] getIndexedColumns(int schemaType) {
		return schemaIndexedColumns[schemaType];
	}
	/**
	 * Get the number of indexed columns for the specified schema type.
	 * @param schemaType schema type (one of the static schema type identifiers)
	 * @return indexed column count for that schema
	 */
	static int getIndexedColumnCount(int schemaType) {
		return schemaIndexedColumns[schemaType].length;
	}
/**
* Create a new long-keyed table within the specified database.
* @param db database handle
@ -89,11 +126,8 @@ public class DBTestUtils {
Table t;
int indexCnt = 0;
if (createIndex) {
indexCnt = schemaFields[schemaType].length;
int[] indexedColumns = new int[indexCnt];
for (int i = 0; i < indexedColumns.length; i++) {
indexedColumns[i] = i;
}
indexCnt = getIndexedColumnCount(schemaType);
int[] indexedColumns = getAllowedIndexColumns(schemaFields[schemaType]);
t = db.createTable(name, longKeySchemas[schemaType], indexedColumns);
}
else {
@ -108,6 +142,49 @@ public class DBTestUtils {
return t;
}
static int[] getAllowedIndexColumns(Field[] columnFields) {
ArrayList<Integer> list = new ArrayList<>();
for (int i = 0; i < columnFields.length; i++) {
if (Field.canIndex(columnFields[i])) {
list.add(i);
}
}
int[] columnIndexes = new int[list.size()];
for (int i = 0; i < columnIndexes.length; i++) {
columnIndexes[i] = list.get(i);
}
return columnIndexes;
}
/**
* Create a new FixedField-keyed table within the specified database.
* @param db database handle
* @param name name of table
* @param schemaType type of schema (use static identifier)
* @param createIndex all fields will be indexed if true
* @return Table new table
* @throws IOException
*/
static Table createFixedKeyTable(DBHandle db, String name, int schemaType, boolean createIndex)
throws IOException {
Table t;
if (createIndex) {
int[] indexedColumns = getAllowedIndexColumns(schemaFields[schemaType]);
t = db.createTable(name, fixedKeySchemas[schemaType], indexedColumns);
Assert.assertArrayEquals(schemaIndexedColumns[schemaType], t.getIndexedColumns());
}
else {
t = db.createTable(name, fixedKeySchemas[schemaType]);
Assert.assertEquals(0, t.getIndexedColumns().length);
}
Assert.assertEquals(name, t.getName());
Assert.assertEquals(Long.MIN_VALUE, t.getMaxKey());
Assert.assertEquals(0, t.getRecordCount());
Assert.assertEquals(fixedKeySchemas[schemaType], t.getSchema());
Assert.assertTrue(!t.useLongKeys());
return t;
}
/**
* Create a new BinaryField-keyed table within the specified database.
* @param db database handle
@ -122,11 +199,7 @@ public class DBTestUtils {
Table t;
int indexCnt = 0;
if (createIndex) {
indexCnt = schemaFields[schemaType].length;
int[] indexedColumns = new int[indexCnt];
for (int i = 0; i < indexedColumns.length; i++) {
indexedColumns[i] = i;
}
int[] indexedColumns = getAllowedIndexColumns(schemaFields[schemaType]);
t = db.createTable(name, binaryKeySchemas[schemaType], indexedColumns);
}
else {
@ -181,6 +254,33 @@ public class DBTestUtils {
}
}
/**
* Create a new random-FixedField-keyed record.
* @param table
* @param varDataSize
* @param doInsert
* @return Record
* @throws IOException
* @throws DuplicateKeyException
*/
static Record createFixedKeyRecord(Table table, int varDataSize, boolean doInsert)
throws IOException, DuplicateKeyException {
int keyLength = 10;
byte[] bytes = new byte[keyLength];
random.nextBytes(bytes);
Field key = fixedKeyType.newField();
key.setBinaryData(bytes);
try {
Record rec = createRecord(table, key, varDataSize, doInsert);
Assert.assertEquals(key, rec.getKeyField());
return rec;
}
catch (DuplicateKeyException dke) {
return createFixedKeyRecord(table, varDataSize, doInsert);
}
}
/**
* Create a new random-BinaryField-keyed record.
* @param table
@ -278,6 +378,31 @@ public class DBTestUtils {
return rec;
}
	/**
	 * Add the specified increment to the unsigned big-endian value held by a
	 * {@link FixedField} and return the result as a new field of the same
	 * fixed byte length.  The input field is not modified.
	 * NOTE(review): assumes the result remains non-negative; a negative
	 * increment which underflows past zero is not handled here - confirm
	 * callers only apply small non-negative adjustments.
	 * @param fixedField field supplying the starting value (must be a FixedField)
	 * @param increment amount to add
	 * @return new field of identical length containing the incremented value
	 * @throws UnsupportedOperationException if the result no longer fits
	 *             within the fixed field length
	 */
	static FixedField addToFixedField(Field fixedField, long increment) {
		FixedField f = (FixedField) fixedField;
		byte[] valueBytes = f.getBinaryData();
		// Interpret the stored bytes as an unsigned (sign-positive) big-endian value
		BigInteger v = new BigInteger(1, valueBytes);
		v = v.add(BigInteger.valueOf(increment));
		byte[] resultBytes = v.toByteArray();
		if (resultBytes.length > valueBytes.length) {
			// toByteArray prepends at most one zero sign byte for a positive
			// value; any other growth means the value overflowed the fixed length
			if (resultBytes[0] != 0) {
				throw new UnsupportedOperationException("overflow in test data");
			}
			// Strip the leading sign byte to restore the fixed length
			byte[] b = new byte[valueBytes.length];
			System.arraycopy(resultBytes, 1, b, 0, valueBytes.length);
			resultBytes = b;
		}
		else if (resultBytes.length < valueBytes.length) {
			// Left-pad with zeros to restore the fixed length
			byte[] b = new byte[valueBytes.length];
			System.arraycopy(resultBytes, 0, b, valueBytes.length - resultBytes.length,
				resultBytes.length);
			resultBytes = b;
		}
		FixedField r = f.newField();
		r.setBinaryData(resultBytes);
		return r;
	}
/**
* Create a new record whose value is in the center portion of the valid
* values range for byte, short, int, or long.
@ -359,7 +484,10 @@ public class DBTestUtils {
Field[] fields = rec.getFields();
for (int i = 0; i < fields.length; i++) {
if (fields[i] instanceof ByteField) {
if (fields[i] instanceof BooleanField) {
rec.setBooleanValue(i, (random.nextInt() % 2) == 0);
}
else if (fields[i] instanceof ByteField) {
rec.setByteValue(i, (byte) random.nextInt());
}
else if (fields[i] instanceof ShortField) {
@ -389,7 +517,7 @@ public class DBTestUtils {
}
}
else if (fields[i] instanceof BinaryField) {
int size = varDataSize;
int size = fields[i].isVariableLength() ? varDataSize : fields[i].length();
if (size < 0) {
size = random.nextInt(6) - 1;
}

View file

@ -35,18 +35,19 @@ public class TableTest extends AbstractGenericTest {
private static final int BUFFER_SIZE = 256;
private static final int CACHE_SIZE = 4 * 1024 * 1024;
private static final Class<?>[] FIXED_SIZE_SCHEMA_FIELD_CLASSES =
new Class[] { LongField.class, IntField.class, ShortField.class };
private static final Class<?>[] VARIABLE_SIZE_SCHEMA_FIELD_CLASSES =
new Class[] { StringField.class, };
private static final Field[] FIXED_SIZE_SCHEMA_FIELDS = new Field[] { LongField.INSTANCE,
IntField.INSTANCE, ShortField.INSTANCE, FixedField10.INSTANCE };
private static final Field[] VARIABLE_SIZE_SCHEMA_FIELDS =
new Field[] { StringField.INSTANCE, };
private static final String[] FIXED_SIZE_SCHEMA_COLUMN_NAMES = { "Long1", "Int2", "Short3" };
private static final String[] FIXED_SIZE_SCHEMA_COLUMN_NAMES =
{ "Long1", "Int2", "Short3", "Fixed4" };
private static final String[] VARIABLE_SIZE_SCHEMA_COLUMN_NAMES = { "String" };
private static final Schema FIXED_SIZE_SCHEMA =
new Schema(0, "LongKey", FIXED_SIZE_SCHEMA_FIELD_CLASSES, FIXED_SIZE_SCHEMA_COLUMN_NAMES);
private static final Schema VARIABLE_SIZE_SCHEMA = new Schema(0, "LongKey",
VARIABLE_SIZE_SCHEMA_FIELD_CLASSES, VARIABLE_SIZE_SCHEMA_COLUMN_NAMES);
new Schema(0, "LongKey", FIXED_SIZE_SCHEMA_FIELDS, FIXED_SIZE_SCHEMA_COLUMN_NAMES);
private static final Schema VARIABLE_SIZE_SCHEMA =
new Schema(0, "LongKey", VARIABLE_SIZE_SCHEMA_FIELDS, VARIABLE_SIZE_SCHEMA_COLUMN_NAMES);
private static final int BUFFER_COUNT = 5;
private static final int FIRST_KEY = 0;
private static final int END_KEY = BUFFER_COUNT * 100 - 10;
@ -272,6 +273,9 @@ public class TableTest extends AbstractGenericTest {
Record rec = schema.createRecord(i * RECORD_KEY_SPACING);
if (fixedSize) {
rec.setLongValue(0, i);
rec.setIntValue(1, i);
rec.setShortValue(2, (short) i);
rec.setField(3, FixedField10.INSTANCE.getMaxValue());
}
else {
rec.setString(0, "abcdef");

View file

@ -600,7 +600,7 @@ public abstract class AbstractLocalFileSystemTest extends AbstractGenericTest {
DBHandle dbh = new DBHandle();
long id = dbh.startTransaction();
dbh.createTable("test",
new Schema(0, "key", new Class[] { IntField.class }, new String[] { "dummy" }));
new Schema(0, "key", new Field[] { IntField.INSTANCE }, new String[] { "dummy" }));
dbh.endTransaction(id, true);
BufferFile bf =
fs.createDatabase("/abc", "fred", null, "Database", dbh.getBufferSize(), "bob", null);
@ -741,7 +741,7 @@ public abstract class AbstractLocalFileSystemTest extends AbstractGenericTest {
DBHandle dbh = new DBHandle();
long id = dbh.startTransaction();
dbh.createTable("test",
new Schema(0, "key", new Class[] { IntField.class }, new String[] { "dummy" }));
new Schema(0, "key", new Field[] { IntField.INSTANCE }, new String[] { "dummy" }));
dbh.endTransaction(id, true);
BufferFile bf =
fs.createDatabase("/abc", "greg", "123", "Database", dbh.getBufferSize(), "test", null);
@ -789,7 +789,7 @@ public abstract class AbstractLocalFileSystemTest extends AbstractGenericTest {
DBHandle dbh = new DBHandle();
long id = dbh.startTransaction();
dbh.createTable("test",
new Schema(0, "key", new Class[] { IntField.class }, new String[] { "dummy" }));
new Schema(0, "key", new Field[] { IntField.INSTANCE }, new String[] { "dummy" }));
dbh.endTransaction(id, true);
BufferFile bf =
fs.createDatabase("/abc", "greg", "123", "Database", dbh.getBufferSize(), "test", null);
@ -933,7 +933,7 @@ public abstract class AbstractLocalFileSystemTest extends AbstractGenericTest {
DBHandle dbh = new DBHandle();
long id = dbh.startTransaction();
dbh.createTable("test",
new Schema(0, "key", new Class[] { IntField.class }, new String[] { "dummy" }));
new Schema(0, "key", new Field[] { IntField.INSTANCE }, new String[] { "dummy" }));
dbh.endTransaction(id, true);
BufferFile bf = fs.createDatabase(folderPath, itemName, fileId, "Database",
dbh.getBufferSize(), "test", null);

View file

@ -35,9 +35,10 @@ public class RecoveryDBTest extends AbstractGenericTest {
private static int RECORD_COUNT = 1000;
private static Schema SCHEMA =
new Schema(1, "key", new Class[] { StringField.class }, new String[] { "field1" });
new Schema(1, "key", new Field[] { StringField.INSTANCE }, new String[] { "field1" });
private static final File testDir = new File(AbstractGenericTest.getTestDirectoryPath(), "test");
private static final File testDir =
new File(AbstractGenericTest.getTestDirectoryPath(), "test");
private LocalFileSystem fileSystem;

View file

@ -33,7 +33,7 @@ import utilities.util.FileUtilities;
public class PackedDatabaseTest extends AbstractGenericTest {
private static final Schema TEST_SCHEMA =
new Schema(1, "Key", new Class[] { StringField.class }, new String[] { "Col1" });
new Schema(1, "Key", new Field[] { StringField.INSTANCE }, new String[] { "Col1" });
private File packedDbFile;
private PackedDatabase db;

View file

@ -32,8 +32,9 @@ class OptionsDB extends AbstractOptions {
private static final String PROPERTY_TABLE_NAME = "Property Table";
private final static Schema PROPERTY_SCHEMA = new Schema(0, StringField.class, "Property Name",
new Class[] { StringField.class, ByteField.class }, new String[] { "Value", "Type" });
private final static Schema PROPERTY_SCHEMA = new Schema(0, StringField.INSTANCE,
"Property Name", new Field[] { StringField.INSTANCE, ByteField.INSTANCE },
new String[] { "Value", "Type" });
private static final int VALUE_COL = 0;
private static final int TYPE_COL = 1;
@ -81,8 +82,8 @@ class OptionsDB extends AbstractOptions {
throw new IllegalArgumentException("property alteration old-path may not be null");
}
if (path != null && path.endsWith(DELIMITER_STRING)) {
throw new IllegalArgumentException("property alteration paths must not end with '" +
DELIMITER + "': " + path);
throw new IllegalArgumentException(
"property alteration paths must not end with '" + DELIMITER + "': " + path);
}
}
@ -118,8 +119,8 @@ class OptionsDB extends AbstractOptions {
String keyName = ((StringField) rec.getKeyField()).getString();
if (keyName.startsWith(oldSubListPath)) {
iterator.delete();
rec.setKey(new StringField(newSubListPath +
keyName.substring(oldSubListPath.length())));
rec.setKey(
new StringField(newSubListPath + keyName.substring(oldSubListPath.length())));
list.add(rec);
}
else {

View file

@ -158,6 +158,7 @@ public class DBObjectCache<T extends DatabaseObject> {
* within the specified keyRanges.
* @param keyRanges key ranges to delete
*/
//TODO: Discourage large cases by only allowing a single range to be specified
public synchronized void delete(List<KeyRange> keyRanges) {
hardCache.clear();
processQueue();

View file

@ -45,8 +45,11 @@ public class DataTypeArchiveDB extends DomainObjectAdapterDB
* database schema associated with any of the managers.
* 18-Sep-2008 - version 1 - added fields for synchronizing program data types with project archives.
* 03-Dec-2009 - version 2 - Added source archive updating (consolidating windows.gdt, clib.gdt, ntddk.gdt)
* 14-Nov-2019 - version 3 - Corrected fixed length indexing implementation causing
* change in index table low-level storage for newly
* created tables.
*/
static final int DB_VERSION = 2;
static final int DB_VERSION = 3;
/**
* UPGRADE_REQUIRED_BEFORE_VERSION should be changed to DB_VERSION any time the
@ -76,10 +79,10 @@ public class DataTypeArchiveDB extends DomainObjectAdapterDB
private static final String DEFAULT_POINTER_SIZE = "Default Pointer Size";
private final static Class<?>[] COL_CLASS = new Class[] { StringField.class };
private final static Field[] COL_FIELDS = new Field[] { StringField.INSTANCE };
private final static String[] COL_TYPES = new String[] { "Value" };
private final static Schema SCHEMA =
new Schema(0, StringField.class, "Key", COL_CLASS, COL_TYPES);
new Schema(0, StringField.INSTANCE, "Key", COL_FIELDS, COL_TYPES);
private ProjectDataTypeManager dataTypeManager;

View file

@ -30,7 +30,7 @@ import ghidra.program.model.listing.DataTypeArchiveChangeSet;
class DataTypeArchiveDBChangeSet implements DataTypeArchiveChangeSet, DomainObjectDBChangeSet {
private static final Schema STORED_ID_SCHEMA =
new Schema(0, "Key", new Class[] { LongField.class }, new String[] { "value" });
new Schema(0, "Key", new Field[] { LongField.INSTANCE }, new String[] { "value" });
private static final String DATATYPE_ADDITIONS = "DataType Additions";
private static final String DATATYPE_CHANGES = "DataType Changes";

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,6 +15,10 @@
*/
package ghidra.program.database;
import java.util.ConcurrentModificationException;
import db.*;
import db.util.ErrorHandler;
import ghidra.program.database.map.AddressMap;
import ghidra.program.database.util.AddressRangeMapDB;
import ghidra.program.model.address.*;
@ -25,11 +28,6 @@ import ghidra.util.exception.CancelledException;
import ghidra.util.exception.DuplicateNameException;
import ghidra.util.task.TaskMonitor;
import java.util.ConcurrentModificationException;
import db.*;
import db.util.ErrorHandler;
public class IntRangeMapDB implements IntRangeMap {
private static final String MY_PREFIX = "IntMap - ";
@ -65,8 +63,8 @@ public class IntRangeMapDB implements IntRangeMap {
DBHandle dbh = program.getDBHandle();
String tableName = TABLE_PREFIX + mapName;
if (dbh.getTable(tableName) != null) {
throw new DuplicateNameException("Address Set Property Map named " + mapName +
" already exists.");
throw new DuplicateNameException(
"Address Set Property Map named " + mapName + " already exists.");
}
return new IntRangeMapDB(program, mapName, program, addrMap, lock);
@ -82,9 +80,8 @@ public class IntRangeMapDB implements IntRangeMap {
this.mapName = mapName;
this.lock = lock;
propertyMap =
new AddressRangeMapDB(program.getDBHandle(), program.getAddressMap(),
program.getLock(), MY_PREFIX + mapName, errHandler, IntField.class, true);
propertyMap = new AddressRangeMapDB(program.getDBHandle(), program.getAddressMap(),
program.getLock(), MY_PREFIX + mapName, errHandler, IntField.INSTANCE, true);
}

View file

@ -30,7 +30,8 @@ import ghidra.util.exception.DuplicateNameException;
class OverlaySpaceAdapterDB {
private static String TABLE_NAME = "Overlay Spaces";
static final Schema SCHEMA = new Schema(0, "ID",
new Class[] { StringField.class, StringField.class, LongField.class, LongField.class },
new Field[] { StringField.INSTANCE, StringField.INSTANCE, LongField.INSTANCE,
LongField.INSTANCE },
new String[] { "Overlay Space", "Template Space", "Minimum Offset", "Maximum Offset" });
private static final int OV_SPACE_NAME_COL = 0;

View file

@ -93,8 +93,11 @@ public class ProgramDB extends DomainObjectAdapterDB implements Program, ChangeM
* Read of old symbol data3 format does not require upgrade.
* 14-May-2020 - version 21 - added support for overlay mapped blocks and byte mapping
* schemes other than the default 1:1
* 19-Jun-2020 - version 22 - Corrected fixed length indexing implementation causing
* change in index table low-level storage for newly
* created tables.
*/
static final int DB_VERSION = 21;
static final int DB_VERSION = 22;
/**
* UPGRADE_REQUIRED_BFORE_VERSION should be changed to DB_VERSION anytime the
@ -133,10 +136,10 @@ public class ProgramDB extends DomainObjectAdapterDB implements Program, ChangeM
private static final String EXECUTE_FORMAT = "Execute Format";
private static final String IMAGE_OFFSET = "Image Offset";
private final static Class<?>[] COL_CLASS = new Class[] { StringField.class };
private final static Field[] COL_FIELDS = new Field[] { StringField.INSTANCE };
private final static String[] COL_TYPES = new String[] { "Value" };
private final static Schema SCHEMA =
new Schema(0, StringField.class, "Key", COL_CLASS, COL_TYPES);
new Schema(0, StringField.INSTANCE, "Key", COL_FIELDS, COL_TYPES);
//
// The numbering of managers controls the order in which they are notified.

View file

@ -34,10 +34,10 @@ import ghidra.program.model.listing.ProgramChangeSet;
class ProgramDBChangeSet implements ProgramChangeSet, DomainObjectDBChangeSet {
private static final Schema STORED_ID_SCHEMA =
new Schema(0, "Key", new Class[] { LongField.class }, new String[] { "value" });
new Schema(0, "Key", new Field[] { LongField.INSTANCE }, new String[] { "value" });
private static final Schema STORED_ADDRESS_RANGE_SCHEMA = new Schema(0, "Key",
new Class[] { LongField.class, LongField.class }, new String[] { "addr1", "addr2" });
new Field[] { LongField.INSTANCE, LongField.INSTANCE }, new String[] { "addr1", "addr2" });
private static final String DATATYPE_ADDITIONS = "DataType Additions";
private static final String DATATYPE_CHANGES = "DataType Changes";

View file

@ -51,7 +51,7 @@ class ProgramUserDataDB extends DomainObjectAdapterDB implements ProgramUserData
* DB_VERSION should be incremented any time a change is made to the overall
* database schema associated with any of the managers.
*/
static final int DB_VERSION = 1;
static final int DB_VERSION = 2;
/**
* UPGRADE_REQUIRED_BFORE_VERSION should be changed to DB_VERSION any time the
@ -59,13 +59,13 @@ class ProgramUserDataDB extends DomainObjectAdapterDB implements ProgramUserData
* until upgrade is performed). It is assumed that read-only mode is supported
* if the data's version is &gt;= UPGRADE_REQUIRED_BEFORE_VERSION and &lt;= DB_VERSION.
*/
private static final int UPGRADE_REQUIRED_BEFORE_VERSION = 1;
private static final int UPGRADE_REQUIRED_BEFORE_VERSION = 2;
private static final String TABLE_NAME = "ProgramUserData";
private final static Class<?>[] COL_CLASS = new Class[] { StringField.class };
private final static Field[] COL_FIELDS = new Field[] { StringField.INSTANCE };
private final static String[] COL_NAMES = new String[] { "Value" };
private final static Schema SCHEMA =
new Schema(0, StringField.class, "Key", COL_CLASS, COL_NAMES);
new Schema(0, StringField.INSTANCE, "Key", COL_FIELDS, COL_NAMES);
private static final int VALUE_COL = 0;
private static final String STORED_DB_VERSION = "DB Version";
@ -73,12 +73,12 @@ class ProgramUserDataDB extends DomainObjectAdapterDB implements ProgramUserData
private static final String LANGUAGE_ID = "Language ID";
private static final String REGISTRY_TABLE_NAME = "PropertyRegistry";
private final static Class<?>[] REGISTRY_COL_CLASS =
new Class[] { StringField.class, StringField.class, IntField.class, StringField.class };
private final static Field[] REGISTRY_COL_FIELDS = new Field[] { StringField.INSTANCE,
StringField.INSTANCE, IntField.INSTANCE, StringField.INSTANCE };
private final static String[] REGISTRY_COL_NAMES =
new String[] { "Owner", "PropertyName", "PropertyType", "SaveableClass" };
private final static Schema REGISTRY_SCHEMA =
new Schema(0, "ID", REGISTRY_COL_CLASS, REGISTRY_COL_NAMES);
new Schema(0, "ID", REGISTRY_COL_FIELDS, REGISTRY_COL_NAMES);
private static final int PROPERTY_OWNER_COL = 0;
private static final int PROPERTY_NAME_COL = 1;
private static final int PROPERTY_TYPE_COL = 2;
@ -467,7 +467,8 @@ class ProgramUserDataDB extends DomainObjectAdapterDB implements ProgramUserData
Class<?> saveableClass, boolean create) throws PropertyTypeMismatchException {
try {
for (long key : registryTable.findRecords(new StringField(owner), PROPERTY_OWNER_COL)) {
for (Field key : registryTable.findRecords(new StringField(owner),
PROPERTY_OWNER_COL)) {
Record rec = registryTable.getRecord(key);
if (propertyName.equals(rec.getString(PROPERTY_NAME_COL))) {
int type = rec.getIntValue(PROPERTY_TYPE_COL);
@ -573,7 +574,8 @@ class ProgramUserDataDB extends DomainObjectAdapterDB implements ProgramUserData
public synchronized List<PropertyMap> getProperties(String owner) {
List<PropertyMap> list = new ArrayList<PropertyMap>();
try {
for (long key : registryTable.findRecords(new StringField(owner), PROPERTY_OWNER_COL)) {
for (Field key : registryTable.findRecords(new StringField(owner),
PROPERTY_OWNER_COL)) {
Record rec = registryTable.getRecord(key);
list.add(getPropertyMap(rec));
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,15 +15,14 @@
*/
package ghidra.program.database.bookmark;
import ghidra.program.database.map.AddressMap;
import ghidra.program.database.util.EmptyRecordIterator;
import ghidra.program.model.address.*;
import ghidra.util.exception.VersionException;
import java.io.IOException;
import java.util.HashSet;
import db.*;
import ghidra.program.database.map.AddressMap;
import ghidra.program.database.util.EmptyRecordIterator;
import ghidra.program.model.address.*;
import ghidra.util.exception.VersionException;
public class BookmarkDBAdapterV3 extends BookmarkDBAdapter {
@ -35,8 +33,9 @@ public class BookmarkDBAdapterV3 extends BookmarkDBAdapter {
static final int V3_COMMENT_COL = 2;
static final int VERSION = 3;
static final Schema V3_SCHEMA = new Schema(VERSION, "ID", new Class[] { LongField.class,
StringField.class, StringField.class }, new String[] { "Address", "Category", "Comment" });
static final Schema V3_SCHEMA = new Schema(VERSION, "ID",
new Field[] { LongField.INSTANCE, StringField.INSTANCE, StringField.INSTANCE },
new String[] { "Address", "Category", "Comment" });
static int[] INDEXED_COLUMNS = new int[] { V3_ADDRESS_COL, V3_CATEGORY_COL };

View file

@ -296,9 +296,8 @@ public class BookmarkDBManager implements BookmarkManager, ErrorHandler, Manager
bm.setComment(comment);
}
else {
Record rec =
bookmarkAdapter.createBookmark(typeId, category, addrMap.getKey(addr, true),
comment);
Record rec = bookmarkAdapter.createBookmark(typeId, category,
addrMap.getKey(addr, true), comment);
bm = new BookmarkDB(this, cache, rec);
// fire event
@ -606,9 +605,8 @@ public class BookmarkDBManager implements BookmarkManager, ErrorHandler, Manager
RecordIterator it;
try {
if (bmt != null && bmt.hasBookmarks()) {
it =
bookmarkAdapter.getRecordsByTypeStartingAtAddress(bmt.getTypeId(),
addrMap.getKey(startAddress, false), forward);
it = bookmarkAdapter.getRecordsByTypeStartingAtAddress(bmt.getTypeId(),
addrMap.getKey(startAddress, false), forward);
}
else {
it = new EmptyRecordIterator();
@ -761,11 +759,10 @@ public class BookmarkDBManager implements BookmarkManager, ErrorHandler, Manager
try {
Table table = bookmarkAdapter.getTable(typeId);
if (table != null) {
DBLongIterator it =
new AddressIndexPrimaryKeyIterator(table, BookmarkDBAdapter.ADDRESS_COL,
addrMap, set, true);
DBFieldIterator it = new AddressIndexPrimaryKeyIterator(table,
BookmarkDBAdapter.ADDRESS_COL, addrMap, set, true);
while (it.hasNext()) {
BookmarkDB bm = (BookmarkDB) getBookmark(it.next());
BookmarkDB bm = (BookmarkDB) getBookmark(it.next().getLongValue());
if (category == null || category.equals(bm.getCategory())) {
doRemoveBookmark(bm);
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,11 +15,10 @@
*/
package ghidra.program.database.bookmark;
import ghidra.util.exception.VersionException;
import java.io.IOException;
import db.*;
import ghidra.util.exception.VersionException;
abstract class BookmarkTypeDBAdapter {
@ -28,8 +26,8 @@ abstract class BookmarkTypeDBAdapter {
static final int TYPE_NAME_COL = 0;
static final Schema SCHEMA = new Schema(0, "ID", new Class[] { StringField.class },
new String[] { "Name" });
static final Schema SCHEMA =
new Schema(0, "ID", new Field[] { StringField.INSTANCE }, new String[] { "Name" });
static BookmarkTypeDBAdapter getAdapter(DBHandle dbHandle, int openMode)
throws VersionException, IOException {
@ -58,8 +56,8 @@ abstract class BookmarkTypeDBAdapter {
return new BookmarkTypeDBAdapterNoTable(dbHandle);
}
private static BookmarkTypeDBAdapter upgrade(DBHandle dbHandle, BookmarkTypeDBAdapter oldAdapter)
throws VersionException, IOException {
private static BookmarkTypeDBAdapter upgrade(DBHandle dbHandle,
BookmarkTypeDBAdapter oldAdapter) throws VersionException, IOException {
return new BookmarkTypeDBAdapterV0(dbHandle, true);
}

View file

@ -32,8 +32,8 @@ abstract class CommentHistoryAdapter {
static final String COMMENT_HISTORY_TABLE_NAME = "Comment History";
static final Schema COMMENT_HISTORY_SCHEMA = new Schema(0, "Key",
new Class[] { LongField.class, ByteField.class, IntField.class, IntField.class,
StringField.class, StringField.class, LongField.class },
new Field[] { LongField.INSTANCE, ByteField.INSTANCE, IntField.INSTANCE, IntField.INSTANCE,
StringField.INSTANCE, StringField.INSTANCE, LongField.INSTANCE },
new String[] { "Address", "Comment Type", "Pos1", "Pos2", "String Data", "User", "Date" });
static final int HISTORY_ADDRESS_COL = 0;

View file

@ -57,8 +57,8 @@ abstract class CommentsDBAdapter {
NAMES[REPEATABLE_COMMENT_COL] = "Repeatable";
COMMENTS_SCHEMA =
new Schema(1, "Address", new Class[] { StringField.class, StringField.class,
StringField.class, StringField.class, StringField.class }, NAMES);
new Schema(1, "Address", new Field[] { StringField.INSTANCE, StringField.INSTANCE,
StringField.INSTANCE, StringField.INSTANCE, StringField.INSTANCE }, NAMES);
}
// /** comment type for end of line */
@ -110,8 +110,8 @@ abstract class CommentsDBAdapter {
}
private static CommentsDBAdapter upgrade(DBHandle dbHandle, AddressMap addrMap,
CommentsDBAdapter oldAdapter, TaskMonitor monitor) throws VersionException,
IOException, CancelledException {
CommentsDBAdapter oldAdapter, TaskMonitor monitor)
throws VersionException, IOException, CancelledException {
AddressMap oldAddrMap = addrMap.getOldAddressMap();

View file

@ -103,6 +103,8 @@ class DataDB extends CodeUnitDB implements Data {
DataType dt;
if (rec != null) {
// ensure that record provided corresponds to a DataDB record
// since following an undo/redo the record could correspond to
// a different type of code unit (hopefully with a different record schema)
if (!rec.hasSameSchema(DataDBAdapter.DATA_SCHEMA)) {
return true;
}

View file

@ -1,6 +1,5 @@
/* ###
* IP: GHIDRA
* REVIEWED: YES
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -19,6 +18,9 @@
*/
package ghidra.program.database.code;
import java.io.IOException;
import db.*;
import ghidra.program.database.map.AddressKeyIterator;
import ghidra.program.database.map.AddressMap;
import ghidra.program.model.address.Address;
@ -27,10 +29,6 @@ import ghidra.util.exception.CancelledException;
import ghidra.util.exception.VersionException;
import ghidra.util.task.TaskMonitor;
import java.io.IOException;
import db.*;
/**
* Adapter to access the Data table.
*/
@ -38,7 +36,7 @@ abstract class DataDBAdapter {
static final String DATA_TABLE_NAME = "Data";
static final Schema DATA_SCHEMA = new Schema(0, "Address", new Class[] { LongField.class },
static final Schema DATA_SCHEMA = new Schema(0, "Address", new Field[] { LongField.INSTANCE },
new String[] { "Data Type ID" });
static final int DATA_TYPE_ID_COL = 0;
@ -75,8 +73,8 @@ abstract class DataDBAdapter {
}
private static DataDBAdapter upgrade(DBHandle dbHandle, AddressMap addrMap,
DataDBAdapter oldAdapter, TaskMonitor monitor) throws VersionException, IOException,
CancelledException {
DataDBAdapter oldAdapter, TaskMonitor monitor)
throws VersionException, IOException, CancelledException {
AddressMap oldAddrMap = addrMap.getOldAddressMap();

Some files were not shown because too many files have changed in this diff Show more