mirror of
https://github.com/geometer/FBReaderJ.git
synced 2025-10-06 03:50:19 +02:00
plucker support (in progress)
git-svn-id: https://only.mawhrin.net/repos/FBReaderJ/trunk@829 6a642e6f-84f6-412e-ac94-c4a38d5a04b0
This commit is contained in:
parent
69fb2d8d0b
commit
ba4ca95cda
11 changed files with 261 additions and 144 deletions
|
@ -149,8 +149,6 @@ public class ZLEncodingCollection {
|
|||
}
|
||||
|
||||
public boolean startElementHandler(String tag, ZLStringMap attributes) {
|
||||
System.out.println("reading");
|
||||
|
||||
if (GROUP.equals(tag)) {
|
||||
final String name = attributes.getValue(NAME);
|
||||
if (name != null) {
|
||||
|
|
Binary file not shown.
100
src/org/geometerplus/fbreader/formats/pdb/DocDecompressor.java
Normal file
100
src/org/geometerplus/fbreader/formats/pdb/DocDecompressor.java
Normal file
|
@ -0,0 +1,100 @@
|
|||
/*
|
||||
* Copyright (C) 2007-2008 Geometer Plus <contact@geometerplus.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
* 02110-1301, USA.
|
||||
*/
|
||||
|
||||
package org.geometerplus.fbreader.formats.pdb;
|
||||
|
||||
import java.io.*;
|
||||
|
||||
/**
 * Decompressor for the PalmDoc ("DOC") LZ77-style compression scheme used in
 * PDB e-book records.
 *
 * <p>Token classification (on the UNSIGNED byte value):
 * <ul>
 *   <li>0x00, 0x09..0x7F — literal byte, copied as-is (class 0);</li>
 *   <li>0x01..0x08 — the token value N is a count: the next N bytes are literals (class 1);</li>
 *   <li>0x80..0xBF — first byte of a two-byte back-reference (class 3);</li>
 *   <li>0xC0..0xFF — space compression: expands to ' ' followed by (token ^ 0x80) (class 2).</li>
 * </ul>
 */
public abstract class DocDecompressor {
	// Lookup table: TOKEN_CODE[b] is the token class (0..3) of unsigned byte value b.
	private static final byte[] TOKEN_CODE = {
		0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
		3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
		3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
		3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
		2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
		2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
		2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
		2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
	};

	/**
	 * Reads {@code compressedSize} bytes from {@code stream} and decompresses
	 * them into {@code targetBuffer}. Decompression stops early (without error)
	 * when either buffer would be overrun or the input is truncated.
	 *
	 * @param stream         source of the compressed record
	 * @param targetBuffer   destination; its length bounds the uncompressed size
	 * @param compressedSize number of compressed bytes to consume
	 * @return the number of bytes written to {@code targetBuffer}
	 * @throws IOException if reading from {@code stream} fails
	 */
	public static int decompress(InputStream stream, byte[] targetBuffer, int compressedSize) throws IOException {
		final int maxUncompressedSize = targetBuffer.length;
		final byte[] sourceBuffer = new byte[compressedSize];

		int sourceIndex = 0;
		int targetIndex = 0;

		if (stream.read(sourceBuffer, 0, compressedSize) == compressedSize) {
			loop:
			while ((sourceIndex < compressedSize) && (targetIndex < maxUncompressedSize)) {
				// Mask to the unsigned value: Java bytes are signed, and without
				// the mask any input byte >= 0x80 yields a negative index into
				// TOKEN_CODE (ArrayIndexOutOfBoundsException) and a wrong N below.
				final int token = sourceBuffer[sourceIndex++] & 0xFF;
				switch (TOKEN_CODE[token]) {
					case 0:
						// Literal byte.
						targetBuffer[targetIndex++] = (byte)token;
						break;
					case 1:
						// Run of `token` (1..8) literal bytes.
						if ((sourceIndex + token > compressedSize) ||
							(targetIndex + token > maxUncompressedSize)) {
							break loop;
						}
						System.arraycopy(sourceBuffer, sourceIndex, targetBuffer, targetIndex, token);
						sourceIndex += token;
						targetIndex += token;
						break;
					case 2:
						// Space compression: ' ' followed by the token with its top bit cleared.
						if (targetIndex + 2 > maxUncompressedSize) {
							break loop;
						}
						targetBuffer[targetIndex++] = ' ';
						targetBuffer[targetIndex++] = (byte)(token ^ 0x80);
						break;
					case 3:
						// Two-byte back-reference: 14-bit N = distance * 8 + (length - 3).
						if (sourceIndex + 1 > compressedSize) {
							break loop;
						}
						final int n = 256 * token + (sourceBuffer[sourceIndex++] & 0xFF);
						final int copyLength = (n & 7) + 3;
						if (targetIndex + copyLength > maxUncompressedSize) {
							break loop;
						}
						int shiftedIndex = targetIndex - (n & 0x3FFF) / 8;
						if (shiftedIndex >= 0) {
							// Byte-by-byte copy on purpose: source and destination
							// ranges may overlap (distance < length is legal).
							for (int i = 0; i < copyLength; i++) {
								targetBuffer[targetIndex++] = targetBuffer[shiftedIndex++];
							}
						}
						break;
				}
			}
		}

		return targetIndex;
	}
}
|
|
@ -32,29 +32,34 @@ public class PdbHeader {
|
|||
public boolean read(InputStream stream) throws IOException {
|
||||
final byte[] buffer = new byte[32];
|
||||
if (stream.read(buffer, 0, 32) != 32) {
|
||||
System.err.println("way 0");
|
||||
return false;
|
||||
}
|
||||
DocName = new String(buffer);
|
||||
Flags = PdbUtil.readUnsignedShort(stream);
|
||||
Flags = PdbUtil.readShort(stream);
|
||||
|
||||
stream.skip(26);
|
||||
|
||||
if (stream.read(buffer, 0, 8) != 8) {
|
||||
System.err.println("way 1");
|
||||
return false;
|
||||
}
|
||||
Id = new String(buffer, 0, 8);
|
||||
|
||||
stream.skip(8);
|
||||
|
||||
int numRecords = PdbUtil.readUnsignedShort(stream);
|
||||
int numRecords = PdbUtil.readShort(stream);
|
||||
if (numRecords <= 0) {
|
||||
System.err.println(numRecords);
|
||||
System.err.println("way 2");
|
||||
return false;
|
||||
}
|
||||
Offsets = new int[numRecords];
|
||||
|
||||
for (int i = 0; i < numRecords; ++i) {
|
||||
Offsets[i] = stream.read();
|
||||
Offsets[i] = PdbUtil.readInt(stream);
|
||||
if (stream.skip(4) != 4) {
|
||||
System.err.println("way 3");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -7,11 +7,11 @@ import org.geometerplus.zlibrary.core.filesystem.ZLFile;
|
|||
|
||||
public class PdbInputStream extends InputStream {
|
||||
private final InputStream myBase;
|
||||
private long myOffset = 0;
|
||||
private final long mySize;
|
||||
private int myOffset = 0;
|
||||
private final int mySize;
|
||||
|
||||
public PdbInputStream(ZLFile file) throws IOException {
|
||||
mySize = file.size();
|
||||
mySize = (int)file.size();
|
||||
myBase = file.getInputStream();
|
||||
}
|
||||
|
||||
|
@ -53,15 +53,11 @@ public class PdbInputStream extends InputStream {
|
|||
super.reset();
|
||||
}
|
||||
|
||||
public long skip(long n) throws IOException {
|
||||
return super.skip(n);
|
||||
}
|
||||
|
||||
public long offset() {
|
||||
public int offset() {
|
||||
return myOffset;
|
||||
}
|
||||
|
||||
public long sizeOfOpened() {
|
||||
public int sizeOfOpened() {
|
||||
return mySize - myOffset;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -37,7 +37,6 @@ public abstract class PdbPlugin extends FormatPlugin {
|
|||
}
|
||||
|
||||
String fileName = file.getPath();
|
||||
System.err.println("fileName = " + fileName);
|
||||
int index = fileName.indexOf(':');
|
||||
ZLFile baseFile = (index == -1) ? file : new ZLFile(fileName.substring(0, index));
|
||||
boolean upToDate = BookDescriptionUtil.checkInfo(baseFile);
|
||||
|
@ -47,7 +46,7 @@ public abstract class PdbPlugin extends FormatPlugin {
|
|||
if ((palmType.length() != 8) || !upToDate) {
|
||||
byte[] id = new byte[8];
|
||||
try {
|
||||
InputStream stream = file.getInputStream();
|
||||
final InputStream stream = file.getInputStream();
|
||||
if (stream == null) {
|
||||
return null;
|
||||
}
|
||||
|
|
|
@ -19,21 +19,19 @@
|
|||
|
||||
package org.geometerplus.fbreader.formats.pdb;
|
||||
|
||||
import java.io.BufferedInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
|
||||
import org.geometerplus.fbreader.formats.pdb.PdbUtil;
|
||||
import org.geometerplus.zlibrary.core.filesystem.ZLFile;
|
||||
|
||||
public abstract class PdbStream extends InputStream {
|
||||
protected final InputStream myBase;
|
||||
private int myOffset;
|
||||
protected final PdbHeader myHeader = new PdbHeader();
|
||||
protected byte[] myBuffer;
|
||||
private int myOffset;
|
||||
public final PdbHeader myHeader = new PdbHeader();
|
||||
protected byte[] myBuffer;
|
||||
|
||||
protected short myBufferLength;
|
||||
protected short myBufferOffset;
|
||||
protected short myBufferLength;
|
||||
protected short myBufferOffset;
|
||||
|
||||
public PdbStream(ZLFile file) {
|
||||
InputStream base;
|
||||
|
@ -62,7 +60,7 @@ public abstract class PdbStream extends InputStream {
|
|||
myOffset += realSize;
|
||||
return realSize;
|
||||
}
|
||||
/*public int read(byte[] buffer,int offset, int maxSize) {
|
||||
/*public int read(byte[] buffer,int offset, int maxSize) {
|
||||
int realSize = 0;
|
||||
while (realSize < maxSize) {
|
||||
if (!fillBuffer()) {
|
||||
|
@ -89,7 +87,7 @@ public abstract class PdbStream extends InputStream {
|
|||
return false;
|
||||
}
|
||||
|
||||
myBase.skip((myHeader.Offsets[0])/*, true*/);
|
||||
myBase.skip(myHeader.Offsets[0] - 78 - 8 * myHeader.Offsets.length);
|
||||
|
||||
myBufferLength = 0;
|
||||
myBufferOffset = 0;
|
||||
|
@ -108,9 +106,9 @@ public abstract class PdbStream extends InputStream {
|
|||
}
|
||||
}
|
||||
|
||||
public void skip(int offset) throws IOException {
|
||||
public void skip(int offset) throws IOException {
|
||||
if (offset > 0) {
|
||||
read(null,0, offset);
|
||||
read(null, 0, offset);
|
||||
} else if (offset < 0) {
|
||||
offset += this.offset();
|
||||
open();
|
||||
|
@ -120,11 +118,11 @@ public abstract class PdbStream extends InputStream {
|
|||
}
|
||||
}
|
||||
|
||||
public int offset() {
|
||||
public int offset() {
|
||||
return myOffset;
|
||||
}
|
||||
|
||||
public int sizeOfOpened() {
|
||||
public int sizeOfOpened() {
|
||||
// TODO: implement
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -21,43 +21,28 @@ package org.geometerplus.fbreader.formats.pdb;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.util.ArrayList;
|
||||
|
||||
public class PdbUtil {
|
||||
public static short readShort(InputStream stream) {
|
||||
byte[] tmp = new byte[2];
|
||||
final byte[] tmp = new byte[2];
|
||||
try {
|
||||
stream.read(tmp, 0, 2);
|
||||
} catch (IOException e) {
|
||||
return -1;
|
||||
}
|
||||
return (short)(tmp[1] + (tmp[0] << 8));
|
||||
return (short)((tmp[1] & 0xFF) + ((tmp[0] & 0xFF) << 8));
|
||||
}
|
||||
|
||||
public static long readUnsignedLong(InputStream stream) {
|
||||
byte []readBuffer = new byte[8];
|
||||
try {
|
||||
stream.read(readBuffer, 0, 8);
|
||||
} catch (IOException e) {
|
||||
// TODO Auto-generated catch block
|
||||
e.printStackTrace();
|
||||
}
|
||||
return (((long)readBuffer[0] << 56) +
|
||||
((long)(readBuffer[1] & 255) << 48) +
|
||||
((long)(readBuffer[2] & 255) << 40) +
|
||||
((long)(readBuffer[3] & 255) << 32) +
|
||||
((long)(readBuffer[4] & 255) << 24) +
|
||||
((readBuffer[5] & 255) << 16) +
|
||||
((readBuffer[6] & 255) << 8) +
|
||||
((readBuffer[7] & 255) << 0));
|
||||
|
||||
/*byte []tmp = new byte[4];
|
||||
public static int readInt(InputStream stream) {
|
||||
final byte[] tmp = new byte[4];
|
||||
try {
|
||||
stream.read(tmp, 0, 4);
|
||||
} catch (IOException e) {
|
||||
// TODO Auto-generated catch block
|
||||
e.printStackTrace();
|
||||
return -1;
|
||||
}
|
||||
return tmp[3] + tmp[2]*256 +tmp[1] * 256^2 +tmp[0] * 256^3;*/
|
||||
return (tmp[0] << 24) +
|
||||
((tmp[1] & 0xFF) << 16) +
|
||||
((tmp[2] & 0xFF) << 8) +
|
||||
(tmp[3] & 0xFF);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -24,8 +24,12 @@ import java.io.InputStream;
|
|||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.Iterator;
|
||||
import java.util.zip.DataFormatException;
|
||||
import java.util.zip.Inflater;
|
||||
|
||||
import org.geometerplus.fbreader.bookmodel.*;
|
||||
import org.geometerplus.fbreader.encoding.*;
|
||||
import org.geometerplus.fbreader.formats.EncodedTextReader;
|
||||
import org.geometerplus.fbreader.formats.pdb.*;
|
||||
import org.geometerplus.zlibrary.core.filesystem.ZLFile;
|
||||
import org.geometerplus.zlibrary.core.image.ZLImage;
|
||||
|
@ -33,44 +37,64 @@ import org.geometerplus.zlibrary.text.model.*;
|
|||
import org.geometerplus.zlibrary.text.model.impl.*;
|
||||
|
||||
public class PluckerBookReader extends BookReader {
|
||||
private final String myFilePath;
|
||||
private PdbInputStream myStream;
|
||||
private int myFont;
|
||||
private char[] myCharBuffer;
|
||||
private String myConvertedTextBuffer;
|
||||
private boolean myParagraphStarted = false;
|
||||
private boolean myBufferIsEmpty;
|
||||
private ZLTextForcedControlEntry myForcedEntry;
|
||||
private final ArrayList/*<std::pair<FBTextKind,bool> >*/ myDelayedControls = new ArrayList();
|
||||
private final ArrayList/*<std::string> */myDelayedHyperlinks = new ArrayList();
|
||||
private short myCompressionVersion;
|
||||
private char myBytesToSkip;
|
||||
|
||||
private final ArrayList/*<std::pair<int, int> >*/ myReferencedParagraphs = new ArrayList();
|
||||
private final HashMap/*<int, std::vector<int> >*/ myParagraphMap = new HashMap();
|
||||
private ArrayList/*<Integer>*/ myParagraphVector = new ArrayList(); //íà âñÿêèé ñëó÷àé
|
||||
private boolean myParagraphStored;
|
||||
|
||||
private final ZLEncodingConverter myConverter;
|
||||
|
||||
public PluckerBookReader(String filePath, BookModel model, String encoding){
|
||||
super(model);
|
||||
//, EncodedTextReader = encoding,
|
||||
myFilePath = filePath;
|
||||
myFont = FontType.FT_REGULAR;
|
||||
myCharBuffer = new char[65535];
|
||||
myForcedEntry = null;
|
||||
super(model);
|
||||
myConverter = new EncodedTextReader(encoding).getConverter();
|
||||
myFilePath = filePath;
|
||||
System.out.println(filePath + " " + encoding);
|
||||
myFont = FontType.FT_REGULAR;
|
||||
myCharBuffer = new char[65535];
|
||||
myForcedEntry = null;
|
||||
|
||||
}
|
||||
|
||||
public boolean readDocument() throws IOException {
|
||||
final PdbStream stream = new PluckerTextStream(new ZLFile(myFilePath));
|
||||
myStream = stream;
|
||||
if (!stream.open()) {
|
||||
return false;
|
||||
}
|
||||
System.out.println("reading document");
|
||||
myStream = new PdbInputStream(new ZLFile(myFilePath));
|
||||
|
||||
PdbHeader header = new PdbHeader();
|
||||
if (!header.read(stream)) {
|
||||
stream.close();
|
||||
if (!header.read(myStream)) {
|
||||
myStream.close();
|
||||
System.out.println("reading stream null");
|
||||
return false;
|
||||
}
|
||||
|
||||
setMainTextModel();
|
||||
myFont = FontType.FT_REGULAR;
|
||||
|
||||
for (int i = 0; i < header.Offsets.length; ++i) {
|
||||
int currentOffset = ((PdbStream)myStream).offset();
|
||||
int pit = header.Offsets[i];
|
||||
for (int index = 0; index < header.Offsets.length; ++index) {
|
||||
int currentOffset = myStream.offset();
|
||||
int pit = header.Offsets[index];
|
||||
if (currentOffset > pit) {
|
||||
break;
|
||||
}
|
||||
//myStream.seek(pit - currentOffset, false);
|
||||
myStream.skip(pit - currentOffset);
|
||||
|
||||
if (((PdbStream)myStream).offset() != pit) {
|
||||
if (myStream.offset() != pit) {
|
||||
break;
|
||||
}
|
||||
int recordSize = 25;//((pit != header.Offsets.size() - 1) ? (Integer)it.next() : ((PdbStream)myStream).sizeOfOpened()) - pit;
|
||||
int recordSize = ((index != header.Offsets.length - 1) ? header.Offsets[index + 1] : myStream.sizeOfOpened()) - pit;
|
||||
readRecord(recordSize);
|
||||
}
|
||||
myStream.close();
|
||||
|
@ -96,59 +120,88 @@ public class PluckerBookReader extends BookReader {
|
|||
}
|
||||
|
||||
private class FontType {
|
||||
public static final int FT_REGULAR = 0;
|
||||
public static final int FT_H1 = 1;
|
||||
public static final int FT_H2 = 2;
|
||||
public static final int FT_H3 = 3;
|
||||
public static final int FT_H4 = 4;
|
||||
public static final int FT_H5 = 5;
|
||||
public static final int FT_H6 = 6;
|
||||
public static final int FT_BOLD = 7;
|
||||
public static final int FT_TT = 8;
|
||||
public static final int FT_SMALL = 9;
|
||||
public static final int FT_SUB = 10;
|
||||
public static final int FT_SUP = 11;
|
||||
};
|
||||
public static final int FT_REGULAR = 0;
|
||||
public static final int FT_H1 = 1;
|
||||
public static final int FT_H2 = 2;
|
||||
public static final int FT_H3 = 3;
|
||||
public static final int FT_H4 = 4;
|
||||
public static final int FT_H5 = 5;
|
||||
public static final int FT_H6 = 6;
|
||||
public static final int FT_BOLD = 7;
|
||||
public static final int FT_TT = 8;
|
||||
public static final int FT_SMALL = 9;
|
||||
public static final int FT_SUB = 10;
|
||||
public static final int FT_SUP = 11;
|
||||
};
|
||||
|
||||
private void readRecord(int recordSize) throws IOException {
|
||||
short uid = PdbUtil.readShort(myStream);
|
||||
System.out.println("reading record");
|
||||
int uid = PdbUtil.readShort(myStream);
|
||||
if (uid == 1) {
|
||||
myCompressionVersion = PdbUtil.readShort(myStream );
|
||||
} else {
|
||||
short paragraphs = PdbUtil.readShort(myStream);
|
||||
int paragraphs = PdbUtil.readShort(myStream);
|
||||
System.out.println("par "+paragraphs);
|
||||
|
||||
short size = PdbUtil.readShort(myStream);
|
||||
int size = PdbUtil.readShort(myStream);
|
||||
//TODO ??????
|
||||
int type = (int)myStream.read();
|
||||
int type = myStream.read();
|
||||
|
||||
int flags = (int)myStream.read();
|
||||
int flags = myStream.read();
|
||||
|
||||
System.out.println("type " + type);
|
||||
System.out.println("Compression " + myCompressionVersion);
|
||||
switch (type) {
|
||||
case 0: // text (TODO: found sample file and test this code)
|
||||
case 1: // compressed text
|
||||
{
|
||||
ArrayList/*<Integer>*/ pars = new ArrayList();
|
||||
for (int i = 0; i < paragraphs; ++i) {
|
||||
short pSize = PdbUtil.readShort(myStream);
|
||||
int pSize = PdbUtil.readShort(myStream);
|
||||
pars.add(pSize);
|
||||
myStream.skip(2);
|
||||
}
|
||||
|
||||
boolean doProcess = false;
|
||||
if (type == 0) {
|
||||
|
||||
doProcess = myStream.read(myCharBuffer.toString().getBytes(), 0, (int)size) == size;
|
||||
if (type == 0) {//?
|
||||
byte[] buf = new byte[size];
|
||||
doProcess = myStream.read(buf, 0, (int)size) == size;
|
||||
if (doProcess) {
|
||||
myCharBuffer = new String(buf).toCharArray();
|
||||
}
|
||||
} else if (myCompressionVersion == 1) {
|
||||
//doProcess =
|
||||
//DocDecompressor().decompress(myStream, myCharBuffer, recordSize - 8 - 4 * paragraphs, size) == size;
|
||||
byte[] buf = new byte[size];
|
||||
doProcess =
|
||||
DocDecompressor.decompress(myStream, buf, recordSize - 8 - 4 * paragraphs) == size;
|
||||
if (doProcess) {
|
||||
myCharBuffer = new String(buf).toCharArray();
|
||||
}
|
||||
} else if (myCompressionVersion == 2) {
|
||||
myStream.skip(2);
|
||||
// myStream.skip(2);
|
||||
System.out.println("input size = " + (recordSize - 10 - 4 * paragraphs));
|
||||
System.out.println("size = " + size);
|
||||
byte input [] = new byte[(int) (recordSize - 10 - 4 * paragraphs)];
|
||||
final int inputSize = myStream.read(input);
|
||||
System.out.println("inputsize = " + inputSize);
|
||||
Inflater decompressor = new Inflater();
|
||||
decompressor.setInput(input, 0, inputSize);
|
||||
byte output [] = new byte[30000];
|
||||
try {
|
||||
doProcess = decompressor.inflate(output) == size;
|
||||
decompressor.end();
|
||||
myCharBuffer = new String(output, 0, size).toCharArray();
|
||||
} catch (DataFormatException e) {
|
||||
// TODO Auto-generated catch block
|
||||
// e.printStackTrace();
|
||||
System.out.println(e.getMessage());
|
||||
}
|
||||
//doProcess =
|
||||
//ZLZDecompressor(recordSize - 10 - 4 * paragraphs).
|
||||
//decompress(myStream, myCharBuffer, size) == size;
|
||||
}
|
||||
if (doProcess) {
|
||||
addHyperlinkLabel(fromNumber(uid));
|
||||
myParagraphMap.put(uid, new ArrayList());
|
||||
myParagraphVector = (ArrayList)myParagraphMap.get(uid);
|
||||
processTextRecord(size, pars);
|
||||
if ((flags & 0x1) == 0) {
|
||||
|
@ -206,14 +259,14 @@ public class PluckerBookReader extends BookReader {
|
|||
}
|
||||
}
|
||||
|
||||
private void processTextRecord(int size, ArrayList<Integer> pars) {
|
||||
private void processTextRecord(int size, ArrayList/*<Integer>*/ pars) {
|
||||
int start = 0;
|
||||
int end = 0;
|
||||
|
||||
for (Iterator it = pars.iterator(); it.hasNext();) {
|
||||
start = end;
|
||||
end = start + (Integer)it.next();
|
||||
if (end > myCharBuffer[size]) {
|
||||
if (end > size) {
|
||||
return;
|
||||
}
|
||||
myParagraphStored = false;
|
||||
|
@ -238,8 +291,8 @@ public class PluckerBookReader extends BookReader {
|
|||
functionFlag = true;
|
||||
if (ptr > textStart) {
|
||||
safeBeginParagraph();
|
||||
myConvertedTextBuffer = "";//.erase();
|
||||
//myConverter.convert(myConvertedTextBuffer, textStart, ptr);
|
||||
// myConvertedTextBuffer = "";//.erase();
|
||||
myConvertedTextBuffer = myConverter.convert(data, textStart, ptr);
|
||||
addData(myConvertedTextBuffer.toCharArray());
|
||||
myBufferIsEmpty = false;
|
||||
}
|
||||
|
@ -261,15 +314,15 @@ public class PluckerBookReader extends BookReader {
|
|||
if (data[ptr] == 0xA0) {
|
||||
data[ptr] = 0x20;
|
||||
}
|
||||
if (!myParagraphStarted && (textStart == ptr) /*&& isspace(data[ptr])*/) {
|
||||
if (!myParagraphStarted && (textStart == ptr) && (data[ptr] == ' ')) {
|
||||
++textStart;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (end > textStart) {
|
||||
safeBeginParagraph();
|
||||
myConvertedTextBuffer = "";//erase();
|
||||
//myConverter.convert(myConvertedTextBuffer, textStart, end);
|
||||
// myConvertedTextBuffer = "";//erase();
|
||||
myConvertedTextBuffer = myConverter.convert(data, textStart, end);
|
||||
addData(myConvertedTextBuffer.toCharArray());
|
||||
myBufferIsEmpty = false;
|
||||
}
|
||||
|
@ -425,17 +478,19 @@ public class PluckerBookReader extends BookReader {
|
|||
if (myParagraphStarted) {
|
||||
addHyperlinkControl(FBTextKind.INTERNAL_HYPERLINK, id);
|
||||
} else {
|
||||
myDelayedHyperlinks.add(myDelayedHyperlinks.size()-1, (id));
|
||||
myDelayedHyperlinks.add(id);
|
||||
}
|
||||
}
|
||||
|
||||
private void safeBeginParagraph() {
|
||||
System.out.println("safe begin par ");
|
||||
if (!myParagraphStarted) {
|
||||
myParagraphStarted = true;
|
||||
myBufferIsEmpty = true;
|
||||
System.out.println("Calling begin text par");
|
||||
beginParagraph(ZLTextParagraph.Kind.TEXT_PARAGRAPH);
|
||||
if (!myParagraphStored) {
|
||||
myParagraphVector.add(myParagraphVector.size()-1, getModel().getBookTextModel().getParagraphsNumber() - 1);
|
||||
myParagraphVector.add(getModel().getBookTextModel().getParagraphsNumber() - 1);
|
||||
myParagraphStored = true;
|
||||
}
|
||||
for (Iterator it = myDelayedControls.iterator(); it.hasNext(); ) {
|
||||
|
@ -454,11 +509,13 @@ public class PluckerBookReader extends BookReader {
|
|||
}
|
||||
}
|
||||
private void safeEndParagraph() {
|
||||
// System.out.println("safe end par ");
|
||||
if (myParagraphStarted) {
|
||||
if (myBufferIsEmpty) {
|
||||
final String SPACE = " ";
|
||||
addData(SPACE.toCharArray());
|
||||
}
|
||||
System.out.println("Calling end par");
|
||||
endParagraph();
|
||||
myParagraphStarted = false;
|
||||
}
|
||||
|
@ -496,24 +553,6 @@ public class PluckerBookReader extends BookReader {
|
|||
}
|
||||
}
|
||||
|
||||
private String myFilePath;
|
||||
private InputStream myStream;
|
||||
private int myFont;
|
||||
private char[] myCharBuffer;
|
||||
private String myConvertedTextBuffer;
|
||||
private boolean myParagraphStarted;
|
||||
private boolean myBufferIsEmpty;
|
||||
private ZLTextForcedControlEntry myForcedEntry;
|
||||
private ArrayList/*<std::pair<FBTextKind,bool> >*/ myDelayedControls;
|
||||
private ArrayList/*<std::string> */myDelayedHyperlinks;
|
||||
private short myCompressionVersion;
|
||||
private char myBytesToSkip;
|
||||
|
||||
private ArrayList/*<std::pair<int, int> >*/ myReferencedParagraphs;
|
||||
private HashMap/*<int, std::vector<int> >*/ myParagraphMap;
|
||||
private ArrayList<Integer> myParagraphVector;
|
||||
private boolean myParagraphStored;
|
||||
|
||||
static private class Pair {
|
||||
public Object myFirst;
|
||||
public Object mySecond;
|
||||
|
|
|
@ -24,7 +24,7 @@ import java.io.InputStream;
|
|||
|
||||
import org.geometerplus.fbreader.bookmodel.BookModel;
|
||||
import org.geometerplus.fbreader.description.BookDescription;
|
||||
import org.geometerplus.fbreader.formats.pdb.PdbPlugin;
|
||||
import org.geometerplus.fbreader.formats.pdb.*;
|
||||
import org.geometerplus.zlibrary.core.filesystem.ZLFile;
|
||||
|
||||
public class PluckerPlugin extends PdbPlugin {
|
||||
|
@ -33,25 +33,24 @@ public class PluckerPlugin extends PdbPlugin {
|
|||
}
|
||||
|
||||
public boolean acceptsFile(ZLFile file) {
|
||||
System.err.println("fileType = " + fileType(file));
|
||||
return "DataPlkr".equals(fileType(file));
|
||||
}
|
||||
|
||||
public boolean readDescription(String path, BookDescription description) {
|
||||
ZLFile file = new ZLFile(path);
|
||||
|
||||
InputStream stream = null;
|
||||
try {
|
||||
stream = file.getInputStream();
|
||||
detectEncodingAndLanguage(description, stream);
|
||||
PdbStream stream = new PluckerTextStream(file);
|
||||
if (stream.open()) {
|
||||
//detectEncodingAndLanguage(description, stream);
|
||||
stream.close();
|
||||
}
|
||||
} catch (IOException e) {
|
||||
// TODO Auto-generated catch block
|
||||
e.printStackTrace();
|
||||
}
|
||||
|
||||
if (description.getEncoding().length() == 0) {
|
||||
return false;
|
||||
}
|
||||
//if (description.getEncoding().length() == 0) {
|
||||
// return false;
|
||||
//}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -28,6 +28,10 @@ import org.geometerplus.fbreader.formats.pdb.PdbUtil;
|
|||
import org.geometerplus.zlibrary.core.filesystem.ZLFile;
|
||||
|
||||
public class PluckerTextStream extends PdbStream {
|
||||
private short myCompressionVersion;
|
||||
private byte[] myFullBuffer;
|
||||
private int myRecordIndex;
|
||||
|
||||
public PluckerTextStream(ZLFile file) {
|
||||
super(file);
|
||||
myFullBuffer = null;
|
||||
|
@ -66,15 +70,13 @@ public class PluckerTextStream extends PdbStream {
|
|||
}
|
||||
++myRecordIndex;
|
||||
int currentOffset = myHeader.Offsets[myRecordIndex];
|
||||
/*
|
||||
if (currentOffset < ((PdbStream)myBase).offset()) {
|
||||
return false;
|
||||
}
|
||||
*/
|
||||
//myBase.skip(currentOffset - offset());
|
||||
//if (currentOffset < myBase.offset()) {
|
||||
// return false;
|
||||
//}
|
||||
//((PdbStream)myBase).seek(currentOffset, true);
|
||||
int nextOffset =
|
||||
(myRecordIndex + 1 < myHeader.Offsets.length) ?
|
||||
myHeader.Offsets[myRecordIndex + 1] : ((PdbStream)myBase).sizeOfOpened();
|
||||
myHeader.Offsets[myRecordIndex + 1] : 0;//myBase.sizeOfOpened();
|
||||
if (nextOffset < currentOffset) {
|
||||
return false;
|
||||
}
|
||||
|
@ -162,8 +164,4 @@ public class PluckerTextStream extends PdbStream {
|
|||
myBufferLength += end - textStart;
|
||||
}
|
||||
}
|
||||
|
||||
private short myCompressionVersion;
|
||||
private byte[] myFullBuffer;
|
||||
private int myRecordIndex;
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue