moved snippets generation code from db to query object

Jean-Francois Dockes 2012-09-26 12:13:40 +02:00
parent 94b571aac6
commit e48d402823
12 changed files with 603 additions and 562 deletions
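In caller terms, the move turns the Db snippet entry points into Query methods: the Query object already holds its Db and the per-query term frequency cache, so callers no longer pass the query alongside the document. A minimal before/after sketch, assuming an open Rcl::Db rcldb and a running Rcl::Query query as in the recollq hunks below:

// Before this commit: snippet generation was a Db method and needed the
// Query passed in explicitly.
string abstract;
rcldb.makeDocAbstract(doc, &query, abstract);

// After: the same operation is a method of the Query object, which
// reaches the Db through its own pointer.
query.makeDocAbstract(doc, abstract);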


@@ -6,8 +6,8 @@ LIBS = librcl.a
all: $(LIBS)
-OBJS = rclaspell.o beaglequeuecache.o cstr.o rclconfig.o rclinit.o textsplit.o unacpp.o beaglequeue.o bglfetcher.o fetcher.o fsfetcher.o fsindexer.o indexer.o mimetype.o subtreelist.o htmlparse.o internfile.o mh_exec.o mh_execm.o mh_html.o mh_mail.o mh_mbox.o mh_text.o mimehandler.o myhtmlparse.o txtdcode.o docseq.o docseqdb.o docseqhist.o filtseq.o dynconf.o plaintorich.o recollq.o reslistpager.o sortseq.o wasastringtoquery.o wasatorcl.o expansiondbs.o rcldb.o rcldoc.o rclquery.o searchdata.o stemdb.o stoplist.o synfamily.o unac.o base64.o circache.o closefrom.o conftree.o copyfile.o debuglog.o ecrontab.o execmd.o fstreewalk.o idfile.o fileudi.o md5.o mimeparse.o netcon.o pathut.o pxattr.o rclionice.o readfile.o smallut.o transcode.o wipedir.o x11mon.o mime-parsefull.o mime-parseonlyheader.o mime-printbody.o mime.o convert.o iodevice.o iofactory.o
-DEPS = rclaspell.dep.stamp beaglequeuecache.dep.stamp cstr.dep.stamp rclconfig.dep.stamp rclinit.dep.stamp textsplit.dep.stamp unacpp.dep.stamp beaglequeue.dep.stamp bglfetcher.dep.stamp fetcher.dep.stamp fsfetcher.dep.stamp fsindexer.dep.stamp indexer.dep.stamp mimetype.dep.stamp subtreelist.dep.stamp htmlparse.dep.stamp internfile.dep.stamp mh_exec.dep.stamp mh_execm.dep.stamp mh_html.dep.stamp mh_mail.dep.stamp mh_mbox.dep.stamp mh_text.dep.stamp mimehandler.dep.stamp myhtmlparse.dep.stamp txtdcode.dep.stamp docseq.dep.stamp docseqdb.dep.stamp docseqhist.dep.stamp filtseq.dep.stamp dynconf.dep.stamp plaintorich.dep.stamp recollq.dep.stamp reslistpager.dep.stamp sortseq.dep.stamp wasastringtoquery.dep.stamp wasatorcl.dep.stamp expansiondbs.dep.stamp rcldb.dep.stamp rcldoc.dep.stamp rclquery.dep.stamp searchdata.dep.stamp stemdb.dep.stamp stoplist.dep.stamp synfamily.dep.stamp unac.dep.stamp base64.dep.stamp circache.dep.stamp closefrom.dep.stamp conftree.dep.stamp copyfile.dep.stamp debuglog.dep.stamp ecrontab.dep.stamp execmd.dep.stamp fstreewalk.dep.stamp idfile.dep.stamp fileudi.dep.stamp md5.dep.stamp mimeparse.dep.stamp netcon.dep.stamp pathut.dep.stamp pxattr.dep.stamp rclionice.dep.stamp readfile.dep.stamp smallut.dep.stamp transcode.dep.stamp wipedir.dep.stamp x11mon.dep.stamp mime-parsefull.dep.stamp mime-parseonlyheader.dep.stamp mime-printbody.dep.stamp mime.dep.stamp convert.dep.stamp iodevice.dep.stamp iofactory.dep.stamp
+OBJS = rclaspell.o beaglequeuecache.o cstr.o rclconfig.o rclinit.o textsplit.o unacpp.o beaglequeue.o bglfetcher.o fetcher.o fsfetcher.o fsindexer.o indexer.o mimetype.o subtreelist.o htmlparse.o internfile.o mh_exec.o mh_execm.o mh_html.o mh_mail.o mh_mbox.o mh_text.o mimehandler.o myhtmlparse.o txtdcode.o docseq.o docseqdb.o docseqhist.o filtseq.o dynconf.o plaintorich.o recollq.o reslistpager.o sortseq.o wasastringtoquery.o wasatorcl.o expansiondbs.o rclabstract.o rcldb.o rcldoc.o rclquery.o searchdata.o stemdb.o stoplist.o synfamily.o unac.o base64.o circache.o closefrom.o conftree.o copyfile.o debuglog.o ecrontab.o execmd.o fstreewalk.o idfile.o fileudi.o md5.o mimeparse.o netcon.o pathut.o pxattr.o rclionice.o readfile.o smallut.o transcode.o wipedir.o x11mon.o mime-parsefull.o mime-parseonlyheader.o mime-printbody.o mime.o convert.o iodevice.o iofactory.o
+DEPS = rclaspell.dep.stamp beaglequeuecache.dep.stamp cstr.dep.stamp rclconfig.dep.stamp rclinit.dep.stamp textsplit.dep.stamp unacpp.dep.stamp beaglequeue.dep.stamp bglfetcher.dep.stamp fetcher.dep.stamp fsfetcher.dep.stamp fsindexer.dep.stamp indexer.dep.stamp mimetype.dep.stamp subtreelist.dep.stamp htmlparse.dep.stamp internfile.dep.stamp mh_exec.dep.stamp mh_execm.dep.stamp mh_html.dep.stamp mh_mail.dep.stamp mh_mbox.dep.stamp mh_text.dep.stamp mimehandler.dep.stamp myhtmlparse.dep.stamp txtdcode.dep.stamp docseq.dep.stamp docseqdb.dep.stamp docseqhist.dep.stamp filtseq.dep.stamp dynconf.dep.stamp plaintorich.dep.stamp recollq.dep.stamp reslistpager.dep.stamp sortseq.dep.stamp wasastringtoquery.dep.stamp wasatorcl.dep.stamp expansiondbs.dep.stamp rclabstract.dep.stamp rcldb.dep.stamp rcldoc.dep.stamp rclquery.dep.stamp searchdata.dep.stamp stemdb.dep.stamp stoplist.dep.stamp synfamily.dep.stamp unac.dep.stamp base64.dep.stamp circache.dep.stamp closefrom.dep.stamp conftree.dep.stamp copyfile.dep.stamp debuglog.dep.stamp ecrontab.dep.stamp execmd.dep.stamp fstreewalk.dep.stamp idfile.dep.stamp fileudi.dep.stamp md5.dep.stamp mimeparse.dep.stamp netcon.dep.stamp pathut.dep.stamp pxattr.dep.stamp rclionice.dep.stamp readfile.dep.stamp smallut.dep.stamp transcode.dep.stamp wipedir.dep.stamp x11mon.dep.stamp mime-parsefull.dep.stamp mime-parseonlyheader.dep.stamp mime-printbody.dep.stamp mime.dep.stamp convert.dep.stamp iodevice.dep.stamp iofactory.dep.stamp
librcl.a : $(DEPS) $(OBJS)
        ar ru librcl.a $(OBJS)
@@ -89,6 +89,8 @@ wasatorcl.o : ../query/wasatorcl.cpp $(depth)/mk/localdefs
        $(CXX) $(ALL_CXXFLAGS) -c ../query/wasatorcl.cpp
expansiondbs.o : ../rcldb/expansiondbs.cpp $(depth)/mk/localdefs
        $(CXX) $(ALL_CXXFLAGS) -c ../rcldb/expansiondbs.cpp
+rclabstract.o : ../rcldb/rclabstract.cpp $(depth)/mk/localdefs
+        $(CXX) $(ALL_CXXFLAGS) -c ../rcldb/rclabstract.cpp
rcldb.o : ../rcldb/rcldb.cpp $(depth)/mk/localdefs
        $(CXX) $(ALL_CXXFLAGS) -c ../rcldb/rcldb.cpp
rcldoc.o : ../rcldb/rcldoc.cpp $(depth)/mk/localdefs
@@ -283,6 +285,9 @@ wasatorcl.dep.stamp : ../query/wasatorcl.cpp $(depth)/mk/localdefs
expansiondbs.dep.stamp : ../rcldb/expansiondbs.cpp $(depth)/mk/localdefs
        $(CXX) -M $(ALL_CXXFLAGS) ../rcldb/expansiondbs.cpp > expansiondbs.dep
        touch expansiondbs.dep.stamp
+rclabstract.dep.stamp : ../rcldb/rclabstract.cpp $(depth)/mk/localdefs
+        $(CXX) -M $(ALL_CXXFLAGS) ../rcldb/rclabstract.cpp > rclabstract.dep
+        touch rclabstract.dep.stamp
rcldb.dep.stamp : ../rcldb/rcldb.cpp $(depth)/mk/localdefs
        $(CXX) -M $(ALL_CXXFLAGS) ../rcldb/rcldb.cpp > rcldb.dep
        touch rcldb.dep.stamp
@@ -411,6 +416,7 @@ include sortseq.dep
include wasastringtoquery.dep
include wasatorcl.dep
include expansiondbs.dep
+include rclabstract.dep
include rcldb.dep
include rcldoc.dep
include rclquery.dep


@@ -42,6 +42,7 @@ ${depth}/query/sortseq.cpp \
${depth}/query/wasastringtoquery.cpp \
${depth}/query/wasatorcl.cpp \
${depth}/rcldb/expansiondbs.cpp \
+${depth}/rcldb/rclabstract.cpp \
${depth}/rcldb/rcldb.cpp \
${depth}/rcldb/rcldoc.cpp \
${depth}/rcldb/rclquery.cpp \


@@ -1053,7 +1053,7 @@ Db_makeDocAbstract(recoll_DbObject* self, PyObject *args, PyObject *)
        return 0;
    }
    string abstract;
-    if (!self->db->makeDocAbstract(*(pydoc->doc), pyquery->query, abstract)) {
+    if (!pyquery->query->makeDocAbstract(*(pydoc->doc), abstract)) {
        PyErr_SetString(PyExc_EnvironmentError, "rcl makeDocAbstract failed");
        return 0;
    }


@@ -77,9 +77,8 @@ bool DocSequenceDb::getAbstract(Rcl::Doc &doc,
    int maxoccs = 500;
    Rcl::abstract_result ret = Rcl::ABSRES_ERROR;
    if (m_q->whatDb()) {
-        ret = m_q->whatDb()->makeDocAbstract(doc, m_q.getptr(), vpabs,
-                                             maxoccs,
-                                             m_q->whatDb()->getAbsCtxLen()+ 2);
+        ret = m_q->makeDocAbstract(doc, vpabs, maxoccs,
+                                   m_q->whatDb()->getAbsCtxLen()+ 2);
    }
    if (vpabs.empty())
        vpabs.push_back(pair<int, string>(0, doc.meta[Rcl::Doc::keyabs]));

@@ -96,7 +95,7 @@ bool DocSequenceDb::getAbstract(Rcl::Doc &doc, vector<string>& vabs)
    setQuery();
    if (m_q->whatDb() &&
        m_queryBuildAbstract && (doc.syntabs || m_queryReplaceAbstract)) {
-        m_q->whatDb()->makeDocAbstract(doc, m_q.getptr(), vabs);
+        m_q->makeDocAbstract(doc, vabs);
    }
    if (vabs.empty())
        vabs.push_back(doc.meta[Rcl::Doc::keyabs]);

@@ -107,7 +106,7 @@ int DocSequenceDb::getFirstMatchPage(Rcl::Doc &doc)
{
    setQuery();
    if (m_q->whatDb()) {
-        return m_q->whatDb()->getFirstMatchPage(doc, m_q.getptr());
+        return m_q->getFirstMatchPage(doc);
    }
    return -1;
}


@@ -67,7 +67,7 @@ void output_fields(const vector<string>fields, Rcl::Doc& doc,
        string out;
        if (!it->compare("abstract")) {
            string abstract;
-            rcldb.makeDocAbstract(doc, &query, abstract);
+            query.makeDocAbstract(doc, abstract);
            base64_encode(abstract, out);
        } else {
            base64_encode(doc.meta[*it], out);

@@ -376,7 +376,7 @@ int recollq(RclConfig **cfp, int argc, char **argv)
        }
        if (op_flags & OPT_A) {
            string abstract;
-            if (rcldb.makeDocAbstract(doc, &query, abstract)) {
+            if (query.makeDocAbstract(doc, abstract)) {
                cout << "ABSTRACT" << endl;
                cout << abstract << endl;
                cout << "/ABSTRACT" << endl;

src/rcldb/rclabstract.cpp (new file)

@@ -0,0 +1,487 @@
/* Copyright (C) 2004 J.F.Dockes
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the
* Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include "autoconfig.h"
#include <math.h>
#include <map>
using namespace std;
#include "debuglog.h"
#include "rcldb.h"
#include "rcldb_p.h"
#include "rclquery.h"
#include "rclquery_p.h"
#include "textsplit.h"
#include "utf8iter.h"
namespace Rcl {
// This is used as a marker inside the abstract frag lists, but
// normally doesn't remain in final output (which is built with a
// custom sep. by our caller).
static const string cstr_ellipsis("...");
#undef DEBUGABSTRACT
#ifdef DEBUGABSTRACT
#define LOGABS LOGDEB
static void listList(const string& what, const vector<string>&l)
{
string a;
for (vector<string>::const_iterator it = l.begin(); it != l.end(); it++) {
a = a + *it + " ";
}
LOGDEB(("%s: %s\n", what.c_str(), a.c_str()));
}
#else
#define LOGABS LOGDEB2
static void listList(const string&, const vector<string>&)
{
}
#endif
// Keep only non-prefixed terms. We use to remove prefixes and keep
// the terms instead, but field terms are normally also indexed
// un-prefixed, so this is simpler and better.
static void noPrefixList(const vector<string>& in, vector<string>& out)
{
for (vector<string>::const_iterator qit = in.begin();
qit != in.end(); qit++) {
if (!has_prefix(*qit))
out.push_back(*qit);
}
}
// Retrieve db-wide frequencies for the query terms and store them in
// the query object. This is done at most once for a query, and the data is used
// while computing abstracts for the different result documents.
void Query::Native::setDbWideQTermsFreqs()
{
// Do it once only for a given query.
if (!termfreqs.empty())
return;
vector<string> qterms;
{
vector<string> iqterms;
m_q->getQueryTerms(iqterms);
noPrefixList(iqterms, qterms);
}
// listList("Query terms: ", qterms);
Xapian::Database &xrdb = m_q->m_db->m_ndb->xrdb;
double doccnt = xrdb.get_doccount();
if (doccnt == 0)
doccnt = 1;
for (vector<string>::const_iterator qit = qterms.begin();
qit != qterms.end(); qit++) {
termfreqs[*qit] = xrdb.get_termfreq(*qit) / doccnt;
LOGABS(("setDbWideQTermFreqs: [%s] db freq %.1e\n", qit->c_str(),
termfreqs[*qit]));
}
}
// Compute query terms quality coefficients for a matched document by
// retrieving the Within Document Frequencies and multiplying by
// overal term frequency, then using log-based thresholds.
// 2012: it's not too clear to me why exactly we do the log thresholds thing.
// Preferring terms wich are rare either or both in the db and the document
// seems reasonable though
double Query::Native::qualityTerms(Xapian::docid docid,
const vector<string>& terms,
multimap<double, string>& byQ)
{
setDbWideQTermsFreqs();
map<string, double> termQcoefs;
double totalweight = 0;
Xapian::Database &xrdb = m_q->m_db->m_ndb->xrdb;
double doclen = xrdb.get_doclength(docid);
if (doclen == 0)
doclen = 1;
for (vector<string>::const_iterator qit = terms.begin();
qit != terms.end(); qit++) {
Xapian::TermIterator term = xrdb.termlist_begin(docid);
term.skip_to(*qit);
if (term != xrdb.termlist_end(docid) && *term == *qit) {
double q = (term.get_wdf() / doclen) * termfreqs[*qit];
q = -log10(q);
if (q < 3) {
q = 0.05;
} else if (q < 4) {
q = 0.3;
} else if (q < 5) {
q = 0.7;
} else if (q < 6) {
q = 0.8;
} else {
q = 1;
}
termQcoefs[*qit] = q;
totalweight += q;
}
}
// Build a sorted by quality term list.
for (vector<string>::const_iterator qit = terms.begin();
qit != terms.end(); qit++) {
if (termQcoefs.find(*qit) != termQcoefs.end())
byQ.insert(pair<double,string>(termQcoefs[*qit], *qit));
}
#ifdef DEBUGABSTRACT
LOGDEB(("Db::qualityTerms:\n"));
for (multimap<double, string>::reverse_iterator qit = byQ.rbegin();
qit != byQ.rend(); qit++) {
LOGDEB(("%.1e->[%s]\n", qit->first, qit->second.c_str()));
}
#endif
return totalweight;
}
// Return page number for first match of "significant" term.
int Query::Native::getFirstMatchPage(Xapian::docid docid)
{
if (!m_q|| !m_q->m_db || !m_q->m_db->m_ndb || !m_q->m_db->m_ndb->m_isopen) {
LOGERR(("Query::getFirstMatchPage: no db\n"));
return false;
}
Rcl::Db::Native *ndb(m_q->m_db->m_ndb);
Xapian::Database& xrdb(ndb->xrdb);
vector<string> terms;
{
vector<string> iterms;
m_q->getMatchTerms(docid, iterms);
noPrefixList(iterms, terms);
}
if (terms.empty()) {
LOGDEB(("getFirstMatchPage: empty match term list (field match?)\n"));
return -1;
}
vector<int> pagepos;
ndb->getPagePositions(docid, pagepos);
if (pagepos.empty())
return -1;
setDbWideQTermsFreqs();
// We try to use a page which matches the "best" term. Get a sorted list
multimap<double, string> byQ;
double totalweight = qualityTerms(docid, terms, byQ);
for (multimap<double, string>::reverse_iterator qit = byQ.rbegin();
qit != byQ.rend(); qit++) {
string qterm = qit->second;
Xapian::PositionIterator pos;
string emptys;
try {
for (pos = xrdb.positionlist_begin(docid, qterm);
pos != xrdb.positionlist_end(docid, qterm); pos++) {
int pagenum = ndb->getPageNumberForPosition(pagepos, *pos);
if (pagenum > 0)
return pagenum;
}
} catch (...) {
// Term does not occur. No problem.
}
}
return -1;
}
// Build a document abstract by extracting text chunks around the query terms
// This uses the db termlists, not the original document.
//
// DatabaseModified and other general exceptions are catched and
// possibly retried by our caller
abstract_result Query::Native::makeAbstract(Xapian::docid docid,
vector<pair<int, string> >& vabs,
int imaxoccs, int ictxwords)
{
Chrono chron;
LOGDEB2(("makeAbstract:%d: maxlen %d wWidth %d imaxoccs %d\n", chron.ms(),
m_rcldb->m_synthAbsLen, m_rcldb->m_synthAbsWordCtxLen, imaxoccs));
// The (unprefixed) terms matched by this document
vector<string> matchedTerms;
{
vector<string> iterms;
m_q->getMatchTerms(docid, iterms);
noPrefixList(iterms, matchedTerms);
if (matchedTerms.empty()) {
LOGDEB(("makeAbstract::Empty term list\n"));
return ABSRES_ERROR;
}
}
listList("Match terms: ", matchedTerms);
// Retrieve the term freqencies for the query terms. This is
// actually computed only once for a query, and for all terms in
// the query (not only the matches for this doc)
setDbWideQTermsFreqs();
// Build a sorted by quality container for the match terms We are
// going to try and show text around the less common search terms.
// TOBEDONE: terms issued from an original one by stem expansion
// should be somehow aggregated here, else, it may happen that
// such a group prevents displaying matches for other terms (by
// removing its meaning from the maximum occurrences per term test
// used while walking the list below)
multimap<double, string> byQ;
double totalweight = qualityTerms(docid, matchedTerms, byQ);
LOGABS(("makeAbstract:%d: computed Qcoefs.\n", chron.ms()));
// This can't happen, but would crash us
if (totalweight == 0.0) {
LOGERR(("makeAbstract: totalweight == 0.0 !\n"));
return ABSRES_ERROR;
}
Rcl::Db::Native *ndb(m_q->m_db->m_ndb);
Xapian::Database& xrdb(ndb->xrdb);
///////////////////
// For each of the query terms, ask xapian for its positions list
// in the document. For each position entry, remember it in
// qtermposs and insert it and its neighbours in the set of
// 'interesting' positions
// The terms 'array' that we partially populate with the document
// terms, at their positions around the search terms positions:
map<unsigned int, string> sparseDoc;
// Total number of occurences for all terms. We stop when we have too much
unsigned int totaloccs = 0;
// Limit the total number of slots we populate. The 7 is taken as
// average word size. It was a mistake to have the user max
// abstract size parameter in characters, we basically only deal
// with words. We used to limit the character size at the end, but
// this damaged our careful selection of terms
const unsigned int maxtotaloccs = imaxoccs > 0 ? imaxoccs :
m_q->m_db->getAbsLen() /(7 * (m_q->m_db->getAbsCtxLen() + 1));
int ctxwords = ictxwords == -1 ? m_q->m_db->getAbsCtxLen() : ictxwords;
LOGABS(("makeAbstract:%d: mxttloccs %d ctxwords %d\n",
chron.ms(), maxtotaloccs, ctxwords));
// This is used to mark positions overlapped by a multi-word match term
const string occupiedmarker("?");
abstract_result ret = ABSRES_OK;
// Let's go populate
for (multimap<double, string>::reverse_iterator qit = byQ.rbegin();
qit != byQ.rend(); qit++) {
string qterm = qit->second;
unsigned int maxoccs;
if (byQ.size() == 1) {
maxoccs = maxtotaloccs;
} else {
// We give more slots to the better terms
float q = qit->first / totalweight;
maxoccs = int(ceil(maxtotaloccs * q));
LOGABS(("makeAbstract: [%s] %d max occs (coef %.2f)\n",
qterm.c_str(), maxoccs, q));
}
// The match term may span several words
int qtrmwrdcnt = TextSplit::countWords(qterm, TextSplit::TXTS_NOSPANS);
Xapian::PositionIterator pos;
// There may be query terms not in this doc. This raises an
// exception when requesting the position list, we catch it ??
// Not clear how this can happen because we are walking the
// match list returned by Xapian. Maybe something with the
// fields?
string emptys;
try {
unsigned int occurrences = 0;
for (pos = xrdb.positionlist_begin(docid, qterm);
pos != xrdb.positionlist_end(docid, qterm); pos++) {
int ipos = *pos;
if (ipos < int(baseTextPosition)) // Not in text body
continue;
LOGABS(("makeAbstract: [%s] at %d occurrences %d maxoccs %d\n",
qterm.c_str(), ipos, occurrences, maxoccs));
totaloccs++;
// Add adjacent slots to the set to populate at next
// step by inserting empty strings. Special provisions
// for adding ellipsis and for positions overlapped by
// the match term.
unsigned int sta = MAX(0, ipos - ctxwords);
unsigned int sto = ipos + qtrmwrdcnt-1 +
m_q->m_db->getAbsCtxLen();
for (unsigned int ii = sta; ii <= sto; ii++) {
if (ii == (unsigned int)ipos) {
sparseDoc[ii] = qterm;
} else if (ii > (unsigned int)ipos &&
ii < (unsigned int)ipos + qtrmwrdcnt) {
sparseDoc[ii] = occupiedmarker;
} else if (!sparseDoc[ii].compare(cstr_ellipsis)) {
// For an empty slot, the test has a side
// effect of inserting an empty string which
// is what we want
sparseDoc[ii] = emptys;
}
}
// Add ellipsis at the end. This may be replaced later by
// an overlapping extract. Take care not to replace an
// empty string here, we really want an empty slot,
// use find()
if (sparseDoc.find(sto+1) == sparseDoc.end()) {
sparseDoc[sto+1] = cstr_ellipsis;
}
// Limit to allocated occurences and total size
if (++occurrences >= maxoccs ||
totaloccs >= maxtotaloccs) {
ret = ABSRES_TRUNC;
LOGDEB(("Db::makeAbstract: max occurrences cutoff\n"));
break;
}
}
} catch (...) {
// Term does not occur. No problem.
}
if (totaloccs >= maxtotaloccs) {
ret = ABSRES_TRUNC;
LOGDEB(("Db::makeAbstract: max1 occurrences cutoff\n"));
break;
}
}
LOGABS(("makeAbstract:%d:chosen number of positions %d\n",
chron.millis(), totaloccs));
// This can happen if there are term occurences in the keywords
// etc. but not elsewhere ?
if (totaloccs == 0) {
LOGDEB1(("makeAbstract: no occurrences\n"));
return ABSRES_ERROR;
}
// Walk all document's terms position lists and populate slots
// around the query terms. We arbitrarily truncate the list to
// avoid taking forever. If we do cutoff, the abstract may be
// inconsistant (missing words, potentially altering meaning),
// which is bad.
{
Xapian::TermIterator term;
int cutoff = 500 * 1000;
for (term = xrdb.termlist_begin(docid);
term != xrdb.termlist_end(docid); term++) {
// Ignore prefixed terms
if (has_prefix(*term))
continue;
if (cutoff-- < 0) {
ret = ABSRES_TRUNC;
LOGDEB0(("makeAbstract: max term count cutoff\n"));
break;
}
Xapian::PositionIterator pos;
for (pos = xrdb.positionlist_begin(docid, *term);
pos != xrdb.positionlist_end(docid, *term); pos++) {
if (cutoff-- < 0) {
ret = ABSRES_TRUNC;
LOGDEB0(("makeAbstract: max term count cutoff\n"));
break;
}
map<unsigned int, string>::iterator vit;
if ((vit=sparseDoc.find(*pos)) != sparseDoc.end()) {
// Don't replace a term: the terms list is in
// alphabetic order, and we may have several terms
// at the same position, we want to keep only the
// first one (ie: dockes and dockes@wanadoo.fr)
if (vit->second.empty()) {
LOGDEB2(("makeAbstract: populating: [%s] at %d\n",
(*term).c_str(), *pos));
sparseDoc[*pos] = *term;
}
}
}
}
}
#if 0
// Debug only: output the full term[position] vector
bool epty = false;
int ipos = 0;
for (map<unsigned int, string>::iterator it = sparseDoc.begin();
it != sparseDoc.end();
it++, ipos++) {
if (it->empty()) {
if (!epty)
LOGDEB(("makeAbstract:vec[%d]: [%s]\n", ipos, it->c_str()));
epty=true;
} else {
epty = false;
LOGDEB(("makeAbstract:vec[%d]: [%s]\n", ipos, it->c_str()));
}
}
#endif
vector<int> vpbreaks;
ndb->getPagePositions(docid, vpbreaks);
LOGABS(("makeAbstract:%d: extracting. Got %u pages\n", chron.millis(),
vpbreaks.size()));
// Finally build the abstract by walking the map (in order of position)
vabs.clear();
string chunk;
bool incjk = false;
int page = 0;
for (map<unsigned int, string>::const_iterator it = sparseDoc.begin();
it != sparseDoc.end(); it++) {
LOGDEB2(("Abtract:output %u -> [%s]\n", it->first,it->second.c_str()));
if (!occupiedmarker.compare(it->second))
continue;
if (chunk.empty() && !vpbreaks.empty()) {
page = ndb->getPageNumberForPosition(vpbreaks, it->first);
if (page < 0)
page = 0;
}
Utf8Iter uit(it->second);
bool newcjk = false;
if (TextSplit::isCJK(*uit))
newcjk = true;
if (!incjk || (incjk && !newcjk))
chunk += " ";
incjk = newcjk;
if (it->second == cstr_ellipsis) {
vabs.push_back(pair<int,string>(page, chunk));
chunk.clear();
} else {
if (it->second.compare(end_of_field_term) &&
it->second.compare(start_of_field_term))
chunk += it->second;
}
}
if (!chunk.empty())
vabs.push_back(pair<int, string>(page, chunk));
LOGDEB2(("makeAbtract: done in %d mS\n", chron.millis()));
return ret;
}
}
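For intuition on the thresholds in Query::Native::qualityTerms() above: a term's raw quality is its within-document frequency (wdf/doclen) multiplied by its database-wide frequency, mapped through -log10() and bucketed, so terms that are rare in the document and/or the database get coefficients near 1 and very common ones near 0.05. A self-contained sketch with hypothetical numbers (not taken from any real index):

#include <math.h>
#include <stdio.h>

int main()
{
    // Hypothetical term: 2 occurrences in a 1000-term document
    // (wdf/doclen = 0.002), present in 1% of the db (termfreq = 0.01).
    double q = -log10(0.002 * 0.01);    // -log10(2e-5), about 4.7
    // qualityTerms() buckets [4, 5) to 0.7: a moderately rare term,
    // so it receives a large share of the snippet slots.
    printf("raw quality %.2f -> coefficient 0.7\n", q);
    return 0;
}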


@@ -57,22 +57,11 @@ using namespace std;
#include "termproc.h"
#include "expansiondbs.h"
-#ifndef MAX
-#define MAX(A,B) (A>B?A:B)
-#endif
-#ifndef MIN
-#define MIN(A,B) (A<B?A:B)
-#endif
// Recoll index format version is stored in user metadata. When this change,
// we can't open the db and will have to reindex.
static const string cstr_RCL_IDX_VERSION_KEY("RCL_IDX_VERSION_KEY");
static const string cstr_RCL_IDX_VERSION("1");
-// This is the word position offset at which we index the body text
-// (abstract, keywords, etc.. are stored before this)
-static const unsigned int baseTextPosition = 100000;
static const string cstr_mbreaks("rclmbreaks");
#ifndef NO_NAMESPACES
@@ -102,11 +91,6 @@ const string page_break_term = "XXPG/";
static const string unsplitFilenameFieldName = "rclUnsplitFN";
static const string unsplitfilename_prefix = "XSFS";
-// This is used as a marker inside the abstract frag lists, but
-// normally doesn't remain in final output (which is built with a
-// custom sep. by our caller).
-static const string cstr_ellipsis("...");
string version_string(){
    return string("Recoll ") + string(rclversionstr) + string(" + Xapian ") +
        string(Xapian::version_string());

@@ -200,123 +184,6 @@ bool Db::Native::dbDataToRclDoc(Xapian::docid docid, std::string &data,
    return true;
}
// Keep only non-prefixed terms. We use to remove prefixes and keep
// the terms instead, but field terms are normally also indexed
// un-prefixed, so this is simpler and better.
static void noPrefixList(const vector<string>& in, vector<string>& out)
{
for (vector<string>::const_iterator qit = in.begin();
qit != in.end(); qit++) {
if (!has_prefix(*qit))
out.push_back(*qit);
}
}
#undef DEBUGABSTRACT
#ifdef DEBUGABSTRACT
#define LOGABS LOGDEB
static void listList(const string& what, const vector<string>&l)
{
string a;
for (vector<string>::const_iterator it = l.begin(); it != l.end(); it++) {
a = a + *it + " ";
}
LOGDEB(("%s: %s\n", what.c_str(), a.c_str()));
}
#else
#define LOGABS LOGDEB2
static void listList(const string&, const vector<string>&)
{
}
#endif
// Retrieve db-wide frequencies for the query terms and store them in
// the query object. This is done at most once for a query, and the data is used
// while computing abstracts for the different result documents.
void Db::Native::setDbWideQTermsFreqs(Query *query)
{
// Do it once only for a given query.
if (!query->m_nq->termfreqs.empty())
return;
vector<string> qterms;
{
vector<string> iqterms;
query->getQueryTerms(iqterms);
noPrefixList(iqterms, qterms);
}
// listList("Query terms: ", qterms);
double doccnt = xrdb.get_doccount();
if (doccnt == 0)
doccnt = 1;
for (vector<string>::const_iterator qit = qterms.begin();
qit != qterms.end(); qit++) {
query->m_nq->termfreqs[*qit] = xrdb.get_termfreq(*qit) / doccnt;
LOGABS(("set..QTermFreqs: [%s] db freq %.1e\n", qit->c_str(),
query->m_nq->termfreqs[*qit]));
}
}
// Compute query terms quality coefficients for a matched document by
// retrieving the Within Document Frequencies and multiplying by
// overal term frequency, then using log-based thresholds.
// 2012: it's not too clear to me why exactly we do the log thresholds thing.
// Preferring terms wich are rare either or both in the db and the document
// seems reasonable though
double Db::Native::qualityTerms(Xapian::docid docid,
Query *query,
const vector<string>& terms,
multimap<double, string>& byQ)
{
map<string, double> termQcoefs;
double totalweight = 0;
double doclen = xrdb.get_doclength(docid);
if (doclen == 0)
doclen = 1;
for (vector<string>::const_iterator qit = terms.begin();
qit != terms.end(); qit++) {
Xapian::TermIterator term = xrdb.termlist_begin(docid);
term.skip_to(*qit);
if (term != xrdb.termlist_end(docid) && *term == *qit) {
double q = (term.get_wdf() / doclen) * query->m_nq->termfreqs[*qit];
q = -log10(q);
if (q < 3) {
q = 0.05;
} else if (q < 4) {
q = 0.3;
} else if (q < 5) {
q = 0.7;
} else if (q < 6) {
q = 0.8;
} else {
q = 1;
}
termQcoefs[*qit] = q;
totalweight += q;
}
}
// Build a sorted by quality term list.
for (vector<string>::const_iterator qit = terms.begin();
qit != terms.end(); qit++) {
if (termQcoefs.find(*qit) != termQcoefs.end())
byQ.insert(pair<double,string>(termQcoefs[*qit], *qit));
}
#ifdef DEBUGABSTRACT
LOGDEB(("Db::qualityTerms:\n"));
for (multimap<double, string>::reverse_iterator qit = byQ.rbegin();
qit != byQ.rend(); qit++) {
LOGDEB(("%.1e->[%s]\n", qit->first, qit->second.c_str()));
}
#endif
return totalweight;
}
// Return the positions list for the page break term
bool Db::Native::getPagePositions(Xapian::docid docid, vector<int>& vpos)
{

@@ -377,322 +244,6 @@ int Db::Native::getPageNumberForPosition(const vector<int>& pbreaks,
    return it - pbreaks.begin() + 1;
}
// Return page number for first match of "significant" term.
int Db::Native::getFirstMatchPage(Xapian::docid docid, Query *query)
{
vector<string> terms;
{
vector<string> iterms;
query->getMatchTerms(docid, iterms);
noPrefixList(iterms, terms);
}
if (terms.empty()) {
LOGDEB(("getFirstMatchPage: empty match term list (field match?)\n"));
return -1;
}
vector<int> pagepos;
getPagePositions(docid, pagepos);
if (pagepos.empty())
return -1;
setDbWideQTermsFreqs(query);
// We try to use a page which matches the "best" term. Get a sorted list
multimap<double, string> byQ;
double totalweight = qualityTerms(docid, query, terms, byQ);
for (multimap<double, string>::reverse_iterator qit = byQ.rbegin();
qit != byQ.rend(); qit++) {
string qterm = qit->second;
Xapian::PositionIterator pos;
string emptys;
try {
for (pos = xrdb.positionlist_begin(docid, qterm);
pos != xrdb.positionlist_end(docid, qterm); pos++) {
int pagenum = getPageNumberForPosition(pagepos, *pos);
if (pagenum > 0)
return pagenum;
}
} catch (...) {
// Term does not occur. No problem.
}
}
return -1;
}
// Build a document abstract by extracting text chunks around the query terms
// This uses the db termlists, not the original document.
//
// DatabaseModified and other general exceptions are catched and
// possibly retried by our caller
abstract_result Db::Native::makeAbstract(Xapian::docid docid, Query *query,
vector<pair<int, string> >& vabs,
int imaxoccs, int ictxwords)
{
Chrono chron;
LOGDEB2(("makeAbstract:%d: maxlen %d wWidth %d imaxoccs %d\n", chron.ms(),
m_rcldb->m_synthAbsLen, m_rcldb->m_synthAbsWordCtxLen, imaxoccs));
// The (unprefixed) terms matched by this document
vector<string> matchedTerms;
{
vector<string> iterms;
query->getMatchTerms(docid, iterms);
noPrefixList(iterms, matchedTerms);
if (matchedTerms.empty()) {
LOGDEB(("makeAbstract::Empty term list\n"));
return ABSRES_ERROR;
}
}
listList("Match terms: ", matchedTerms);
// Retrieve the term freqencies for the query terms. This is
// actually computed only once for a query, and for all terms in
// the query (not only the matches for this doc)
setDbWideQTermsFreqs(query);
// Build a sorted by quality container for the match terms We are
// going to try and show text around the less common search terms.
// TOBEDONE: terms issued from an original one by stem expansion
// should be somehow aggregated here, else, it may happen that
// such a group prevents displaying matches for other terms (by
// removing its meaning from the maximum occurrences per term test
// used while walking the list below)
multimap<double, string> byQ;
double totalweight = qualityTerms(docid, query, matchedTerms, byQ);
LOGABS(("makeAbstract:%d: computed Qcoefs.\n", chron.ms()));
// This can't happen, but would crash us
if (totalweight == 0.0) {
LOGERR(("makeAbstract: totalweight == 0.0 !\n"));
return ABSRES_ERROR;
}
///////////////////
// For each of the query terms, ask xapian for its positions list
// in the document. For each position entry, remember it in
// qtermposs and insert it and its neighbours in the set of
// 'interesting' positions
// The terms 'array' that we partially populate with the document
// terms, at their positions around the search terms positions:
map<unsigned int, string> sparseDoc;
// Total number of occurences for all terms. We stop when we have too much
unsigned int totaloccs = 0;
// Limit the total number of slots we populate. The 7 is taken as
// average word size. It was a mistake to have the user max
// abstract size parameter in characters, we basically only deal
// with words. We used to limit the character size at the end, but
// this damaged our careful selection of terms
const unsigned int maxtotaloccs = imaxoccs > 0 ? imaxoccs :
m_rcldb->m_synthAbsLen /(7 * (m_rcldb->m_synthAbsWordCtxLen+1));
int ctxwords = ictxwords == -1 ? m_rcldb->m_synthAbsWordCtxLen : ictxwords;
LOGABS(("makeAbstract:%d: mxttloccs %d ctxwords %d\n",
chron.ms(), maxtotaloccs, ctxwords));
// This is used to mark positions overlapped by a multi-word match term
const string occupiedmarker("?");
abstract_result ret = ABSRES_OK;
// Let's go populate
for (multimap<double, string>::reverse_iterator qit = byQ.rbegin();
qit != byQ.rend(); qit++) {
string qterm = qit->second;
unsigned int maxoccs;
if (byQ.size() == 1) {
maxoccs = maxtotaloccs;
} else {
// We give more slots to the better terms
float q = qit->first / totalweight;
maxoccs = int(ceil(maxtotaloccs * q));
LOGABS(("makeAbstract: [%s] %d max occs (coef %.2f)\n",
qterm.c_str(), maxoccs, q));
}
// The match term may span several words
int qtrmwrdcnt = TextSplit::countWords(qterm, TextSplit::TXTS_NOSPANS);
Xapian::PositionIterator pos;
// There may be query terms not in this doc. This raises an
// exception when requesting the position list, we catch it ??
// Not clear how this can happen because we are walking the
// match list returned by Xapian. Maybe something with the
// fields?
string emptys;
try {
unsigned int occurrences = 0;
for (pos = xrdb.positionlist_begin(docid, qterm);
pos != xrdb.positionlist_end(docid, qterm); pos++) {
int ipos = *pos;
if (ipos < int(baseTextPosition)) // Not in text body
continue;
LOGABS(("makeAbstract: [%s] at %d occurrences %d maxoccs %d\n",
qterm.c_str(), ipos, occurrences, maxoccs));
totaloccs++;
// Add adjacent slots to the set to populate at next
// step by inserting empty strings. Special provisions
// for adding ellipsis and for positions overlapped by
// the match term.
unsigned int sta = MAX(0, ipos - ctxwords);
unsigned int sto = ipos + qtrmwrdcnt-1 +
m_rcldb->m_synthAbsWordCtxLen;
for (unsigned int ii = sta; ii <= sto; ii++) {
if (ii == (unsigned int)ipos) {
sparseDoc[ii] = qterm;
} else if (ii > (unsigned int)ipos &&
ii < (unsigned int)ipos + qtrmwrdcnt) {
sparseDoc[ii] = occupiedmarker;
} else if (!sparseDoc[ii].compare(cstr_ellipsis)) {
// For an empty slot, the test has a side
// effect of inserting an empty string which
// is what we want
sparseDoc[ii] = emptys;
}
}
// Add ellipsis at the end. This may be replaced later by
// an overlapping extract. Take care not to replace an
// empty string here, we really want an empty slot,
// use find()
if (sparseDoc.find(sto+1) == sparseDoc.end()) {
sparseDoc[sto+1] = cstr_ellipsis;
}
// Limit to allocated occurences and total size
if (++occurrences >= maxoccs ||
totaloccs >= maxtotaloccs) {
ret = ABSRES_TRUNC;
LOGDEB(("Db::makeAbstract: max occurrences cutoff\n"));
break;
}
}
} catch (...) {
// Term does not occur. No problem.
}
if (totaloccs >= maxtotaloccs) {
ret = ABSRES_TRUNC;
LOGDEB(("Db::makeAbstract: max1 occurrences cutoff\n"));
break;
}
}
LOGABS(("makeAbstract:%d:chosen number of positions %d\n",
chron.millis(), totaloccs));
// This can happen if there are term occurences in the keywords
// etc. but not elsewhere ?
if (totaloccs == 0) {
LOGDEB1(("makeAbstract: no occurrences\n"));
return ABSRES_ERROR;
}
// Walk all document's terms position lists and populate slots
// around the query terms. We arbitrarily truncate the list to
// avoid taking forever. If we do cutoff, the abstract may be
// inconsistant (missing words, potentially altering meaning),
// which is bad.
{
Xapian::TermIterator term;
int cutoff = 500 * 1000;
for (term = xrdb.termlist_begin(docid);
term != xrdb.termlist_end(docid); term++) {
// Ignore prefixed terms
if (has_prefix(*term))
continue;
if (cutoff-- < 0) {
ret = ABSRES_TRUNC;
LOGDEB0(("makeAbstract: max term count cutoff\n"));
break;
}
Xapian::PositionIterator pos;
for (pos = xrdb.positionlist_begin(docid, *term);
pos != xrdb.positionlist_end(docid, *term); pos++) {
if (cutoff-- < 0) {
ret = ABSRES_TRUNC;
LOGDEB0(("makeAbstract: max term count cutoff\n"));
break;
}
map<unsigned int, string>::iterator vit;
if ((vit=sparseDoc.find(*pos)) != sparseDoc.end()) {
// Don't replace a term: the terms list is in
// alphabetic order, and we may have several terms
// at the same position, we want to keep only the
// first one (ie: dockes and dockes@wanadoo.fr)
if (vit->second.empty()) {
LOGDEB2(("makeAbstract: populating: [%s] at %d\n",
(*term).c_str(), *pos));
sparseDoc[*pos] = *term;
}
}
}
}
}
#if 0
// Debug only: output the full term[position] vector
bool epty = false;
int ipos = 0;
for (map<unsigned int, string>::iterator it = sparseDoc.begin();
it != sparseDoc.end();
it++, ipos++) {
if (it->empty()) {
if (!epty)
LOGDEB(("makeAbstract:vec[%d]: [%s]\n", ipos, it->c_str()));
epty=true;
} else {
epty = false;
LOGDEB(("makeAbstract:vec[%d]: [%s]\n", ipos, it->c_str()));
}
}
#endif
vector<int> vpbreaks;
getPagePositions(docid, vpbreaks);
LOGABS(("makeAbstract:%d: extracting. Got %u pages\n", chron.millis(),
vpbreaks.size()));
// Finally build the abstract by walking the map (in order of position)
vabs.clear();
string chunk;
bool incjk = false;
int page = 0;
for (map<unsigned int, string>::const_iterator it = sparseDoc.begin();
it != sparseDoc.end(); it++) {
LOGDEB2(("Abtract:output %u -> [%s]\n", it->first,it->second.c_str()));
if (!occupiedmarker.compare(it->second))
continue;
if (chunk.empty() && !vpbreaks.empty()) {
page = getPageNumberForPosition(vpbreaks, it->first);
if (page < 0)
page = 0;
}
Utf8Iter uit(it->second);
bool newcjk = false;
if (TextSplit::isCJK(*uit))
newcjk = true;
if (!incjk || (incjk && !newcjk))
chunk += " ";
incjk = newcjk;
if (it->second == cstr_ellipsis) {
vabs.push_back(pair<int,string>(page, chunk));
chunk.clear();
} else {
if (it->second.compare(end_of_field_term) &&
it->second.compare(start_of_field_term))
chunk += it->second;
}
}
if (!chunk.empty())
vabs.push_back(pair<int, string>(page, chunk));
LOGDEB2(("makeAbtract: done in %d mS\n", chron.millis()));
return ret;
}
/* Rcl::Db methods ///////////////////////////////// */

@@ -2190,77 +1741,6 @@ bool Db::stemDiffers(const string& lang, const string& word,
    return true;
}
abstract_result Db::makeDocAbstract(Doc &doc, Query *query,
vector<pair<int, string> >& abstract,
int maxoccs, int ctxwords)
{
LOGDEB(("makeDocAbstract: maxoccs %d ctxwords %d\n", maxoccs, ctxwords));
if (!m_ndb || !m_ndb->m_isopen) {
LOGERR(("Db::makeDocAbstract: no db\n"));
return ABSRES_ERROR;
}
abstract_result ret = ABSRES_ERROR;
XAPTRY(ret = m_ndb->makeAbstract(doc.xdocid, query, abstract,
maxoccs, ctxwords),
m_ndb->xrdb, m_reason);
if (!m_reason.empty())
return ABSRES_ERROR;
return ret;
}
bool Db::makeDocAbstract(Doc &doc, Query *query, vector<string>& abstract)
{
if (!m_ndb || !m_ndb->m_isopen) {
LOGERR(("Db::makeDocAbstract: no db\n"));
return false;
}
vector<pair<int, string> > vpabs;
if (!makeDocAbstract(doc, query, vpabs))
return false;
for (vector<pair<int, string> >::const_iterator it = vpabs.begin();
it != vpabs.end(); it++) {
string chunk;
if (it->first > 0) {
ostringstream ss;
ss << it->first;
chunk += string(" [p ") + ss.str() + "] ";
}
chunk += it->second;
abstract.push_back(chunk);
}
return true;
}
bool Db::makeDocAbstract(Doc &doc, Query *query, string& abstract)
{
if (!m_ndb || !m_ndb->m_isopen) {
LOGERR(("Db::makeDocAbstract: no db\n"));
return false;
}
vector<pair<int, string> > vpabs;
if (!makeDocAbstract(doc, query, vpabs))
return false;
for (vector<pair<int, string> >::const_iterator it = vpabs.begin();
it != vpabs.end(); it++) {
abstract.append(it->second);
abstract.append(cstr_ellipsis);
}
return m_reason.empty() ? true : false;
}
int Db::getFirstMatchPage(Doc &doc, Query *query)
{
LOGDEB1(("Db::getFirstMatchPages\n"));;
if (!m_ndb || !m_ndb->m_isopen) {
LOGERR(("Db::getFirstMatchPage: no db\n"));
return false;
}
int pagenum = -1;
XAPTRY(pagenum = m_ndb->getFirstMatchPage(Xapian::docid(doc.xdocid), query),
m_ndb->xrdb, m_reason);
return m_reason.empty() ? pagenum : -1;
}
// Retrieve document defined by Unique doc identifier. This is mainly used
// by the GUI history feature
bool Db::getDoc(const string &udi, Doc &doc)


@@ -68,11 +68,6 @@ enum value_slot {
    VALUE_SIG = 10 // Doc sig as chosen by app (ex: mtime+size
};
-enum abstract_result {
-    ABSRES_ERROR = 0,
-    ABSRES_OK = 1,
-    ABSRES_TRUNC = 2
-};
class SearchData;
class TermIter;
class Query;
@@ -300,20 +295,10 @@ class Db {
    {
        return m_synthAbsWordCtxLen;
    }
-    /** Build synthetic abstract for document, extracting chunks relevant for
-     * the input query. This uses index data only (no access to the file) */
-    // Abstract return as one string
-    bool makeDocAbstract(Doc &doc, Query *query, string& abstract);
-    // Returned as a snippets vector
-    bool makeDocAbstract(Doc &doc, Query *query, vector<string>& abstract);
-    // Returned as a vector of pair<page,snippet> page is 0 if unknown
-    abstract_result makeDocAbstract(Doc &doc, Query *query,
-                                    vector<pair<int, string> >& abstract,
-                                    int maxoccs= -1, int ctxwords = -1);
-    /** Retrieve detected page breaks positions */
-    int getFirstMatchPage(Doc &doc, Query *query);
+    int getAbsLen() const
+    {
+        return m_synthAbsLen;
+    }
    /** Get document for given udi
     *
     * Used by the 'history' feature (and nothing else?)


@@ -89,16 +89,7 @@ class Db::Native {
#endif // IDX_THREADS
    }
-    double qualityTerms(Xapian::docid docid,
-                        Query *query,
-                        const vector<string>& terms,
-                        std::multimap<double, string>& byQ);
-    void setDbWideQTermsFreqs(Query *query);
-    abstract_result makeAbstract(Xapian::docid id, Query *query,
-                                 vector<pair<int, string> >&, int maxoccs = -1,
-                                 int ctxwords = -1);
    bool getPagePositions(Xapian::docid docid, vector<int>& vpos);
-    int getFirstMatchPage(Xapian::docid docid, Query *query);
    int getPageNumberForPosition(const vector<int>& pbreaks, unsigned int pos);
    bool dbDataToRclDoc(Xapian::docid docid, std::string &data, Doc &doc);

@@ -122,5 +113,9 @@ class Db::Native {
};
+// This is the word position offset at which we index the body text
+// (abstract, keywords, etc.. are stored before this)
+static const unsigned int baseTextPosition = 100000;
}
#endif /* _rcldb_p_h_included_ */


@@ -20,6 +20,8 @@
#include <stdio.h>
#include <vector>
+#include <sstream>
+using namespace std;
#include "xapian.h"

@@ -35,10 +37,11 @@
#include "searchdata.h"
#include "unacpp.h"
-#ifndef NO_NAMESPACES
namespace Rcl {
-#endif
+// This is used as a marker inside the abstract frag lists, but
+// normally doesn't remain in final output (which is built with a
+// custom sep. by our caller).
+static const string cstr_ellipsis("...");
// Field names inside the index data record may differ from the rcldoc ones
// (esp.: caption / title)

@@ -294,6 +297,68 @@ bool Query::getMatchTerms(unsigned long xdocid, vector<string>& terms)
    return true;
}
abstract_result Query::makeDocAbstract(Doc &doc,
vector<pair<int, string> >& abstract,
int maxoccs, int ctxwords)
{
LOGDEB(("makeDocAbstract: maxoccs %d ctxwords %d\n", maxoccs, ctxwords));
if (!m_db || !m_db->m_ndb || !m_db->m_ndb->m_isopen || !m_nq) {
LOGERR(("Query::makeDocAbstract: no db or no nq\n"));
return ABSRES_ERROR;
}
abstract_result ret = ABSRES_ERROR;
XAPTRY(ret = m_nq->makeAbstract(doc.xdocid, abstract, maxoccs, ctxwords),
m_db->m_ndb->xrdb, m_reason);
if (!m_reason.empty())
return ABSRES_ERROR;
return ret;
}
bool Query::makeDocAbstract(Doc &doc, vector<string>& abstract)
{
vector<pair<int, string> > vpabs;
if (!makeDocAbstract(doc, vpabs))
return false;
for (vector<pair<int, string> >::const_iterator it = vpabs.begin();
it != vpabs.end(); it++) {
string chunk;
if (it->first > 0) {
ostringstream ss;
ss << it->first;
chunk += string(" [p ") + ss.str() + "] ";
}
chunk += it->second;
abstract.push_back(chunk);
}
return true;
}
bool Query::makeDocAbstract(Doc &doc, string& abstract)
{
vector<pair<int, string> > vpabs;
if (!makeDocAbstract(doc, vpabs))
return false;
for (vector<pair<int, string> >::const_iterator it = vpabs.begin();
it != vpabs.end(); it++) {
abstract.append(it->second);
abstract.append(cstr_ellipsis);
}
return m_reason.empty() ? true : false;
}
int Query::getFirstMatchPage(Doc &doc)
{
LOGDEB1(("Db::getFirstMatchPages\n"));;
if (!m_nq) {
LOGERR(("Query::getFirstMatchPage: no nq\n"));
return false;
}
int pagenum = -1;
XAPTRY(pagenum = m_nq->getFirstMatchPage(Xapian::docid(doc.xdocid)),
m_db->m_ndb->xrdb, m_reason);
return m_reason.empty() ? pagenum : -1;
}
// Mset size
static const int qquantum = 50;


@@ -34,6 +34,12 @@ class SearchData;
class Db;
class Doc;
+enum abstract_result {
+    ABSRES_ERROR = 0,
+    ABSRES_OK = 1,
+    ABSRES_TRUNC = 2
+};
/**
 * An Rcl::Query is a question (SearchData) applied to a
 * database. Handles access to the results. Somewhat equivalent to a

@@ -76,6 +82,18 @@ class Query {
    bool getMatchTerms(const Doc& doc, vector<string>& terms);
    bool getMatchTerms(unsigned long xdocid, vector<string>& terms);
+    /** Build synthetic abstract for document, extracting chunks relevant for
+     * the input query. This uses index data only (no access to the file) */
+    // Abstract return as one string
+    bool makeDocAbstract(Doc &doc, string& abstract);
+    // Returned as a snippets vector
+    bool makeDocAbstract(Doc &doc, vector<string>& abstract);
+    // Returned as a vector of pair<page,snippet> page is 0 if unknown
+    abstract_result makeDocAbstract(Doc &doc, vector<pair<int, string> >& abst,
+                                    int maxoccs= -1, int ctxwords = -1);
+    /** Retrieve detected page breaks positions */
+    int getFirstMatchPage(Doc &doc);
    /** Expand query to look for documents like the one passed in */
    vector<string> expand(const Doc &doc);
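A usage sketch against the Query API declared above; printSnippets and the output formatting are illustrative only, and doc is assumed to come from this query's result list:

#include <stdio.h>
#include <string>
#include <utility>
#include <vector>
#include "rcldoc.h"
#include "rclquery.h"

// Print page-tagged snippets for one result document.
static void printSnippets(Rcl::Query& query, Rcl::Doc& doc)
{
    std::vector<std::pair<int, std::string> > snippets;
    if (query.makeDocAbstract(doc, snippets) == Rcl::ABSRES_ERROR)
        return;
    for (size_t i = 0; i < snippets.size(); i++) {
        // first is the page number (0 if unknown), second the snippet
        printf("[p %d] %s\n", snippets[i].first, snippets[i].second.c_str());
    }
}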


@@ -34,7 +34,6 @@ public:
    /** The query I belong to */
    Query *m_q;
-
    /** query descriptor: terms and subqueries joined by operators
     * (or/and etc...)
     */

@@ -55,6 +54,12 @@ public:
        delete xenquire; xenquire = 0;
        termfreqs.clear();
    }
+    abstract_result makeAbstract(Xapian::docid id, vector<pair<int, string> >&,
+                                 int maxoccs = -1, int ctxwords = -1);
+    int getFirstMatchPage(Xapian::docid docid);
+    void setDbWideQTermsFreqs();
+    double qualityTerms(Xapian::docid docid, const vector<string>& terms,
+                        std::multimap<double, string>& byQ);
};
} }