The approach is to turn each query into a filter. Lucene ships a QueryWrapperFilter, but its performance is rather poor, so in practice you end up writing the filters yourself: TermFilter, ExactPhraseFilter, ConjunctionFilter, and DisjunctionFilter.
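For reference, here is a minimal sketch of what such a TermFilter can look like on the same Lucene 4.0 APIs used in the rest of this post. It is an illustrative reconstruction under those assumptions, not the original class:

import java.io.IOException;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;

// Matches all documents containing a single term, with no scoring at all.
public class TermFilter extends Filter {
    private final Term term;

    public TermFilter(Term term) {
        this.term = term;
    }

    @Override
    public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
        Terms terms = context.reader().fields().terms(term.field());
        if (terms == null) {
            return null; // null is treated as the empty doc id set
        }
        TermsEnum te = terms.iterator(null);
        if (!te.seekExact(term.bytes(), true)) {
            return null; // term does not exist in this segment
        }
        final DocsEnum docs = te.docs(acceptDocs, null, 0);
        return new DocIdSet() {
            @Override
            public DocIdSetIterator iterator() throws IOException {
                // DocsEnum is already a DocIdSetIterator; single-use sketch.
                return docs;
            }
        };
    }
}

Since the filter bypasses scoring entirely, the postings enumeration can be handed back directly as the doc-id iterator.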

After several days of testing, OR shows the clearest improvement: with 4 TermFilters and 4,508 hits returned, performance on my machine improved by about a third. ExactPhraseFilter also improved slightly (5%-10%).
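For comparison, the core of a DisjunctionFilter is an OR-merge over the sub-filters' iterators. A minimal sketch of such an iterator, assuming each sub-filter has already produced its DocIdSetIterator (illustrative only, not the original DisjunctionFilter):

import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;

// Merges several sub-iterators by always emitting the smallest pending docID.
class DisjunctionIterator extends DocIdSetIterator {
    private final DocIdSetIterator[] subs;
    private int docID = -1;

    DisjunctionIterator(DocIdSetIterator[] subs) throws IOException {
        this.subs = subs;
        for (DocIdSetIterator sub : subs) {
            sub.nextDoc(); // prime every sub-iterator to its first doc
        }
    }

    @Override
    public int docID() {
        return docID;
    }

    @Override
    public int nextDoc() throws IOException {
        int min = NO_MORE_DOCS;
        for (DocIdSetIterator sub : subs) {
            if (sub.docID() == docID) {
                sub.nextDoc(); // consume iterators parked on the current doc
            }
            min = Math.min(min, sub.docID());
        }
        return docID = min;
    }

    @Override
    public int advance(int target) throws IOException {
        int min = NO_MORE_DOCS;
        for (DocIdSetIterator sub : subs) {
            if (sub.docID() < target) {
                sub.advance(target);
            }
            min = Math.min(min, sub.docID());
        }
        return docID = min;
    }
}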

The most puzzling case is AND. I had assumed the outcome would correlate with the number of hits and the number of sub-queries, but in several test runs it was almost always a regression.
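For context, a ConjunctionFilter boils down to a leapfrog intersection over the sub-iterators. A minimal sketch of that inner loop (illustrative, not the original code):

import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;

class ConjunctionSketch {
    // Returns the first doc >= target that appears in every sub-iterator,
    // or NO_MORE_DOCS if the iterators never agree again.
    static int nextMatch(DocIdSetIterator[] subs, int target) throws IOException {
        int doc = subs[0].advance(target);
        while (doc != DocIdSetIterator.NO_MORE_DOCS) {
            boolean allMatch = true;
            for (int i = 1; i < subs.length; i++) {
                int other = subs[i].docID();
                if (other < doc) {
                    other = subs[i].advance(doc);
                }
                if (other > doc) {
                    // Leapfrog: restart from the larger candidate doc.
                    doc = subs[0].advance(other);
                    allMatch = false;
                    break;
                }
            }
            if (allMatch) {
                return doc;
            }
        }
        return DocIdSetIterator.NO_MORE_DOCS;
    }
}

One plausible explanation for the regression is that every advance() is a seek with per-segment overhead, so on dense AND terms the leapfrog may do more work than Lucene's scorer-side conjunction; the measurements above do not pin this down, though.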

The ExactPhraseFilter and its unit test (UT) are attached below:

import java.io.IOException;
import java.util.ArrayList;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Bits;

// A fake of the Lucene phrase query, but greatly simplified.
public class ExactPhraseFilter extends Filter {
    protected final ArrayList<Term> terms = new ArrayList<Term>();
    protected final ArrayList<Integer> positions = new ArrayList<Integer>();
    protected String fieldName;

    public void add(Term term) {
        if (terms.size() == 0) {
            fieldName = term.field();
        } else {
            assert fieldName.equals(term.field());
        }
        positions.add(Integer.valueOf(terms.size()));
        terms.add(term);
    }

    @Override
    public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
        return new ExactPhraseDocIdSet(context, acceptDocs);
    }

    static class PostingAndFreq implements Comparable<PostingAndFreq> {
        DocsAndPositionsEnum posEnum;
        int docFreq;
        int position;         // offset of this term inside the phrase
        boolean useAdvance;   // use advance() instead of nextDoc() for rare terms
        int posFreq = 0;      // number of positions in the current doc
        int pos = -1;         // current position, shifted by the phrase offset
        int posTime = 0;      // how many positions have been consumed so far

        public PostingAndFreq(DocsAndPositionsEnum posEnum, int docFreq, int position, boolean useAdvance) {
            this.posEnum = posEnum;
            this.docFreq = docFreq;
            this.position = position;
            this.useAdvance = useAdvance;
        }

        @Override
        public int compareTo(PostingAndFreq other) {
            if (docFreq != other.docFreq) {
                return docFreq - other.docFreq;
            }
            if (position != other.position) {
                return position - other.position;
            }
            return 0;
        }
    }

    protected class ExactPhraseDocIdSet extends DocIdSet {
        protected final AtomicReaderContext context;
        protected final Bits acceptDocs;
        protected final PostingAndFreq[] postings;
        protected boolean noDocs = false;

        public ExactPhraseDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
            this.context = context;
            this.acceptDocs = acceptDocs;

            Terms fieldTerms = context.reader().fields().terms(fieldName);
            postings = new PostingAndFreq[terms.size()];
            TermsEnum te = fieldTerms.iterator(null);
            for (int i = 0; i < terms.size(); ++i) {
                final Term t = terms.get(i);
                if (!te.seekExact(t.bytes(), true)) {
                    // One term is missing entirely: no doc can match the phrase.
                    noDocs = true;
                    return;
                }
                if (i == 0) {
                    postings[i] = new PostingAndFreq(te.docsAndPositions(acceptDocs, null, 0), te.docFreq(), positions.get(i), false);
                } else {
                    // Use advance() for terms much rarer than the lead term.
                    postings[i] = new PostingAndFreq(te.docsAndPositions(acceptDocs, null, 0), te.docFreq(), positions.get(i), te.docFreq() > 5 * postings[0].docFreq);
                }
            }

            // Sort so that the rarest term drives the iteration.
            ArrayUtil.mergeSort(postings);
            for (int i = 1; i < terms.size(); ++i) {
                postings[i].posEnum.nextDoc();
            }
        }

        @Override
        public DocIdSetIterator iterator() throws IOException {
            if (noDocs) {
                return EMPTY_DOCIDSET.iterator();
            } else {
                return new ExactPhraseDocIdSetIterator(context, acceptDocs);
            }
        }

        protected class ExactPhraseDocIdSetIterator extends DocIdSetIterator {
            protected int docID = -1;

            public ExactPhraseDocIdSetIterator(AtomicReaderContext context, Bits acceptDocs) throws IOException {
                // The postings were already prepared by the enclosing DocIdSet.
            }

            @Override
            public int nextDoc() throws IOException {
                while (true) {
                    // first (rarest) term
                    final int doc = postings[0].posEnum.nextDoc();
                    if (doc == DocIdSetIterator.NO_MORE_DOCS) {
                        return docID = doc;
                    }

                    // non-first terms
                    int i = 1;
                    while (i < postings.length) {
                        final PostingAndFreq pf = postings[i];
                        int doc2 = pf.posEnum.docID();
                        if (pf.useAdvance) {
                            if (doc2 < doc) {
                                doc2 = pf.posEnum.advance(doc);
                            }
                        } else {
                            // nextDoc() is cheaper for dense terms; fall back to
                            // advance() if we have been stepping for too long.
                            int iter = 0;
                            while (doc2 < doc) {
                                if (++iter == 50) {
                                    doc2 = pf.posEnum.advance(doc);
                                } else {
                                    doc2 = pf.posEnum.nextDoc();
                                }
                            }
                        }
                        if (doc2 > doc) {
                            break;
                        }
                        ++i;
                    }

                    if (i == postings.length) {
                        // All terms occur in this doc; now verify the positions.
                        docID = doc;
                        if (containsPhrase()) {
                            return docID;
                        }
                    }
                }
            }

            @Override
            public int advance(int target) throws IOException {
                throw new UnsupportedOperationException("advance() is not implemented");
            }

            private boolean containsPhrase() throws IOException {
                int index = -1;
                int i = 0;
                PostingAndFreq pf;

                // Init: read the first position of every term, shifted by its
                // offset in the phrase so that matching positions are equal.
                for (i = 0; i < postings.length; ++i) {
                    postings[i].posFreq = postings[i].posEnum.freq();
                    postings[i].pos = postings[i].posEnum.nextPosition() - postings[i].position;
                    postings[i].posTime = 1;
                }

                while (true) {
                    // first term
                    pf = postings[0];
                    while (pf.pos < index && pf.posTime < pf.posFreq) {
                        pf.pos = pf.posEnum.nextPosition() - pf.position;
                        ++pf.posTime;
                    }
                    if (pf.pos >= index) {
                        index = pf.pos;
                    } else if (pf.posTime == pf.posFreq) {
                        return false;
                    }

                    // other terms
                    for (i = 1; i < postings.length; ++i) {
                        pf = postings[i];
                        while (pf.pos < index && pf.posTime < pf.posFreq) {
                            pf.pos = pf.posEnum.nextPosition() - pf.position;
                            ++pf.posTime;
                        }
                        if (pf.pos > index) {
                            index = pf.pos;
                            break;
                        }
                        if (pf.pos == index) {
                            continue;
                        }
                        if (pf.posTime == pf.posFreq) {
                            return false;
                        }
                    }
                    if (i == postings.length) {
                        return true;
                    }
                }
            }

            @Override
            public int docID() {
                return docID;
            }
        }
    }
}

UT:

import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;
import org.testng.annotations.AfterTest;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;

import com.dp.arts.lucenex.codec.Dp10Codec;

public class ExactPhraseFilterTest {
    final Directory dir = new RAMDirectory();

    @BeforeTest
    public void setUp() throws IOException {
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
        IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_40, analyzer);
        iwc.setOpenMode(OpenMode.CREATE);
        iwc.setCodec(Codec.forName(Dp10Codec.DP10_CODEC_NAME));

        IndexWriter writer = new IndexWriter(dir, iwc);
        addDocument(writer, "新疆烧烤");     // 0
        addDocument(writer, "啤酒");         // 1
        addDocument(writer, "烤烧");         // 2
        addDocument(writer, "烧烧烧");       // 3
        addDocument(writer, "烤烧中华烧烤"); // 4
        writer.close();
    }

    private void addDocument(IndexWriter writer, String str) throws IOException {
        Document doc = new Document();
        doc.add(new TextField("searchkeywords", str, Store.YES));
        writer.addDocument(doc, new StandardAnalyzer(Version.LUCENE_40));
    }

    @AfterTest
    public void tearDown() throws IOException {
        this.dir.close();
    }

    @Test
    public void test1() throws IOException {
        IndexReader reader = DirectoryReader.open(dir);
        IndexSearcher searcher = new IndexSearcher(reader);

        ExactPhraseFilter pf = new ExactPhraseFilter();
        pf.add(new Term("searchkeywords", "烧"));
        pf.add(new Term("searchkeywords", "烤"));
        Query query = new ConstantScoreQuery(pf);
        TopDocs results = searcher.search(query, 20);

        assert results.totalHits == 2;
        assert results.scoreDocs[0].doc == 0;
        assert results.scoreDocs[1].doc == 4;

        searcher.getIndexReader().close();
    }
}
