The idea is to turn queries into filters. Lucene's stock QueryWrapperFilter performs poorly, so for the most part you have to write the filters yourself: TermFilter, ExactPhraseFilter, ConjunctionFilter, and DisjunctionFilter.
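
For reference, a TermFilter of this kind can be very small. The sketch below is only my illustration of the idea, written against the same Lucene 4.0 API as the ExactPhraseFilter further down; it is not the exact implementation benchmarked here. It simply exposes a term's raw postings as a DocIdSet, so no scoring happens at all:

import java.io.IOException;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;

// Illustrative only: match every document containing one term, no scoring.
public class TermFilter extends Filter {
    private final Term term;

    public TermFilter(Term term) {
        this.term = term;
    }

    @Override
    public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
        Terms terms = context.reader().fields().terms(term.field());
        if (terms == null) {
            return null; // field absent in this segment
        }
        TermsEnum te = terms.iterator(null);
        if (!te.seekExact(term.bytes(), true)) {
            return null; // term absent: no matches
        }
        // The postings enum is already a DocIdSetIterator; wrap it as a DocIdSet.
        final DocsEnum docs = te.docs(acceptDocs, null, 0);
        return new DocIdSet() {
            @Override
            public DocIdSetIterator iterator() {
                return docs;
            }
        };
    }
}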

After several days of testing, OR shows the clearest improvement: with 4 TermFilters and 4508 hits, performance on my machine improved by about a third. ExactPhraseFilter also gained slightly (5%-10%).
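
The OR case needs the four TermFilters unioned into one DocIdSet. A plausible minimal DisjunctionFilter (again my sketch, not necessarily the code that was benchmarked) ORs each child's iterator into a FixedBitSet, which in Lucene 4.0 is itself a DocIdSet:

import java.io.IOException;
import java.util.List;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.FixedBitSet;

// Illustrative only: union of child filters.
public class DisjunctionFilter extends Filter {
    private final List<Filter> children;

    public DisjunctionFilter(List<Filter> children) {
        this.children = children;
    }

    @Override
    public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
        FixedBitSet result = new FixedBitSet(context.reader().maxDoc());
        for (Filter f : children) {
            DocIdSet set = f.getDocIdSet(context, acceptDocs);
            if (set == null) {
                continue; // this child matches nothing in the segment
            }
            DocIdSetIterator it = set.iterator();
            if (it != null) {
                result.or(it); // accumulate the union
            }
        }
        return result;
    }
}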

The most puzzling case is AND. I had assumed the outcome would depend on the number of hits and sub-queries, but across several test runs it was almost always slower.
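
For comparison, the AND case can be sketched the same way, seeding a FixedBitSet from the first child and intersecting the rest. This is my illustration, not the benchmarked ConjunctionFilter; but if the real implementation also materializes a full per-segment bitset like this, that fixed cost is one plausible reason AND comes out slower than a BooleanQuery, whose conjunction scorer leapfrogs with advance() and never touches most documents:

import java.io.IOException;
import java.util.List;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.FixedBitSet;

// Illustrative only: intersection of child filters.
public class ConjunctionFilter extends Filter {
    private final List<Filter> children;

    public ConjunctionFilter(List<Filter> children) {
        this.children = children;
    }

    @Override
    public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
        FixedBitSet result = null;
        for (Filter f : children) {
            DocIdSet set = f.getDocIdSet(context, acceptDocs);
            DocIdSetIterator it = (set == null) ? null : set.iterator();
            if (it == null) {
                return null; // one child is empty, so the intersection is empty
            }
            if (result == null) {
                result = new FixedBitSet(context.reader().maxDoc());
                result.or(it); // seed from the first child
            } else {
                result.and(it); // intersect with each remaining child
            }
        }
        return result;
    }
}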

Attached below are the ExactPhraseFilter and its unit test:

import java.io.IOException;
import java.util.ArrayList;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocsAndPositionsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Bits;

// A stand-in for Lucene's PhraseQuery, greatly simplified: the same
// exact-phrase matching, but exposed as a Filter so no scores are computed.
public class ExactPhraseFilter extends Filter {
    protected final ArrayList<Term> terms = new ArrayList<Term>();
    protected final ArrayList<Integer> positions = new ArrayList<Integer>();
    protected String fieldName;

    public void add(Term term) {
        if (terms.size() == 0) {
            fieldName = term.field();
        } else {
            assert fieldName.equals(term.field());
        }
        positions.add(Integer.valueOf(terms.size()));
        terms.add(term);
    }

    @Override
    public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
        return new ExactPhraseDocIdSet(context, acceptDocs);
    }

    static class PostingAndFreq implements Comparable<PostingAndFreq> {
        DocsAndPositionsEnum posEnum;
        int docFreq;
        int position;      // offset of this term inside the phrase
        boolean useAdvance;
        int posFreq = 0;   // number of positions in the current doc
        int pos = -1;      // current position, normalized by the phrase offset
        int posTime = 0;   // how many positions have been consumed

        public PostingAndFreq(DocsAndPositionsEnum posEnum, int docFreq, int position, boolean useAdvance) {
            this.posEnum = posEnum;
            this.docFreq = docFreq;
            this.position = position;
            this.useAdvance = useAdvance;
        }

        @Override
        public int compareTo(PostingAndFreq other) {
            if (docFreq != other.docFreq) {
                return docFreq - other.docFreq;
            }
            if (position != other.position) {
                return position - other.position;
            }
            return 0;
        }
    }

    protected class ExactPhraseDocIdSet extends DocIdSet {
        protected final AtomicReaderContext context;
        protected final Bits acceptDocs;
        protected final PostingAndFreq[] postings;
        protected boolean noDocs = false;

        public ExactPhraseDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
            this.context = context;
            this.acceptDocs = acceptDocs;

            postings = new PostingAndFreq[terms.size()];
            Terms fieldTerms = context.reader().fields().terms(fieldName);
            if (fieldTerms == null) {
                // Field absent from this segment: nothing can match.
                noDocs = true;
                return;
            }
            TermsEnum te = fieldTerms.iterator(null);
            for (int i = 0; i < terms.size(); ++i) {
                final Term t = terms.get(i);
                if (!te.seekExact(t.bytes(), true)) {
                    // One term is missing entirely, so the phrase cannot match.
                    noDocs = true;
                    return;
                }
                if (i == 0) {
                    postings[i] = new PostingAndFreq(te.docsAndPositions(acceptDocs, null, 0), te.docFreq(), positions.get(i), false);
                } else {
                    // Use advance() only for terms much more frequent than the rarest one.
                    postings[i] = new PostingAndFreq(te.docsAndPositions(acceptDocs, null, 0), te.docFreq(), positions.get(i), te.docFreq() > 5 * postings[0].docFreq);
                }
            }

            // Sort by docFreq so the rarest term drives the iteration.
            ArrayUtil.mergeSort(postings);
            for (int i = 1; i < terms.size(); ++i) {
                postings[i].posEnum.nextDoc();
            }
        }

        @Override
        public DocIdSetIterator iterator() throws IOException {
            if (noDocs) {
                return EMPTY_DOCIDSET.iterator();
            } else {
                return new ExactPhraseDocIdSetIterator(context, acceptDocs);
            }
        }

        protected class ExactPhraseDocIdSetIterator extends DocIdSetIterator {
            protected int docID = -1;

            public ExactPhraseDocIdSetIterator(AtomicReaderContext context, Bits acceptDocs) throws IOException {
            }

            @Override
            public int nextDoc() throws IOException {
                while (true) {
                    // First (rarest) term.
                    final int doc = postings[0].posEnum.nextDoc();
                    if (doc == DocIdSetIterator.NO_MORE_DOCS) {
                        return docID = doc;
                    }

                    // Line the remaining terms up on the same document.
                    int i = 1;
                    while (i < postings.length) {
                        final PostingAndFreq pf = postings[i];
                        int doc2 = pf.posEnum.docID();
                        if (pf.useAdvance) {
                            if (doc2 < doc) {
                                doc2 = pf.posEnum.advance(doc);
                            }
                        } else {
                            // For dense postings, nextDoc() is cheaper than
                            // advance(); fall back to advance() after 50 steps.
                            int iter = 0;
                            while (doc2 < doc) {
                                if (++iter == 50) {
                                    doc2 = pf.posEnum.advance(doc);
                                } else {
                                    doc2 = pf.posEnum.nextDoc();
                                }
                            }
                        }
                        if (doc2 > doc) {
                            break;
                        }
                        ++i;
                    }

                    if (i == postings.length) {
                        // All terms occur in this doc; now check the positions.
                        docID = doc;
                        if (containsPhrase()) {
                            return docID;
                        }
                    }
                }
            }

            @Override
            public int advance(int target) throws IOException {
                // Not supported: a top-level ConstantScoreQuery only drives
                // this iterator through nextDoc().
                throw new IOException();
            }

            private boolean containsPhrase() throws IOException {
                int index = -1;
                int i = 0;
                PostingAndFreq pf;

                // Initialize: first position of each term, normalized by its
                // offset in the phrase. The phrase matches iff all normalized
                // positions can be made equal.
                for (i = 0; i < postings.length; ++i) {
                    postings[i].posFreq = postings[i].posEnum.freq();
                    postings[i].pos = postings[i].posEnum.nextPosition() - postings[i].position;
                    postings[i].posTime = 1;
                }

                while (true) {
                    // First term: catch up to the current candidate position.
                    pf = postings[0];
                    while (pf.pos < index && pf.posTime < pf.posFreq) {
                        pf.pos = pf.posEnum.nextPosition() - pf.position;
                        ++pf.posTime;
                    }
                    if (pf.pos >= index) {
                        index = pf.pos;
                    } else if (pf.posTime == pf.posFreq) {
                        return false; // positions exhausted
                    }

                    // Other terms.
                    for (i = 1; i < postings.length; ++i) {
                        pf = postings[i];
                        while (pf.pos < index && pf.posTime < pf.posFreq) {
                            pf.pos = pf.posEnum.nextPosition() - pf.position;
                            ++pf.posTime;
                        }
                        if (pf.pos > index) {
                            // Overshot: restart the scan from this position.
                            index = pf.pos;
                            break;
                        }
                        if (pf.pos == index) {
                            continue;
                        }
                        if (pf.posTime == pf.posFreq) {
                            return false; // positions exhausted
                        }
                    }
                    if (i == postings.length) {
                        // Every normalized position agrees: phrase found.
                        return true;
                    }
                }
            }

            @Override
            public int docID() {
                return docID;
            }
        }
    }
}

Unit test:

import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;
import org.testng.annotations.AfterTest;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;

import com.dp.arts.lucenex.codec.Dp10Codec;

public class ExactPhraseFilterTest {
    final Directory dir = new RAMDirectory();

    @BeforeTest
    public void setUp() throws IOException {
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
        IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_40, analyzer);
        iwc.setOpenMode(OpenMode.CREATE);
        iwc.setCodec(Codec.forName(Dp10Codec.DP10_CODEC_NAME));

        IndexWriter writer = new IndexWriter(dir, iwc);
        addDocument(writer, "新疆烧烤");     // 0
        addDocument(writer, "啤酒");         // 1
        addDocument(writer, "烤烧");         // 2
        addDocument(writer, "烧烧烧");       // 3
        addDocument(writer, "烤烧中华烧烤"); // 4
        writer.close();
    }

    private void addDocument(IndexWriter writer, String str) throws IOException {
        Document doc = new Document();
        doc.add(new TextField("searchkeywords", str, Store.YES));
        writer.addDocument(doc, new StandardAnalyzer(Version.LUCENE_40));
    }

    @AfterTest
    public void tearDown() throws IOException {
        this.dir.close();
    }

    @Test
    public void test1() throws IOException {
        IndexReader reader = DirectoryReader.open(dir);
        IndexSearcher searcher = new IndexSearcher(reader);

        ExactPhraseFilter pf = new ExactPhraseFilter();
        pf.add(new Term("searchkeywords", "烧"));
        pf.add(new Term("searchkeywords", "烤"));
        Query query = new ConstantScoreQuery(pf);
        TopDocs results = searcher.search(query, 20);

        assert results.totalHits == 2;
        assert results.scoreDocs[0].doc == 0;
        assert results.scoreDocs[1].doc == 4;
        searcher.getIndexReader().close();
    }
}
