Query

    public class LuceneQuery : ILuceneQuery
    {
        #region Identity
        private Logger logger = new Logger(typeof(LuceneQuery));
        #endregion Identity

        #region QueryIndex
        /// <summary>
        /// Fetch the commodity data matching the query.
        /// </summary>
        /// <param name="queryString"></param>
        /// <returns></returns>
        public List<Commodity> QueryIndex(string queryString)
        {
            IndexSearcher searcher = null;
            try
            {
                List<Commodity> ciList = new List<Commodity>();
                Directory dir = FSDirectory.Open(StaticConstant.IndexPath);
                searcher = new IndexSearcher(dir);
                Analyzer analyzer = new PanGuAnalyzer(); // the search-side analyzer is configured here
                QueryParser parser = new QueryParser(Version.LUCENE_30, "title", analyzer);
                Query query = parser.Parse(queryString);
                Console.WriteLine(query.ToString()); // print the parsed query expression
                TopDocs docs = searcher.Search(query, (Filter)null, 10000);
                foreach (ScoreDoc sd in docs.ScoreDocs)
                {
                    Document doc = searcher.Doc(sd.Doc);
                    ciList.Add(DocumentToCommodityInfo(doc));
                }
                return ciList;
            }
            finally
            {
                if (searcher != null)
                {
                    searcher.Dispose();
                }
            }
        }

        /// <summary>
        /// Fetch commodity data page by page.
        /// </summary>
        /// <param name="queryString"></param>
        /// <param name="pageIndex">the first page is 1</param>
        /// <param name="pageSize"></param>
        /// <param name="totalCount"></param>
        /// <param name="priceFilter">price range, e.g. "[100,500}"; '[' and ']' make a bound inclusive, '{' and '}' exclusive</param>
        /// <param name="priceOrderBy">sort clause, e.g. "price asc" or "price desc"</param>
        /// <returns></returns>
        public List<Commodity> QueryIndexPage(string queryString, int pageIndex, int pageSize, out int totalCount, string priceFilter, string priceOrderBy)
        {
            totalCount = 0;
            IndexSearcher searcher = null;
            try
            {
                List<Commodity> ciList = new List<Commodity>();
                FSDirectory dir = FSDirectory.Open(StaticConstant.IndexPath);
                searcher = new IndexSearcher(dir);
                Analyzer analyzer = new PanGuAnalyzer(); // the search-side analyzer is configured here
                QueryParser parser = new QueryParser(Version.LUCENE_30, "title", analyzer);
                Query query = parser.Parse(queryString);

                pageIndex = Math.Max(1, pageIndex); // page index starts at 1
                int startIndex = (pageIndex - 1) * pageSize;
                int endIndex = pageIndex * pageSize;

                NumericRangeFilter<float> numPriceFilter = null;
                if (!string.IsNullOrWhiteSpace(priceFilter))
                {
                    bool isContainStart = priceFilter.StartsWith("[");
                    bool isContainEnd = priceFilter.EndsWith("]");
                    string[] floatArray = priceFilter.Replace("[", "").Replace("]", "").Replace("{", "").Replace("}", "").Split(',');
                    float start = 0;
                    float end = 0;
                    if (!float.TryParse(floatArray[0], out start) || !float.TryParse(floatArray[1], out end))
                    {
                        throw new Exception("Wrong priceFilter");
                    }
                    numPriceFilter = NumericRangeFilter.NewFloatRange("price", start, end, isContainStart, isContainEnd);
                }

                Sort sort = new Sort();
                if (!string.IsNullOrWhiteSpace(priceOrderBy))
                {
                    // SortField's third argument is "reverse": true means descending,
                    // so reverse only when the clause does NOT end with "asc"
                    bool reverse = !priceOrderBy.EndsWith("asc", StringComparison.CurrentCultureIgnoreCase);
                    SortField sortField = new SortField("price", SortField.FLOAT, reverse);
                    sort.SetSort(sortField);
                }

                TopDocs docs = searcher.Search(query, numPriceFilter, 10000, sort);
                //TopDocs docs = searcher.Search(query, null, 10000);
                totalCount = docs.TotalHits;
                //PrintScores(docs, startIndex, endIndex, searcher);
                for (int i = startIndex; i < endIndex && i < totalCount; i++)
                {
                    Document doc = searcher.Doc(docs.ScoreDocs[i].Doc);
                    ciList.Add(DocumentToCommodityInfo(doc));
                }
                return ciList;
            }
            finally
            {
                if (searcher != null)
                {
                    searcher.Dispose();
                }
            }
        }

        private void PrintScores(TopDocs docs, int startIndex, int endIndex, IndexSearcher searcher)
        {
            ScoreDoc[] scoreDocs = docs.ScoreDocs;
            for (int i = startIndex; i < endIndex && i < scoreDocs.Length; i++)
            {
                int docId = scoreDocs[i].Doc;
                Document doc = searcher.Doc(docId);
                logger.Info(string.Format("score of {0} is {1}", doc.Get("productid"), scoreDocs[i].Score));
            }
        }
        #endregion QueryIndex

        #region private
        private Commodity DocumentToCommodityInfo(Document doc)
        {
            return new Commodity()
            {
                Id = int.Parse(doc.Get("id")),
                Title = doc.Get("title"),
                ProductId = long.Parse(doc.Get("productid")),
                CategoryId = int.Parse(doc.Get("categoryid")),
                ImageUrl = doc.Get("imageurl"), // field name fixed: the index stores "imageurl"
                Price = decimal.Parse(doc.Get("price")),
                Url = doc.Get("url")
            };
        }
        #endregion private
    }
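
For orientation, here is a minimal usage sketch (my addition, not from the original project). It assumes an index already exists under StaticConstant.IndexPath and that Commodity and ILuceneQuery are the project types shown above. Note the priceFilter convention the parsing code implies: '[' and ']' make a bound inclusive, '{' and '}' exclusive.

    // Hypothetical usage sketch; not part of the original post.
    ILuceneQuery luceneQuery = new LuceneQuery();

    // Plain query: PanGuAnalyzer tokenizes the keyword against the "title" field.
    List<Commodity> hits = luceneQuery.QueryIndex("notebook");

    // Paged query: page 1, 20 per page, price in [100, 500), sorted by price ascending.
    // '[' makes the lower bound inclusive; '}' makes the upper bound exclusive.
    int totalCount;
    List<Commodity> page = luceneQuery.QueryIndexPage("notebook", 1, 20, out totalCount, "[100,500}", "price asc");
    Console.WriteLine("total hits: {0}", totalCount);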

Batch and single-item index create/update/delete

    /// <summary>
    /// Multi-threading: each writer fills its own files, which are merged afterwards.
    /// Latency: hand writes off to an asynchronous queue.
    /// </summary>
    public class LuceneBulid : ILuceneBulid
    {
        #region Identity
        private Logger logger = new Logger(typeof(LuceneBulid));
        #endregion Identity

        #region Batch BuildIndex / index merging
        /// <summary>
        /// Batch-build the index (all items must share one sourceflag, i.e. one directory).
        /// </summary>
        /// <param name="ciList">items sharing one sourceflag</param>
        /// <param name="pathSuffix">index directory suffix appended to the root path, e.g. sa\1; empty means the root directory itself</param>
        /// <param name="isCreate">false (default) for incremental indexing; true deletes the existing index first</param>
        public void BuildIndex(List<Commodity> ciList, string pathSuffix = "", bool isCreate = false)
        {
            IndexWriter writer = null;
            try
            {
                if (ciList == null || ciList.Count == 0)
                {
                    return;
                }
                string rootIndexPath = StaticConstant.IndexPath;
                string indexPath = string.IsNullOrWhiteSpace(pathSuffix) ? rootIndexPath : string.Format("{0}\\{1}", rootIndexPath, pathSuffix);
                DirectoryInfo dirInfo = Directory.CreateDirectory(indexPath);
                LuceneIO.Directory directory = LuceneIO.FSDirectory.Open(dirInfo);
                writer = new IndexWriter(directory, new PanGuAnalyzer(), isCreate, IndexWriter.MaxFieldLength.LIMITED);
                //writer = new IndexWriter(directory, CreateAnalyzerWrapper(), isCreate, IndexWriter.MaxFieldLength.LIMITED);
                writer.SetMaxBufferedDocs(100); // docs buffered in memory before a new segment is flushed; default 10
                writer.MergeFactor = 100; // how often segments are merged; default 10
                writer.UseCompoundFile = true; // compound files reduce the number of index files
                ciList.ForEach(c => CreateCIIndex(writer, c));
            }
            finally
            {
                if (writer != null)
                {
                    //writer.Optimize(); do not optimize while building; handled during merge
                    writer.Close();
                }
            }
        }

        /// <summary>
        /// Merge child-directory indexes into the parent directory.
        /// </summary>
        /// <param name="childDirs">child folder names</param>
        public void MergeIndex(string[] childDirs)
        {
            Console.WriteLine("MergeIndex Start");
            IndexWriter writer = null;
            try
            {
                if (childDirs == null || childDirs.Length == 0) return;
                Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30);
                string rootPath = StaticConstant.IndexPath;
                DirectoryInfo dirInfo = Directory.CreateDirectory(rootPath);
                LuceneIO.Directory directory = LuceneIO.FSDirectory.Open(dirInfo);
                writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.LIMITED); // true: drop the existing root index
                LuceneIO.Directory[] dirNo = childDirs.Select(dir => LuceneIO.FSDirectory.Open(Directory.CreateDirectory(string.Format("{0}\\{1}", rootPath, dir)))).ToArray();
                writer.MergeFactor = 100; // how often segments are merged; default 10
                writer.UseCompoundFile = true; // compound files reduce the number of index files
                writer.AddIndexesNoOptimize(dirNo);
            }
            finally
            {
                if (writer != null)
                {
                    writer.Optimize();
                    writer.Close();
                }
                Console.WriteLine("MergeIndex End");
            }
        }

        // Field.Store.YES: store the field value (the original value, before analysis)
        // Field.Store.NO: do not store the value; storing is independent of indexing
        // Field.Store.COMPRESS: compressed storage for long text or binary data, at a performance cost
        // Field.Index.ANALYZED: analyze (tokenize) the value and index it
        // Field.Index.ANALYZED_NO_NORMS: analyze and index, but reduce the field's norms to a single byte to save space
        // Field.Index.NOT_ANALYZED: index the value without analyzing it
        // Field.Index.NOT_ANALYZED_NO_NORMS: index without analyzing, norms reduced to a single byte
        // A TermVector records a document's terms (located by Document and Field) and how often each occurs in that document
        // Field.TermVector.YES: store this field's TermVector for every document
        // Field.TermVector.NO: do not store TermVectors
        // Field.TermVector.WITH_POSITIONS: also store term positions
        // Field.TermVector.WITH_OFFSETS: also store term offsets
        // Field.TermVector.WITH_POSITIONS_OFFSETS: store both positions and offsets
        #endregion Batch BuildIndex / index merging

        #region Single/batch index create, update, delete
        /// <summary>
        /// Index a single new commodity.
        /// </summary>
        /// <param name="ci"></param>
        public void InsertIndex(Commodity ci)
        {
            IndexWriter writer = null;
            try
            {
                if (ci == null) return;
                string rootIndexPath = StaticConstant.IndexPath;
                DirectoryInfo dirInfo = Directory.CreateDirectory(rootIndexPath);
                bool isCreate = dirInfo.GetFiles().Length == 0; // no files yet means a brand-new index
                LuceneIO.Directory directory = LuceneIO.FSDirectory.Open(dirInfo);
                writer = new IndexWriter(directory, CreateAnalyzerWrapper(), isCreate, IndexWriter.MaxFieldLength.LIMITED);
                writer.MergeFactor = 100; // how often segments are merged; default 10
                writer.UseCompoundFile = true; // compound files reduce the number of index files
                CreateCIIndex(writer, ci);
            }
            catch (Exception ex)
            {
                logger.Error("InsertIndex exception", ex);
                throw;
            }
            finally
            {
                if (writer != null)
                {
                    //if (fileNum > 50)
                    //    writer.Optimize();
                    writer.Close();
                }
            }
        }

        /// <summary>
        /// Index a batch of new commodities.
        /// </summary>
        /// <param name="ciList"></param>
        public void InsertIndexMuti(List<Commodity> ciList)
        {
            BuildIndex(ciList, "", false);
        }

        /// <summary>
        /// Delete the index entries of a batch of commodities.
        /// </summary>
        /// <param name="ciList"></param>
        public void DeleteIndexMuti(List<Commodity> ciList)
        {
            IndexReader reader = null;
            try
            {
                if (ciList == null || ciList.Count == 0) return;
                string rootIndexPath = StaticConstant.IndexPath;
                DirectoryInfo dirInfo = Directory.CreateDirectory(rootIndexPath);
                LuceneIO.Directory directory = LuceneIO.FSDirectory.Open(dirInfo);
                reader = IndexReader.Open(directory, false);
                foreach (Commodity ci in ciList)
                {
                    reader.DeleteDocuments(new Term("productid", ci.ProductId.ToString()));
                }
            }
            catch (Exception ex)
            {
                logger.Error("DeleteIndexMuti exception", ex);
                throw;
            }
            finally
            {
                if (reader != null)
                {
                    reader.Dispose();
                }
            }
        }

        /// <summary>
        /// Delete the index entry of a single commodity.
        /// </summary>
        /// <param name="ci"></param>
        public void DeleteIndex(Commodity ci)
        {
            IndexReader reader = null;
            try
            {
                if (ci == null) return;
                string rootIndexPath = StaticConstant.IndexPath;
                DirectoryInfo dirInfo = Directory.CreateDirectory(rootIndexPath);
                LuceneIO.Directory directory = LuceneIO.FSDirectory.Open(dirInfo);
                reader = IndexReader.Open(directory, false);
                reader.DeleteDocuments(new Term("productid", ci.ProductId.ToString()));
            }
            catch (Exception ex)
            {
                logger.Error("DeleteIndex exception", ex);
                throw;
            }
            finally
            {
                if (reader != null)
                {
                    reader.Dispose();
                }
            }
        }

        /////// <summary>
        /////// Update a single commodity's index entry (delete-then-insert variant).
        /////// </summary>
        //public void UpdateIndex(Commodity ci)
        //{
        //    DeleteIndex(ci);
        //    InsertIndex(ci);
        //}

        /// <summary>
        /// Update the index entry of a single commodity.
        /// </summary>
        /// <param name="ci"></param>
        public void UpdateIndex(Commodity ci)
        {
            IndexWriter writer = null;
            try
            {
                if (ci == null) return;
                string rootIndexPath = StaticConstant.IndexPath;
                DirectoryInfo dirInfo = Directory.CreateDirectory(rootIndexPath);
                bool isCreate = dirInfo.GetFiles().Length == 0; // no files yet means a brand-new index
                LuceneIO.Directory directory = LuceneIO.FSDirectory.Open(dirInfo);
                writer = new IndexWriter(directory, CreateAnalyzerWrapper(), isCreate, IndexWriter.MaxFieldLength.LIMITED);
                writer.MergeFactor = 100; // how often segments are merged; default 10
                writer.UseCompoundFile = true; // compound files reduce the number of index files
                writer.UpdateDocument(new Term("productid", ci.ProductId.ToString()), ParseCItoDoc(ci));
            }
            catch (Exception ex)
            {
                logger.Error("UpdateIndex exception", ex);
                throw;
            }
            finally
            {
                if (writer != null)
                {
                    //if (fileNum > 50)
                    //    writer.Optimize();
                    writer.Close();
                }
            }
        }

        /// <summary>
        /// Update the index entries of a batch of commodities.
        /// </summary>
        /// <param name="ciList">items sharing one sourceflag</param>
        public void UpdateIndexMuti(List<Commodity> ciList)
        {
            IndexWriter writer = null;
            try
            {
                if (ciList == null || ciList.Count == 0) return;
                string rootIndexPath = StaticConstant.IndexPath;
                DirectoryInfo dirInfo = Directory.CreateDirectory(rootIndexPath);
                bool isCreate = dirInfo.GetFiles().Length == 0; // no files yet means a brand-new index
                LuceneIO.Directory directory = LuceneIO.FSDirectory.Open(dirInfo);
                writer = new IndexWriter(directory, CreateAnalyzerWrapper(), isCreate, IndexWriter.MaxFieldLength.LIMITED);
                writer.MergeFactor = 50; // how often segments are merged; default 10
                writer.UseCompoundFile = true; // compound files reduce the number of index files
                foreach (Commodity ci in ciList)
                {
                    writer.UpdateDocument(new Term("productid", ci.ProductId.ToString()), ParseCItoDoc(ci));
                }
            }
            catch (Exception ex)
            {
                logger.Error("UpdateIndexMuti exception", ex);
                throw;
            }
            finally
            {
                if (writer != null)
                {
                    //if (fileNum > 50)
                    //    writer.Optimize();
                    writer.Close();
                }
            }
        }
        #endregion Single/batch index create, update, delete

        #region PrivateMethod
        /// <summary>
        /// Create the per-field analyzer wrapper.
        /// </summary>
        /// <returns></returns>
        private PerFieldAnalyzerWrapper CreateAnalyzerWrapper()
        {
            Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30);
            PerFieldAnalyzerWrapper analyzerWrapper = new PerFieldAnalyzerWrapper(analyzer);
            analyzerWrapper.AddAnalyzer("title", new PanGuAnalyzer());
            analyzerWrapper.AddAnalyzer("categoryid", new StandardAnalyzer(Version.LUCENE_30));
            return analyzerWrapper;
        }

        /// <summary>
        /// Add one commodity to the index.
        /// </summary>
        /// <param name="writer"></param>
        /// <param name="ci"></param>
        private void CreateCIIndex(IndexWriter writer, Commodity ci)
        {
            try
            {
                writer.AddDocument(ParseCItoDoc(ci));
            }
            catch (Exception ex)
            {
                logger.Error("CreateCIIndex exception", ex);
                throw;
            }
        }

        /// <summary>
        /// Convert a Commodity into a Document.
        /// </summary>
        /// <param name="ci"></param>
        /// <returns></returns>
        private Document ParseCItoDoc(Commodity ci)
        {
            Document doc = new Document();
            doc.Add(new Field("id", ci.Id.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.Add(new Field("title", ci.Title, Field.Store.YES, Field.Index.ANALYZED)); // analyzed with PanGu
            doc.Add(new Field("productid", ci.ProductId.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.Add(new Field("categoryid", ci.CategoryId.ToString(), Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.Add(new Field("imageurl", ci.ImageUrl, Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.Add(new Field("url", ci.Url, Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.Add(new NumericField("price", Field.Store.YES, true).SetFloatValue((float)ci.Price));
            return doc;
        }
        #endregion PrivateMethod
    }
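
The class comment above describes the intended write pattern: several writers each fill their own subdirectory, and the fragments are merged into the root index afterwards. A hedged sketch of that flow follows; LoadBatches is a hypothetical helper standing in for however the caller partitions the data, and Task.Run assumes .NET 4.5+.

    // Hypothetical sketch of the multi-threaded pattern in the class comment;
    // not part of the original post.
    ILuceneBulid builder = new LuceneBulid();
    string[] childDirs = { "0", "1", "2" };
    List<Commodity>[] batches = LoadBatches(); // hypothetical: pre-partitioned commodity lists
    Task[] tasks = childDirs
        .Select((dir, i) => Task.Run(() => builder.BuildIndex(batches[i], dir, true)))
        .ToArray();
    Task.WaitAll(tasks);
    builder.MergeIndex(childDirs); // rebuilds the root index from the fragments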

Analyzer wrapper

    public class LuceneAnalyze : ILuceneAnalyze
    {
        private Logger logger = new Logger(typeof(LuceneAnalyze));

        #region AnalyzerKey
        /// <summary>
        /// Tokenize the search keyword.
        /// The resulting terms can be joined with OR to match more data (greedy query).
        /// </summary>
        /// <param name="keyword"></param>
        /// <returns></returns>
        public string[] AnalyzerKey(string keyword)
        {
            Analyzer analyzer = new PanGuAnalyzer();
            QueryParser parser = new QueryParser(Version.LUCENE_30, "title", analyzer);
            Query query = parser.Parse(this.CleanKeyword(keyword));
            if (query is TermQuery)
            {
                Term term = ((TermQuery)query).Term;
                return new string[] { term.Text };
            }
            else if (query is PhraseQuery)
            {
                Term[] terms = ((PhraseQuery)query).GetTerms();
                return terms.Select(t => t.Text).ToArray();
            }
            else if (query is BooleanQuery) // and / or
            {
                BooleanClause[] clauses = ((BooleanQuery)query).GetClauses();
                List<string> analyzerWords = new List<string>();
                foreach (BooleanClause clause in clauses)
                {
                    Query childQuery = clause.Query;
                    if (childQuery is TermQuery)
                    {
                        Term term = ((TermQuery)childQuery).Term;
                        analyzerWords.Add(term.Text);
                    }
                    else if (childQuery is PhraseQuery)
                    {
                        Term[] terms = ((PhraseQuery)childQuery).GetTerms();
                        analyzerWords.AddRange(terms.Select(t => t.Text));
                    }
                }
                return analyzerWords.ToArray();
            }
            else
            {
                // braces escaped so string.Format does not throw
                logger.Debug(string.Format("AnalyzerKey parsed keyword={0}; falling back to new string[] {{ keyword }}", keyword));
                return new string[] { keyword };
            }
        }

        /// <summary>
        /// Lower-case AND/OR at the head, tail, and middle of the keyword so the
        /// QueryParser treats them as plain terms rather than operators, then
        /// escape the remaining special characters.
        /// </summary>
        /// <param name="keyword"></param>
        /// <returns></returns>
        private string CleanKeyword(string keyword)
        {
            if (!string.IsNullOrWhiteSpace(keyword))
            {
                bool isClean = false;
                while (!isClean)
                {
                    keyword = keyword.Trim();
                    if (keyword.EndsWith(" AND"))
                    {
                        keyword = string.Format("{0}and", keyword.Remove(keyword.Length - 3, 3));
                    }
                    else if (keyword.EndsWith(" OR"))
                    {
                        keyword = string.Format("{0}or", keyword.Remove(keyword.Length - 2, 2));
                    }
                    else if (keyword.StartsWith("AND "))
                    {
                        keyword = string.Format("and{0}", keyword.Substring(3));
                    }
                    else if (keyword.StartsWith("OR "))
                    {
                        keyword = string.Format("or{0}", keyword.Substring(2));
                    }
                    else if (keyword.Contains(" OR "))
                    {
                        keyword = keyword.Replace(" OR ", " or ");
                    }
                    else if (keyword.Contains(" AND "))
                    {
                        keyword = keyword.Replace(" AND ", " and ");
                    }
                    else
                    {
                        isClean = true;
                    }
                }
            }
            return QueryParser.Escape(keyword);
        }
        #endregion AnalyzerKey
    }
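
Finally, a short sketch of the greedy-query flow this class enables (again my own illustration, not from the original post): tokenize the keyword, join the terms with OR, and hand the result to the query side.

    // Hypothetical sketch: turn a raw keyword into a broadened OR query.
    ILuceneAnalyze luceneAnalyze = new LuceneAnalyze();
    string[] words = luceneAnalyze.AnalyzerKey("high performance notebook");
    string greedyQuery = string.Join(" OR ", words);
    // greedyQuery can then be passed to LuceneQuery.QueryIndex / QueryIndexPage.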
