package org.apache.lucene.demo;

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.File;
import java.io.IOException;
import java.util.Date;
import java.util.List;
import java.util.Map;

import org.apache.commons.dbutils.DbUtils;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

import thtf.ebuilder.website.search.DBIndex;
import thtf.ebuilder.website.services.HTMLServices;

/**
 * Indexes rows from the up_info database table into a Lucene index.
 * <p>
 * This is a command-line application adapted from the Lucene demo's file-based
 * IndexFiles example; instead of walking a directory it reads records from the
 * database and indexes their id, title, and content.
 */
public class IndexFiles {

  private IndexFiles() {}

  /** Builds (or rebuilds) the index and reports the elapsed time. */
  public static void main(String[] args) {
    String indexPath = DBIndex._$.getIndexFile().toString();
    // true = rebuild the index from scratch; false = append to / update an existing index.
    boolean add = true;
    Date start = new Date();
    try {
      Directory dir = FSDirectory.open(new File(indexPath));
      IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_47, DBIndex._$.analyzer);
      if (add) {
        iwc.setOpenMode(OpenMode.CREATE);
      } else {
        iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
      }
      IndexWriter writer = new IndexWriter(dir, iwc);
      indexDocs(writer);

      // NOTE: if you want to maximize search performance, you can optionally
      // call forceMerge here. This can be a terribly costly operation, so
      // generally it's only worth it when your index is relatively static
      // (i.e. you're done adding documents to it):
      //
      // writer.forceMerge(1);

      writer.close();

      Date end = new Date();
      System.out.println(end.getTime() - start.getTime() + " total milliseconds");
    } catch (IOException e) {
      System.out.println(" caught a " + e.getClass()
          + "\n with message: " + e.getMessage());
    }
  }

  /**
   * Indexes the rows returned by the database query using the given writer,
   * one Lucene document per row.
   *
   * @param writer writer to the index where the row data will be stored
   * @throws IOException if there is a low-level I/O error
   */
  static void indexDocs(IndexWriter writer) throws IOException {
    try {
      // queryToMapList is a project-specific helper that returns each row as a Map;
      // Apache Commons DbUtils itself does not provide this method.
      List list = new DbUtils().queryToMapList(
          "select info_id,info_title,info_content from up_info limit 500");
      for (int i = 0; i < list.size(); i++) {
        Map map = (Map) list.get(i);

        // make a new, empty document per row
        Document doc = new Document();

        // numeric id: indexed and stored
        Field info_id = new IntField("info_id",
            Integer.valueOf(String.valueOf(map.get("info_id"))), Field.Store.YES);
        doc.add(info_id);

        // title: indexed as a single token (not analyzed) and stored
        Field info_title = new StringField("info_title",
            map.get("info_title") == null ? "" : map.get("info_title").toString(),
            Field.Store.YES);
        doc.add(info_title);

        // content: HTML stripped, analyzed, and stored
        Field info_content = new TextField("info_content",
            map.get("info_content") == null ? ""
                : HTMLServices.clearHTMLToString(map.get("info_content").toString()),
            Field.Store.YES);
        doc.add(info_content);

        if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
          // New index, so we just add the document (no old document can be there).
          writer.addDocument(doc);
        } else {
          // Existing index: replace any old document with the same id.
          // NOTE: a Term lookup against an IntField will not match its
          // numeric-encoded terms, so for updates to actually replace old
          // documents the id should also be indexed as a StringField (or the
          // term built with NumericUtils).
          writer.updateDocument(new Term("info_id",
              map.get("info_id") == null ? "1" : map.get("info_id").toString()), doc);
        }
      }
      writer.commit();

      // The file-based code from the original Lucene demo (adding "path",
      // "modified", and "contents" fields read from files on disk) has been
      // removed; this variant indexes database rows instead.
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
}
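For completeness, below is a minimal search-side sketch (not part of the original demo) showing how the fields written above could be queried. It assumes the lucene-queryparser jar is on the classpath and reuses the project's DBIndex helper for the index path and analyzer; the class name is hypothetical.

package org.apache.lucene.demo;

import java.io.File;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

import thtf.ebuilder.website.search.DBIndex;

/** Hypothetical companion example: searches the index built by IndexFiles. */
public class SearchInfoExample {

  public static void main(String[] args) throws Exception {
    // Open the same index directory that IndexFiles wrote to.
    DirectoryReader reader = DirectoryReader.open(
        FSDirectory.open(new File(DBIndex._$.getIndexFile().toString())));
    IndexSearcher searcher = new IndexSearcher(reader);

    // Parse the query against the analyzed "info_content" field, using the
    // same analyzer that was used at index time.
    QueryParser parser = new QueryParser(Version.LUCENE_47, "info_content", DBIndex._$.analyzer);
    Query query = parser.parse(args.length > 0 ? args[0] : "lucene");

    // Fetch the top 10 hits and print their stored id and title fields.
    ScoreDoc[] hits = searcher.search(query, 10).scoreDocs;
    for (ScoreDoc hit : hits) {
      Document doc = searcher.doc(hit.doc);
      System.out.println(doc.get("info_id") + "\t" + doc.get("info_title"));
    }
    reader.close();
  }
}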