Why does my Lucene program quit with an OutOfMemoryError?

Tags: memory, lucene, out-of-memory, information-retrieval

Please, can someone help me? My program runs fine on a small data sample, but on the real 40 MB data set it fails with:

java.lang.OutOfMemoryError: Java heap space

Here is my code:

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field; 
import org.apache.lucene.document.StringField; 
import org.apache.lucene.document.TextField;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;


public class RechercheEngine {

    static ArrayList<String> resultat = new ArrayList<String>();
    final static String file = "Ressources/file_collection.txt";
    static LireFichierCollection rf = new LireFichierCollection();
    static LireRequete ls = new LireRequete();

    public static void indexerEtRechercherDocument(boolean exchange) throws IOException, org.apache.lucene.queryparser.classic.ParseException {

        System.out.println("Analyzing documents...");
        Directory directory = new RAMDirectory();
        Analyzer analyzer = new StandardAnalyzer();
        IndexWriterConfig config = new IndexWriterConfig(analyzer);
        IndexWriter indexWriter = new IndexWriter(directory, config);

        Document doc = new Document();
        rf.readFile(file);
        System.out.println("Indexing documents...");
        for (Map.Entry<String, String> entry : rf.hashMap.entrySet()) {
            doc.add(new StringField("DocKey", entry.getKey(), Field.Store.YES));
            Stemming st = new Stemming();
            if (exchange) {
                doc.add(new TextField("DocContent", st.stemmingAvecStopWord(entry.getValue()), Field.Store.NO));
            } else {
                doc.add(new TextField("DocContent", entry.getValue(), Field.Store.NO));
            }
            indexWriter.addDocument(doc);
        }
        indexWriter.close();
        System.out.println("Indexing documents done");

        ls.readList();
        System.out.println("Researching documents...");

        for (Map.Entry<String, String> entry : ls.map.entrySet()) {
            Stemming st = new Stemming();
            Query query;
            if (exchange) {
                query = new QueryParser("DocContent", analyzer).parse(st.stemmingAvecStopWord(entry.getValue()));
            } else {
                query = new QueryParser("DocContent", analyzer).parse(entry.getValue());
            }
            IndexReader reader = DirectoryReader.open(directory);
            IndexSearcher searcher = new IndexSearcher(reader);
            ScoreDoc[] hits = searcher.search(query, 2).scoreDocs;
            for (int i = 0; i < hits.length; i++) {
                int docId = hits[i].doc;
                Document hitDoc = searcher.doc(docId);
                String docKey = hitDoc.get("DocKey");
                HashMap<String, String> X = new HashMap<String, String>();
                for (IndexableField field : hitDoc.getFields()) {
                    X.put(field.name(), field.stringValue());
                }
                resultat.add(entry.getKey() + " " + docKey + " " + hits[i].score);
            }
            reader.close();
        }
    }

}
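
One detail in the indexing loop is worth pointing out: the same Document instance is reused for every entry, so each iteration adds two more fields on top of all the fields from the previous iterations, and addDocument writes that ever-growing document into a RAMDirectory, which lives entirely on the heap. Below is a minimal sketch (not the original program; the class name IndexingSketch is hypothetical and the LireFichierCollection/Stemming helpers are left out) of the indexing step with a fresh Document per entry:

import java.io.IOException;
import java.util.Map;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class IndexingSketch {

    // Hypothetical helper: indexes each (key, content) pair as its own document.
    static void indexAll(Map<String, String> docs) throws IOException {
        Directory directory = new RAMDirectory();
        IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
        try (IndexWriter indexWriter = new IndexWriter(directory, config)) {
            for (Map.Entry<String, String> entry : docs.entrySet()) {
                // Fresh Document per entry, so fields never accumulate across iterations.
                Document doc = new Document();
                doc.add(new StringField("DocKey", entry.getKey(), Field.Store.YES));
                doc.add(new TextField("DocContent", entry.getValue(), Field.Store.NO));
                indexWriter.addDocument(doc);
            }
        }
    }
}

If the index itself is too large for the heap, FSDirectory.open(Paths.get("index")) could replace the RAMDirectory to keep the index on disk; the FSDirectory and Paths imports are already present in the code above.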
Comments:

- What version of Lucene are you using? It could be a field cache issue.
- What happens in ls.readList()? How do you run the program, and how much memory is allocated to it?
- ls.readList() is a method I call from the class LireRequete; it reads the queries into a HashMap. The queries (the terms to search for) are listed in a file, each with a key. I named the HashMap containing the queries "map". Here is my main: public class MonMain { public static void main(String[] args) throws IOException, ParseException { RechercheEngine.indexerEtRechercherDocument(false); for (int i = 0; i < …
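
On the "how much memory is allocated to it" question above: if the data genuinely needs more than the JVM's default heap, the program can be launched with a larger limit via the standard -Xmx flag, for example (assuming MonMain is started directly from the command line):

    java -Xmx1g MonMain

This only postpones the problem if the index grows per document as described after the code above, but it is a quick way to check whether the heap ceiling itself is the issue.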