标签: elasticsearch, elastic-stack, java

ElasticSearch-使用JavaAPI索引23GB的pdf(20k文档)需要更多时间(8小时)

ElasticSearch-使用JavaAPI索引23GB的pdf(20k文档)需要更多时间(8小时),java,elasticsearch,elastic-stack,Java,elasticsearch,Elastic Stack,我使用JavaAPI索引pdf。我已经安装了ingest attachment processor插件,并从我的java代码中,将PDF转换为base64,并为PDF的编码格式编制索引。实际上,我的机器d:\驱动器中有PDF。文件路径在名为documents_local的ElasticSearch索引中可用。因此,我从documents_local索引中获取所有记录并获取文件路径。然后,我读取pdf文件并将其编码为base64,再索引它们。对于这个过程,我使用ScrollRequest API从索引中获取文件路径。

我使用JavaAPI索引pdf。我已经安装了
ingest attachment processor插件
,并从我的java代码中,将PDF转换为base64,并为PDF的编码格式编制索引

实际上,我的机器d:\驱动器中有PDF。文件路径在名为
documents_local
的ElasticSearch索引中可用。因此,我从
documents_local
索引中获取所有记录并获取文件路径。然后,我读取pdf文件并将其编码为base64。然后索引它们

对于这个过程,我使用ScrollRequestAPI从索引中获取文件路径,因为我拥有的文档超过了
100k
。因此,对于索引
20000
PDF,使用下面的java代码需要8小时的时间

如何提高吞吐量或加快此过程。索引
100k
文档需要很长时间

public class DocumentIndex {

    private final static String INDEX = "documents_local";
    private final static String ATTACHMENT = "document_attachment";
    private final static String TYPE = "doc";
    private static final Logger logger = Logger.getLogger(Thread.currentThread().getStackTrace()[0].getClassName());
    // Must stay a multiple of 3: each full chunk then base64-encodes without
    // padding, so the per-chunk encoded strings can be concatenated safely.
    private static final int BUFFER_SIZE = 3 * 1024;

    /**
     * Scrolls over every document in {@value #INDEX}, reads the referenced PDF
     * from local disk, base64-encodes it, and indexes it into
     * {@value #ATTACHMENT} through the ingest-attachment pipeline.
     *
     * @throws IOException if the exception log cannot be opened or a PDF read fails
     */
    public static void main(String args[]) throws IOException {

        Document doc = new Document();

        logger.info("Started Indexing the Document.....");

        // Fetch Id, FilePath & FileName from the document index via the Scroll API.
        SearchRequest searchRequest = new SearchRequest(INDEX);
        searchRequest.types(TYPE);
        final Scroll scroll = new Scroll(TimeValue.timeValueMinutes(60L));
        searchRequest.scroll(scroll);
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        searchSourceBuilder.query(QueryBuilders.matchAllQuery());
        searchRequest.source(searchSourceBuilder);

        SearchResponse searchResponse = SearchEngineClient.getInstance3().search(searchRequest);
        String scrollId = searchResponse.getScrollId();
        SearchHit[] searchHits = searchResponse.getHits().getHits();

        // Counter lives outside the scroll loop so it accumulates across batches
        // (previously it was reset to 1 on every scroll page).
        int totalAvailableFile = 1;

        // Open both log sinks exactly once. The original created a fresh
        // PrintStream on d:\exception.txt for EVERY document, which truncated
        // the file each iteration (losing earlier stack traces) and leaked the
        // stream; it also reopened the "available files" writer per document.
        try (PrintStream exceptionLog = new PrintStream(new File("d:\\exception.txt"));
             PrintWriter availableOut = new PrintWriter(
                     new FileOutputStream(new File("d:\\Available_Files.txt"), true))) {

            // BUG FIX: the original issued the next scroll request *before*
            // processing the current hits, so the first page of results was
            // silently skipped. Process the current page first, then advance.
            while (searchHits != null && searchHits.length > 0) {

                for (SearchHit hit : searchHits) {
                    Map<String, Object> sourceAsMap = hit.getSourceAsMap();
                    if (sourceAsMap != null) {
                        doc.setId((int) sourceAsMap.get("id"));
                        doc.setApp_language(String.valueOf(sourceAsMap.get("app_language")));
                    }

                    String filepath = doc.getPath().concat(doc.getFilename());
                    File file = new File(filepath);

                    String encodedContent = null;
                    if (file.exists() && !file.isDirectory()) {
                        availableOut.println("Available File Count --->" + totalAvailableFile
                                + ":::::::ID---> " + doc.getId() + "File Path --->" + filepath);
                        totalAvailableFile++;
                        encodedContent = encodeFileToBase64(file);
                    }

                    Map<String, Object> jsonMap = new HashMap<>();
                    jsonMap.put("id", doc.getId());
                    jsonMap.put("app_language", doc.getApp_language());
                    // Index a String (or null), not the raw StringBuilder the
                    // original stored, which relied on implicit serialization.
                    jsonMap.put("fileContent", encodedContent);

                    String id = Long.toString(doc.getId());
                    IndexRequest request = new IndexRequest(ATTACHMENT, TYPE, id)
                            .source(jsonMap)
                            .setPipeline(ATTACHMENT);

                    try {
                        SearchEngineClient.getInstance3().index(request);
                    } catch (ElasticsearchException e) {
                        // Do not swallow conflicts silently; record everything
                        // in the persistent exception log.
                        if (e.status() == RestStatus.CONFLICT) {
                            logger.warning("Version conflict while indexing id " + id);
                        }
                        e.printStackTrace(exceptionLog);
                    }
                }

                // Advance to the next scroll page.
                SearchScrollRequest scrollRequest = new SearchScrollRequest(scrollId);
                scrollRequest.scroll(scroll);
                searchResponse = SearchEngineClient.getInstance3().searchScroll(scrollRequest);
                scrollId = searchResponse.getScrollId();
                searchHits = searchResponse.getHits().getHits();
            }
        }

        // Release the server-side scroll context.
        ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
        clearScrollRequest.addScrollId(scrollId);
        ClearScrollResponse clearScrollResponse = SearchEngineClient.getInstance3().clearScroll(clearScrollRequest);
        logger.info("Scroll context cleared: " + clearScrollResponse.isSucceeded());

        logger.info("Indexing done.....");
    }

    /**
     * Streams {@code file} through a chunked Base64 encoder.
     * Chunks are {@link #BUFFER_SIZE} bytes (a multiple of 3), so each full
     * chunk encodes without padding and the pieces concatenate into one valid
     * Base64 string.
     *
     * @param file an existing regular file
     * @return the Base64-encoded file content
     * @throws IOException if reading the file fails
     */
    private static String encodeFileToBase64(File file) throws IOException {
        Base64.Encoder encoder = Base64.getEncoder();
        StringBuilder result = new StringBuilder();
        try (BufferedInputStream in = new BufferedInputStream(new FileInputStream(file), BUFFER_SIZE)) {
            byte[] chunk = new byte[BUFFER_SIZE];
            int len;
            // BufferedInputStream.read(byte[]) iterates until the buffer is
            // full or EOF, so a short read here reliably signals the last chunk.
            while ((len = in.read(chunk)) == BUFFER_SIZE) {
                result.append(encoder.encodeToString(chunk));
            }
            if (len > 0) {
                result.append(encoder.encodeToString(Arrays.copyOf(chunk, len)));
            }
        }
        return result.toString();
    }
}
公共类文档索引{
私有最终静态字符串INDEX=“documents\u local”;
私有最终静态字符串ATTACHMENT=“document\u ATTACHMENT”;
私有最终静态字符串TYPE=“doc”;
私有静态最终记录器Logger=Logger.getLogger(Thread.currentThread().getStackTrace()[0].getClassName());
私有静态最终整数缓冲区大小=3*1024;
公共静态void main(字符串args[])引发IOException{
RestHighLevelClient RestHighLevelClient=null;
RestHighLevelClient restHighLevelClient2=null;
单据单据=新单据();
info(“已开始为文档编制索引…”);
//正在从文档索引中获取Id、文件路径和文件名。
SearchRequest SearchRequest=新的SearchRequest(索引);
searchRequest.types(TYPE);
final Scroll Scroll=新滚动(TimeValue.timeValueMinutes(60L));//滚动API的一部分
searchRequest.scroll(滚动);//滚动API的一部分
SearchSourceBuilder SearchSourceBuilder=新的SearchSourceBuilder();
QueryBuilder qb=QueryBuilders.matchAllQuery();
searchSourceBuilder.query(qb);
searchRequest.source(searchSourceBuilder);
SearchResponse SearchResponse=SearchEngineClient.getInstance3().search(searchRequest);
String scrollId=searchResponse.getScrollId();//Scroll API的一部分
SearchHit[]searchHits=searchResponse.getHits().getHits();
//Scroll API的一部分--启动
而(searchHits!=null&&searchHits.length>0){
SearchScrollRequest scrollRequest=新的SearchScrollRequest(scrollId);
滚动请求。滚动(滚动);
searchResponse=SearchEngineClient.getInstance3().searchScroll(scrollRequest);
scrollId=searchResponse.getScrollId();
searchHits=searchResponse.getHits().getHits();
File all_files_path=新文件(“d:\\all_files_path.txt”);
文件可用\u文件=新文件(“d:\\available\u files.txt”);
File missing_files=新文件(“d:\\missing_files.txt”);
int totalFilePath=1;
int totalAvailableFile=1;
int missingFilecount=1;
地图jsonMap;
for(SearchHit:searchHits){
StringBuilder结果=null;
字符串encodedfile=null;
File=null;
Map sourceAsMap=hit.getSourceAsMap();
如果(sourceAsMap!=null){
文档setId((int)sourceAsMap.get(“id”);
doc.setApp_语言(String.valueOf(sourceAsMap.get(“app_语言”));
}
字符串filepath=doc.getPath().concat(doc.getFilename());
文件=新文件(文件路径);
if(file.exists()&&!file.isDirectory()){
//base64转换开始
FileInputStream fileInputStreamReader2=新的FileInputStream(文件);
try(BufferedInputStream in=new BufferedInputStream(fileInputStreamReader2,BUFFER_SIZE);){
try(PrintWriter out=new PrintWriter(new FileOutputStream(可用的\u文件,true))){
out.println(“可用文件计数-->”+totalAvailableFile+”::;
totalAvailableFile++;
}
Base64.Encoder编码器=Base64.getEncoder();
结果=新的StringBuilder();
byte[]chunk=新字节[缓冲区大小];
int len=0;
while((len=in.read(chunk))==缓冲区大小){
append(encoder.encodeToString(chunk));
}
如果(len>0){
chunk=Arrays.copyOf(chunk,len);
append(encoder.encodeToString(chunk));
}
}
//base64转换结束
}
jsonMap=newhashmap();
put(“id”,doc.getId());
put(“app_language”,doc.getApp_language());
jsonMap.put(“文件内容”,结果);
字符串id=Long.toString(doc.getId());
IndexRequest request=新IndexRequest(附件,“文件”,id)
.source(jsonMap)
.管道(附件);
PrintStream PrintStream=新的PrintStream(新文件(“d:\\exception.txt”);
试一试{
IndexResponse=SearchEngineClient.getInstance3().index(请求);
}捕捉(弹性){
if(e.status()==RestStatus.CONFLICT){
}
e、 printStackTrace(printStream);
}
totalFilePath++;
}
}
氯