Jakarta ee apache nutch爬行问题

Jakarta EE / Apache Nutch 爬取问题(标签:jakarta-ee、solr、web-crawler、nutch)。我正在尝试通过前端获取 Google 搜索结果,并使用 Apache Nutch 1.13 爬取所有 URL 并索引到 Solr 6.5 中。当我通过从控制台获取输入来运行普通的 Java 程序时,它工作得很好,我可以在 Solr 中看到已爬取、已索引的数据,并且可以对其进行搜索。但是,当我尝试在 Java Servlet 中运行同一程序、从 UI 获取输入时,出现了下面的错误,无法弄明白问题出在哪里。当我的 servlet 运行以下脚本时抛出错误(完整脚本见下文)。

我正在尝试通过前端获取google数据,并使用ApacheNutch1.13将所有URL和索引爬网到solr6.5中

当我通过从控制台获取输入来运行普通的 Java 程序时,它工作得很好,我可以在 Solr 中看到已爬取、已索引的数据,并且可以对其进行搜索。但是,当我尝试在 Java Servlet 中运行同一程序、从 UI 获取输入时,出现了以下错误,无法弄明白问题出在哪里。

当我的servlet运行以下脚本时出现抛出错误:

#!/bin/bash
# Crawl seed URLs with Apache Nutch and index the results into Solr.
# Usage: sukicrawl.sh <solr-core-name>   ($1 = Solr core name)

NUTCH_HOME="/home/nutch1.13"
SOLR_HOME="/home/solr-6.5.0"
urls="/home/nutch1.13/urls/seed.txt"
crawldir="/home/nutch1.13/crawl"
NumRound=1


# Clean up data from the previous crawl so each run starts fresh.
echo "Cleaning up..."
# bash check if directory exists (paths quoted to survive spaces)
if [ -d "$crawldir" ]; then
    echo "crawldir Directory exists"
    rm -rf "$crawldir/crawldb"
    rm -rf "$crawldir/linkdb"
    rm -rf "$crawldir/segments"
else
    echo "Directory does not exists"
fi

# Crawl the seed URLs.
echo "----- crawling urls-----"
#"$NUTCH_HOME/bin/crawl" "$urls" "$crawldir" "$NumRound"


# Start Solr (uncomment if Solr is not already running).
#"$SOLR_HOME/bin/solr" start


# Create the Solr core/collection if it does not exist yet.
# BUG FIX: the original test "[ -d ...$1]" was missing the space before ']',
# which is a syntax error once uncommented.
#if [ -d "$SOLR_HOME/server/solr/$1" ]; then
#    echo "Core already exists"
#else
#    echo "----- create solr core-----"
#    "$SOLR_HOME/bin/solr" create -c "$1"
#fi

# Index the crawl data into Solr.
# BUG FIX: the original commented out the solrindex line but left its argument
# continuation uncommented, so the shell tried to execute "$crawldir/crawldb"
# as a command. The whole command is now a single line; uncomment to enable.
#echo "----- Index to solr-----"
#"$NUTCH_HOME/bin/nutch" solrindex "http://localhost:8983/solr/$1" "$crawldir/crawldb" -linkdb "$crawldir/linkdb" "$crawldir"/segments/*



also , my servlet class is as follows : 


import java.io.BufferedReader;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.PrintWriter;

import javax.servlet.ServletException;
import javax.servlet.annotation.WebServlet;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

/**
 * Servlet implementation class.
 *
 * <p>Takes a search term from the UI, scrapes the Google result links into
 * the Nutch seed file, then launches a shell script that crawls the seeds
 * with Apache Nutch and indexes them into Solr.
 */
@WebServlet("/UrlMapping")
public class Driver extends HttpServlet {
    private static final long serialVersionUID = 1L;
    public static final String GOOGLE_SEARCH_URL = "https://www.google.com/search";

    /** Google result hrefs look like "/url?q=<target>&..."; the target starts after "/url?q=" (7 chars). */
    private static final int RESULT_URL_OFFSET = 7;

    /**
     * Handles the crawl request: echoes the request parameters back to the
     * client, writes the Google result URLs to the Nutch seed file, then runs
     * the crawl/index script.
     *
     * @see HttpServlet#doGet(HttpServletRequest request, HttpServletResponse response)
     */
    protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
        response.setContentType("text/html");
        PrintWriter out = response.getWriter();

        String word = request.getParameter("search");
        String url = request.getParameter("urlcount");
        String core = request.getParameter("solrcore");

        out.println("Entered search term ->"+word);
        out.println("Number of url's to be crawled -> "+url);
        out.println("Solr core name -> "+core);

        try {
            search(word, Integer.parseInt(url));
        } catch (NumberFormatException e) {
            // The "urlcount" parameter was not a number; report it to the UI
            // instead of letting the container return an opaque 500.
            out.println("Invalid url count: " + url);
            return;
        } catch (InterruptedException e) {
            // Restore the interrupt flag so the servlet container can see it.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }

        /** execute apache nutch script to crawl the url's and index in solr */
        try {
            executeProcess(core);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }

    }

    /**
     * @see HttpServlet#doPost(HttpServletRequest request, HttpServletResponse response)
     */
    protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
    }

    /**
     * Scrapes the first {@code num} Google results for {@code searchterm} and
     * overwrites the Nutch seed file with the extracted target URLs.
     *
     * @param searchterm query string entered by the user
     * @param num        number of results to request from Google
     * @throws IOException if the HTTP fetch or the seed-file write fails
     */
    public void search(String searchterm,int num) throws IOException, InterruptedException{
        String gsearchURL = GOOGLE_SEARCH_URL + "?q=" + searchterm + "&num=" + num;

        Document doc = Jsoup.connect(gsearchURL).userAgent("Chrome/41.0.2228.0 Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko)  Safari/537.36").get();

        // Result links are the anchors under the h3.r headings on the page.
        Elements results = doc.select("h3.r > a");

        try (FileWriter fw = new FileWriter(new File("/home/sukesh/nutch1.13/urls/seed.txt"), false)) {
            for (Element result : results) {

                String linkHref = result.attr("href");
                String linkText = result.text();

                // hrefs are shaped like "/url?q=<target>&sa=...". Skip anything
                // else — the original code threw StringIndexOutOfBoundsException
                // when '&' was absent or the prefix did not match.
                int amp = linkHref.indexOf('&');
                if (!linkHref.startsWith("/url?q=") || amp <= RESULT_URL_OFFSET) {
                    continue;
                }

                // BUG FIX: the original printed substring(6, ...) (which keeps
                // the '=') but wrote substring(7, ...); both now use the same,
                // correct offset.
                String target = linkHref.substring(RESULT_URL_OFFSET, amp);
                System.out.println("Text::" + linkText + ", URL::" + target);

                fw.write(target + "\n");

            }
        }

    }

    /**
     * Runs the Nutch crawl/index shell script with {@code arg} as the Solr
     * core name ($1) and echoes its combined stdout/stderr to the server log.
     *
     * @param arg Solr core name passed through to the script
     */
    public void executeProcess(String arg) throws IOException, InterruptedException {
        //String scriptPath = getServletContext().getRealPath("/sukicrawl.sh");
        String scriptPath = "/home/elicpse_j2ee/eclipse/workspace/GoogleAnalytics/NutchScript/sukicrawl.sh";

        // Merge stderr into stdout so a single reader drains both pipes and
        // the child process cannot block on a full, unread stderr buffer.
        Process p = new ProcessBuilder(scriptPath, arg).redirectErrorStream(true).start();

        StringBuilder sb = new StringBuilder();
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(p.getInputStream()))) {
            String line;
            while ((line = reader.readLine()) != null) {
                sb.append(line).append('\n');
            }
        }

        // Wait for the script so the servlet does not report completion while
        // the crawl is still running (the original never called waitFor()).
        int exitCode = p.waitFor();
        System.out.println(sb.toString());
        System.out.println("Crawl script exited with code " + exitCode);
    }

}
日志信息:

java.lang.Exception: java.io.IOException: Mkdirs failed to create file:/generate-temp-b42b2b91-e1e5-4e82-8861-881a7a607bd9/_temporary/0/_temporary/attempt_local2075293294_0001_r_000000_0/fetchlist-1 (exists=false, cwd=file:/)
        at org.apache.hadoop.mapred.LocalJobRunner$Job.runTasks(LocalJobRunner.java:462)
        at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:529)
    Caused by: java.io.IOException: Mkdirs failed to create file:/generate-temp-b42b2b91-e1e5-4e82-8861-881a7a607bd9/_temporary/0/_temporary/attempt_local2075293294_0001_r_000000_0/fetchlist-1 (exists=false, cwd=file:/)
        at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:450)
        at org.apache.hadoop.fs.ChecksumFileSystem.create(ChecksumFileSystem.java:435)
        at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:909)
        at org.apache.hadoop.io.SequenceFile$Writer.<init>(SequenceFile.java:1135)
        at org.apache.hadoop.io.SequenceFile.createWriter(SequenceFile.java:273)
        at org.apache.hadoop.io.SequenceFile.createWriter(SequenceFile.java:530)
        at org.apache.hadoop.mapred.SequenceFileOutputFormat.getRecordWriter(SequenceFileOutputFormat.java:64)
        at org.apache.hadoop.mapred.lib.MultipleSequenceFileOutputFormat.getBaseRecordWriter(MultipleSequenceFileOutputFormat.java:51)
        at org.apache.hadoop.mapred.lib.MultipleOutputFormat$1.write(MultipleOutputFormat.java:104)
        at org.apache.hadoop.mapred.ReduceTask$OldTrackingRecordWriter.write(ReduceTask.java:493)
        at org.apache.hadoop.mapred.ReduceTask$3.collect(ReduceTask.java:422)
        at org.apache.nutch.crawl.Generator$Selector.reduce(Generator.java:344)
        at org.apache.nutch.crawl.Generator$Selector.reduce(Generator.java:112)
        at org.apache.hadoop.mapred.ReduceTask.runOldReducer(ReduceTask.java:444)
        at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:392)
        at org.apache.hadoop.mapred.LocalJobRunner$Job$ReduceTaskRunnable.run(LocalJobRunner.java:319)
        at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)
    2017-04-21 15:13:21,356 ERROR crawl.Generator - Generator: 

    java.io.IOException: Job failed!
            at org.apache.hadoop.mapred.JobClient.runJob(JobClient.java:865)
            at org.apache.nutch.crawl.Generator.generate(Generator.java:591)
            at org.apache.nutch.crawl.Generator.run(Generator.java:766)
            at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:70)
            at org.apache.nutch.crawl.Generator.main(Generator.java:719)

如果检查错误输出:

file:/generate-temp-b42b2b91-e1e5-4e82-8861-
881a7a607bd9/_temporary/0/_temporary/attempt_local2075293294_0001_r
_000000_0/fetchlist-1
它抱怨无法在文件系统的根目录中创建文件——这是正常的,您的 servlet 本来就不应该能在那里写入。您需要将
mapred.temp.dir
设置为 servlet 具有写入权限的路径;这可能正是您遗漏的配置。

由于您正试图在 Nutch 之上编写自定义 UI,我建议您使用 Nutch 提供的 REST API(Nutch Server)——它正是为控制 Nutch 启动爬取等操作而设计的。


还要记住,Nutch分阶段/分批运行,每个阶段都需要一些时间,这取决于您的配置,因此在索引步骤结束之前,您不应该得到即时响应。

感谢您的回复……我让它工作起来了。我把 Tomcat 从 /opt/tomcat 改装到了自定义安装位置,现在它可以正常工作了。
file:/generate-temp-b42b2b91-e1e5-4e82-8861-
881a7a607bd9/_temporary/0/_temporary/attempt_local2075293294_0001_r
_000000_0/fetchlist-1