Node.js error: getaddrinfo ENOTFOUND parishackers.org parishackers.org:80


The error getaddrinfo ENOTFOUND parishackers.org parishackers.org:80 occurred. I wrote this code:

var Crawler = require("node-webcrawler");
var url = require('url');

var c = new Crawler({
    maxConnections : 10,
    // This will be called for each crawled page
    callback : function (error, result, $) {
        // $ is Cheerio by default
        //a lean implementation of core jQuery designed specifically for the server
        if(error){
            console.log(error);
        }else{
            console.log($("title").text());
        }
    }
});

// Queue just one URL, with default callback
c.queue('http://www.amazon.com');

// Queue a list of URLs
c.queue(['http://www.google.com/','http://www.yahoo.com']);

// Queue URLs with custom callbacks & parameters
c.queue([{
    uri: 'http://parishackers.org/',
    jQuery: false,

    // The global callback won't be called
    callback: function (error, result) {
        if(error){
            console.log(error);
        }else{
            console.log('Grabbed', result.body.length, 'bytes');
        }
    }
}]);

// Queue some HTML code directly without grabbing (mostly for tests)
c.queue([{
    html: '<p>This is a <strong>test</strong></p>'
}]);
The error occurred. I think the program ran only partway, but I don't know why it can't finish completely. I installed the library with npm install node-webcrawler. I read on another site that this error may be caused by a bad link — is that right? How can I fix this? What is wrong with my code?

The error
(getaddrinfo ENOTFOUND parishackers.org parishackers.org:80)
is thrown because the domain cannot be resolved. Use a valid URL and your node-webcrawler will work like a charm. Modified snippet for reference:

var Crawler = require("node-webcrawler");
var url = require('url');

var c = new Crawler({
    maxConnections : 10,
    // This will be called for each crawled page
    callback : function (error, result, $) {
        // $ is Cheerio by default
        //a lean implementation of core jQuery designed specifically for the server
        if(error){
            console.log(error);
        }else{
            console.log($("title").text());
        }
    }
});

// Queue just one URL, with default callback
c.queue('http://www.amazon.com');

// Queue a list of URLs
c.queue(['http://www.google.com/','http://www.yahoo.com']);

// Queue URLs with custom callbacks & parameters
c.queue([{
    uri: 'http://www.amazon.com',
    jQuery: false,

    // The global callback won't be called
    callback: function (error, result) {
        if(error){
            console.log(error);
        }else{
            console.log('Grabbed', result.body.length, 'bytes');
        }
    }
}]);

// Queue some HTML code directly without grabbing (mostly for tests)
c.queue([{
    html: '<p>This is a <strong>test</strong></p>'
}]);
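If you are not sure in advance whether a hostname still resolves, you can pre-check it with Node's built-in dns module and only queue URLs that pass. A minimal sketch (the checkAndQueue helper is just an illustrative name, not part of node-webcrawler):

var dns = require('dns');
var url = require('url');

// Resolve the hostname first and only queue the URL if the DNS lookup succeeds.
function checkAndQueue(crawler, link) {
    var hostname = url.parse(link).hostname;
    dns.lookup(hostname, function (err) {
        if (err) {
            // err.code is 'ENOTFOUND' when the domain does not resolve
            console.log('Skipping ' + link + ': ' + err.code);
        } else {
            crawler.queue(link);
        }
    });
}

checkAndQueue(c, 'http://parishackers.org/');   // skipped if the domain is dead
checkAndQueue(c, 'http://www.amazon.com');      // resolves, so it gets queued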
Also try accessing the URL in a browser first to confirm that the domain actually resolves.
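Another option, assuming node-webcrawler passes the underlying request error through (which is how the getaddrinfo error in the question surfaces), is to check the error code inside the callback so that one dead domain does not stop you from inspecting the rest of the queue. A rough sketch:

var Crawler = require("node-webcrawler");

var c = new Crawler({
    maxConnections : 10,
    callback : function (error, result, $) {
        if (error) {
            if (error.code === 'ENOTFOUND') {
                // DNS lookup failed: the hostname in this URL does not resolve
                console.log('Domain not found:', error.hostname || error.message);
            } else {
                console.log(error);
            }
        } else {
            console.log($("title").text());
        }
    }
});

c.queue(['http://parishackers.org/', 'http://www.amazon.com']);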