Warning: file_get_contents(/data/phpspider/zhask/data//catemap/2/node.js/38.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Node.js Socket.io、群集、快速和同步事件_Node.js_Express_Socket.io_Cluster Computing - Fatal编程技术网

Node.js Socket.io、群集、快速和同步事件

Node.js Socket.io、群集、快速和同步事件,node.js,express,socket.io,cluster-computing,Node.js,Express,Socket.io,Cluster Computing,我有一个大问题持续了一周。我尝试将实际运行在单核上的node.JS项目转换为使用集群的多核项目 对于websockets,目前我对事件没有问题,但是对于xhr轮询或jsonp轮询,我在集群模式下的socket.io有很大的问题 这是我的服务器配置: 00-generic.js 'use strict'; var http = require('http'), os = require('os'), cluster

我有一个大问题持续了一周。我尝试将实际运行在单核上的node.JS项目转换为使用集群的多核项目

对于websockets,目前我对事件没有问题,但是对于xhr轮询或jsonp轮询,我在集群模式下的socket.io有很大的问题

这是我的服务器配置:

00-generic.js

'use strict';

var http            = require('http'),
    os              = require('os'),
    cluster         = require('cluster');

module.exports = function(done) {
    var app = this.express,
        port = process.env.PORT || 3000,
        address = '0.0.0.0';

    if(this.env == 'test'){
        port = 3030;
    }

    var self = this;
    var size = os.cpus().length;

    if (cluster.isMaster) {
        console.info('Creating HTTP server cluster with %d workers', size);

        for (var i = 0; i < size; ++i) {
            console.log('spawning worker process %d', (i + 1));
            cluster.fork();
        }

        cluster.on('fork', function(worker) {
            console.log('worker %s spawned', worker.id);
        });
        cluster.on('online', function(worker) {
            console.log('worker %s online', worker.id);
        });
        cluster.on('listening', function(worker, addr) {
            console.log('worker %s listening on %s:%d', worker.id, addr.address, addr.port);
        });
        cluster.on('disconnect', function(worker) {
            console.log('worker %s disconnected', worker.id);
        });
        cluster.on('exit', function(worker, code, signal) {
            console.log('worker %s died (%s)', worker.id, signal || code);
            if (!worker.suicide) {
                console.log('restarting worker');
                cluster.fork();
            }
        });
    } else {
        http.createServer(app).listen(port, address, function() {
            var addr = this.address();
            console.log('listening on %s:%d', addr.address, addr.port);
            self.server = this;
            done();
        });
    }
};
通过轮询,客户机不时地在与启动的侦听器不同的进程上进行连接。类似地,使用emit将通信服务器连接到客户端

通过一点搜索,我发现有必要通过socket.io的存储来共享数据连接。因此,我构建了Redistore socket.io,如文档所示,但即使如此,我发现自己的事件并没有安全到达,我仍然收到以下错误消息:

warn: client not handshaken client should reconnect
编辑

现在,不会调用warn错误。我将Redistore更改为socket.io-clusterhub,但现在并不总是调用事件。有时,就好像轮询请求是由另一个工作人员捕获的,而不是由侦听器启动的工作人员捕获的,因此什么也没有发生。以下是新的配置:

'use strict';

var http            = require('http'),
    locomotive      = require('locomotive'),
    os              = require('os'),
    cluster         = require('cluster'),
    config          = require(__dirname + '/../app/global'),
    _               = require('underscore'),
    socketio        = require('socket.io'),
    v1              = require(__dirname + '/../app/socket.io/v1'),
    sockets         = require(__dirname + '/../../app/socket/socket');

/**
 * Cluster boot with socket.io 0.9.x: the master forks one worker per CPU
 * and respawns dead workers; each worker attaches socket.io — backed by a
 * socket.io-clusterhub store so the multi-request polling transports can
 * share handshake state across workers — to its own HTTP server.
 *
 * Invoked with `this` bound to the application container (reads
 * `this.express` and `this.env`; sets `this.clusterStore`, `this.io`,
 * and `this.server`).
 *
 * @param {Function} done - callback invoked once a worker's server is listening
 */
module.exports = function(done) {
    var app = this.express,
        port = process.env.PORT || 3000,
        address = '0.0.0.0';

    if(this.env == 'test'){
        port = 3030;
    }

    var self = this;
    var size = os.cpus().length;

    // Cross-worker store for socket.io handshake/session state.
    // NOTE(review): this is also instantiated in the master process, where
    // it is never used — presumably harmless, but verify.
    this.clusterStore = new (require('socket.io-clusterhub'));

    if (cluster.isMaster) {
        // Master: fork one worker per CPU and log worker lifecycle events.
        for (var i = 0; i < size; ++i) {
            console.log('spawning worker process %d', (i + 1));
            cluster.fork();
        }

        cluster.on('fork', function(worker) {
            console.log('worker %s spawned', worker.id);
        });
        cluster.on('online', function(worker) {
            console.log('worker %s online', worker.id);
        });
        cluster.on('listening', function(worker, addr) {
            console.log('worker %s listening on %s:%d', worker.id, addr.address, addr.port);
        });
        cluster.on('disconnect', function(worker) {
            console.log('worker %s disconnected', worker.id);
        });
        cluster.on('exit', function(worker, code, signal) {
            console.log('worker %s died (%s)', worker.id, signal || code);
            // Respawn only workers that died unexpectedly (`suicide` is
            // set for deliberate shutdowns in this Node vintage).
            if (!worker.suicide) {
                console.log('restarting worker');
                cluster.fork();
            }
        });
    } else {
        // Worker: create the HTTP server and attach socket.io to it.
        var server = http.createServer(app);

        this.io = socketio.listen(server);

        // socket.io 0.9 configuration; `bind(this)` keeps `this.io` /
        // `this.clusterStore` resolvable inside the callback.
        this.io.configure(function() {
            this.io.enable('browser client minification');  // send minified client
            this.io.enable('browser client etag');          // apply etag caching logic based on version number
            this.io.enable('browser client gzip');          // gzip the file

            this.io.set('store', this.clusterStore);
            this.io.set('log level', 2);
            this.io.set('transports', [
                'websocket',
                'jsonp-polling'
            ]);
            //this.io.set('close timeout', 24*60*60);
            //this.io.set('heartbeat timeout', 24*60*60);
        }.bind(this));

        this.io.sockets.on('connection', function (socket) {
            console.log('connected with ' + this.io.transports[socket.id].name);
            console.log('connected to worker: ' + cluster.worker.id);

            // v1 handlers (deprecated)
            v1.events(socket);

            // v1.1 handlers, rewritten; underscore's third argument binds
            // `this` for the iteratee so `this.io` stays reachable.
            _.each(sockets['1.1'], function(Mod) {
                var mod = new Mod();
                mod.launch({
                    socket  : socket,
                    io      : this.io
                });
            }, this);

        }.bind(this));

        server.listen(port, address, function() {
            var addr = this.address();
            console.log('listening on %s:%d', addr.address, addr.port);
            self.server = this;
            done();
        });
    }
};
'use strict';

var http            = require('http'),
    locomotive      = require('locomotive'),
    os              = require('os'),
    cluster         = require('cluster'),
    config          = require(__dirname + '/../app/global'),
    _               = require('underscore'),
    socketio        = require('socket.io'),
    v1              = require(__dirname + '/../app/socket.io/v1'),
    sockets         = require(__dirname + '/../../app/socket/socket');

module.exports = function(done) {
    var app = this.express,
        port = process.env.PORT || 3000,
        address = '0.0.0.0';

    if (this.env == 'test') {
        port = 3030;
    }

    var self = this;
    var size = os.cpus().length;

    this.clusterStore = new (require('socket.io-clusterhub'));

    if (cluster.isMaster) {
        for (var i = 0; i < size; ++i) {
来自该来源:

如果您计划在不同的服务器之间分配连接负载 进程或机器,您必须确保与 使用特定会话id连接到发起的进程 他们

这是由于某些传输,如XHR轮询或JSONP轮询 依赖于在请求的生命周期内触发多个请求 “插座”

要每次将连接路由到同一个辅助进程,请执行以下操作:

粘性会话

在socket.io文档中,这是每次将请求路由到同一工作者的推荐方法

将socket.io与集群一起使用的一种简单高效的方法

Socket.io正在执行多个请求以执行握手和 与客户端建立连接。在集群中,这些请求可能会 到达不同的工作人员,这将破坏握手协议

要在节点之间传递消息,请执行以下操作:

socket.io redis

在socket.io文档中,这是工作人员之间共享消息的推荐方式

通过使用socket.io-redis适配器运行socket.io,您可以运行 不同进程或服务器中的多个socket.io实例 可以相互广播和发射事件

socket.io-redis的使用方式如下:

// Stand-alone socket.io server on port 3000 whose broadcasts and emits are
// shared across processes/servers through the Redis-backed adapter.
var redisAdapter = require('socket.io-redis');
var io = require('socket.io')(3000);
io.adapter(redisAdapter({ host: 'localhost', port: 6379 }));
同时

我认为您没有使用socket.io v1.0.0。为了获得更高的稳定性,您可能需要更新您的版本

// Fixed: the package name was misspelled ('sticky-sesion'), which would
// make `require` throw MODULE_NOT_FOUND; the real module is 'sticky-session'.
var sticky = require('sticky-session');

sticky(function() {
  // This code will be executed only in slave workers

  var http = require('http'),
      io = require('socket.io');

  // Requests from a given client are routed to the same worker, keeping
  // socket.io's multi-request handshake on one process.
  var server = http.createServer(function(req, res) {
    // ....
  });
  io.listen(server);

  return server;
}).listen(3000, function() {
  console.log('server started on 3000 port');
});
// (Duplicated server snippet from the answer above.) socket.io on port
// 3000 with the Redis adapter so events broadcast across processes.
var io = require('socket.io')(3000);
var redis = require('socket.io-redis');
io.adapter(redis({ host: 'localhost', port: 6379 }));
var io = require('socket.io')(3000);
var redis = require('socket.io-redis');
io.adapter(redis({ host: 'localhost', port: 6379 }));
// Client-side: force the websocket transport so no polling requests can
// land on a different worker. `yourURL` is a placeholder for the server URL.
io.connect(yourURL , { transports : ['websocket']});
'use strict';

var cluster = require('cluster'),
    config = require('./config/all'),
    deferred = require('q').defer(),
    express = require('express'),
    app = express(),
    http = require('http'),
    sticky = require('socketio-sticky-session'),
    io = require('socket.io');

// Fork-only master applies only outside of debug runs, tests, development,
// explicit single-process mode, and the sticky-session setup (which does
// its own forking).
var nodeEnv = process.env.NODE_ENV;
var runAsForkingMaster = cluster.isMaster &&
    process.execArgv.indexOf('--debug') < 0 &&
    nodeEnv !== 'test' &&
    nodeEnv !== 'development' &&
    process.execArgv.indexOf('--singleProcess') < 0 &&
    !config.clusterSticky;

if (runAsForkingMaster) {

    console.log('for real!');
    // One worker per CPU unless CPU_COUNT overrides it.
    var cpuCount = process.env.CPU_COUNT || require('os').cpus().length;

    for (var i = 0; i < cpuCount; i++) {
        console.log('forking ', i);
        cluster.fork();
    }

    // Replace any worker that dies — no sentimentality.
    cluster.on('exit', function (worker) {
        console.log('Worker ' + worker.id + ' died :(');
        cluster.fork();
    });

// Worker (or single-process) path: build and start the HTTP server.
} else {
    var port = config.http.port;
    var workerId = cluster.isMaster ? 0 : cluster.worker.id;

    var server = http.createServer(app);
    io.listen(server);

    //TODO routes etc (core)

    server.on('listening', function () {
        console.log('Slave app started on port ' + port + ' (' + process.env.NODE_ENV + ') cluster.worker.id:', workerId);
    });

    if (config.clusterSticky && nodeEnv !== 'test' && nodeEnv !== 'development') {
        sticky(server).listen(port);
    } else {
        server.listen(port);
    }

    deferred.resolve(server);
}

module.exports = deferred.promise;