如何在karma webpack中按块运行单元测试?
我们在项目中使用了以下技术: angularjs+typescript+webpack+karma+phantomjs。该项目中有1000多个单元测试, 它们通过 karma 的 phantomjs launcher 运行。在 Windows 机器上运行时 PhantomJS 崩溃, 并抛出以下异常:
[phantomjs.launcher]: Fatal Windows exception, code 0xc0000005.
是否可以将测试拆分为块并逐个块运行它们?解决方案如下:
自定义karma.js
要运行它,只需执行以下命令: node custom-karma.js (假设脚本文件保存为 custom-karma.js)
const Server = require('karma').Server;
const filesFromJson = require('./karma.files.json');
const glob = require("glob");
const cfg = require('karma/lib/config');
const _ = require("lodash");
const path = require("path");
const minConfig = require('./karma.min.js');
const EventEmitter = require('events');
// Event bus used to chain chunk runs: when chunk N finishes, 'chunk<N+1>'
// is emitted, which starts the next chunk's Karma server.
class ChunkEmitter extends EventEmitter {}
const chunkEmitter = new ChunkEmitter();
// register karma server and setup listener.
const registerKarmaServerAndSetListeners = (config, chunkNumber) => {
const server = new Server(
config,
() => {
console.log('test suite are done' + chunkNumber);
// when first chunk of tests is done we have to call another chunk
const nextChunk = ++chunkNumber;
console.log('proceeding ' + nextChunk);
chunkEmitter.emit('chunk' + nextChunk);
}
);
// listening for server starting event and starting server.
chunkEmitter.on('chunk' + chunkNumber, ()=> {
console.log('staring ' + chunkNumber);
server.start();
});
}
// Finds every spec file, splits them into chunks of 50, registers one
// Karma server per chunk, then starts the pipeline with chunk 0. Each
// server bundles only its own chunk, which keeps PhantomJS memory usage
// low enough to avoid the fatal 0xc0000005 crash.
const readAllSpecsSplitIntoChunksEmitServer = () => {
  glob('src/**/*spec.ts', {}, (err, files) => {
    if (err) {
      // Original code ignored the glob error; fail loudly instead of
      // silently running zero tests.
      console.error('failed to list spec files:', err);
      process.exitCode = 1;
      return;
    }
    // Split all specs into chunks of 50 files each.
    const chunks = _.chunk(files, 50);
    chunks.forEach((chunk, index) => {
      // Every chunk still needs the shared core files (frameworks/shims)
      // from karma.files.json in addition to its own specs.
      const filesForChunk = filesFromJson.coreFiles.concat(chunk);
      const karmaConfig = cfg.parseConfig(path.resolve('./karma.config.js'), {
        files: filesForChunk,
      });
      registerKarmaServerAndSetListeners(karmaConfig, index);
    });
    // Kick off the chain with the first chunk.
    chunkEmitter.emit('chunk0');
  });
};
readAllSpecsSplitIntoChunksEmitServer();