Spring批处理作业无限运行

Spring批处理作业无限运行,spring,spring-batch,Spring,Spring Batch,我正在尝试无限次运行spring批处理作业。主要动机是不允许弹簧处于闲置状态 我使用下面的代码无限地运行作业 private JobExecution execution = null; @Scheduled(cron = "0 */2 * * * ?") public void perform() throws JobExecutionAlreadyRunningException, JobRestartException, JobInstanceAlreadyCompleteExcepti

我正在尝试无限次运行spring批处理作业。主要动机是不允许弹簧处于闲置状态

我使用下面的代码无限地运行作业

private JobExecution execution = null;

@Scheduled(cron = "0 */2 * * * ?")
public void perform() throws JobExecutionAlreadyRunningException, JobRestartException, JobInstanceAlreadyCompleteException, JobParametersInvalidException {
    System.out.println("=== STATUS STARTED ====");

    if (execution != null && execution.isRunning()) {
        System.out.println("Job is running. Please wait.");
        return;
    }

    JobParameters jobParameters = new JobParametersBuilder().addString("JobId", String.valueOf(System.currentTimeMillis())).addDate("date", new Date()).addLong("time", System.currentTimeMillis()).toJobParameters();

    execution = jobLauncher.run(job, jobParameters);

    if (!execution.getStatus().isRunning()) {
        perform();
    }

    System.out.println("STATUS :: " + execution.getStatus());
}
首先,我们要检查作业是否正在运行。如果不运行,则再次运行相同的方法。现在作业正在无限地运行

我的问题是,这种方法是好是坏?还是其他解决方案

仅供参考,以下是批次配置代码

@Configuration
public class JobConfiguration {

    @Autowired
    private JobBuilderFactory jobBuilderFactory;

    @Autowired
    private StepBuilderFactory stepBuilderFactory;

    @Autowired
    private DataSource dataSource;

    private Resource outputResource = new FileSystemResource("path\\output.csv");
    private Resource inputResource = new FileSystemResource("path\\input.csv");

    @Bean
    public ColumnRangePartitioner partitioner() {
        ColumnRangePartitioner columnRangePartitioner = new ColumnRangePartitioner();
        columnRangePartitioner.setColumn("id");
        columnRangePartitioner.setDataSource(dataSource);
        columnRangePartitioner.setTable("customer");
        return columnRangePartitioner;
    }

    @Bean
    @StepScope
    public FlatFileItemReader<Customer> pagingItemReader(@Value("#{stepExecutionContext['minValue']}") Long minValue, @Value("#{stepExecutionContext['maxValue']}") Long maxValue) {
        System.out.println("reading " + minValue + " to " + maxValue);
        // Create reader instance
        FlatFileItemReader<Customer> reader = new FlatFileItemReader<>();

        // Set input file location
        reader.setResource(inputResource);

        // Set number of lines to skips. Use it if file has header rows.
        reader.setLinesToSkip(1);

        // Configure how each line will be parsed and mapped to different values
        reader.setLineMapper(new DefaultLineMapper() {
            {
                // 3 columns in each row
                setLineTokenizer(new DelimitedLineTokenizer() {
                    {
                        setNames(new String[] { "id", "firstName", "lastName" });
                    }
                });
                // Set values in Employee class
                setFieldSetMapper(new BeanWrapperFieldSetMapper<Customer>() {
                    {
                        setTargetType(Customer.class);
                    }
                });
            }
        });

        return reader;
    }

    @Bean
    @StepScope
    public FlatFileItemWriter<Customer> customerItemWriter() {
        // Create writer instance
        FlatFileItemWriter<Customer> writer = new FlatFileItemWriter<>();

        // Set output file location
        writer.setResource(outputResource);

        // All job repetitions should "append" to same output file
        writer.setAppendAllowed(true);

        // Name field values sequence based on object properties
        writer.setLineAggregator(new DelimitedLineAggregator<Customer>() {
            {
                setDelimiter(",");
                setFieldExtractor(new BeanWrapperFieldExtractor<Customer>() {
                    {
                        setNames(new String[] { "id", "firstName", "lastName" });
                    }
                });
            }
        });
        return writer;
    }

    // Master
    @Bean
    public Step step1() {
        return stepBuilderFactory.get("step1").partitioner(slaveStep().getName(), partitioner()).step(slaveStep()).gridSize(12).taskExecutor(new SimpleAsyncTaskExecutor()).build();
    }

    // slave step
    @Bean
    public Step slaveStep() {
        return stepBuilderFactory.get("slaveStep").<Customer, Customer>chunk(1000).reader(pagingItemReader(null, null)).writer(customerItemWriter()).build();
    }

    @Bean
    public Job job() {
        return jobBuilderFactory.get("job").start(step1()).build();
    }
}
@配置
公共类作业配置{
@自动连线
私人JobBuilderFactory JobBuilderFactory;
@自动连线
私人StepBuilderFactory StepBuilderFactory;
@自动连线
私有数据源;
私有资源outputResource=新文件系统资源(“path\\output.csv”);
私有资源inputResource=新文件系统资源(“path\\input.csv”);
@豆子
public ColumnRangePartitioner分区器(){
ColumnRangePartitioner ColumnRangePartitioner=新ColumnRangePartitioner();
columnRangePartitioner.setColumn(“id”);
columnRangePartitioner.setDataSource(dataSource);
columnRangePartitioner.setTable(“客户”);
返回列RangePartitioner;
}
@豆子
@步进镜
public FlatFileItemReader pagingItemReader(@Value(“{stepExecutionContext['minValue']}”)长minValue,@Value(“{stepExecutionContext['maxValue']}”)长maxValue){
System.out.println(“读取”+minValue+“到”+maxValue”);
//创建读卡器实例
FlatFileItemReader=新的FlatFileItemReader();
//设置输入文件位置
reader.setResource(inputResource);
//设置要跳过的行数。如果文件有标题行,请使用它。
reader.setLinesToSkip(1);
//配置如何解析每一行并将其映射到不同的值
reader.setLineMapper(新的DefaultLineMapper(){
{
//每行3列
setLineTokenizer(新的DelimitedLineTokenizer(){
{
setNames(新字符串[]{“id”、“firstName”、“lastName”});
}
});
//在Employee类中设置值
setFieldSetMapper(新的BeanRapperFieldSetMapper(){
{
setTargetType(Customer.class);
}
});
}
});
返回读取器;
}
@豆子
@步进镜
公共FlatFileItemWriter customerItemWriter(){
//创建编写器实例
FlatFileItemWriter writer=新的FlatFileItemWriter();
//设置输出文件位置
writer.setResource(输出资源);
//所有作业重复都应“附加”到同一输出文件
writer.setAppendAllowed(true);
//基于对象属性的名称字段值序列
writer.setLineAggregator(新的DelimitedLineAggregator(){
{
setDelimiter(“,”);
setFieldExtractor(新BeanWrapperFieldExtractor(){
{
setNames(新字符串[]{“id”、“firstName”、“lastName”});
}
});
}
});
返回作者;
}
//主人
@豆子
公共步骤第1步(){
返回stepBuilderFactory.get(“step1”).partitioner(slaveStep().getName(),partitioner()).step(slaveStep()).gridSize(12).taskExecutor(新的SimpleAsynctAskeExecutor()).build();
}
//从动台阶
@豆子
公共步骤slaveStep(){
返回stepBuilderFactory.get(“slaveStep”).chunk(1000).reader(pagingItemReader(null,null)).writer(customerItemWriter()).build();
}
@豆子
公职{
返回jobBuilderFactory.get(“作业”).start(step1()).build();
}
}
另一种连续运行步骤的方法

<job id="notificationBatchJobProcess"
        xmlns="http://www.springframework.org/schema/batch"
        job-repository="jobRepository">

        <step id="startLogStep" next="execute">
            <tasklet ref="ref1" />
        </step>

        <step id="execute">
            <batch:tasklet ref="ref2" />
            <batch:next on="COMPLETED" to="endLogStep" />
        </step>

        <step id="endLogStep">
            <batch:tasklet ref="ref3" />
            <batch:next on="COMPLETED" to="startLogStep" />
        </step>
    </job>

我使用上述代码试图实现的是,一旦endLogStep任务完成,它将再次调用startLogStep。 这个过程将持续无限长的时间,直到或除非发生任何异常


这是运行这些作业的正确方法吗?

主要动机是不允许spring batch处于空闲状态
:spring batch本身不会处于空闲状态,它将运行作业直到完成(无论是成功还是失败)。看起来您正在一个一直运行的JVM(可能是web应用程序)中运行作业,并试图最大限度地利用资源。是这样吗?是的。。。对的听着,在这种情况下,试图“无限”地运行作业并不是让JVM保持忙碌的方法。你还得给工作做点什么,对吗?我建议从web应用程序中提取批处理作业,并按需在自己的JVM中运行。作业将无限运行,但如果数据为空,则需要退出。有什么解决办法吗?
<job id="notificationBatchJobProcess"
        xmlns="http://www.springframework.org/schema/batch"
        job-repository="jobRepository">

        <step id="startLogStep" next="execute">
            <tasklet ref="ref1" />
        </step>

        <step id="execute">
            <batch:tasklet ref="ref2" />
            <batch:next on="COMPLETED" to="endLogStep" />
        </step>

        <step id="endLogStep">
            <batch:tasklet ref="ref3" />
            <batch:next on="COMPLETED" to="startLogStep" />
        </step>
    </job>