Java Spring 批处理分区:顺序运行从属步骤(非并行)

我使用 Spring Batch 执行一些计算。读取器(reader)读出的数据量很大,处理器/写入器的处理过程占用大量内存。因此,我尝试使用如下分区器(partitioner)来分割步骤:
<!-- Master step: delegates the real work ("MyStep") through a partitioner.
     NOTE(review): grid-size="1" creates only a single partition - verify this
     is intended before expecting any splitting. -->
<batch:step id="MyStep.master">
    <partition step="MyStep" partitioner="MyPartitioner">
        <handler grid-size="1" task-executor="TaskExecutor" />
    </partition>
</batch:step>

<!-- Worker step: chunk-oriented read/process/write, committing every 1000 items. -->
<batch:step id="MyStep">
    <batch:tasklet transaction-manager="transactionManager">
        <batch:chunk reader="MyReader" processor="MyProcessor"
                     writer="MyWriter" commit-interval="1000" skip-limit="1000">
            <batch:skippable-exception-classes>
                <batch:include class="...FunctionalException" />
            </batch:skippable-exception-classes>
        </batch:chunk>
    </batch:tasklet>
</batch:step>

<bean id="MyPartitioner" class="...MyPartitioner" scope="step" />

<!-- FIX: this bean element was opened but never closed in the original
     fragment, which makes the XML ill-formed; self-close it. -->
<bean id="TaskExecutor"
      class="org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor" />

<bean name="MyReader"
      class="org.springframework.batch.item.database.JdbcCursorItemReader"
      scope="step">
    <property name="dataSource" ref="dataSource" />
    <property name="sql">
        <value>
            <![CDATA[
            SELECT...
            ]]>
        </value>
    </property>
    <property name="rowMapper" ref="MyRowMapper" />
</bean>

<bean id="MyRowMapper" class="...MyRowMapper" />

<bean id="dataSource" class="com.mchange.v2.c3p0.ComboPooledDataSource" destroy-method="close">
    <property name="driverClass" value="org.postgresql.Driver" />
    <property name="jdbcUrl" value="jdbc:postgresql://${database.host}/${database.name}" />
    <property name="user" value="${database.user}" />
    <property name="password" value="${database.password}" />
    <property name="acquireIncrement" value="1" />
    <property name="autoCommitOnClose" value="true" />
    <property name="minPoolSize" value="${min.pool.size}" /> <!-- min.pool.size=5 -->
    <property name="maxPoolSize" value="${max.pool.size}" /> <!-- max.pool.size=15 -->
</bean>
但是分区后内存占用仍然很大,因为这些从属步骤是并行执行的。所以我想做的是:分割步骤之后让各线程依次(而非并行)执行,以减少内存(RAM)使用——这可行吗?

回答:这个问题有点老了,不确定是否还有帮助,也可能你已经自己解决了。如果对行的执行顺序没有要求,解决方案是:在分区器 bean 中先查询数据库,再把表各个区段的信息(起始键、结束键)传给每个分区,让每个分区只处理属于自己的那一段,这样可以大幅减少内存(RAM)使用。几点说明:
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:batch="http://www.springframework.org/schema/batch"
xmlns:context="http://www.springframework.org/schema/context"
xmlns:aop="http://www.springframework.org/schema/aop" xmlns:util="http://www.springframework.org/schema/util"
xsi:schemaLocation="http://www.springframework.org/schema/aop http://www.springframework.org/schema/aop/spring-aop-4.0.xsd
http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-4.0.xsd
http://www.springframework.org/schema/util http://www.springframework.org/schema/util/spring-util-4.0.xsd
http://www.springframework.org/schema/batch http://www.springframework.org/schema/batch/spring-batch-3.0.xsd
http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-4.0.xsd">
<!-- JOB -->
<!-- Partitioned job entry point: "MyStep" fans out to "MyStep.template"
     instances via myPartitioner / partitionHandler. -->
<batch:job id="printPdf"
           job-repository="jobRepository"
           restartable="false">
    <batch:step id="MyStep">
        <batch:partition step="MyStep.template"
                         partitioner="myPartitioner"
                         handler="partitionHandler" />
    </batch:step>
</batch:job>
<!-- Partitioner -->
<!-- Partitioner: queries the table once up front and hands each partition
     its key range through the step execution context. -->
<bean id="myPartitioner" class="foo.MyPartitioner" scope="step">
    <property name="jdbcTemplate" ref="myJdbcTemplate" />
    <property name="sql" value="Select ...." />
    <property name="rowMap">
        <bean class="foo.MyPartitionHandlerRowMapper" />
    </property>
    <property name="preparedStatementSetter">
        <bean class="org.springframework.batch.core.resource.ListPreparedStatementSetter">
            <property name="parameters">
                <list>
                    <value>#{jobParameters['param1']}</value>
                </list>
            </property>
        </bean>
    </property>
</bean>
<!-- Dispatches each partition's step execution onto the task executor;
     gridSize is taken from the job parameters. -->
<bean id="partitionHandler"
      scope="step"
      class="org.springframework.batch.core.partition.support.TaskExecutorPartitionHandler">
    <property name="step" ref="MyStep.template" />
    <property name="taskExecutor" ref="customTaskExecutor" />
    <property name="gridSize" value="#{jobParameters['gridSize']}" />
</bean>
<!-- Fixed pool of 8 threads; shutdown waits up to 120s for running work. -->
<bean id="customTaskExecutor"
      class="org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor">
    <property name="corePoolSize" value="8" />
    <property name="maxPoolSize" value="8" />
    <property name="waitForTasksToCompleteOnShutdown" value="true" />
    <property name="awaitTerminationSeconds" value="120" />
</bean>
<!-- Worker step template, one instance per partition.
     FIX: the id was misspelled "MyStep.tempate" while both the
     batch:partition element and the partitionHandler bean reference
     "MyStep.template", so the wiring could never resolve. -->
<batch:step id="MyStep.template">
    <batch:tasklet transaction-manager="transactionManager">
        <batch:chunk commit-interval="2500" reader="myReader"
                     processor="myProcessor" writer="myWriter" skip-limit="2500">
            <batch:skippable-exception-classes>
                <batch:include class="...FunctionalException" />
            </batch:skippable-exception-classes>
        </batch:chunk>
    </batch:tasklet>
</batch:step>
<!-- Beans -->
<!-- Processors -->
<!-- Item processor, step-scoped so each partition gets its own instance. -->
<bean id="myProcessor" class="foo.MyProcessor" scope="step" />

<bean id="classLoaderVerifier"
      class="it.addvalue.pkjwd.services.genbean.GenericStockKeysForNoDuplicate" />
<!-- Readers -->
<!-- Reader: each partition reads only its assigned [START_ID, END_ID] range.
     FIX: the original sql attribute contained a raw "<" (ID <= ?), which is
     illegal inside an XML attribute value; comparison operators are escaped
     as &gt;= / &lt;=.
     NOTE(review): ref="myDataSouce" looks misspelled but is used consistently
     with myWriter and the bean is not defined in this file - verify against
     the rest of the context. -->
<bean id="myReader"
      class="org.springframework.batch.item.database.JdbcCursorItemReader"
      scope="step">
    <property name="dataSource" ref="myDataSouce" />
    <property name="sql"
              value="select ... from ... where ID &gt;= ? and ID &lt;= ?" />
    <property name="rowMapper">
        <bean class="foo.MyReaderPartitionedRowMapper" />
    </property>
    <property name="preparedStatementSetter">
        <bean class="org.springframework.batch.core.resource.ListPreparedStatementSetter">
            <property name="parameters">
                <list>
                    <value>#{stepExecutionContext['START_ID']}</value>
                    <value>#{stepExecutionContext['END_ID']}</value>
                </list>
            </property>
        </bean>
    </property>
</bean>
<!-- Writers -->
<!-- Batch insert writer; assertUpdates disabled so zero-row updates don't fail. -->
<bean id="myWriter"
      class="org.springframework.batch.item.database.JdbcBatchItemWriter">
    <property name="dataSource" ref="myDataSouce" />
    <property name="sql" value="insert ..." />
    <property name="assertUpdates" value="false" />
    <property name="itemPreparedStatementSetter">
        <bean class="foo.MyWriterStatementSetters" />
    </property>
</bean>
</beans>
#{jobParameters['param1']}
#{stepExecutionContext['START_ID']}
#{stepExecutionContext['END_ID']}
您的分区Bean将如下所示:
package foo;

import foo.model.MyTable;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.lang.StringUtils;
import org.springframework.batch.core.partition.support.Partitioner;
import org.springframework.batch.item.ExecutionContext;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.PreparedStatementSetter;
import org.springframework.jdbc.core.RowMapper;

/**
 * Splits a key-ordered table into {@code gridSize} contiguous ranges so each
 * partitioned worker step reads only its own [START_ID, END_ID] slice.
 *
 * FIX: the original mixed two incompatible row types - the field and getter
 * used RowMapper&lt;foo.model.MyTable&gt; while the setter parameter and the
 * query result used PkjwdPolizzePartition - which cannot compile. All usages
 * are now MyTable. Also guards gridSize &lt;= 0 and gridSize &gt; rowCount,
 * which previously caused a swallowed ArithmeticException /
 * IndexOutOfBoundsException.
 */
public class MyPartitioner implements Partitioner
{
    private JdbcTemplate jdbcTemplate;
    private RowMapper<MyTable> rowMap;
    private String sql;
    private PreparedStatementSetter preparedStatementSetter;

    public JdbcTemplate getJdbcTemplate()
    {
        return jdbcTemplate;
    }

    public void setJdbcTemplate(JdbcTemplate jdbcTemplate)
    {
        this.jdbcTemplate = jdbcTemplate;
    }

    public RowMapper<MyTable> getRowMap()
    {
        return rowMap;
    }

    public void setRowMap(RowMapper<MyTable> rowMap)
    {
        this.rowMap = rowMap;
    }

    public String getSql()
    {
        return sql;
    }

    public void setSql(String sql)
    {
        this.sql = sql;
    }

    public PreparedStatementSetter getPreparedStatementSetter()
    {
        return preparedStatementSetter;
    }

    public void setPreparedStatementSetter(PreparedStatementSetter preparedStatementSetter)
    {
        this.preparedStatementSetter = preparedStatementSetter;
    }

    /**
     * Runs the configured query once, then slices the resulting row list into
     * contiguous ranges of roughly equal size. For each range the first and
     * last row ids are exposed to the worker step as START_ID / END_ID.
     *
     * @param gridSize requested number of partitions (non-positive yields an empty map)
     * @return map of partition name ("PART_00", "PART_01", ...) to its context
     */
    @Override
    public Map<String, ExecutionContext> partition(int gridSize)
    {
        Map<String, ExecutionContext> map = new HashMap<String, ExecutionContext>();
        if (gridSize <= 0)
        {
            // Nothing sensible to split into; previously this divided by zero.
            return map;
        }
        try
        {
            List<MyTable> lstMyRows = jdbcTemplate.query(sql, preparedStatementSetter, rowMap);
            if (!lstMyRows.isEmpty())
            {
                int total = lstMyRows.size();
                // At least one row per partition; prevents endPos starting at -1
                // (and the subsequent out-of-bounds access) when gridSize > total.
                int rowsPerPartition = Math.max(1, total / gridSize);
                int leftovers = total % gridSize;
                total = lstMyRows.size() - 1; // from here on: index of the last row
                int startPos = 0;
                int endPos = rowsPerPartition - 1;
                int i = 0;
                while (endPos <= total)
                {
                    ExecutionContext context = new ExecutionContext();
                    // Fold the leftover rows into the final partition and clamp
                    // to the last index.
                    if (endPos + leftovers == total || endPos >= total)
                    {
                        endPos = total;
                    }
                    context.put("START_ID", lstMyRows.get(startPos).getId());
                    context.put("END_ID", lstMyRows.get(endPos).getId());
                    // Zero-padded names keep partitions sortable: PART_00, PART_01, ...
                    map.put("PART_" + StringUtils.leftPad("" + i, ("" + gridSize).length(), '0'), context);
                    i++;
                    startPos = endPos + 1;
                    endPos = endPos + rowsPerPartition;
                }
            }
        }
        catch (Exception e)
        {
            // NOTE(review): swallowing the exception means the job silently runs
            // with zero (or partial) partitions; consider rethrowing instead.
            e.printStackTrace();
        }
        return map;
    }
}
(下面是上文 Java 代码的机器翻译损坏版,已按原始代码重建:)

package foo;

import foo.model.MyTable;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang.StringUtils;
import org.springframework.batch.core.partition.support.Partitioner;
import org.springframework.batch.item.ExecutionContext;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.PreparedStatementSetter;
import org.springframework.jdbc.core.RowMapper;

public class MyPartitioner implements Partitioner
{
    private JdbcTemplate jdbcTemplate;
    private RowMapper<MyTable> rowMap;
    private String sql;
    private PreparedStatementSetter preparedStatementSetter;

    public JdbcTemplate getJdbcTemplate()
    {
        return jdbcTemplate;
    }

    public void setJdbcTemplate(JdbcTemplate jdbcTemplate)
    {
        this.jdbcTemplate = jdbcTemplate;
    }

    public RowMapper<MyTable> getRowMap()
    {
        return rowMap;
    }

    public void setRowMap(RowMapper<MyTable> rowMap)
    {
        this.rowMap = rowMap;
    }

    public String getSql()
    {
        return sql;
    }

    public void setSql(String sql)
    {
        this.sql = sql;
    }

    public PreparedStatementSetter getPreparedStatementSetter()
    {
        return preparedStatementSetter;
    }

    public void setPreparedStatementSetter(PreparedStatementSetter preparedStatementSetter)
    {
        this.preparedStatementSetter = preparedStatementSetter;
    }

    @Override
    public Map<String, ExecutionContext> partition(int gridSize)
    {
        Map<String, ExecutionContext> map = new HashMap<String, ExecutionContext>();
        try
        {
            List<MyTable> lstMyRows = jdbcTemplate.query(sql, preparedStatementSetter, rowMap);
            if (lstMyRows.size() > 0)
            {
                int total = lstMyRows.size();
                int rowsPerPartition = total / gridSize;
                int leftovers = total % gridSize;
                total = lstMyRows.size() - 1;
                int startPos = 0;
                int endPos = rowsPerPartition - 1;
                int i = 0;
                while (endPos <= total)
                {
                    ExecutionContext context = new ExecutionContext();
                    if (endPos + leftovers == total || endPos >= total)
                    {
                        endPos = total;
                    }
                    context.put("START_ID", lstMyRows.get(startPos).getId());
                    context.put("END_ID", lstMyRows.get(endPos).getId());
                    map.put("PART_" + StringUtils.leftPad("" + i, ("" + gridSize).length(), '0'), context);
                    i++;
                    startPos = endPos + 1;
                    endPos = endPos + rowsPerPartition;
                }
            }
        }
        catch (Exception e)
        {
            e.printStackTrace();
        }
        return map;
    }
}
评论:到底是什么占用了 RAM?如果面向块(chunk)的步骤本身就内存不足,给它加分区并不会有帮助(本质上还是同一件事)。

提问者:我想做的是分割读取器中使用的查询结果,以减少内存使用,该怎么做?我现在的情况:读取器 R(大数据量)-> 处理器 P -> 写入器 W。我想要的效果:把结果 R 拆分为 r1、r2、r3……然后依次执行:读取器 r1 -> 处理器 P -> 写入器 W;读取器 r2 -> 处理器 P -> 写入器 W;读取器 r3 -> 处理器 P -> 写入器 W……

评论:你在使用什么 ItemReader?如果用的是 Spring Batch 自带的 reader,它不应该一次性把查询的全部结果加载到内存。请贴出你的 reader 配置,我们可以从那里入手。