Data can be read from Cloud Spanner in parallel using the BatchClient class. With the JDBC driver this is exposed through batch read-only mode:
if (source.isWrapperFor(ICloudSpannerConnection.class))
{
    ICloudSpannerConnection con = source.unwrap(ICloudSpannerConnection.class);
    // Make sure no transaction is running
    if (!con.isBatchReadOnly())
    {
        if (con.getAutoCommit())
        {
            con.setAutoCommit(false);
        }
        else
        {
            con.commit();
        }
        con.setBatchReadOnly(true);
    }
}
// Executing the query in batch read-only mode returns one result set per partition
Statement statement = source.createStatement();
boolean hasResults = statement.execute(select);
int workerNumber = 0;
while (hasResults)
{
    ResultSet rs = statement.getResultSet();
    PartitionWorker worker = new PartitionWorker("PartitionWorker-" + workerNumber, config, rs, tableSpec, table, insertCols);
    workers.add(worker);
    hasResults = statement.getMoreResults(Statement.KEEP_CURRENT_RESULT);
    workerNumber++;
}
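
The JDBC driver's batch read-only mode builds on the BatchClient API of the underlying client library. For comparison, here is a minimal sketch of the same partitioned read done with com.google.cloud.spanner directly; the project, instance, database and query are placeholder values.

import java.util.List;

import com.google.cloud.spanner.BatchClient;
import com.google.cloud.spanner.BatchReadOnlyTransaction;
import com.google.cloud.spanner.DatabaseId;
import com.google.cloud.spanner.Partition;
import com.google.cloud.spanner.PartitionOptions;
import com.google.cloud.spanner.ResultSet;
import com.google.cloud.spanner.Spanner;
import com.google.cloud.spanner.SpannerOptions;
import com.google.cloud.spanner.Statement;
import com.google.cloud.spanner.TimestampBound;

Spanner spanner = SpannerOptions.newBuilder().build().getService();
BatchClient batchClient = spanner.getBatchClient(
        DatabaseId.of("my-project", "my-instance", "my-database"));
try (BatchReadOnlyTransaction transaction =
        batchClient.batchReadOnlyTransaction(TimestampBound.strong()))
{
    // Split the query into partitions; each partition can be read by its own worker
    List<Partition> partitions = transaction.partitionQuery(
            PartitionOptions.getDefaultInstance(),
            Statement.of("SELECT * FROM my_table"));
    for (Partition partition : partitions)
    {
        try (ResultSet rs = transaction.execute(partition))
        {
            while (rs.next())
            {
                // process the row
            }
        }
    }
}

The conversion routine that drives the copy, and the UploadWorker that performs it, are shown below.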
public void convert(String catalog, String schema) throws SQLException
{
    int batchSize = config.getBatchSize();
    destination.setAutoCommit(false);
    // Set the source connection to transaction mode (no autocommit) and read-only
    source.setAutoCommit(false);
    source.setReadOnly(true);
    try (ResultSet tables = destination.getMetaData().getTables(catalog, schema, null, new String[] { "TABLE" }))
    {
        while (tables.next())
        {
            String tableSchema = tables.getString("TABLE_SCHEM");
            if (!config.getDestinationDatabaseType().isSystemSchema(tableSchema))
            {
                String table = tables.getString("TABLE_NAME");
                // Check whether the destination table is empty.
                int destinationRecordCount = getDestinationRecordCount(table);
                if (destinationRecordCount == 0 || config.getDataConvertMode() == ConvertMode.DropAndRecreate)
                {
                    if (destinationRecordCount > 0)
                    {
                        deleteAll(table);
                    }
                    int sourceRecordCount = getSourceRecordCount(getTableSpec(catalog, tableSchema, table));
                    if (sourceRecordCount > batchSize)
                    {
                        convertTableWithWorkers(catalog, tableSchema, table);
                    }
                    else
                    {
                        convertTable(catalog, tableSchema, table);
                    }
                }
                else
                {
                    if (config.getDataConvertMode() == ConvertMode.ThrowExceptionIfExists)
                        throw new IllegalStateException("Table " + table + " is not empty");
                    else if (config.getDataConvertMode() == ConvertMode.SkipExisting)
                        log.info("Skipping data copy for table " + table);
                }
            }
        }
    }
    source.commit();
}
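
The helpers getSourceRecordCount and getDestinationRecordCount are not included in the snippet. A minimal sketch, assuming they simply run a COUNT(*) query against the corresponding connection (the method below is hypothetical):

private int getSourceRecordCount(String tableSpec) throws SQLException
{
    // Hypothetical helper; the original implementation is not shown
    try (Statement statement = source.createStatement();
            ResultSet rs = statement.executeQuery("SELECT COUNT(*) FROM " + tableSpec))
    {
        return rs.next() ? rs.getInt(1) : 0;
    }
}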

private void convertTableWithWorkers(String catalog, String schema, String table) throws SQLException
{
    String tableSpec = getTableSpec(catalog, schema, table);
    Columns insertCols = getColumns(catalog, schema, table, false);
    Columns selectCols = getColumns(catalog, schema, table, true);
    if (insertCols.primaryKeyCols.isEmpty())
    {
        log.warning("Table " + tableSpec + " does not have a primary key. No data will be copied.");
        return;
    }
    log.info("About to copy data from table " + tableSpec);

    int batchSize = config.getBatchSize();
    int totalRecordCount = getSourceRecordCount(tableSpec);
    int numberOfWorkers = calculateNumberOfWorkers(totalRecordCount);
    int numberOfRecordsPerWorker = totalRecordCount / numberOfWorkers;
    if (totalRecordCount % numberOfWorkers > 0)
        numberOfRecordsPerWorker++;
    int currentOffset = 0;
    ExecutorService service = Executors.newFixedThreadPool(numberOfWorkers);
    for (int workerNumber = 0; workerNumber < numberOfWorkers; workerNumber++)
    {
        int workerRecordCount = Math.min(numberOfRecordsPerWorker, totalRecordCount - currentOffset);
        UploadWorker worker = new UploadWorker("UploadWorker-" + workerNumber, selectFormat, tableSpec, table,
                insertCols, selectCols, currentOffset, workerRecordCount, batchSize, source,
                config.getUrlDestination(), config.isUseJdbcBatching());
        service.submit(worker);
        currentOffset = currentOffset + numberOfRecordsPerWorker;
    }
    service.shutdown();
    try
    {
        boolean finished = service.awaitTermination(config.getUploadWorkerMaxWaitInMinutes(), TimeUnit.MINUTES);
        if (!finished)
            log.warning("Upload workers did not finish within the maximum wait time");
    }
    catch (InterruptedException e)
    {
        log.severe("Error while waiting for workers to finish: " + e.getMessage());
        throw new RuntimeException(e);
    }
}
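
calculateNumberOfWorkers is likewise not shown. One plausible implementation derives the worker count from the configured batch size and caps it at a maximum; config.getMaxWorkers() is an assumed configuration property:

private int calculateNumberOfWorkers(int totalRecordCount)
{
    // Hypothetical: one worker per batch of records, capped at an assumed maximum
    int workers = totalRecordCount / config.getBatchSize() + 1;
    return Math.min(workers, config.getMaxWorkers());
}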

public class UploadWorker implements Runnable
{
private static final Logger log = Logger.getLogger(UploadWorker.class.getName());

private final String name;

private String selectFormat;

private String sourceTable;

private String destinationTable;

private Columns insertCols;

private Columns selectCols;

private int beginOffset;

private int numberOfRecordsToCopy;

private int batchSize;

private Connection source;

private String urlDestination;

private boolean useJdbcBatching;

UploadWorker(String name, String selectFormat, String sourceTable, String destinationTable, Columns insertCols,
        Columns selectCols, int beginOffset, int numberOfRecordsToCopy, int batchSize, Connection source,
        String urlDestination, boolean useJdbcBatching)
{
    this.name = name;
    this.selectFormat = selectFormat;
    this.sourceTable = sourceTable;
    this.destinationTable = destinationTable;
    this.insertCols = insertCols;
    this.selectCols = selectCols;
    this.beginOffset = beginOffset;
    this.numberOfRecordsToCopy = numberOfRecordsToCopy;
    this.batchSize = batchSize;
    this.source = source;
    this.urlDestination = urlDestination;
    this.useJdbcBatching = useJdbcBatching;
}

@Override
public void run()
{
    // Connection source = DriverManager.getConnection(urlSource);
    try (Connection destination = DriverManager.getConnection(urlDestination))
    {
        log.info(name + ": " + sourceTable + ": Starting copying " + numberOfRecordsToCopy + " records");

        destination.setAutoCommit(false);
        String sql = "INSERT INTO " + destinationTable + " (" + insertCols.getColumnNames() + ") VALUES \n";
        sql = sql + "(" + insertCols.getColumnParameters() + ")";
        PreparedStatement statement = destination.prepareStatement(sql);

        int lastRecord = beginOffset + numberOfRecordsToCopy;
        int recordCount = 0;
        int currentOffset = beginOffset;
        while (true)
        {
        int limit = Math.min(batchSize, lastRecord - currentOffset);
        // Guard against an endless loop if the source holds fewer rows than were counted
        if (limit <= 0)
            break;
            String select = selectFormat.replace("$COLUMNS", selectCols.getColumnNames());
            select = select.replace("$TABLE", sourceTable);
            select = select.replace("$PRIMARY_KEY", selectCols.getPrimaryKeyColumns());
            select = select.replace("$BATCH_SIZE", String.valueOf(limit));
            select = select.replace("$OFFSET", String.valueOf(currentOffset));
        // Close the statement together with its result set so it does not leak
        try (Statement selectStatement = source.createStatement();
                ResultSet rs = selectStatement.executeQuery(select))
            {
                while (rs.next())
                {
                    int index = 1;
                    for (Integer type : insertCols.columnTypes)
                    {
                        Object object = rs.getObject(index);
                        statement.setObject(index, object, type);
                        index++;
                    }
                    if (useJdbcBatching)
                        statement.addBatch();
                    else
                        statement.executeUpdate();
                    recordCount++;
                }
                if (useJdbcBatching)
                    statement.executeBatch();
            }
            destination.commit();
            log.info(name + ": " + sourceTable + ": Records copied so far: " + recordCount + " of "
                    + numberOfRecordsToCopy);
            currentOffset = currentOffset + batchSize;
            if (recordCount >= numberOfRecordsToCopy)
                break;
        }
    }
    catch (SQLException e)
    {
        log.severe("Error during data copy: " + e.getMessage());
        throw new RuntimeException(e);
    }
    log.info(name + ": Finished copying");
}

}
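
The selectFormat template is also not part of the snippet. Judging from the placeholders substituted in run(), it presumably looks something like the line below; deterministic ordering by the primary key is what keeps the workers' LIMIT/OFFSET paging stable.

// Hypothetical selectFormat; run() replaces each $ placeholder before executing
String selectFormat = "SELECT $COLUMNS FROM $TABLE ORDER BY $PRIMARY_KEY "
        + "LIMIT $BATCH_SIZE OFFSET $OFFSET";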