Google BigQuery:AdWords脚本导出到BigQuery时出现"Empty response"(空响应)错误

Google BigQuery:AdWords脚本导出到BigQuery时出现"Empty response"(空响应)错误,google-bigquery,google-ads-api,Google Bigquery,Google Ads Api,利用以下AdWords脚本导出到BigQuery,BigQuery.Jobs.insert导致脚本因"空响应"而终止。该API调用没有得到响应的原因是什么? var ACCOUNTS = ['xxx','xxx']; var CONFIG = { BIGQUERY_PROJECT_ID: 'xxx', BIGQUERY_DATASET_ID: 'xxx', // Truncate existing data, otherwise will appe

利用以下AdWords脚本导出到BigQuery,BigQuery.Jobs.insert导致脚本因"空响应"(Empty response)而终止。该API调用没有得到响应的原因是什么?

    // Customer ids of the MCC child accounts whose reports will be exported.
    var ACCOUNTS = ['xxx','xxx']; 

    // Script-wide configuration: BigQuery destination, Drive backup options,
    // report date range, and the report/field schema to export.
    var CONFIG = {
      BIGQUERY_PROJECT_ID: 'xxx',
      BIGQUERY_DATASET_ID: 'xxx',

      // Truncate existing data, otherwise will append.
      TRUNCATE_EXISTING_DATASET: true,
      TRUNCATE_EXISTING_TABLES: true,

      // Back up reports to Google Drive.
      WRITE_DATA_TO_DRIVE: false,
      // Folder to put all the intermediate files.
      DRIVE_FOLDER: 'Adwords Big Query Test',

      // Default date range over which statistics fields are retrieved.
      // Format is 'YYYYMMDD,YYYYMMDD' as expected by the AWQL DURING clause.
      DEFAULT_DATE_RANGE: '20140101,20140105',

      // Lists of reports and fields to retrieve from AdWords.
      // Each FIELDS value is the BigQuery column type for that report field.
      // NOTE(review): AdWords emits '--' for empty values, so declaring
      // non-STRING column types can make the BigQuery load job fail.
      REPORTS: [{NAME: 'KEYWORDS_PERFORMANCE_REPORT',
         CONDITIONS: 'WHERE Impressions>0',
         FIELDS: {'AccountDescriptiveName' : 'STRING',
                  'Date' : 'STRING',
                  'CampaignId' : 'STRING',
                  'CampaignName' : 'STRING',
                  'AdGroupId' : 'STRING',
                  'AdGroupName' : 'STRING',
                  'Id' : 'STRING',
                  'Criteria' : 'STRING',
                  'KeywordMatchType' : 'STRING',
                  'AdNetworkType1' : 'STRING',
                  'AdNetworkType2' : 'STRING',
                  'Device' : 'STRING',
                  'AveragePosition' : 'STRING',
                  'QualityScore' : 'STRING',
                  'CpcBid' : 'STRING',
                  'TopOfPageCpc' : 'STRING',
                  'Impressions' : 'STRING',
                  'Clicks' : 'STRING',
                  'ConvertedClicks' : 'STRING',
                  'Cost' : 'STRING',
                  'Conversions' : 'STRING'
                 }
        }],

      // Addresses notified by sendEmail() once all load jobs complete.
      RECIPIENT_EMAILS: [
        'xxx',
      ]
    };

    /**
     * Entry point: creates the BigQuery dataset and tables, runs the
     * configured reports for each child account, starts a BigQuery load job
     * per report, waits for all jobs, then emails the recipients.
     */
    function main() {
      createDataset();
      for (var i = 0; i < CONFIG.REPORTS.length; i++) {
        var reportConfig = CONFIG.REPORTS[i];
        createTable(reportConfig);
      }

      // BUG FIX: 'folder' was previously assigned without 'var', creating an
      // implicit global. Declare it locally.
      var folder = getDriveFolder();

      // Get an account iterator over the configured MCC child accounts.
      var accountIterator = MccApp.accounts().withIds(ACCOUNTS).withLimit(10).get();
      var jobIdMap = {};
      while (accountIterator.hasNext()) {
         // Get the current account.
         var account = accountIterator.next();

         // Select the child account so reports run in its context.
         MccApp.select(account);

         // Run reports against child account; remember the BigQuery job ids.
         var accountJobIds = processReports(folder, account.getCustomerId());
         jobIdMap[account.getCustomerId()] = accountJobIds;
      }

      waitTillJobsComplete(jobIdMap);
      sendEmail(jobIdMap);
    }


    /**
     * Ensures the configured BigQuery dataset exists. An existing dataset is
     * deleted (with contents) when TRUNCATE_EXISTING_DATASET is set;
     * otherwise it is left untouched.
     */
    function createDataset() {
      if (datasetExists()) {
        // Keep the existing dataset unless truncation was requested.
        if (!CONFIG.TRUNCATE_EXISTING_DATASET) {
          Logger.log('Dataset %s already exists.  Will not recreate.',
           CONFIG.BIGQUERY_DATASET_ID);
          return;
        }
        BigQuery.Datasets.remove(CONFIG.BIGQUERY_PROJECT_ID,
          CONFIG.BIGQUERY_DATASET_ID, {'deleteContents' : true});
        Logger.log('Truncated dataset.');
      }

      // Build and insert a fresh dataset resource.
      var newDataset = BigQuery.newDataset();
      newDataset.friendlyName = CONFIG.BIGQUERY_DATASET_ID;
      newDataset.datasetReference = BigQuery.newDatasetReference();
      newDataset.datasetReference.projectId = CONFIG.BIGQUERY_PROJECT_ID;
      newDataset.datasetReference.datasetId = CONFIG.BIGQUERY_DATASET_ID;

      newDataset = BigQuery.Datasets.insert(newDataset, CONFIG.BIGQUERY_PROJECT_ID);
      Logger.log('Created dataset with id %s.', newDataset.id);
    }

    /**
     * Checks if dataset already exists in project.
     *
     * @return {boolean} Returns true if dataset already exists.
     */
    function datasetExists() {
      // List every dataset in the project and look for an id match.
      var listing = BigQuery.Datasets.list(CONFIG.BIGQUERY_PROJECT_ID);
      var candidates = listing.datasets || [];
      return candidates.some(function(candidate) {
        return candidate.datasetReference.datasetId ==
            CONFIG.BIGQUERY_DATASET_ID;
      });
    }

    /**
     * Ensures a BigQuery table exists for the given report config. An
     * existing table is dropped when TRUNCATE_EXISTING_TABLES is set,
     * otherwise left as-is. The schema is a leading AccountId STRING column
     * followed by one column per configured report field.
     *
     * @param {Object} reportConfig Report descriptor (NAME, FIELDS, ...).
     */
    function createTable(reportConfig) {
      if (tableExists(reportConfig.NAME)) {
        // Keep the existing table unless truncation was requested.
        if (!CONFIG.TRUNCATE_EXISTING_TABLES) {
          Logger.log('Table %s already exists.  Will not recreate.',
              reportConfig.NAME);
          return;
        }
        BigQuery.Tables.remove(CONFIG.BIGQUERY_PROJECT_ID,
            CONFIG.BIGQUERY_DATASET_ID, reportConfig.NAME);
        Logger.log('Truncated dataset %s.', reportConfig.NAME);
      }

      // Assemble the column list, starting with the synthetic AccountId.
      var columns = [];

      var accountColumn = BigQuery.newTableFieldSchema();
      accountColumn.description = 'AccountId';
      accountColumn.name = 'AccountId';
      accountColumn.type = 'STRING';
      columns.push(accountColumn);

      // One column per report field, typed per the config.
      Object.keys(reportConfig.FIELDS).forEach(function(fieldName) {
        var column = BigQuery.newTableFieldSchema();
        column.description = fieldName;
        column.name = fieldName;
        column.type = reportConfig.FIELDS[fieldName];
        columns.push(column);
      });

      var tableSchema = BigQuery.newTableSchema();
      tableSchema.fields = columns;

      var table = BigQuery.newTable();
      table.schema = tableSchema;
      table.friendlyName = reportConfig.NAME;
      table.tableReference = BigQuery.newTableReference();
      table.tableReference.datasetId = CONFIG.BIGQUERY_DATASET_ID;
      table.tableReference.projectId = CONFIG.BIGQUERY_PROJECT_ID;
      table.tableReference.tableId = reportConfig.NAME;

      table = BigQuery.Tables.insert(table, CONFIG.BIGQUERY_PROJECT_ID,
          CONFIG.BIGQUERY_DATASET_ID);

      Logger.log('Created table with id %s.', table.id);
    }

    /**
     * Checks whether a table with the given id exists in the configured
     * dataset.
     *
     * @param {string} tableId Table id to look up.
     * @return {boolean} True when the table already exists.
     */
    function tableExists(tableId) {
      // List all tables in the dataset and look for an id match.
      var listing = BigQuery.Tables.list(CONFIG.BIGQUERY_PROJECT_ID,
          CONFIG.BIGQUERY_DATASET_ID);
      var candidates = listing.tables || [];
      return candidates.some(function(candidate) {
        return candidate.tableReference.tableId == tableId;
      });
    }

    /**
     * Runs every configured report for one account, optionally backs the CSV
     * up to Drive, and starts a BigQuery load job per report.
     *
     * @param {Folder} folder Drive folder for optional CSV backups.
     * @param {string} accountId Customer id of the account being processed.
     * @return {!Array.<string>} BigQuery job ids of the started load jobs.
     */
    function processReports(folder, accountId) {
      var jobIds = [];

      // Iterate over each report type.
      CONFIG.REPORTS.forEach(function(reportConfig) {
        Logger.log('Running report %s for account %s', reportConfig.NAME,
            accountId);

        // Pull the report as CSV text.
        var csvData = retrieveAdwordsReport(reportConfig, accountId);

        // If configured, keep a copy of the raw CSV on Drive.
        if (CONFIG.WRITE_DATA_TO_DRIVE) {
          var fileName = reportConfig.NAME + '_' + accountId;
          folder.createFile(fileName, csvData, MimeType.CSV);
          Logger.log('Exported data to Drive folder ' +
                 CONFIG.DRIVE_FOLDER + ' for report ' + fileName);
        }

        // Wrap the CSV in a blob and hand it to the BigQuery loader.
        var blobData = Utilities.newBlob(csvData, 'application/octet-stream');
        jobIds.push(loadDataToBigquery(reportConfig, blobData));
      });
      return jobIds;
    }

    /**
     * Downloads one AdWords report for one account and renders it as CSV,
     * prefixing every row with the account id.
     *
     * @param {Object} reportConfig Report descriptor (NAME, FIELDS, CONDITIONS).
     * @param {string} accountId Customer id prepended to every data row.
     * @return {string} CSV text, header row first, rows joined by '\n'.
     */
    function retrieveAdwordsReport(reportConfig, accountId) {
      var fieldNames = Object.keys(reportConfig.FIELDS);
      // Build the AWQL query from the configured fields/conditions/date range.
      var report = AdWordsApp.report(
        'SELECT ' + fieldNames.join(',') +
        ' FROM ' + reportConfig.NAME + ' ' + reportConfig.CONDITIONS +
        ' DURING ' + CONFIG.DEFAULT_DATE_RANGE);
      var rows = report.rows();
      var csvRows = [];
      // Header row (note: it is included in the row count logged below).
      csvRows.push('AccountId,'+fieldNames.join(','));

      // Iterate over each row.
      while (rows.hasNext()) {
        var row = rows.next();
        var csvRow = [];
        csvRow.push(accountId);

        for (var i = 0; i < fieldNames.length; i++) {
          var fieldName = fieldNames[i];
          var fieldValue = row[fieldName].toString();
          var fieldType = reportConfig.FIELDS[fieldName];
          // Add double quotes to any string values.
          if (fieldType == 'STRING') {
            // BUG FIX: String.prototype.replace with a string pattern only
            // replaces the FIRST occurrence; values containing several
            // commas/quotes/pluses would corrupt the CSV. Use global regexes
            // so every occurrence is stripped before the value is quoted.
            fieldValue = fieldValue.replace(/,/g, '');  // commas break CSV columns
            fieldValue = fieldValue.replace(/"/g, '');  // quotes break CSV quoting
            fieldValue = fieldValue.replace(/\+/g, ''); // '+' in broad-match keywords
            fieldValue = '"' + fieldValue + '"';
          }
          csvRow.push(fieldValue);
        }
        csvRows.push(csvRow.join(','));
      }
      Logger.log('Downloaded ' + reportConfig.NAME + ' for account ' + accountId +
          ' with ' + csvRows.length + ' rows.');
      return csvRows.join('\n');
    }

    /**
     * Returns the Drive folder used for CSV backups, creating it when no
     * folder with the configured name exists. When several folders share the
     * name, the first match is used.
     *
     * @return {Folder} The backup folder.
     */
    function getDriveFolder() {
      var matches = DriveApp.getFoldersByName(CONFIG.DRIVE_FOLDER);
      if (!matches.hasNext()) {
        return DriveApp.createFolder(CONFIG.DRIVE_FOLDER);
      }
      // Assume first folder is the correct one.
      Logger.log('Folder name found.  Using existing folder.');
      return matches.next();
    }

    /**
     * Starts a BigQuery load job that ingests the given CSV blob into the
     * report's destination table.
     *
     * @param {Object} reportConfig Report descriptor (NAME used as table id).
     * @param {Blob} data CSV report data to load.
     * @return {string} The job id of the started load job.
     */
    function loadDataToBigquery(reportConfig, data) {
      // Generates a pseudo-random 32-hex-char job id. BigQuery job ids must
      // be unique within the project; Math.random is adequate at this volume.
      function guid() {
        function s4() {
          return Math.floor((1 + Math.random()) * 0x10000)
              .toString(16)
              .substring(1);
        }
        return s4() + s4() + s4() + s4() + s4() + s4() + s4() + s4();
      }

      var makeId = guid();
      var job = {
        jobReference: {
          // Explicit projectId avoids ambiguous job references on insert.
          projectId: CONFIG.BIGQUERY_PROJECT_ID,
          jobId: makeId
        },
        configuration: {
          load: {
            destinationTable: {
              projectId: CONFIG.BIGQUERY_PROJECT_ID,
              datasetId: CONFIG.BIGQUERY_DATASET_ID,
              tableId: reportConfig.NAME
            },
            skipLeadingRows: 1,
            ignoreUnknownValues: true,
            allowJaggedRows: true,
            allowLargeResults: true
          }
        }
      };

      var insertJob = BigQuery.Jobs.insert(job, CONFIG.BIGQUERY_PROJECT_ID, data);
      Logger.log('Load job started for %s. Check on the status of it here: ' +
                 'https://bigquery.cloud.google.com/jobs/%s', reportConfig.NAME,
                 CONFIG.BIGQUERY_PROJECT_ID);
      // BUG FIX: return the job id echoed back in the server response rather
      // than the locally built request object, so the status poller looks up
      // the id the service actually registered.
      return insertJob.jobReference.jobId;
    }

    /**
     * Blocks until every BigQuery load job in the map reports DONE, polling
     * every five seconds and warning when the script nears its time limit.
     *
     * @param {Object} jobIdMap Map of account id -> array of job ids.
     */
    function waitTillJobsComplete(jobIdMap) {
      // Flatten the per-account job id lists into one pending list.
      var pending = [];
      Object.keys(jobIdMap).forEach(function(accountId) {
        pending = pending.concat(jobIdMap[accountId]);
      });

      // Poll BigQuery until nothing is left pending.
      while (true) {
        if (AdWordsApp.getExecutionInfo().getRemainingTime() < 5){
          Logger.log('Script is about to timeout, jobs ' + pending.join(',') +
            ' are still incomplete.');
        }
        pending = getIncompleteJobs(pending);
        if (pending.length == 0) {
          break;
        }
        Logger.log(pending.length + ' jobs still being processed.');
        // Wait 5 seconds before checking status again.
        Utilities.sleep(5000);
      }
      Logger.log('All jobs processed.');
    }

    /**
     * Filters a list of BigQuery job ids down to those not yet DONE.
     *
     * @param {!Array.<string>} jobIds Job ids to check.
     * @return {!Array.<string>} Job ids still running or pending.
     */
    function getIncompleteJobs(jobIds) {
      return jobIds.filter(function(jobId) {
        var jobStatus = BigQuery.Jobs.get(CONFIG.BIGQUERY_PROJECT_ID, jobId);
        return jobStatus.status.state != 'DONE';
      });
    }

我尝试了很多调整,但答案对我来说并不明显。谢谢你的帮助

我可能错了,但是-我认为问题出在jobId上,因为guid()函数有问题-缺少“+”号

为什么不从下面的响应中使用jobId呢

var job = {
  configuration: {
      load: {
        destinationTable: {
          projectId: CONFIG.BIGQUERY_PROJECT_ID,
          datasetId: CONFIG.BIGQUERY_DATASET_ID,
          tableId: reportConfig.NAME
        },
        skipLeadingRows: 1,
        ignoreUnknownValues: true,
        allowJaggedRows: true,
        allowLargeResults: true
      }
  }
};

var insertJob = BigQuery.Jobs.insert(job, CONFIG.BIGQUERY_PROJECT_ID, data);
Logger.log('Load job started for %s. Check on the status of it here: ' +
    'https://bigquery.cloud.google.com/jobs/%s', reportConfig.NAME,
     CONFIG.BIGQUERY_PROJECT_ID);
return insertJob.jobReference.jobId;
补充说明:

在这种情况下,我建议记录jobId(makeId=guid())并在下面的链接中获取作业状态

输入ProjectId和JobId,您至少会看到您的工作发生了什么

AdWords将“-”放在中表示空值。如果将报表字段定义为除字符串以外的任何内容(例如,浮点、整数等),则插入将失败,因为它无法将虚线转换为浮点或整数


尝试将所有字段设置为字符串,看看这是否解决了问题。

是否尝试将WRITE_DATA_to_DRIVE参数设置为true以确认报告导出成功?结果有多大?我在尝试大于10MB的插入(根据列的不同,大约有25k行)时也会遇到相同的错误。如果导出到Google Drive的文件看起来不错,您可以在RetrieveAddOrdsReport中向while循环添加一个条件以限制文件大小。还有一篇文章提到了在包含AdNetworkType列时的一个问题:

限制结果大小:

var processedRows = 0;

// Iterate over each row.
while (rows.hasNext() && ++processedRows < 5000) {
    var row = rows.next();
    var csvRow = [];
    csvRow.push(accountId);

    if (processedRows % 1000 == 0)
    {
        Logger.log('Processed %s rows.',processedRows);
    }
...
var processedRows = 0;
// 迭代每一行。
while (rows.hasNext() && ++processedRows < 5000) {
    var row = rows.next();
    var csvRow = [];
    csvRow.push(accountId);

    if (processedRows % 1000 == 0)
    {
        Logger.log('Processed %s rows.', processedRows);
    }
...

我看不出BigQuery.Jobs.insert有错误!你为什么这么认为?你到底遇到了什么错误?有两个原因:-如果你跟踪日志,它将是最后一次成功输入日志后的下一个函数-它会特别调用该行:创建了id为xxx的数据集。创建了id为xxx的表。找到了文件夹名称。使用现有文件夹。运行帐户xxx的报告关键字\u性能\u报告下载了帐户xxx的关键字\u性能\u报告,共52284行。将数据导出到驱动器文件夹Adwords大查询测试报告关键字\u性能\u报告\u xxx空响应(第396行)第396行为:var insertJob=BigQuery.Jobs.insert(job,CONFIG.BIGQUERY_PROJECT_ID,data);为了安全起见-你能尝试将projectId添加到job的jobReference中吗-jobReference:{projectId:CONFIG.BIGQUERY_PROJECT_ID,jobId:makeId},刚刚尝试了一下,得到了相同的错误消息。你有另一个建议,但它太大了,无法发表评论,所以我把它作为答案-即使在空中射击:o(我已经捕获了+号错误并更正了它,但仍然收到了错误。此外,我最初使用的方法是检索jobId,如您所示,但我收到了该错误,因此我尝试将此方法作为故障排除步骤,但也不起作用。我很想检查作业状态,但它甚至没有达到t。)BigQuery项目,这就是为什么我真的认为错误发生在特定的insert函数上。它似乎得到了一个“空响应”,因为操作没有执行,但我无法找出原因。明白了,这就是为什么我建议将显式projectId添加到jobReference.as next(假设CONFIG.BIGQUERY\u PROJECT\u ID设置正确)-我会在var jobId=loadDataToBigquery(reportConfig,blobData)中跟踪blobData祝贺您的第一个答案!您能为这个答案添加一个代码示例吗?
var job = {
  configuration: {
      load: {
        destinationTable: {
          projectId: CONFIG.BIGQUERY_PROJECT_ID,
          datasetId: CONFIG.BIGQUERY_DATASET_ID,
          tableId: reportConfig.NAME
        },
        skipLeadingRows: 1,
        ignoreUnknownValues: true,
        allowJaggedRows: true,
        allowLargeResults: true
      }
  }
};

var insertJob = BigQuery.Jobs.insert(job, CONFIG.BIGQUERY_PROJECT_ID, data);
Logger.log('Load job started for %s. Check on the status of it here: ' +
    'https://bigquery.cloud.google.com/jobs/%s', reportConfig.NAME,
     CONFIG.BIGQUERY_PROJECT_ID);
return insertJob.jobReference.jobId;
var processedRows = 0;

// Iterate over each row.
while (rows.hasNext() && ++processedRows < 5000) {
    var row = rows.next();
    var csvRow = [];
    csvRow.push(accountId);

    if (processedRows % 1000 == 0)
    {
        Logger.log('Processed %s rows.',processedRows);
    }
...