Java map-only MapReduce job with BigQuery

Tags: java, hadoop, google-bigquery, google-hadoop

We have created a MapReduce job that injects data into BigQuery. There is not much filtering in our job, so we would like to make it a map-only job to make it faster and more efficient.

However, the Java class that BigQuery accepts, "com.google.gson.JsonObject", does not implement the Writable interface required by the Hadoop Mapper interface. JsonObject is also final, so we cannot extend it.

Any suggestions on how we can work around this?


Thanks!

You should be able to use the BigQuery connector for Hadoop (see the connector documentation), which provides an implementation of Hadoop's OutputFormat class.
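
The reason this works: in a map-only job the mapper's output goes straight to the OutputFormat (there is no shuffle), so the output value class does not need to implement Writable; it only has to be something BigQueryOutputFormat can write, and that format takes com.google.gson.JsonObject directly. Below is a minimal sketch of the relevant configuration, assuming the connector classes used in the answer that follows; the class name, project, table ID, and schema are placeholders, and the next answer has a complete, tested example.

import com.google.cloud.hadoop.io.bigquery.BigQueryConfiguration;
import com.google.cloud.hadoop.io.bigquery.BigQueryOutputFormat;
import com.google.gson.JsonObject;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.Job;

import java.io.IOException;

public class BigQueryOutputSetup {
  // Hypothetical helper: returns a Job wired up for map-only BigQuery output.
  public static Job configureJob(Configuration conf) throws IOException {
    conf.set(BigQueryConfiguration.PROJECT_ID_KEY, "my-project");  // placeholder project
    Job job = Job.getInstance(conf);
    // BigQueryOutputFormat serializes JsonObject values itself.
    job.setOutputFormatClass(BigQueryOutputFormat.class);
    BigQueryConfiguration.configureBigQueryOutput(
        job.getConfiguration(),
        "my-project:my_dataset.my_table",            // placeholder output table ID
        "[{'name': 'word', 'type': 'STRING'}]");     // placeholder schema
    job.setOutputKeyClass(LongWritable.class);  // key is discarded by the output format
    job.setOutputValueClass(JsonObject.class);  // mapper can emit JsonObject directly
    job.setNumReduceTasks(0);                   // map-only: no shuffle, so no Writable needed for the value
    return job;
  }
}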

Adding to William's answer: I wanted to test this out myself, so I created a new cluster with the BigQuery connector installed and then ran the following map-only job:

import com.google.cloud.hadoop.io.bigquery.BigQueryConfiguration;
import com.google.cloud.hadoop.io.bigquery.BigQueryOutputFormat;
import com.google.common.base.Splitter;
import com.google.gson.JsonObject;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import java.io.IOException;
import java.util.regex.Pattern;

/**
 * An example MapOnlyJob with BigQuery output
 */
public class MapOnlyJob {
  public static class MapOnlyMapper extends Mapper<LongWritable, Text, LongWritable, JsonObject> {
    private static final LongWritable KEY_OUT = new LongWritable(0L);
    // This requires a new version of guava be included in a shaded / repackaged libjar.
    private static final Splitter SPLITTER =
        Splitter.on(Pattern.compile("\\s+"))
            .trimResults()
            .omitEmptyStrings();
    protected void map(LongWritable key, Text value, Context context)
        throws IOException, InterruptedException {
      String line = value.toString();
      for (String word : SPLITTER.split(line)) {
        JsonObject json = new JsonObject();
        json.addProperty("word", word);
        json.addProperty("mapKey", key.get());
        context.write(KEY_OUT, json);
      }
    }
  }

  /**
   * Configures and runs the main Hadoop job.
   */
  public static void main(String[] args)
      throws IOException, InterruptedException, ClassNotFoundException {

    GenericOptionsParser parser = new GenericOptionsParser(args);
    args = parser.getRemainingArgs();

    if (args.length != 3) {
      System.out.println("Usage: hadoop MapOnlyJob "
          + "[projectId] [input_file] [fullyQualifiedOutputTableId]");
      String indent = "    ";
      System.out.println(indent
          + "projectId - Project under which to issue the BigQuery operations. "
          + "Also serves as the default project for table IDs which don't explicitly specify a "
          + "project for the table.");
      System.out.println(indent
          + "input_file - Input file pattern of the form "
          + "gs://foo/bar*.txt or hdfs:///foo/bar*.txt or foo*.txt");
      System.out.println(indent
          + "fullyQualifiedOutputTableId - Output table ID of the form "
          + "<optional projectId>:<datasetId>.<tableId>");
      System.exit(1);
    }

    // Global parameters from args.
    String projectId = args[0];

    // Set InputFormat parameters from args.
    String inputPattern = args[1];

    // Set OutputFormat parameters from args.
    String fullyQualifiedOutputTableId = args[2];

    // Default OutputFormat parameters for this sample.
    String outputTableSchema =
        "[{'name': 'word','type': 'STRING'},{'name': 'mapKey','type': 'INTEGER'}]";

    Configuration conf = parser.getConfiguration();
    Job job = Job.getInstance(conf);
    // Set the job-level projectId.
    conf.set(BigQueryConfiguration.PROJECT_ID_KEY, projectId);
    // Set classes and configure them:
    job.setOutputFormatClass(BigQueryOutputFormat.class);
    BigQueryConfiguration.configureBigQueryOutput(
        job.getConfiguration() /* Required as Job made a new Configuration object */,
        fullyQualifiedOutputTableId,
        outputTableSchema);
    // Configure file-based input:
    FileInputFormat.setInputPaths(job, inputPattern);

    job.setJarByClass(MapOnlyMapper.class);
    job.setMapperClass(MapOnlyMapper.class);
    // The key will be discarded by BigQueryOutputFormat.
    job.setOutputKeyClass(LongWritable.class);
    job.setOutputValueClass(JsonObject.class);
    // Make map-only
    job.setNumReduceTasks(0);

    job.waitForCompletion(true);
  }
}
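
For reference, given the usage message the job prints, a launch command might look roughly like this (the jar name, project, bucket, and table below are placeholders, and the connector, Gson, and a recent Guava are assumed to be available to the job, e.g. shaded into the job jar or passed via -libjars):

hadoop jar maponlyjob.jar MapOnlyJob my-project gs://my-bucket/input/*.txt my-project:my_dataset.word_output
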
I have the following dependencies:

<dependency>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-core</artifactId>
  <version>1.2.1</version>
  <scope>provided</scope>
</dependency>
<dependency>
  <groupId>com.google.cloud.bigdataoss</groupId>
  <artifactId>bigquery-connector</artifactId>
  <version>0.7.0-hadoop1</version>
</dependency>
<dependency>
  <groupId>com.google.guava</groupId>
  <artifactId>guava</artifactId>
  <version>18.0</version>
</dependency>
