Java Spark: best practice for two SparkContexts in a single application

Tags: java, apache-spark, apache-spark-sql, spark-streaming

I think I have an interesting question for all of you today. In the code below you will notice that I have two SparkContexts, one for Spark Streaming and one plain SparkContext. Per best practice, a Spark application should only have one SparkContext, even though this can be circumvented via allowMultipleContexts in the configuration.

The problem is that I need to retrieve data from both Hive and a Kafka topic to run some logic against, and whenever I submit the application it predictably fails with "cannot have 2 Spark contexts running in the same JVM".

My question is: is there a correct way to do this other than how I am doing it now?

public class MainApp {

private final String logFile= Properties.getString("SparkLogFileDir");
private static final String KAFKA_GROUPID = Properties.getString("KafkaGroupId");
private static final String ZOOKEEPER_URL = Properties.getString("ZookeeperURL");
private static final String KAFKA_BROKER = Properties.getString("KafkaBroker");
private static final String KAFKA_TOPIC = Properties.getString("KafkaTopic");
private static final String Database = Properties.getString("HiveDatabase");
private static final Integer KAFKA_PARA = Properties.getInt("KafkaParrallel");

public static void main(String[] args){
    //set settings
    String sql="";

    //START APP
    System.out.println("Starting NPI_TWITTERAPP...." + new SimpleDateFormat("yyyyMMdd_HHmmss").format(Calendar.getInstance().getTime()));
    System.out.println("Configuring Settings...."+ new SimpleDateFormat("yyyyMMdd_HHmmss").format(Calendar.getInstance().getTime()));
    SparkConf conf = new SparkConf()
            .setAppName(Properties.getString("SparkAppName"))
            .setMaster(Properties.getString("SparkMasterUrl"));

    //Set Spark/hive/sql Context
    JavaSparkContext sc = new JavaSparkContext(conf);
    // Creating a second context from the same conf below is what triggers
    // "cannot have 2 Spark contexts running in the same JVM"
    JavaStreamingContext jssc = new JavaStreamingContext(conf, new Duration(5000));
    JavaHiveContext HiveSqlContext = new JavaHiveContext(sc);

    //Check if Twitter Hive Table Exists
    try {
        HiveSqlContext.sql("DROP TABLE IF EXISTS "+Database+"TWITTERSTORE");
        HiveSqlContext.sql("CREATE TABLE IF NOT EXISTS "+Database+".TWITTERSTORE "
        +" (created_at String, id String, id_str String, text String, source String, truncated String, in_reply_to_user_id String, processed_at String, lon String, lat String)"
        +" STORED AS TEXTFILE");
    }catch(Exception e){
        System.out.println(e);
    }
    //Check if Ivapp Table Exists

    sql ="CREATE TABLE IF NOT EXISTS "+Database+".IVAPPGEO AS SELECT DISTINCT a.LATITUDE, a.LONGITUDE, b.ODNCIRCUIT_OLT_CLLI, b.ODNCIRCUIT_OLT_TID, a.CITY, a.STATE, a.ZIP FROM "
            +Database+".T_PONNMS_SERVICE B, "
            +Database+".CLLI_LATLON_MSTR A WHERE a.BID_CLLI = substr(b.ODNCIRCUIT_OLT_CLLI,0,8)";
    try {
        System.out.println(sql + new SimpleDateFormat("yyyyMMdd_HHmmss").format(Calendar.getInstance().getTime()));
        HiveSqlContext.sql(sql);

        sql = "SELECT LATITUDE, LONGITUDE, ODNCIRCUIT_OLT_CLLI, ODNCIRCUIT_OLT_TID, CITY, STATE, ZIP FROM "+Database+".IVAPPGEO";

        JavaSchemaRDD RDD_IVAPPGEO = HiveSqlContext.sql(sql).cache();

    }catch(Exception e){
        System.out.println(sql + new SimpleDateFormat("yyyyMMdd_HHmmss").format(Calendar.getInstance().getTime()));
    }

    //JavaHiveContext hc = new JavaHiveContext();
    System.out.println("Retrieve Data from Kafka Topic: "+ new SimpleDateFormat("yyyyMMdd_HHmmss").format(Calendar.getInstance().getTime()));
    Map<String, Integer> topicMap = new HashMap<String, Integer>();
    topicMap.put(KAFKA_TOPIC,KAFKA_PARA);

    // KafkaUtils.createStream expects (jssc, zkQuorum, groupId, topics),
    // so the ZooKeeper quorum must come before the consumer group id
    JavaPairReceiverInputDStream<String, String> messages = KafkaUtils.createStream(
                jssc, ZOOKEEPER_URL, KAFKA_GROUPID, topicMap);

    JavaDStream<String> json = messages.map(
            new Function<Tuple2<String, String>, String>() {
                private static final long serialVersionUID = 42L;
                @Override
                public String call(Tuple2<String, String> message) {
                    return message._2();
                }
            }
    );
    System.out.println("Completed Kafka Messages... "+ new SimpleDateFormat("yyyyMMdd_HHmmss").format(Calendar.getInstance().getTime()));


    System.out.println("Filtering Resultset... "+ new SimpleDateFormat("yyyyMMdd_HHmmss").format(Calendar.getInstance().getTime()));

    JavaPairDStream<Long, String> tweets = json.mapToPair(
            new TwitterFilterFunction());

    JavaPairDStream<Long, String> filtered = tweets.filter(
            new Function<Tuple2<Long, String>, Boolean>() {
                private static final long serialVersionUID = 42L;
                @Override
                public Boolean call(Tuple2<Long, String> tweet) {
                    return tweet != null;
                }
            }
    );

    JavaDStream<Tuple2<Long, String>> tweetsFiltered = filtered.map(
            new TextFilterFunction());

    tweetsFiltered = tweetsFiltered.map(
            new StemmingFunction());

    System.out.println("Finished Filtering Resultset... "+ new SimpleDateFormat("yyyyMMdd_HHmmss").format(Calendar.getInstance().getTime()));



    System.out.println("Processing Sentiment Data... "+ new SimpleDateFormat("yyyyMMdd_HHmmss").format(Calendar.getInstance().getTime()));

    //calculate positive tweets
    JavaPairDStream<Tuple2<Long, String>, Float> positiveTweets =
            tweetsFiltered.mapToPair(new PositiveScoreFunction());
    //calculate negative tweets
    JavaPairDStream<Tuple2<Long, String>, Float> negativeTweets =
            tweetsFiltered.mapToPair(new NegativeScoreFunction());

    JavaPairDStream<Tuple2<Long, String>, Tuple2<Float, Float>> joined =
            positiveTweets.join(negativeTweets);

    //Score tweets
    JavaDStream<Tuple4<Long, String, Float, Float>> scoredTweets =
            joined.map(new Function<Tuple2<Tuple2<Long, String>,
                    Tuple2<Float, Float>>,
                    Tuple4<Long, String, Float, Float>>() {
                private static final long serialVersionUID = 42L;
                @Override
                public Tuple4<Long, String, Float, Float> call(
                        Tuple2<Tuple2<Long, String>, Tuple2<Float, Float>> tweet)
                {
                    return new Tuple4<Long, String, Float, Float>(
                            tweet._1()._1(),
                            tweet._1()._2(),
                            tweet._2()._1(),
                            tweet._2()._2());
                }
            });

    System.out.println("Finished Processing Sentiment Data... "+ new SimpleDateFormat("yyyyMMdd_HHmmss").format(Calendar.getInstance().getTime()));

    System.out.println("Outputting Tweets Data to flat file "+Properties.getString("HdfsOutput")+" ... "+ new SimpleDateFormat("yyyyMMdd_HHmmss").format(Calendar.getInstance().getTime()));

    JavaDStream<Tuple5<Long, String, Float, Float, String>> result =
            scoredTweets.map(new ScoreTweetsFunction());

    result.foreachRDD(new FileWriter());

    System.out.println("Outputting Sentiment Data to Hive... "+ new SimpleDateFormat("yyyyMMdd_HHmmss").format(Calendar.getInstance().getTime()));


    jssc.start();
    jssc.awaitTermination();
    }
}
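One option is to keep a single context behind a small singleton manager and stop and re-create it whenever the other type is needed, along these lines: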
public class ContextManager {

    private static JavaSparkContext context;
    private static String currentType;

    private ContextManager() {}

    public static synchronized JavaSparkContext getContext(String type) {

        if (type.equals(currentType) && context != null) {
            return context;
        }

        if ("streaming".equals(type)) {
            // clean up the current context -- only one SparkContext
            // may run per JVM
            if (context != null) context.stop();
            // initialize the core context; the caller wraps it, e.g.
            // new JavaStreamingContext(context, new Duration(5000))
            context = new JavaSparkContext(newConf());
            currentType = type;
        } else {
            // clean up the current context
            if (context != null) context.stop();
            // initialize a normal context
            context = new JavaSparkContext(newConf());
            currentType = type;
        }

        return context;
    }

    private static SparkConf newConf() {
        return new SparkConf()
                .setAppName(Properties.getString("SparkAppName"))
                .setMaster(Properties.getString("SparkMasterUrl"));
    }
}
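Bear in mind that stopping and re-creating a SparkContext is expensive, so this juggling only pays off when the batch and streaming phases run strictly one after the other. Hypothetical usage (the "normal"/"streaming" strings are just the convention from the sketch above):

// Batch phase against Hive on a normal context...
JavaSparkContext sc = ContextManager.getContext("normal");
JavaHiveContext hive = new JavaHiveContext(sc);
// ...then switch; the manager stops the old context first
JavaStreamingContext jssc = new JavaStreamingContext(
        ContextManager.getContext("streaming"), new Duration(5000));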
Alternatively, more recent Spark versions expose SparkContext.getOrCreate, which returns the already-running context instead of failing:

getOrCreate(): SparkContext
getOrCreate(conf: SparkConf): SparkContext
import org.apache.spark.SparkContext
val sc = SparkContext.getOrCreate()

// Using an explicit SparkConf object
import org.apache.spark.SparkConf
val conf = new SparkConf()
  .setMaster("local[*]")
  .setAppName("SparkMe App")
val sc = SparkContext.getOrCreate(conf)
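On the Java streaming side you can likewise avoid a second context by creating the streaming context first and deriving the SQLContext from its underlying SparkContext: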
SparkConf sparkConfig = new SparkConf().setAppName("foo");
JavaStreamingContext jssc = new JavaStreamingContext(sparkConfig, Durations.seconds(30));
SQLContext sqlContext = new SQLContext(jssc.sparkContext());
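Applied to the question's code, the simplest change is to create the one JavaSparkContext up front and build both the streaming and Hive contexts from it; a minimal sketch against the same Spark 1.x API used in the question:

SparkConf conf = new SparkConf()
        .setAppName(Properties.getString("SparkAppName"))
        .setMaster(Properties.getString("SparkMasterUrl"));

// One core context for the whole JVM...
JavaSparkContext sc = new JavaSparkContext(conf);
// ...shared by the streaming context (5-second batches, as in the question)...
JavaStreamingContext jssc = new JavaStreamingContext(sc, new Duration(5000));
// ...and by the Hive context, so no second SparkContext is ever created
JavaHiveContext hiveSqlContext = new JavaHiveContext(sc);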