Apache Spark: custom streaming receiver drops most of the data

Tags: apache-spark, spark-streaming

I am following the Spark Streaming example with a custom receiver provided on the Spark website.

However, the job seems to drop most of my data. No matter how much data I stream, it is all received successfully at the receiver. But when I perform any map/flatMap operation on it, I only ever see 10 rows of data, regardless of how much data I stream.

I have modified this program to read from an ActiveMQ queue. If I look at the ActiveMQ web console, the Spark job successfully consumes all the data I generate, yet only 10 records are processed per batch. I tried changing the batch size to various values, and ran it both locally and on a 6-node Spark cluster; the result is always the same.

This is really frustrating, because I cannot figure out why only a limited amount of data gets processed. Is there something I am missing here?

Here is my Spark program, including the custom receiver. Note that I am not actually creating any socket connection; instead, I am hard-coding the messages for testing purposes. The behavior is the same as when a socket connection is used for the stream.

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.rzt.main;

import com.google.common.collect.Lists;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.storage.StorageLevel;
import org.apache.spark.streaming.Duration;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaReceiverInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.receiver.Receiver;
import scala.Tuple2;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.ConnectException;
import java.net.Socket;
import java.util.regex.Pattern;

/**
 * Custom Receiver that receives data over a socket. Received bytes are
 * interpreted as text and \n delimited lines are treated as records, which
 * are then counted and printed.
 *
 * Usage: TestReceiv3 <master> <hostname> <port> <master> is the Spark master
 * URL. In local mode, <master> should be 'local[n]' with n > 1. <hostname> and
 * <port> of the TCP server that Spark Streaming would connect to receive data.
 *
 * To run this on your local machine, you need to first run a Netcat server `$
 * nc -lk 9999` and then run the example `$ bin/run-example
 * org.apache.spark.examples.streaming.TestReceiv3 localhost 9999`
 */

public class TestReceiv3 extends Receiver<String> {
    private static final Pattern SPACE = Pattern.compile(" ");

    public static void main(String[] args) {

        // Create the context with a 1 second batch size
        SparkConf sparkConf = new SparkConf().setAppName("TestReceiv3").setMaster("local[4]");
        JavaStreamingContext ssc = new JavaStreamingContext(sparkConf, new Duration(1000));

        // Create an input stream with the custom receiver on target ip:port and
        // count the words in the input stream of \n delimited text (eg. generated by 'nc')
        JavaReceiverInputDStream<String> lines = ssc.receiverStream(new TestReceiv3("TEST", 1));
        JavaDStream<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            public Iterable<String> call(String x) {
                System.out.println("Message received: " + x);
                return Lists.newArrayList(x);
            }
        });

        words.print();
        ssc.start();
        ssc.awaitTermination();
    }

    // ============= Receiver code that receives data over a socket
    // ==============

    String host = null;
    int port = -1;

    public TestReceiv3(String host_, int port_) {
        super(StorageLevel.MEMORY_AND_DISK_2());
        host = host_;
        port = port_;
    }

    public void onStart() {
        // Start the thread that receives data over a connection
        new Thread() {
            @Override
            public void run() {
                receive();
            }
        }.start();
    }

    public void onStop() {
        // There is nothing much to do as the thread calling receive()
        // is designed to stop by itself once isStopped() returns true
    }

    /** Create a socket connection and receive data until receiver is stopped */
    private void receive() {
        Socket socket = null;
        String userInput = null;

        try {

            int i = 0;
            // Until stopped or connection broken continue reading
            while (true) {
                i++;
                store("MESSAGE " + i);
                if (i == 1000)
                    break;
            }

            // Restart in an attempt to connect again when server is active
            // again
            restart("Trying to connect again");
        } catch (Throwable t) {
            restart("Error receiving data", t);
        }
    }
}

The output you are seeing comes from
words.print()
DStream.print only prints the first 10 elements of the DStream. From the API docs:

def print(): Unit

Prints the first ten elements.
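
If you want to verify that every record actually reaches Spark, or to display more than the default 10 elements, you can count each batch with foreachRDD, or pass a larger number to print (available in newer Spark versions, roughly 1.3 onwards). Below is a minimal sketch using the same Spark 1.x Java API as the code above; the number 50 and the log message are illustrative only. Add it to main() after the flatMap, together with the extra imports:

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;

// Count every record in each batch; rdd.count() touches all records,
// so this reflects everything the receiver stored, not just 10 rows.
words.foreachRDD(new Function<JavaRDD<String>, Void>() {
    public Void call(JavaRDD<String> rdd) {
        System.out.println("Records in this batch: " + rdd.count());
        return null;
    }
});

// Alternatively, count() yields a DStream with one per-batch count,
// which print() can show in full.
words.count().print();

// Or, if your Spark version supports print(int), show more elements per batch.
words.print(50);

This should make it clear that the receiver is not dropping data; print() simply truncates what it displays.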