Apache Spark: filtering a Dataset based on index id's

Tags: apache-spark, java-8, apache-spark-dataset

I have a Dataset df whose contents have an index serving as the accountId, and an ArrayList of accountIds. How can I filter or map the Dataset to create a new Dataset that contains only the rows whose accountId appears in the ArrayList?

I am using Java 8.

List<String> accountIdList= new ArrayList<String>();
accountIdList.add("1001");
accountIdList.add("1002");
accountIdList.add("1003");
accountIdList.add("1004");
Dataset<Row> filteredRows=  df.filter(p-> df.col("accountId").equals(accountIdList));
I am trying to pass the list itself to the comparison operator. Do you think this is the right approach?
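(A note on why this fails: Column.equals is plain Java object equality, not a SQL comparison. The lambda above compares a Column object against a List, which is always false, so this filter should return an empty Dataset. The answers below build an actual SQL predicate with isin instead.)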

If you are looking for the Java syntax:

Dataset<Row> filteredRows=  df.where(df.col("accountId").isin(accountIdList.toArray()));
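Column.isin takes varargs (Object...), which is why the list is spread with toArray(). Below is a minimal runnable sketch of this approach; the class name, local session, and sample values are illustrative assumptions, not taken from the question:

import java.util.Arrays;
import java.util.List;

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class IsinExample {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder().master("local[*]").getOrCreate();

        List<String> accountIdList = Arrays.asList("1001", "1002", "1003", "1004");

        // Hypothetical one-column input of account ids.
        Dataset<Row> df = spark
                .createDataset(Arrays.asList("1001", "1005", "1003", "1009"), Encoders.STRING())
                .toDF("accountId");

        // isin takes varargs, so the list is spread with toArray().
        Dataset<Row> filteredRows = df.where(df.col("accountId").isin(accountIdList.toArray()));
        filteredRows.show(); // expect 1001 and 1003

        spark.stop();
    }
}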

Use the Column.isin method:

import scala.collection.JavaConversions;
import static org.apache.spark.sql.functions.*;

Dataset<Row> filteredRows = df.where(col("accountId").isin(
  JavaConversions.asScalaIterator(accountIdList.iterator()).toSeq()
));
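Note that scala.collection.JavaConversions is deprecated as of Scala 2.12. If you are on Spark 2.4 or later (an assumption about your version), Column.isInCollection accepts a java.lang.Iterable directly, so the Scala conversion can be dropped:

import static org.apache.spark.sql.functions.col;

// Assuming Spark 2.4+: the Java list is passed directly, no Scala conversion.
Dataset<Row> filteredRows = df.where(col("accountId").isInCollection(accountIdList));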

Here is working Java code. Hope it helps.

Here is my sample file content (input):-

1001
1008
1005
1009
1010

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.SparkSession;

public class DatasetFilter {

    private static List<String> sampleList = new ArrayList<String>();

    public static void main(String[] args)
    {
        // The whitelist of accountIds to keep.
        sampleList.add("1001");
        sampleList.add("1002");
        sampleList.add("1003");
        sampleList.add("1004");
        sampleList.add("1005");

        SparkSession sparkSession = SparkSession.builder()
                .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
                .config("spark.sql.warehouse.dir", "file:///C:/Users/user/workspace/Validation/spark-warehouse")
                .master("local[*]").getOrCreate();

        // Read the source file: one accountId per line.
        Dataset<String> src = sparkSession.read().textFile("C:\\Users\\user\\Desktop\\dataSetFilterTest.txt");
        src.show(10);

        // Apply the filter: keep a row only if its value appears in sampleList.
        Dataset<String> filteredSource = src.filter(new FilterFunction<String>() {

            private static final long serialVersionUID = 1L;

            @Override
            public boolean call(String value) throws Exception {
                System.out.println("***************************************");
                boolean status = false;
                Iterator<String> iterator = sampleList.iterator();
                while (iterator.hasNext()) {
                    String val = iterator.next();
                    System.out.println("Val is :: " + val + " Value is :: " + value);
                    if (value.equalsIgnoreCase(val)) {
                        status = true;
                        break;
                    }
                }
                return status;
            }
        });

        filteredSource.show();

        System.out.println("Completed the job :)");
    }

}
Output:- (src.show(10) prints all five input values; filteredSource.show() retains 1001 and 1005)
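A note on the design choice: the typed FilterFunction above runs opaque Java code for every row, so Catalyst cannot optimize or push down the membership test, whereas the Column-based isin/where variants stay inside the SQL engine. For a small lookup list both work; isin is generally the more idiomatic option.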

