Spark 提交时报错:java.lang.ClassNotFoundException(找不到 Java 类)

spark提交类未找到错误。未找到java类,java,apache-spark,hadoop,Java,Apache Spark,Hadoop,我已经安装了JavaJDK8。我不知道为什么我会犯这个错误。 路径也做得很好。我做错了什么。请帮我解决这个问题。我的spark版本是2.4.7。我正在使用IntellijIDE 当我试图运行代码时,我遇到了这个错误 C:\spark\spark-2.4.7-bin-hadoop2.7\bin>spark-submit --class TopViewedCategories --master local C:\Users\Piyush\IdeaProjects\BDA\target\BDA-

我已经安装了JavaJDK8。我不知道为什么我会犯这个错误。 路径也做得很好。我做错了什么。请帮我解决这个问题。我的spark版本是2.4.7。我正在使用IntellijIDE 当我试图运行代码时,我遇到了这个错误

C:\spark\spark-2.4.7-bin-hadoop2.7\bin>spark-submit --class TopViewedCategories --master local C:\Users\Piyush\IdeaProjects\BDA\target\BDA-1.0-SNAPSHOT.jar
21/05/03 16:25:37 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
21/05/03 16:25:37 WARN SparkSubmit$$anon$2: Failed to load TopViewedCategories.
java.lang.ClassNotFoundException: TopViewedCategories
        at java.net.URLClassLoader.findClass(URLClassLoader.java:382)
        at java.lang.ClassLoader.loadClass(ClassLoader.java:418)
        at java.lang.ClassLoader.loadClass(ClassLoader.java:351)
        at java.lang.Class.forName0(Native Method)
        at java.lang.Class.forName(Class.java:348)
        at org.apache.spark.util.Utils$.classForName(Utils.scala:238)
        at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:806)
        at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:161)
        at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:184)
        at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:86)
        at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:920)
        at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:929)
        at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
21/05/03 16:25:37 INFO ShutdownHookManager: Shutdown hook called
21/05/03 16:25:37 INFO ShutdownHookManager: Deleting directory 
这是代码:

package org.example;

import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import static org.apache.spark.SparkContext.getOrCreate;

/**
 * Reads a tab-separated YouTube video dump, computes the average view count
 * per category, and writes the top-10 categories (ordered by average views,
 * descending) as text files under the configured output directory.
 */
public class TopViewedCategories {

    /**
     * Entry point: builds a local SparkContext, runs the aggregation, and
     * prints timing plus the number of distinct categories processed.
     *
     * @param args command-line arguments (unused)
     * @throws Exception if the input cannot be read or the output cannot be written
     */
    public static void main(String[] args) throws Exception {
        long startMillis = System.currentTimeMillis();
        System.out.println("Started Processing");

        SparkConf conf = new SparkConf()
                .setMaster("local")
                .setAppName("YouTubeDM");
        JavaSparkContext sc = new JavaSparkContext(conf);
        try {
            // Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
            sc.setLogLevel("ERROR");

            // Directory where the input files are.
            JavaRDD<String> mRDD = sc.textFile("C:\\Users\\Piyush\\Desktop\\bda\\INvideos.csv");

            JavaPairRDD<Double, String> sortedRDD = mRDD
                    .mapToPair(line -> {
                        String[] lineArr = line.split("\t");
                        // NOTE(review): assumes column 5 is the category and
                        // column 1 the view count — confirm against the CSV schema.
                        String category = lineArr[5];
                        Double views = Double.parseDouble(lineArr[1]);
                        // Seed accumulator: (sum of views, number of videos).
                        return new Tuple2<>(category, new Tuple2<>(views, 1));
                    })
                    // Sum views and counts per category, then divide for the average.
                    .reduceByKey((x, y) -> new Tuple2<>(x._1 + y._1, x._2 + y._2))
                    .mapToPair(x -> new Tuple2<>(x._1, x._2._1 / x._2._2))
                    // Swap to (average, category) so sortByKey orders by average views.
                    .mapToPair(Tuple2::swap)
                    .sortByKey(false)
                    // Cache: this RDD is evaluated twice below (count + take),
                    // so without caching the whole lineage would be recomputed.
                    .cache();

            long count = sortedRDD.count();
            List<Tuple2<Double, String>> topTenTuples = sortedRDD.take(10);
            JavaPairRDD<Double, String> topTenRdd = sc.parallelizePairs(topTenTuples);

            String output_dir = "C:output/spark/TopViewedCategories";
            // Remove the output directory if already there —
            // saveAsTextFile fails when the target directory exists.
            FileSystem fs = FileSystem.get(sc.hadoopConfiguration());
            fs.delete(new Path(output_dir), true); // true = recursive delete

            topTenRdd.saveAsTextFile(output_dir);

            long elapsed = System.currentTimeMillis() - startMillis;
            System.out.println("Done.Time taken (in seconds): " + elapsed / 1000f);
            System.out.println("Processed Records: " + count);
        } finally {
            // Always release Spark resources, even if the job throws.
            // stop() fully shuts the context down; a separate close() is redundant.
            sc.stop();
        }
    }
}

package org.example;

import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import static org.apache.spark.SparkContext.getOrCreate;

public class TopViewedCategories {
    public static void main(String[] args) throws Exception {
        long timeElapsed = System.currentTimeMillis();
        System.out.println("Started Processing");
        SparkConf conf = new SparkConf()
                .setMaster("local")
                .setAppName("YouTubeDM");
        JavaSparkContext sc = new JavaSparkContext(conf);
        // 有效日志级别包括:ALL、DEBUG、ERROR、FATAL、INFO、OFF、TRACE、WARN
        sc.setLogLevel("ERROR");
        JavaRDD<String> mRDD = sc.textFile("C:\\Users\\Piyush\\Desktop\\bda\\INvideos.csv"); // 文件所在的目录
        JavaPairRDD<Double, String> sortedRDD = mRDD
                .mapToPair(
                        line -> {
                            String[] lineArr = line.split("\t");
                            String category = lineArr[5];
                            Double views = Double.parseDouble(lineArr[1]);
                            Tuple2<Double, Integer> viewsTuple = new Tuple2<>(views, 1);
                            return new Tuple2<>(category, viewsTuple);
                        })
                .reduceByKey((x, y) -> new Tuple2<>(x._1 + y._1, x._2 + y._2))
                .mapToPair(x -> new Tuple2<>(x._1, (x._2._1 / x._2._2)))
                .mapToPair(Tuple2::swap)
                .sortByKey(false);

        long count = sortedRDD.count();
        List<Tuple2<Double, String>> topTenTuples = sortedRDD.take(10);
        JavaPairRDD<Double, String> topTenRdd = sc.parallelizePairs(topTenTuples);
        String output_dir = "C:output/spark/TopViewedCategories";
        // 如果输出目录已存在则删除
        FileSystem fs = FileSystem.get(sc.hadoopConfiguration());
        fs.delete(new Path(output_dir), true); // 删除目录,true 表示递归删除
        topTenRdd.saveAsTextFile(output_dir);
        timeElapsed = System.currentTimeMillis() - timeElapsed;
        System.out.println("Done.Time taken (in seconds): " + timeElapsed / 1000f);
        System.out.println("Processed Records: " + count);
        sc.stop();
        sc.close();
    }
}

请帮我解决它

您必须设置类名,包括包:

spark-submit --class org.example.TopViewedCategories ...

您确定使用了正确的JAR文件吗?您可以将JAR作为zip文件打开,以检查它包含哪些类。谢谢!我还有一个错误,你能帮我吗。