scala - object sql is not a member of package org.apache.spark

Tags: scala, apache-spark, hdfs, spark-streaming

When I try to build a Maven project with the Scala nature in the Eclipse IDE, I get the following error.

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.SaveMode
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext

object MyApp {
  def main(args: Array[String]) {

    // Read from the Kafka topic
    val conf = new SparkConf().setMaster("local[*]").setAppName("Spark-Kafka-Integration")
    val sc = new SparkContext(conf)
    val ssc = new StreamingContext(sc, Seconds(5))
    val kafkaStream = KafkaUtils.createStream(ssc, "hostname:2181", "spark-streaming-consumer-group", Map("test4" -> 1))
    val sqlContext = new org.apache.spark.sql.SQLContext(sc)
    import sqlContext.implicits._

    kafkaStream.foreachRDD(rdd => {
      rdd.foreach(println)

      if (rdd.count() > 0) {
        // rdd.toDF("value").coalesce(1).write.mode(SaveMode.Append).text("file:///D:/my/")
        // rdd.toDF("value").coalesce(1).write.mode(SaveMode.Append).text("file://user/cloudera/testdata")
        rdd.toDF("value").coalesce(1).write.mode(SaveMode.Append).text("hdfs://hostname:8020/user/cloudera/testdata")
        // rdd.saveAsTextFile("C:/data/spark/")
      }
    })

    // Start the streaming job and block until it is stopped.
    ssc.start()
    ssc.awaitTermination()
  }
}
object sql is not a member of package org.apache.spark

We tried adding this dependency in pom.xml:


<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-sql_2.11</artifactId>
    <version>1.6.0</version>
</dependency>
Complete pom.xml:

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.cyb</groupId>
    <artifactId>First</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <packaging>jar</packaging>

    <name>First</name>
    <url>http://maven.apache.org</url>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    </properties>

    <dependencies>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming_2.11</artifactId>
            <version>1.6.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-streaming-kafka_2.11</artifactId>
            <version>1.6.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>0.10.2.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.11</artifactId>
            <version>1.6.0</version>
        </dependency>

        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>3.8.1</version>
            <scope>test</scope>
        </dependency>
    </dependencies>
</project>
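
A quick way to confirm that spark-sql_2.11 actually ends up on the compile classpath (after a mvn clean install, or Maven > Update Project in Eclipse) is to reference a spark-sql class directly. The sketch below is only an illustrative check; if it compiles, the reported error is resolved.

object ClasspathCheck {
  def main(args: Array[String]): Unit = {
    // This line compiles only if spark-sql_2.11 is on the compile classpath.
    println(classOf[org.apache.spark.sql.SQLContext].getName)
  }
}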

We want to write streaming data from a Kafka topic to HDFS storage.


Any help is much appreciated.

You need to import the Spark SQL library to be able to use the Spark SQL functions. Try these imports:

import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.SQLImplicits
import org.apache.spark.sql.SQLContext
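
For reference, here is a minimal, self-contained sketch of how SQLContext and its implicits fit together once spark-sql_2.11 is on the classpath. The HDFS path is a placeholder, and note that SaveMode also lives in org.apache.spark.sql, so it needs an import of its own.

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{SQLContext, SaveMode}

object SqlWriteSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local[*]").setAppName("sql-write-sketch"))
    val sqlContext = new SQLContext(sc)
    import sqlContext.implicits._ // brings toDF into scope for RDDs of supported types

    // An RDD[String] converts to a single-column DataFrame named "value".
    val df = sc.parallelize(Seq("a", "b", "c")).toDF("value")
    df.coalesce(1).write.mode(SaveMode.Append).text("hdfs://hostname:8020/user/cloudera/testdata")

    sc.stop()
  }
}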

Which other dependencies and versions are you using? After adding the spark-sql_2.11 dependency, did you run mvn clean install?

Yes, I ran mvn clean install after adding the dependency. I am using these dependency versions: org.apache.spark spark-sql_2.11 1.6.0.

Can you see the Spark SQL jar among the project's referenced libraries?
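
If the DataFrame route keeps fighting the classpath, it is worth noting that the stated goal, writing each Kafka batch to HDFS, can also be reached with the plain RDD API and no spark-sql dependency at all. A rough sketch under the same assumptions as the question (ZooKeeper host, topic, and HDFS path are placeholders):

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka.KafkaUtils

object KafkaToHdfsSketch {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("Kafka-To-HDFS")
    val ssc = new StreamingContext(conf, Seconds(5))

    // Receiver-based stream of (key, message) pairs from the "test4" topic.
    val kafkaStream = KafkaUtils.createStream(ssc, "hostname:2181", "spark-streaming-consumer-group", Map("test4" -> 1))

    kafkaStream.foreachRDD { (rdd, time) =>
      if (!rdd.isEmpty()) {
        // Write every non-empty batch to its own HDFS directory, keyed by batch time.
        rdd.map(_._2).saveAsTextFile(s"hdfs://hostname:8020/user/cloudera/testdata/batch-${time.milliseconds}")
      }
    }

    ssc.start()
    ssc.awaitTermination()
  }
}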