Scala 添加父列名作为前缀以避免歧义
检查下面的代码。如果存在重复的密钥,则生成具有模糊性的数据帧。我们应该如何修改代码以添加父列名作为前缀 添加了另一个包含json数据的列Scala 添加父列名作为前缀以避免歧义,scala,apache-spark,apache-spark-sql,spark-streaming,Scala,Apache Spark,Apache Spark Sql,Spark Streaming,检查下面的代码。如果存在重复的密钥,则生成具有模糊性的数据帧。我们应该如何修改代码以添加父列名作为前缀 添加了另一个包含json数据的列 scala> val df = Seq( (77, "email1", """{"key1":38,"key3":39}""","""{"name":"aaa"
scala> val df = Seq(
(77, "email1", """{"key1":38,"key3":39}""","""{"name":"aaa","age":10}"""),
(78, "email2", """{"key1":38,"key4":39}""","""{"name":"bbb","age":20}"""),
(178, "email21", """{"key1":"when string","key4":36, "key6":"test", "key10":false }""","""{"name":"ccc","age":30}"""),
(179, "email8", """{"sub1":"qwerty","sub2":["42"]}""","""{"name":"ddd","age":40}"""),
(180, "email8", """{"sub1":"qwerty","sub2":["42", "56", "test"]}""","""{"name":"eee","age":50}""")
).toDF("id", "name", "colJson","personInfo")
创建了fromJson隐式函数,您可以将多个列传递给该函数&它将解析并提取json中的列
scala> :paste
// Entering paste mode (ctrl-D to finish)
import org.apache.spark.sql.{Column, DataFrame, Row}
import org.apache.spark.sql.functions.from_json
implicit class DFHelper(inDF: DataFrame) {
import inDF.sparkSession.implicits._
def fromJson(columns:Column*):DataFrame = {
val schemas = columns.map(column => (column, inDF.sparkSession.read.json(inDF.select(column).as[String]).schema))
val mdf = schemas.foldLeft(inDF)((df,schema) => {
df.withColumn(schema._1.toString(),from_json(schema._1,schema._2))
})
mdf.selectExpr(mdf.schema.map(c => if(c.dataType.typeName =="struct") s"${c.name}.*" else c.name):_*)
}
}
// Exiting paste mode, now interpreting.
import org.apache.spark.sql.{Column, DataFrame, Row}
import org.apache.spark.sql.functions.from_json
defined class DFHelper
试试这个-
df.show(假)
df.printSchema()
/**
* +---+-------+---------------------------------------------------------------+-----------------------+
*| id | name | colJson | personInfo|
* +---+-------+---------------------------------------------------------------+-----------------------+
*| 77 | email1 |{“key1”:38,“key3”:39}{“姓名”:“aaa”,“年龄”:10}|
*| 78 | email2 |{“key1”:38,“key4”:39}{“name”:“bbb”,“age”:20}|
*| 178 | email21 |{“key1”:“when string”,“key4”:36,“key6”:“test”,“key10”:false}{“name”:“ccc”,“age”:30}|
*| 179 | email8 |{“sub1”:“qwerty”,“sub2”:[“42”]}{“name”:“ddd”,“age”:40}|
*| 180 | email8 |{“sub1”:“qwerty”,“sub2”:[“42”,“56”,“test”]}{“name”:“eee”,“age”:50}|
* +---+-------+---------------------------------------------------------------+-----------------------+
*
*根
*|--id:integer(nullable=false)
*|--name:string(nullable=true)
*|--colJson:string(nullable=true)
*|--personInfo:string(nullable=true)
*
*@param inDF
*/
隐式类DFHelper(inDF:DataFrame){
导入inDF.sparkSession.implicits_
def fromJson(列:列*):数据帧={
val schemas=columns.map(column=>(column,inDF.sparkSession.read.json(inDF.select(column.as[String]).schema))
val mdf=schemas.foldLeft(inDF)((df,schema)=>{
df.withColumn(schema._1.toString(),来自_json(schema._1,schema._2))
})
mdf/.selectExpr(mdf.schema.map(c=>if(c.dataType.typeName==“struct”)s“${c.name}.*”else c.name):*)
}
}
val p=df.fromJson($“colJson”,$“personInfo”)
p、 显示(假)
p、 printSchema()
/**
* +---+-------+---------------------------------+----------+
*| id | name | colJson | personInfo|
* +---+-------+---------------------------------+----------+
*| 77 |电子邮件1 |[38,39,40]|[10,aaa]|
*| 78 |电子邮件2 |[38、、39、、、]|[20,bbb]|
*| 178 | email21 |[当字符串为false时,36,test,,]|[30,ccc]|
*| 179 |电子邮件8 |[,,,,qwerty[42]]|[40,ddd]|
*| 180 | email 8 |[,,,,qwerty,[42,56,测试]]|[50,eee]|
* +---+-------+---------------------------------+----------+
*
*根
*|--id:integer(nullable=false)
*|--name:string(nullable=true)
*|--colJson:struct(nullable=true)
*| |--key1:string(nullable=true)
*| |--key10:布尔值(nullable=true)
*| |--key3:long(nullable=true)
*| |--key4:long(nullable=true)
*| |--key6:string(nullable=true)
*| |--sub1:string(nullable=true)
*| |--sub2:array(nullable=true)
*| | |--元素:字符串(containsnall=true)
*|--personInfo:struct(nullable=true)
*| |--age:long(nullable=true)
*| |--name:string(nullable=true)
*/
//使用获取结构的列。
p、 选择($“colJson.key1”,$“personInfo.age”).show(false)
/**
* +-----------+---+
*|关键1 |年龄|
* +-----------+---+
* |38 |10 |
* |38 |20 |
*|当字符串| 30|
*|空| 40|
*|空| 50|
* +-----------+---+
*/
试试这个-
df.show(假)
df.printSchema()
/**
* +---+-------+---------------------------------------------------------------+-----------------------+
*| id | name | colJson | personInfo|
* +---+-------+---------------------------------------------------------------+-----------------------+
*| 77 | email1 |{“key1”:38,“key3”:39}{“姓名”:“aaa”,“年龄”:10}|
*| 78 | email2 |{“key1”:38,“key4”:39}{“name”:“bbb”,“age”:20}|
*| 178 | email21 |{“key1”:“when string”,“key4”:36,“key6”:“test”,“key10”:false}{“name”:“ccc”,“age”:30}|
*| 179 | email8 |{“sub1”:“qwerty”,“sub2”:[“42”]}{“name”:“ddd”,“age”:40}|
*| 180 | email8 |{“sub1”:“qwerty”,“sub2”:[“42”,“56”,“test”]}{“name”:“eee”,“age”:50}|
* +---+-------+---------------------------------------------------------------+-----------------------+
*
*根
*|--id:integer(nullable=false)
*|--name:string(nullable=true)
*|--colJson:string(nullable=true)
*|--personInfo:string(nullable=true)
*
*@param inDF
*/
隐式类DFHelper(inDF:DataFrame){
导入inDF.sparkSession.implicits_
def fromJson(列:列*):数据帧={
val schemas=columns.map(column=>(column,inDF.sparkSession.read.json(inDF.select(column.as[String]).schema))
val mdf=schemas.foldLeft(inDF)((df,schema)=>{
df.withColumn(schema._1.toString(),来自_json(schema._1,schema._2))
})
mdf/.selectExpr(mdf.schema.map(c=>if(c.dataType.typeName==“struct”)s“${c.name}.*”else c.name):*)
}
}
val p=df.fromJson($“colJson”,$“personInfo”)
p、 显示(假)
p、 printSchema()
/**
* +---+-------+-------
scala> df.show(false)
+---+-------+---------------------------------------------------------------+-----------------------+
|id |name |colJson |personInfo |
+---+-------+---------------------------------------------------------------+-----------------------+
|77 |email1 |{"key1":38,"key3":39} |{"name":"aaa","age":10}|
|78 |email2 |{"key1":38,"key4":39} |{"name":"bbb","age":20}|
|178|email21|{"key1":"when string","key4":36, "key6":"test", "key10":false }|{"name":"ccc","age":30}|
|179|email8 |{"sub1":"qwerty","sub2":["42"]} |{"name":"ddd","age":40}|
|180|email8 |{"sub1":"qwerty","sub2":["42", "56", "test"]} |{"name":"eee","age":50}|
+---+-------+---------------------------------------------------------------+-----------------------+
scala> :paste
// Entering paste mode (ctrl-D to finish)
import org.apache.spark.sql.{Column, DataFrame, Row}
import org.apache.spark.sql.functions.from_json
implicit class DFHelper(inDF: DataFrame) {
import inDF.sparkSession.implicits._
def fromJson(columns:Column*):DataFrame = {
val schemas = columns.map(column => (column, inDF.sparkSession.read.json(inDF.select(column).as[String]).schema))
val mdf = schemas.foldLeft(inDF)((df,schema) => {
df.withColumn(schema._1.toString(),from_json(schema._1,schema._2))
})
mdf.selectExpr(mdf.schema.map(c => if(c.dataType.typeName =="struct") s"${c.name}.*" else c.name):_*)
}
}
// Exiting paste mode, now interpreting.
import org.apache.spark.sql.{Column, DataFrame, Row}
import org.apache.spark.sql.functions.from_json
defined class DFHelper
scala> df.fromJson($"colJson",$"personInfo").show(false)
+---+-------+-----------+-----+----+----+----+------+--------------+---+----+
|id |name |key1 |key10|key3|key4|key6|sub1 |sub2 |age|name|
+---+-------+-----------+-----+----+----+----+------+--------------+---+----+
|77 |email1 |38 |null |39 |null|null|null |null |10 |aaa |
|78 |email2 |38 |null |null|39 |null|null |null |20 |bbb |
|178|email21|when string|false|null|36 |test|null |null |30 |ccc |
|179|email8 |null |null |null|null|null|qwerty|[42] |40 |ddd |
|180|email8 |null |null |null|null|null|qwerty|[42, 56, test]|50 |eee |
+---+-------+-----------+-----+----+----+----+------+--------------+---+----+
scala> df.fromJson($"colJson",$"personInfo").printSchema()
root
|-- id: integer (nullable = false)
|-- name: string (nullable = true)
|-- key1: string (nullable = true)
|-- key10: boolean (nullable = true)
|-- key3: long (nullable = true)
|-- key4: long (nullable = true)
|-- key6: string (nullable = true)
|-- sub1: string (nullable = true)
|-- sub2: array (nullable = true)
| |-- element: string (containsNull = true)
|-- age: long (nullable = true)
|-- name: string (nullable = true)