1 APIs involved

  BaseRelation: represents a collection of tuples with a known schema; this is where the data source's schema is defined.
  TableScan: provides a way to scan the data and produce an RDD[Row] from it.
  RelationProvider: takes a list of parameters and returns a BaseRelation.
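
  To show roughly how these traits from org.apache.spark.sql.sources fit together in Spark 2.x, here is a minimal sketch (the class names MinimalSource/MinimalRelation and the single "line" column are only for illustration; the real implementation is developed in the next section):

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.sql.sources.{BaseRelation, RelationProvider, TableScan}
import org.apache.spark.sql.types.{StringType, StructField, StructType}

// The provider wires the options (here just "path") to a relation;
// the relation declares a schema and knows how to produce rows.
class MinimalSource extends RelationProvider {
  override def createRelation(sqlContext: SQLContext,
                              parameters: Map[String, String]): BaseRelation =
    new MinimalRelation(sqlContext, parameters("path"))
}

class MinimalRelation(override val sqlContext: SQLContext, path: String)
  extends BaseRelation with TableScan {

  // BaseRelation: the known schema of the tuples
  override def schema: StructType =
    StructType(StructField("line", StringType, nullable = true) :: Nil)

  // TableScan: produce an RDD[Row] from the underlying data
  override def buildScan(): RDD[Row] =
    sqlContext.sparkContext.textFile(path).map(Row(_))
}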

  

2 Code implementation

  Define the relation

package cn.zj.spark.sql.datasource

import org.apache.hadoop.fs.Path
import org.apache.spark.sql.{DataFrame, SQLContext, SaveMode}
import org.apache.spark.sql.sources.{BaseRelation, CreatableRelationProvider, RelationProvider, SchemaRelationProvider}
import org.apache.spark.sql.types.StructType

/**
 * Created by rana on 29/9/16.
 */
class DefaultSource extends RelationProvider with SchemaRelationProvider with CreatableRelationProvider {

  // Read path without a user-supplied schema
  override def createRelation(sqlContext: SQLContext, parameters: Map[String, String]): BaseRelation = {
    createRelation(sqlContext, parameters, null)
  }

  // Read path with an optional user-supplied schema
  override def createRelation(sqlContext: SQLContext, parameters: Map[String, String], schema: StructType): BaseRelation = {
    val path = parameters.get("path")
    path match {
      case Some(p) => new CustomDatasourceRelation(sqlContext, p, schema)
      case _ => throw new IllegalArgumentException("Path is required for custom-datasource format!!")
    }
  }

  // Write path (CreatableRelationProvider)
  override def createRelation(sqlContext: SQLContext, mode: SaveMode, parameters: Map[String, String],
                              data: DataFrame): BaseRelation = {
    val path = parameters.getOrElse("path", "./output/") // could throw an error instead; kept simple for this tutorial
    val fsPath = new Path(path)
    val fs = fsPath.getFileSystem(sqlContext.sparkContext.hadoopConfiguration)

    mode match {
      case SaveMode.Append => sys.error("Append mode is not supported by " + this.getClass.getCanonicalName)
      case SaveMode.Overwrite => fs.delete(fsPath, true)
      case SaveMode.ErrorIfExists => sys.error("Given path: " + path + " already exists!!")
      case SaveMode.Ignore => sys.exit()
    }

    val formatName = parameters.getOrElse("format", "customFormat")
    formatName match {
      case "customFormat" => saveAsCustomFormat(data, path, mode)
      case "json" => saveAsJson(data, path, mode)
      case _ => throw new IllegalArgumentException(formatName + " is not supported!!!")
    }
    createRelation(sqlContext, parameters, data.schema)
  }

  private def saveAsJson(data: DataFrame, path: String, mode: SaveMode): Unit = {
    /**
     * Here the DataFrame's own API is used to store the data as JSON.
     * You can use your own APIs and formats for saving.
     */
    data.write.mode(mode).json(path)
  }

  private def saveAsCustomFormat(data: DataFrame, path: String, mode: SaveMode): Unit = {
    /**
     * Here the data is saved as a plain text file whose values are separated by "|".
     * You can store it any way you like; there is no restriction.
     */
    val customFormatRDD = data.rdd.map(row => {
      row.toSeq.map(value => value.toString).mkString("|")
    })
    customFormatRDD.saveAsTextFile(path)
  }
}
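
  For completeness, here is a hedged sketch of how the write path above could be exercised; the demo DataFrame, its values, and the output path /tmp/custom-datasource-output are all made up for illustration:

import org.apache.spark.sql.{SaveMode, SparkSession}

object WritePathDemo extends App {
  val spark = SparkSession.builder().appName("write-path-demo").master("local").getOrCreate()
  import spark.implicits._

  // A tiny DataFrame just for the demo; the rows are illustrative.
  val df = Seq((10001, "John Doe", "Male", 10000L, 4000L))
    .toDF("id", "name", "gender", "salary", "expenses")

  df.write
    .format("cn.zj.spark.sql.datasource")  // resolves to the DefaultSource above
    .option("format", "customFormat")      // or "json"
    .mode(SaveMode.Overwrite)              // Overwrite deletes the target path first
    .save("/tmp/custom-datasource-output") // hypothetical output path
}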

  Define the schema and the data-reading code

package cn.zj.spark.sql.datasource

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._

/**
 * Created by rana on 29/9/16.
 */
class CustomDatasourceRelation(override val sqlContext: SQLContext, path: String, userSchema: StructType)
  extends BaseRelation with TableScan with PrunedScan with PrunedFilteredScan with Serializable {

  override def schema: StructType = {
    if (userSchema != null) {
      userSchema
    } else {
      StructType(
        StructField("id", IntegerType, false) ::
        StructField("name", StringType, true) ::
        StructField("gender", StringType, true) ::
        StructField("salary", LongType, true) ::
        StructField("expenses", LongType, true) :: Nil
      )
    }
  }

  override def buildScan(): RDD[Row] = {
    println("TableScan: buildScan called...")
    val schemaFields = schema.fields
    // Read the files' content
    val rdd = sqlContext.sparkContext.wholeTextFiles(path).map(f => f._2)
    val rows = rdd.map(fileContent => {
      val lines = fileContent.split("\n")
      val data = lines.map(line => line.split(",").map(word => word.trim).toSeq)
      val tmp = data.map(words => words.zipWithIndex.map {
        case (value, index) =>
          val colName = schemaFields(index).name
          Util.castTo(if (colName.equalsIgnoreCase("gender")) {
            if (value.toInt == 1) "Male" else "Female"
          } else value,
          schemaFields(index).dataType)
      })
      tmp.map(s => Row.fromSeq(s))
    })
    rows.flatMap(e => e)
  }

  override def buildScan(requiredColumns: Array[String]): RDD[Row] = {
    println("PrunedScan: buildScan called...")
    val schemaFields = schema.fields
    // Read the files' content
    val rdd = sqlContext.sparkContext.wholeTextFiles(path).map(f => f._2)
    val rows = rdd.map(fileContent => {
      val lines = fileContent.split("\n")
      val data = lines.map(line => line.split(",").map(word => word.trim).toSeq)
      val tmp = data.map(words => words.zipWithIndex.map {
        case (value, index) =>
          val colName = schemaFields(index).name
          val castedValue = Util.castTo(if (colName.equalsIgnoreCase("gender")) {
            if (value.toInt == 1) "Male" else "Female"
          } else value,
          schemaFields(index).dataType)
          // Keep only the columns Spark asked for
          if (requiredColumns.contains(colName)) Some(castedValue) else None
      })
      tmp.map(s => Row.fromSeq(s.filter(_.isDefined).map(value => value.get)))
    })
    rows.flatMap(e => e)
  }

  override def buildScan(requiredColumns: Array[String], filters: Array[Filter]): RDD[Row] = {
    println("PrunedFilteredScan: buildScan called...")
    println("Filters: ")
    filters.foreach(f => println(f.toString))

    var customFilters: Map[String, List[CustomFilter]] = Map[String, List[CustomFilter]]()
    filters.foreach(f => f match {
      case EqualTo(attr, value) =>
        println("EqualTo filter is used!! " + "Attribute: " + attr + " Value: " + value)
        /**
         * Since only one filter is implemented for now, keeping a list per attribute may not look necessary,
         * because an attribute can only be equal to one value at a time. The list becomes useful once several
         * filters apply to the same attribute, for example:
         *   attr > 5 && attr < 10
         * You can implement more filters in this code and try them out; only the EqualTo filter is implemented
         * here to illustrate the concept.
         */
        customFilters = customFilters ++ Map(attr -> {
          customFilters.getOrElse(attr, List[CustomFilter]()) :+ new CustomFilter(attr, value, "equalTo")
        })
      case _ => println("filter: " + f.toString + " is not implemented by us!!")
    })

    val schemaFields = schema.fields
    // Read the files' content
    val rdd = sqlContext.sparkContext.wholeTextFiles(path).map(f => f._2)
    val rows = rdd.map(file => {
      val lines = file.split("\n")
      val data = lines.map(line => line.split(",").map(word => word.trim).toSeq)

      // Drop rows that do not satisfy the pushed-down filters
      val filteredData = data.map(s => if (customFilters.nonEmpty) {
        var includeInResultSet = true
        s.zipWithIndex.foreach {
          case (value, index) =>
            val attr = schemaFields(index).name
            val filtersList = customFilters.getOrElse(attr, List())
            if (filtersList.nonEmpty) {
              if (!CustomFilter.applyFilters(filtersList, value, schema)) {
                includeInResultSet = false
              }
            }
        }
        if (includeInResultSet) s else Seq()
      } else s)

      val tmp = filteredData.filter(_.nonEmpty).map(s => s.zipWithIndex.map {
        case (value, index) =>
          val colName = schemaFields(index).name
          val castedValue = Util.castTo(if (colName.equalsIgnoreCase("gender")) {
            if (value.toInt == 1) "Male" else "Female"
          } else value,
          schemaFields(index).dataType)
          if (requiredColumns.contains(colName)) Some(castedValue) else None
      })
      tmp.map(s => Row.fromSeq(s.filter(_.isDefined).map(value => value.get)))
    })
    rows.flatMap(e => e)
  }
}
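
  The code above references a CustomFilter class and a CustomFilter.applyFilters helper that are not shown in this post (they are part of the full repository linked at the end). A minimal sketch consistent with how they are used above could look like the following; the field names, the string comparison, and the restriction to the "equalTo" case are assumptions:

package cn.zj.spark.sql.datasource

import org.apache.spark.sql.types.StructType

// Assumed shape: attribute name, the value to compare against, and the filter kind (e.g. "equalTo").
case class CustomFilter(attr: String, value: Any, filter: String)

object CustomFilter {
  // Returns true when the raw string value satisfies every filter registered for this attribute.
  // Only the "equalTo" kind is handled, matching the single filter implemented above; the schema
  // parameter is kept so a fuller version could cast the value before comparing.
  def applyFilters(filters: List[CustomFilter], value: String, schema: StructType): Boolean = {
    filters.forall {
      case CustomFilter(_, expected, "equalTo") => value == expected.toString
      case _ => true // unimplemented filter kinds do not exclude the row
    }
  }
}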

  The type-casting utility class

package cn.zj.spark.sql.datasource

import org.apache.spark.sql.types.{DataType, IntegerType, LongType, StringType}

/**
* Created by rana on 30/9/16.
*/
object Util {
def castTo(value : String, dataType : DataType) = {
dataType match {
case _ : IntegerType => value.toInt
case _ : LongType => value.toLong
case _ : StringType => value
}
}
}
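
  Note that castTo will throw a scala.MatchError for any DataType other than the three handled above. A slightly more defensive variant (an optional tweak, not part of the original post; the name SafeUtil is made up) could fall back to the raw string:

import org.apache.spark.sql.types.{DataType, IntegerType, LongType, StringType}

object SafeUtil {
  def castTo(value: String, dataType: DataType): Any = dataType match {
    case _: IntegerType => value.toInt
    case _: LongType    => value.toLong
    case _: StringType  => value
    case _              => value // fallback: keep the raw string for unsupported types
  }
}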

 3 Dependencies in the pom file

  

 <properties>
    <maven.compiler.source>1.8</maven.compiler.source>
    <maven.compiler.target>1.8</maven.compiler.target>
    <scala.version>2.11.8</scala.version>
    <spark.version>2.2.0</spark.version>
    <!--<hadoop.version>2.6.0-cdh5.7.0</hadoop.version>-->
    <!--<hbase.version>1.2.0-cdh5.7.0</hbase.version>-->
    <encoding>UTF-8</encoding>
</properties>

<dependencies>
    <!-- Spark Core dependency -->
    <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-core_2.11</artifactId>
        <version>${spark.version}</version>
    </dependency>
    <!-- Spark SQL dependency -->
    <!-- https://mvnrepository.com/artifact/org.apache.spark/spark-sql -->
    <dependency>
        <groupId>org.apache.spark</groupId>
        <artifactId>spark-sql_2.11</artifactId>
        <version>${spark.version}</version>
    </dependency>
</dependencies>

4 Test code and test data

package cn.zj.spark.sql.datasource

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

/**
 * Created by rana on 29/9/16.
 */
object app extends App {
  println("Application started...")

  val conf = new SparkConf().setAppName("spark-custom-datasource")
  val spark = SparkSession.builder().config(conf).master("local").getOrCreate()

  val df = spark.sqlContext.read.format("cn.zj.spark.sql.datasource").load("1229practice/data/")
  df.createOrReplaceTempView("test")
  spark.sql("select * from test where salary = 50000").show()

  println("Application Ended...")
}
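
  A couple of extra queries (illustrative, not in the original post) are handy for watching the println traces above; they would go inside the app object after the temp view is created. Because the relation mixes in all three scan traits, Spark's planner generally favors the PrunedFilteredScan overload and passes it the pruned column list plus any pushed-down filters:

// Column pruning: only `name` and `salary` are requested
spark.sql("select name, salary from test").show()

// Column pruning plus an EqualTo filter that Spark pushes down to the source
spark.sql("select name, salary from test where name = 'Bob Hayden'").show()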

  

Data

  

10002, Alice Heady, 0, 20000, 8000
10003, Jenny Brown, 0, 30000, 120000
10004, Bob Hayden, 1, 40000, 16000
10005, Cindy Heady, 0, 50000, 20000
10006, Doug Brown, 1, 60000, 24000
10007, Carolina Hayden, 0, 70000, 280000

  

Reference: http://sparkdatasourceapi.blogspot.com/2016/10/spark-data-source-api-write-custom.html

The complete code is available at git@github.com:ZhangJin1988/spark-extend-dataSource.git
