成品:http://www.cnblogs.com/drawwindows/p/5640606.html

初稿:
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.{Logging, SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, Row, SaveMode, _}
import com.alibaba.fastjson.{JSON, JSONObject}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

import scala.collection.mutable.ArrayBuffer

/**
 * Computes per-column statistics for a Hive table — schema, describe()
 * summaries, distinct/missing counts, quartiles and histograms, plus a
 * sample of records — and persists every artifact to HDFS as JSON under
 * /user/&lt;user&gt;/mlaas/tableStatistic/&lt;tableName&gt;, for a visualization front end.
 */
object DataFrameVisiualize extends Logging {

  /**
   * Job entry point. Iterates params("targetType"); for every element whose
   * "targetType" field equals "dataset" (case-insensitive), resolves the
   * actual table name via params(element("targetName")) and runs the
   * statistics pipeline for it.
   */
  def runforstatistic(hiveContext: HiveContext, params: JSONObject) = {
    val arr = params.getJSONArray("targetType")
    for (i <- 0 until arr.size()) {
      val obj = arr.getJSONObject(i)
      if ("dataset".equalsIgnoreCase(obj.getString("targetType"))) {
        val tableNameKey = obj.getString("targetName")
        val tableName = params.getString(tableNameKey)
        val user = params.getString("user")
        run(hiveContext, tableName, user)
      }
    }
  }

  /**
   * Runs the full statistics pipeline for one table and writes all results
   * under /user/&lt;user&gt;/mlaas/tableStatistic/&lt;tableName&gt;.
   */
  def run(hiveContext: HiveContext, tableName: String, user: String) = {
    val pathParent = s"/user/$user/mlaas/tableStatistic/$tableName"

    // 0. Table schema, saved as JSON.
    val schemadf = hiveContext.sql("desc " + tableName)
    schemadf.write.mode(SaveMode.Overwrite).format("json").save(pathParent + "/schemajson")

    // 1. Load the table; describe() yields summary rows for numeric columns.
    val df = hiveContext.sql("select * from " + tableName)
    val dfdesc = df.describe()

    // 2. Columns appearing in the describe() output are the numeric ones;
    //    everything else is treated as a string column.
    val mathColArr = dfdesc.columns.filter(!_.equalsIgnoreCase("summary"))
    val (colMin, colMax, colMean, colStddev, colMedian) = getDesfromDF(dfdesc, mathColArr)
    val allColArr = df.columns
    val strColArr = allColArr.filter(!_.equalsIgnoreCase("summary")).diff(mathColArr)

    // 3. A small sample of records for previewing.
    saveRecords(hiveContext, tableName, 100, pathParent + "/recordsjson")

    // 4. The heavier statistics: unique/missing counts, quartiles, histograms.
    val jsonobj = getAllStatistics(hiveContext, tableName, allColArr, strColArr, mathColArr, 10, colMin, colMax)
    jsonobj.put("colMin", colMin)
    jsonobj.put("colMax", colMax)
    jsonobj.put("colMean", colMean)
    jsonobj.put("colStddev", colStddev)
    jsonobj.put("colMedian", colMedian)

    // 5. Write the aggregated JSON object to a single HDFS file.
    val jsonStr = jsonobj.toString
    val conf1 = new Configuration()
    val fs = FileSystem.get(conf1)
    val hdfsOutStream = fs.create(new Path(pathParent + "/jsonObj"))
    try {
      // FIX: writeBytes() discards the high-order byte of every char and
      // corrupts non-ASCII content; encode explicitly as UTF-8 instead.
      hdfsOutStream.write(jsonStr.getBytes("UTF-8"))
      hdfsOutStream.flush()
    } finally {
      // FIX: close the stream even if a write fails (previously leaked).
      hdfsOutStream.close()
    }
    // The FileSystem handle is deliberately left open: Hadoop caches it
    // per-process and closing it here would break other users (the original
    // code had fs.close() commented out for the same reason).
  }

  /** Saves the first `num` rows of `tableName` as JSON under `filePath`. */
  def saveRecords(hiveContext: HiveContext, tableName: String, num: Int, filePath: String): Unit = {
    hiveContext.sql(s"select * from $tableName limit $num").write.mode(SaveMode.Overwrite).format("json").save(filePath)
  }

  /**
   * Collects, in as few table scans as possible, the statistics that
   * describe() does not provide:
   *  - distinct-value count and null count per column (one scan),
   *  - top-`partNum` value histograms for all string columns (one scan),
   *  - quartiles (ntile(4)) and `partNum`-bucket histograms per numeric column.
   * All per-column DataFrames are unioned and collected as one Spark job.
   *
   * @param colMin per-column minimum. NOTE(review): the map values are really
   *               the Strings returned by describe(), smuggled through type
   *               erasure — they must be read via toString.toDouble.
   * @param colMax per-column maximum; same caveat as colMin.
   * @return JSON object carrying totalrows, colUnique, colMissing,
   *         mathColMapQuartile, mathColMapHistogram and strColMapHistogram.
   */
  def getAllStatistics(hiveContext: HiveContext, tableName: String, allColArr: Array[String], strColArr: Array[String], mathColArr: Array[String], partNum: Int, colMin: java.util.HashMap[String, Double], colMax: java.util.HashMap[String, Double]): JSONObject = {
    val jsonobj = new JSONObject()

    // One scan: distinct count + null count for every column, plus total rows.
    val sb = new StringBuffer()
    sb.append("select ")
    for (col <- allColArr) {
      sb.append(s"count(distinct($col)) as unique_$col , sum(case when $col is null then 1 else 0 end) as missing_$col, ")
    }
    sb.append(s"sum(1) as totalrows from $tableName")
    val df = hiveContext.sql(sb.toString)
    val colUnique = new java.util.HashMap[String, Long]  // distinct values per column
    val colMissing = new java.util.HashMap[String, Long] // null count per column
    df.take(1).foreach { row =>
      jsonobj.put("totalrows", row.getAs[Long]("totalrows"))
      for (col <- allColArr) {
        colUnique.put(col, row.getAs[Long]("unique_" + col))
        colMissing.put(col, row.getAs[Long]("missing_" + col))
      }
    }

    val dfArr = ArrayBuffer[DataFrame]()

    // Top-partNum value histogram for every string column, fetched in a single
    // UNION ALL query ranked with ROW_NUMBER per column.
    // FIX: the original emitted malformed SQL when there were no string columns.
    if (strColArr.nonEmpty) {
      val strHistogramSql = new StringBuffer()
      strHistogramSql.append(s"""
        SELECT tta.colName, tta.value, tta.num
        FROM (
          SELECT ta.colName, ta.value, ta.num, ROW_NUMBER() OVER (PARTITION BY ta.colName ORDER BY ta.num DESC) AS row
          FROM (
      """)
      var first = true
      for (col <- strColArr) {
        if (!first) {
          strHistogramSql.append(" UNION ALL ")
        }
        first = false
        strHistogramSql.append(s"""
            SELECT 'StrHistogram_$col' AS colName, $col AS value, COUNT(1) AS num
            FROM $tableName
            GROUP BY $col """)
      }
      strHistogramSql.append(s"""
          ) ta
        ) tta
        WHERE tta.row <= $partNum
      """)
      dfArr.append(hiveContext.sql(strHistogramSql.toString))
    }

    // Quartiles and fixed-width histogram for every numeric column.
    for (col <- mathColArr) {
      val df1 = hiveContext.sql(s"select 'Quartile_$col' as colName, ntil, max($col) as num from (select $col, ntile(4) OVER (order by $col)as ntil from $tableName) tt group by ntil ")
      log.info("col is :" + col + ", min is :" + colMin.get(col) + ", max is : " + colMax.get(col))
      // The map values are Strings at runtime (type erasure), so go through
      // toString before toDouble — a direct cast raises ClassCastException.
      val min = colMin.get(col).toString.toDouble
      val max = colMax.get(col).toString.toDouble
      val df2 = getHistogramMathDF(col, hiveContext, tableName, min, max, partNum)
      dfArr.append(df1)
      dfArr.append(df2)
    }

    // Execute everything as one unioned job, then split the rows back out.
    // FIX: guard the reduce against a table exposing no columns at all.
    val allRows = if (dfArr.isEmpty) Array.empty[Row] else dfArr.reduce(_.unionAll(_)).collect()
    val (mathColMapQuartile1, mathColMapHistogram1, strColMapHistogram1) = readRows(allRows)

    // Convert the mutable buffers into arrays for JSON serialization.
    val mathColMapQuartile = new java.util.HashMap[String, Array[java.util.HashMap[String, Long]]]  // quartiles
    val mathColMapHistogram = new java.util.HashMap[String, Array[java.util.HashMap[String, Long]]] // numeric histograms
    val strColMapHistogram = new java.util.HashMap[String, Array[java.util.HashMap[String, Long]]]  // string histograms
    for (col <- strColArr) {
      strColMapHistogram.put(col, strColMapHistogram1.get(col).toArray[java.util.HashMap[String, Long]])
    }
    for (col <- mathColArr) {
      mathColMapQuartile.put(col, mathColMapQuartile1.get(col).toArray[java.util.HashMap[String, Long]])
      mathColMapHistogram.put(col, mathColMapHistogram1.get(col).toArray[java.util.HashMap[String, Long]])
    }
    jsonobj.put("mathColMapQuartile", mathColMapQuartile)
    jsonobj.put("mathColMapHistogram", mathColMapHistogram)
    jsonobj.put("strColMapHistogram", strColMapHistogram)
    jsonobj.put("colUnique", colUnique)
    jsonobj.put("colMissing", colMissing)
    jsonobj
  }

  /**
   * Splits the rows of the unioned statistics query back into three maps
   * keyed by column name: quartiles, numeric histograms, string histograms.
   * Each row's colName is "&lt;kind&gt;_&lt;column&gt;"; column 1 holds the value/bucket
   * and column 2 the count. NOTE(review): for Quartile rows column 1 (ntil)
   * is an Int; getAs[String] is an unchecked cast that only survives through
   * erasure of the HashMap key type — verify downstream consumers expect this.
   */
  def readRows(rows: Array[Row]): (java.util.HashMap[String, ArrayBuffer[java.util.HashMap[String, Long]]], java.util.HashMap[String, ArrayBuffer[java.util.HashMap[String, Long]]], java.util.HashMap[String, ArrayBuffer[java.util.HashMap[String, Long]]]) = {
    val mathColMapQuartile = new java.util.HashMap[String, ArrayBuffer[java.util.HashMap[String, Long]]]  // quartiles
    val mathColMapHistogram = new java.util.HashMap[String, ArrayBuffer[java.util.HashMap[String, Long]]] // numeric histograms
    val strColMapHistogram = new java.util.HashMap[String, ArrayBuffer[java.util.HashMap[String, Long]]]  // string histograms

    // Appends this row's (value -> count) entry to target(column), creating
    // the per-column buffer on first use. Factors out the three identical
    // accumulation branches of the original implementation.
    def accumulate(target: java.util.HashMap[String, ArrayBuffer[java.util.HashMap[String, Long]]], row: Row, colName: String): Unit = {
      val col = colName.substring(colName.indexOf('_') + 1)
      val entry = new java.util.HashMap[String, Long]()
      entry.put(row.getAs[String](1), row.getAs[Long](2))
      val bucket = target.get(col)
      if (bucket == null) {
        target.put(col, ArrayBuffer(entry))
      } else {
        bucket.append(entry)
      }
    }

    rows.foreach { row =>
      val colName = row.getAs[String]("colName")
      if (colName.startsWith("StrHistogram")) {
        accumulate(strColMapHistogram, row, colName)
      } else if (colName.startsWith("Quartile")) {
        accumulate(mathColMapQuartile, row, colName)
      } else if (colName.startsWith("MathHistogram")) {
        accumulate(mathColMapHistogram, row, colName)
      }
    }
    (mathColMapQuartile, mathColMapHistogram, strColMapHistogram)
  }

  /**
   * Builds the fixed-width histogram DataFrame for one numeric column:
   * [min, max] is split into `partNum` equal-width buckets. Bucket 1 is
   * closed on both ends so values equal to min are counted; every other
   * bucket is half-open (left exclusive, right inclusive); nulls fall into
   * bucket 0 via the CASE's ELSE branch. Each result row is
   * (colName = "MathHistogram_&lt;col&gt;", bucket number, count).
   */
  def getHistogramMathDF(col: String, hiveContext: HiveContext, tableName: String, min: Double, max: Double, partNum: Int): DataFrame = {
    val len = (max - min) / partNum
    log.info(s"len is : $len")
    val sb = new StringBuffer()
    sb.append(s"select $col, (case ")
    val firstRight = min + len
    sb.append(s" when ($col >= $min and $col <= $firstRight) then 1 ")
    for (i <- 2 until (partNum + 1)) {
      val left = min + len * (i - 1)
      val right = min + len * i
      sb.append(s" when ($col > $left and $col <= $right) then $i ")
    }
    sb.append(s" else 0 end ) as partNum from $tableName")
    sb.insert(0, s"select 'MathHistogram_$col' as colName, partNum, count(1) as num from ( ")
    sb.append(") temptableScala group by partNum")
    log.info("getHistogram is: " + sb.toString)
    hiveContext.sql(sb.toString)
  }

  /**
   * Unpacks the describe() DataFrame into five maps keyed by column name:
   * min, max, mean, stddev, and a fallback bucket labelled "median".
   * NOTE(review): any summary row other than mean/stddev/min/max lands in
   * colMedian — with Spark 1.x describe() that is the "count" row, not a
   * median; confirm which summary labels the running Spark version emits.
   *
   * NOTE(review): describe() returns its statistics as strings; getAs[Double]
   * is an unchecked cast, so the returned maps actually hold Strings at
   * runtime. Callers must read values via toString.toDouble (see
   * getAllStatistics), which is why the map values are not parsed here.
   */
  def getDesfromDF(dfdesc: DataFrame, mathColArr: Array[String]): (java.util.HashMap[String, Double], java.util.HashMap[String, Double], java.util.HashMap[String, Double], java.util.HashMap[String, Double], java.util.HashMap[String, Double]) = {
    val allRows = dfdesc.collect()
    val colMin = new java.util.HashMap[String, Double]    // minimum per column
    val colMax = new java.util.HashMap[String, Double]    // maximum per column
    val colMean = new java.util.HashMap[String, Double]   // mean per column
    val colStddev = new java.util.HashMap[String, Double] // standard deviation per column
    val colMedian = new java.util.HashMap[String, Double] // fallback bucket (see scaladoc)
    allRows.foreach { row =>
      val mapKey = row.getAs[String]("summary")
      for (col <- mathColArr) {
        if ("mean".equalsIgnoreCase(mapKey)) {
          colMean.put(col, row.getAs[Double](col))
        } else if ("stddev".equalsIgnoreCase(mapKey)) {
          colStddev.put(col, row.getAs[Double](col))
        } else if ("min".equalsIgnoreCase(mapKey)) {
          log.info("col is " + col + ", min is : " + row.getAs[Double](col))
          colMin.put(col, row.getAs[Double](col))
        } else if ("max".equalsIgnoreCase(mapKey)) {
          log.info("col is " + col + ", max is : " + row.getAs[Double](col))
          colMax.put(col, row.getAs[Double](col))
        } else {
          colMedian.put(col, row.getAs[Double](col))
        }
      }
    }
    (colMin, colMax, colMean, colStddev, colMedian)
  }
}

scala-spark练手--dataframe数据可视化初稿的更多相关文章

  1. Spark GraphX 的数据可视化

    概述 Spark GraphX 本身并不提供可视化的支持, 我们通过第三方库 GraphStream 和 Breeze 来实现这一目标 详细 代码下载:http://www.demodashi.com ...

  2. 大数据技术之_27_电商平台数据分析项目_02_预备知识 + Scala + Spark Core + Spark SQL + Spark Streaming + Java 对象池

    第0章 预备知识0.1 Scala0.1.1 Scala 操作符0.1.2 拉链操作0.2 Spark Core0.2.1 Spark RDD 持久化0.2.2 Spark 共享变量0.3 Spark ...

  3. 练手mysqlbinlog日志恢复数据(centos6.5 64,mysql5.1)

    练手mysql bin log日志相关 系统是centos 6.5 64 阿里云的服务器 mysql版本5.1 1 如何开启bin-log日志? vi /etc/my.cnf [mysqld] log ...

  4. spark 将dataframe数据写入Hive分区表

    从spark1.2 到spark1.3,spark SQL中的SchemaRDD变为了DataFrame,DataFrame相对于SchemaRDD有了较大改变,同时提供了更多好用且方便的API.Da ...

  5. python实现列表页数据的批量抓取练手练手的

    python实现列表页数据的批量抓取,练手的,下回带分页的 #!/usr/bin/env python # coding=utf-8 import requests from bs4 import B ...

  6. Python--matplotlib 绘图可视化练手--折线图/条形图

    最近学习matplotlib绘图可视化,感觉知识点比较多,边学习边记录. 对于数据可视化,个人建议Jupyter Notebook. 1.首先导包,设置环境 import pandas as pd i ...

  7. Spark入门之DataFrame/DataSet

    目录 Part I. Gentle Overview of Big Data and Spark Overview 1.基本架构 2.基本概念 3.例子(可跳过) Spark工具箱 1.Dataset ...

  8. 使用bokeh-scala进行数据可视化

    目录 前言 bokeh简介及胡扯 bokeh-scala基本代码 我的封装 总结 一.前言        最近在使用spark集群以及geotrellis框架(相关文章见http://www.cnbl ...

  9. 大数据基础知识问答----spark篇,大数据生态圈

    Spark相关知识点 1.Spark基础知识 1.Spark是什么? UCBerkeley AMPlab所开源的类HadoopMapReduce的通用的并行计算框架 dfsSpark基于mapredu ...

随机推荐

  1. ASP.NET MVC概述

    原文:http://www.asp.net/mvc/tutorials/older-versions/overview/asp-net-mvc-overview 这篇文章帮助你了解关于ASP.NET ...

  2. asp.net <% %>,<%# %>,<%= %>,<%$ %>区别大集合

    前台页面 <div><%--可以执行服务器代码,相当于在后台写代码,Render%><%=取后台变量或方法值,只能绑定客户端控件,绑定服务器控件时后来必须调用databi ...

  3. python mysqldb连接数据库

    今天无事想弄下python做个gui开发,最近发布的是python 3k,用到了数据库,通过搜索发现有一个mysqldb这样的控件,可以使用,就去官方看了下结果,没有2.6以上的版本 没办法就下了一个 ...

  4. [PHP]MemCached高级缓存

    Memcache Win32 的安装下载:Memcache Win32 [www.php100.com]   [www.jehiah.cz/projects/memcached-win32/] 1.解 ...

  5. 即时Web通信总结

    即时Web通信在一些对数据实时性要求特别严格的应用中十分重要,如监控系统.报价系统.股票交易系统和即时在线聊天应用等,由于http协议设计当初是为了服务器端响应客户端的请求而设计的,只能在客户端主动发 ...

  6. Sublime Text3注册码

    这是一个注册码-– BEGIN LICENSE -– Michael Barnes Single User License EA7E-821385 8A353C41 872A0D5C DF9B2950 ...

  7. codeforces Codeforces Round #345 (Div. 1) C. Table Compression 排序+并查集

    C. Table Compression Little Petya is now fond of data compression algorithms. He has already studied ...

  8. vm安装ubuntu桥接模式无法联网

    桥接模式,就是和主机不同的ip,其他都是一样的. 编辑网络连接 查看自己机子的ip ipconfig 一般情况下是 ip 192.168.1.XXX 子网掩码 255.255.255.0 网关 192 ...

  9. SqlServer2008 设置修改表设计限制

    我记起来了 SQL Server 2008 对操作的安全性进行了限制 你要在Management Studio菜单栏 -工具-选项,弹出选项窗口:把 “阻止保存要求重新创建表的更改” 请的勾去掉.

  10. Automotive Security的一些资料和心得(2):Cryptography

    1. Security Goal - Confidentiality - Integrity - Availability - Authenticity - Non-repudiation - Aut ...