spark_learn
package chapter03

import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.{SparkConf, SparkContext}

/**
* Created by chenzechao on 2017/12/21.
*/

/**
spark-shell \
--master yarn-client \
--driver-memory 1G \
--driver-cores 1 \
--queue root.queue_0101_04 \
--executor-memory 2G \
--num-executors 2 \
--conf spark.executor.cores=1 \
--name 'tmp_abc_test' \
--conf spark.yarn.executor.memoryOverhead=4096 \
--conf spark.driver.maxResultSize=8G \
--conf spark.sql.hive.metastore.version=1.2.1 \
--conf spark.sql.shuffle.partitions=150
*/

object document {
// 0. Get the flag parameter
// 0. Set up the environment
val conf = new SparkConf().setAppName("tianchi").setMaster("local[*]")
val sc = new SparkContext(conf)
val sqlContext = new org.apache.spark.sql.SQLContext(sc)
val hiveContext = new HiveContext(sc)

val jsonFile = "file:///tmp/upload/data/json_file"
val jsonFile_hdfs = "/tmp/ccc/tmpc/json_file"
// Run SQL (Hive tables have to be queried through the HiveContext, not the plain SQLContext)
val df1 = hiveContext.sql("select * from sx_360_safe.sub_ladm_exc_app_s16_all_for_double").limit(200).cache()
df1.count()

// Print the schema in a tree format
df1.printSchema()

// Select only the "gu_flag" column
df1.select("gu_flag").show() // Select everybody, but increment the age by 1
df1.select(df1("empno"),df1("age"),df1("age") + 1 ).show // Select emp age older than 21
df1.filter(df1("age") > 21).select(df1("empno"),df1("age")).show() // Count emp by age
df1.groupBy(df1("age")).count().sort(df1("age")).show()
val gb = df1.groupBy(df1("age")).count()
gb.sort(gb("count")).show()
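
// A minimal sketch (not in the original notes) of the same kind of aggregation written with
// helpers from org.apache.spark.sql.functions; assumes the "age" column of df1 is numeric.
import org.apache.spark.sql.functions.{avg, count}
df1.groupBy(df1("age")).agg(count(df1("empno")).alias("cnt"), avg(df1("age")).alias("avg_age")).show()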
df1.write.mode("Overwrite").format("json").save(jsonFile_hdfs)
df1.write.mode("Append").format("json").save(jsonFile_hdfs)
df1.select(df1("empno"), df1("gu_flag")).write.mode("Overwrite").format("parquet").saveAsTable("sx_360_safe.tmp_czc_20180323_04") // this is used to implicitly convert an RDD to a DataFrame.
// This is used to implicitly convert an RDD to a DataFrame.
import sqlContext.implicits._

val df2 = sqlContext.read.json(jsonFile)

// Encoders for most common types are automatically provided by importing sqlContext.implicits._
val ds1 = Seq(1, 2, 3).toDS()
ds1.map(_ + 1).collect()

// Encoders are also created for case classes
case class Person(name: String, age: Long)
val ds = Seq(Person("Andy",35)).toDS()
ds.show()
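
// A minimal sketch (not in the original notes) of typed operations on the Dataset above:
// filter and map take plain Scala functions and keep the Person type information.
ds.filter(_.age > 30).map(_.name).collect()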

/**
 * Inferring the Schema Using Reflection
*/
import sqlContext.implicits._
case class Person2(name: String, age: Int)
val people = sc.textFile("/tmp/ccc/data/tmpa").filter(_.length > 1).map(_.split(",")).map(p => Person2(p(0),p(1).trim.toInt)).toDF()
people.registerTempTable("people")
sqlContext.sql("select * from people limit 10").show val teenagers = sqlContext.sql("select name,age from people where age >= 23 and age<= 26")
teenagers.map(t => "Name: " + t(0)).collect().foreach(println) // or by field name
teenagers.map(t => "Name: " + t.getAs[String]("name")).collect().foreach(println) // row.getValuesMap[T] retrieves multiple columns at once into a Map[String,T]
teenagers.map(_.getValuesMap[Any](List("name", "age"))).collect().foreach(println)

/**
* Programmatically Specifying the Schema
*/
val schemaString = "name age"
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{StructType, StructField, StringType}

val schema =
StructType(
schemaString.split(" ").map(fieldName => StructField(fieldName,StringType,true))
)

// Convert records of the RDD (people) to Rows
val people2 = sc.textFile("/tmp/ccc/data/tmpa")
val rowRDD = people2.map(_.split(",")).map(p => Row(p(0), p(1).trim))

// Apply the schema to the RDD
val peopleDataFrame = sqlContext.createDataFrame(rowRDD, schema)

// Register the DataFrame as a table
peopleDataFrame.registerTempTable("people")

// SQL
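// A minimal sketch (following the Spark SQL guide pattern, not in the original notes):
// query the table that was just registered from the programmatic schema.
val results = sqlContext.sql("SELECT name FROM people")
results.map(t => "Name: " + t(0)).collect().foreach(println)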

// Generic load (Parquet by default) and load with an explicit format
val df = sqlContext.read.load("/tmp/examples/src/main/resources/users.parquet")
val df3 = sqlContext.read.format("json").load("/tmp/examples/src/main/resources/people.json")

// Run SQL on files directly
val df4 = sqlContext.sql("select * from parquet.`/tmp/examples/src/main/resources/users.parquet`")

// Save modes
/**
* ErrorIfExists (default)
* Append
* Overwrite
* Ignore
 */
val parquetFile = sqlContext.read.parquet("")
}
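
// Addendum (not part of the original listing): a spark-shell style sketch of the save modes
// listed above, using the org.apache.spark.sql.SaveMode enum instead of a mode string.
// The output paths are hypothetical and df1 is assumed to be the DataFrame defined earlier.
import org.apache.spark.sql.SaveMode
df1.write.mode(SaveMode.Ignore).format("json").save("/tmp/ccc/tmpc/json_file_ignore")
df1.write.mode(SaveMode.ErrorIfExists).format("parquet").save("/tmp/ccc/tmpc/parquet_file_once")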