Spark: Common Tips and Tricks, Part 2
The zip operation (pairs the elements of two RDDs positionally)
def zip[U](other: org.apache.spark.rdd.RDD[U])(implicit evidence$10: scala.reflect.ClassTag[U]): org.apache.spark.rdd.RDD[(String, U)]
scala> val rdd1=sc.makeRDD(Array("apple","pear","grape","egg","elephant"))
rdd1: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[23] at makeRDD at <console>:24
scala> val rdd2=sc.makeRDD(List(20,5,8,6,3))
rdd2: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[24] at makeRDD at <console>:24
scala> rdd1.zip(rdd2).collect
res35: Array[(String, Int)] = Array((apple,20), (pear,5), (grape,8), (egg,6), (elephant,3))
scala> val rdd3=rdd1 zip rdd2
rdd3: org.apache.spark.rdd.RDD[(String, Int)] = ZippedPartitionsRDD2[27] at zip at <console>:28
scala> rdd3.collect
res36: Array[(String, Int)] = Array((apple,20), (pear,5), (grape,8), (egg,6), (elephant,3))
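Note that zip only succeeds when the two RDDs have the same number of partitions and the same number of elements in each partition; otherwise the job fails at runtime. A minimal sketch of the failure case (the variable names are illustrative):
val a = sc.makeRDD(1 to 4, 2)              // 4 elements, 2 partitions
val b = sc.makeRDD(Seq("x", "y", "z"), 2)  // 3 elements, 2 partitions
// a.zip(b).collect would throw a SparkException ("Can only zip RDDs with
// same number of elements in each partition")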
-------------------------
The combineByKey operation; the REPL prints these signatures, specialized here for an RDD[(String, Int)]:
def combineByKey[C](createCombiner: Int => C,mergeValue: (C, Int) => C,mergeCombiners: (C, C) => C): org.apache.spark.rdd.RDD[(String, C)]
def combineByKey[C](createCombiner: Int => C,mergeValue: (C, Int) => C,mergeCombiners: (C, C) => C,numPartitions: Int): org.apache.spark.rdd.RDD[(String, C)]
def combineByKey[C](createCombiner: Int => C,mergeValue: (C, Int) => C,mergeCombiners: (C, C) => C,partitioner: org.apache.spark.Partitioner,mapSideCombine: Boolean,serializer: org.apache.spark.serializer.Serializer): org.apache.spark.rdd.RDD[(String, C)]
The corresponding Spark source, from PairRDDFunctions:
def combineByKey[C](
    createCombiner: V => C,
    mergeValue: (C, V) => C,
    mergeCombiners: (C, C) => C,
    numPartitions: Int): RDD[(K, C)] = self.withScope {
  combineByKeyWithClassTag(createCombiner, mergeValue, mergeCombiners, numPartitions)(null)
}
(Note: rdd3 below was redefined in a later session and holds different (String, Int) pairs than the zip result above.)
scala> rdd3.collect
res53: Array[(String, Int)] = Array((apple,2), (pear,1), (grape,2), (egg,1), (elephant,1))
scala> val rdd4=rdd3.combineByKey(List(_),(x:List[Int],v:Int)=>x:+v,(m:List[Int],n:List[Int])=>m++n)
rdd4: org.apache.spark.rdd.RDD[(String, List[Int])] = ShuffledRDD[35] at combineByKey at <console>:30
scala> rdd4.collect
res51: Array[(String, List[Int])] = Array((egg,List(1)), (elephant,List(1)), (pear,List(1)), (apple,List(2)), (grape,List(2)))
scala> val rdd4=rdd3.map(x=>(x._2,x._1))
rdd4: org.apache.spark.rdd.RDD[(Int, String)] = MapPartitionsRDD[33] at map at <console>:30
scala> val rdd5=rdd4.combineByKey(List(_),(x:List[String],v:String)=>x:+v,(m:List[String],n:List[String])=>m++n)
rdd5: org.apache.spark.rdd.RDD[(Int, List[String])] = ShuffledRDD[37] at combineByKey at <console>:32
scala> rdd5.collect
res52: Array[(Int, List[String])] = Array((1,List(pear, egg, elephant)), (2,List(apple, grape)))
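combineByKey also handles aggregations whose result type differs from the value type, such as a per-key average that carries a (sum, count) pair as the combiner. A minimal sketch against the (String, Int) pairs above (the names are illustrative):
// Per-key average: the combiner C = (sum, count), built from Int values.
val avg = rdd3.combineByKey(
    (v: Int) => (v, 1),                                          // createCombiner
    (acc: (Int, Int), v: Int) => (acc._1 + v, acc._2 + 1),       // mergeValue
    (a: (Int, Int), b: (Int, Int)) => (a._1 + b._1, a._2 + b._2) // mergeCombiners
  ).mapValues { case (sum, cnt) => sum.toDouble / cnt }
avg.collect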
--------------------
scala> val rdd1=sc.makeRDD(Array("apple","apple","pear","egg","hellokitty","egg","apple"))
rdd1: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[4] at makeRDD at <console>:24
scala> rdd1.countByValue
res1: scala.collection.Map[String,Long] = Map(hellokitty -> 1, egg -> 2, pear -> 1, apple -> 3)
scala> val map1=rdd1.countByValue
map1: scala.collection.Map[String,Long] = Map(hellokitty -> 1, egg -> 2, pear -> 1, apple -> 3)
scala> val rdd2=sc.makeRDD(map1.toList)
rdd2: org.apache.spark.rdd.RDD[(String, Long)] = ParallelCollectionRDD[21] at makeRDD at <console>:28
scala> rdd2.collect
res5: Array[(String, Long)] = Array((hellokitty,1), (egg,2), (pear,1), (apple,3))
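Keep in mind that countByValue is an action: it returns the complete Map of counts to the driver, so it is only appropriate when the number of distinct values is small. A hedged sketch of a distributed alternative that keeps the counts in an RDD:
// Equivalent counting that stays distributed instead of going to the driver.
val counts = rdd1.map((_, 1L)).reduceByKey(_ + _)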
-------------------
scala> val rdd1=sc.makeRDD(Array("apple","apple","pear","egg","hellokitty","egg","apple"))
rdd1: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[28] at makeRDD at <console>:24
scala> val rdd2=rdd1.map(x=>(x,1))
rdd2: org.apache.spark.rdd.RDD[(String, Int)] = MapPartitionsRDD[29] at map at <console>:26
scala> rdd2.collect
res33: Array[(String, Int)] = Array((apple,1), (apple,1), (pear,1), (egg,1), (hellokitty,1), (egg,1), (apple,1))
scala> rdd2.partitions.size
res34: Int = 4
scala> rdd2.reduceByKey(_+_).collect
res36: Array[(String, Int)] = Array((hellokitty,1), (egg,2), (pear,1), (apple,3))
scala> rdd2.reduceByKey(_+_,2).partitions.size // the shuffle repartitions the result into 2 partitions
res37: Int = 2
-------------------------------
A shuffle can repartition the data, and the target number of partitions can be specified explicitly.
Shuffling is expensive: data has to be written to disk and transferred over the network, and sometimes sorted as well. Common transformations that trigger a shuffle include repartition, join, cogroup, and any *By or *ByKey transformation.
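A small sketch of controlling the partition count directly (the RDD name is illustrative):
val data = sc.makeRDD(1 to 100, 4)
data.partitions.size                  // 4
data.repartition(8).partitions.size   // 8: always a full shuffle
data.coalesce(2).partitions.size      // 2: narrows partitions, no full shuffle by default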
--------------------------------------
scala> val rdd2=rdd1.map(x=>(x,1))
rdd2: org.apache.spark.rdd.RDD[(String, Int)] = MapPartitionsRDD[29] at map at <console>:26
scala> rdd2.collect
res39: Array[(String, Int)] = Array((apple,1), (apple,1), (pear,1), (egg,1), (hellokitty,1), (egg,1), (apple,1))
scala> rdd2.combineByKey(x=>x,(c:Int,n:Int)=>c+n,(c1:Int,c2:Int)=>c1+c2).collect
res41: Array[(String, Int)] = Array((hellokitty,1), (egg,2), (pear,1), (apple,3))
scala> rdd1.countByValue()
res42: scala.collection.Map[String,Long] = Map(hellokitty -> 1, egg -> 2, pear -> 1, apple -> 3)
scala> rdd2.reduceByKey(_+_).collect
res44: Array[(String, Int)] = Array((hellokitty,1), (egg,2), (pear,1), (apple,3))
-------------------------------
scala> val rdd3=rdd1.map(x=>(1,x))
rdd3: org.apache.spark.rdd.RDD[(Int, String)] = MapPartitionsRDD[40] at map at <console>:26
scala> rdd3.collect
res45: Array[(Int, String)] = Array((1,apple), (1,apple), (1,pear), (1,egg), (1,hellokitty), (1,egg), (1,apple))
scala> rdd3.combineByKey(x=>List(x),(c:List[String],y:String)=>c:+y,(c1:List[String],c2:List[String])=>c1++c2).collect
res49: Array[(Int, List[String])] = Array((1,List(apple, apple, pear, egg, hellokitty, egg, apple)))
---------------------------------------------
scala> val rdd00=sc.makeRDD(List(("a",1),("b",1),("a",3),("ba",3),("b",1),("g",10)),2)
rdd00: org.apache.spark.rdd.RDD[(String, Int)] = ParallelCollectionRDD[44] at makeRDD at <console>:24
scala> val rdd3=rdd00.map(x=>(x._2,x._1))
rdd3: org.apache.spark.rdd.RDD[(Int, String)] = MapPartitionsRDD[45] at map at <console>:26
scala> rdd3.collect
res51: Array[(Int, String)] = Array((1,a), (1,b), (3,a), (3,ba), (1,b), (10,g))
scala> rdd3.groupByKey().collect
res53: Array[(Int, Iterable[String])] = Array((10,CompactBuffer(g)), (1,CompactBuffer(a, b, b)), (3,CompactBuffer(a, ba)))
scala> rdd3.combineByKey(x=>List(x),(c:List[String],y:String)=>c:+y,(c1:List[String],c2:List[String])=>c1++c2).collect
res54: Array[(Int, List[String])] = Array((10,List(g)), (1,List(a, b, b)), (3,List(a, ba)))
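For comparison, aggregateByKey expresses the same grouping with an explicit zero value in place of a createCombiner function; a hedged sketch:
// Same result as the combineByKey call above, with List.empty as the zero value.
rdd3.aggregateByKey(List.empty[String])(
    (acc, v) => acc :+ v,  // seqOp: fold one value into the per-partition accumulator
    (a, b) => a ++ b       // combOp: merge accumulators across partitions
  ).collect
// e.g. Array((10,List(g)), (1,List(a, b, b)), (3,List(a, ba)))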
-----------------------
distinct(numPartitions: Int) removes duplicates and repartitions the result at the same time.
scala> val bb=sc.makeRDD(Array(1,1,2,1,8,6,8,4,5,4),2)
bb: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[81] at makeRDD at <console>:25
scala> bb.distinct(1).partitions.size
res61: Int = 1
scala> bb.distinct(3).partitions.size
res62: Int = 3
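Internally, distinct itself relies on a shuffle; roughly, Spark implements it as a map/reduceByKey pair (a sketch of the idea, not verbatim source):
// Roughly what bb.distinct(3) does under the hood.
bb.map(x => (x, null))
  .reduceByKey((x, _) => x, 3)
  .map(_._1)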
----------------------
def randomSplit(weights: Array[Double],seed: Long): Array[org.apache.spark.rdd.RDD[Int]]
randomSplit splits one RDD into several RDDs according to the given weights: the higher a weight, the more elements its split is likely to receive. The weights need not sum to 1; Spark normalizes them if they do not.
scala> val split=aa.randomSplit(Array(0.1,0.2,0.3,0.4))
split: Array[org.apache.spark.rdd.RDD[Int]] = Array(MapPartitionsRDD[165] at randomSplit at <console>:27, MapPartitionsRDD[166] at randomSplit at <console>:27, MapPartitionsRDD[167] at randomSplit at <console>:27, MapPartitionsRDD[168] at randomSplit at <console>:27)
scala> split(0).count
res94: Long = 11
scala> split(1).count
res95: Long = 19
scala> split(2).count
res96: Long = 34
scala> split(3).count
res97: Long = 36
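A common use of randomSplit is building training/test sets; passing a seed makes the split reproducible. A sketch (the variable names are illustrative):
// 80/20 train/test split, reproducible thanks to the fixed seed.
val Array(train, test) = aa.randomSplit(Array(0.8, 0.2), seed = 42L)
train.count + test.count  // equals aa.count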
-----------------------------------------------------
def glom(): org.apache.spark.rdd.RDD[Array[Int]]
glom gathers the elements of each partition into an array, yielding exactly one array per partition.
scala> val bb=sc.makeRDD(1 to 10,3)
bb: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[203] at makeRDD at <console>:25
scala> bb.glom().collect
res127: Array[Array[Int]] = Array(Array(1, 2, 3), Array(4, 5, 6), Array(7, 8, 9, 10))
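glom is handy for per-partition inspection and aggregation, for example computing each partition's maximum:
// Each inner array is one partition, so this is a per-partition max.
bb.glom().map(_.max).collect
// => Array(3, 6, 10)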