zip operation

def zip[U](other: org.apache.spark.rdd.RDD[U])(implicit evidence$10: scala.reflect.ClassTag[U]): org.apache.spark.rdd.RDD[(String, U)]

scala> val rdd1=sc.makeRDD(Array("apple","pear","grape","egg","elephant"))

rdd1: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[23] at makeRDD at <console>:24

scala> val rdd2=sc.makeRDD(List(20,5,8,6,3))

rdd2: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[24] at makeRDD at <console>:24

scala> rdd1.zip(rdd2).collect
res35: Array[(String, Int)] = Array((apple,20), (pear,5), (grape,8), (egg,6), (elephant,3))

scala> val rdd3=rdd1 zip rdd2

rdd3: org.apache.spark.rdd.RDD[(String, Int)] = ZippedPartitionsRDD2[27] at zip at <console>:28

scala> rdd3.collect

res36: Array[(String, Int)] = Array((apple,20), (pear,5), (grape,8), (egg,6), (elephant,3))
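Note that zip pairs elements by position and requires both RDDs to have the same number of partitions and the same number of elements per partition. A minimal sketch (the data is illustrative) that makes the partition counts explicit:

// Sketch: both RDDs are built with the same partition count so zip can pair them element-wise.
val keys   = sc.makeRDD(Array("apple", "pear", "grape"), 2)
val values = sc.makeRDD(Array(20, 5, 8), 2)
keys.zip(values).collect   // => Array((apple,20), (pear,5), (grape,8))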

-------------------------

def combineByKey[C](createCombiner: Int => C,mergeValue: (C, Int) => C,mergeCombiners: (C, C) => C): org.apache.spark.rdd.RDD[(String, C)]
def combineByKey[C](createCombiner: Int => C,mergeValue: (C, Int) => C,mergeCombiners: (C, C) => C,numPartitions: Int): org.apache.spark.rdd.RDD[(String, C)]
def combineByKey[C](createCombiner: Int => C,mergeValue: (C, Int) => C,mergeCombiners: (C, C) => C,partitioner: org.apache.spark.Partitioner,mapSideCombine: Boolean,serializer: org.apache.spark.serializer.Serializer): org.apache.spark.rdd.RDD[(String, C)]

def combineByKey[C](
    createCombiner: V => C,
    mergeValue: (C, V) => C,
    mergeCombiners: (C, C) => C,
    numPartitions: Int): RDD[(K, C)] = self.withScope {
  combineByKeyWithClassTag(createCombiner, mergeValue, mergeCombiners, numPartitions)(null)
}
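The three function arguments play distinct roles: createCombiner turns the first value seen for a key in a partition into an initial combiner of type C, mergeValue folds each further value for that key into the combiner within a partition, and mergeCombiners merges the partial combiners for the same key across partitions. As a minimal illustrative sketch (the data and the averaging use case are not from the transcripts here), a per-key average can be built this way:

// Hypothetical sketch: per-key average via combineByKey.
val scores = sc.makeRDD(List(("a", 1), ("b", 4), ("a", 3), ("b", 2)), 2)
val sumCount = scores.combineByKey(
  (v: Int) => (v, 1),                                           // createCombiner: first value -> (sum, count)
  (acc: (Int, Int), v: Int) => (acc._1 + v, acc._2 + 1),        // mergeValue: fold a value in, within a partition
  (a: (Int, Int), b: (Int, Int)) => (a._1 + b._1, a._2 + b._2)  // mergeCombiners: merge across partitions
)
sumCount.mapValues { case (sum, cnt) => sum.toDouble / cnt }.collect
// => Array((a,2.0), (b,3.0)) (key order may vary)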

scala> rdd3.collect
res53: Array[(String, Int)] = Array((apple,2), (pear,1), (grape,2), (egg,1), (elephant,1))

scala> val rdd4=rdd3.combineByKey(List(_),(x:List[Int],v:Int)=>x:+v,(m:List[Int],n:List[Int])=>m++n)
rdd4: org.apache.spark.rdd.RDD[(String, List[Int])] = ShuffledRDD[35] at combineByKey at <console>:30

scala> rdd4.collect

res51: Array[(String, List[Int])] = Array((egg,List(1)), (elephant,List(1)), (pear,List(1)), (apple,List(2)), (grape,List(2)))

scala> val rdd4=rdd3.map(x=>(x._2,x._1))

rdd4: org.apache.spark.rdd.RDD[(Int, String)] = MapPartitionsRDD[33] at map at <console>:30

scala> val rdd5=rdd4.combineByKey(List(_),(x:List[String],v:String)=>x:+v,(m:List[String],n:List[String])=>m++n)
rdd5: org.apache.spark.rdd.RDD[(Int, List[String])] = ShuffledRDD[37] at combineByKey at <console>:32

scala> rdd5.collect

res52: Array[(Int, List[String])] = Array((1,List(pear, egg, elephant)), (2,List(apple, grape)))

--------------------

scala> val rdd1=sc.makeRDD(Array("apple","apple","pear","egg","hellokitty","egg","apple"))

rdd1: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[4] at makeRDD at <console>:24

scala> rdd1.countByValue

res1: scala.collection.Map[String,Long] = Map(hellokitty -> 1, egg -> 2, pear -> 1, apple -> 3)

scala> val map1=rdd1.countByValue
map1: scala.collection.Map[String,Long] = Map(hellokitty -> 1, egg -> 2, pear -> 1, apple -> 3)

scala> val rdd2=sc.makeRDD(map1.toList)

rdd2: org.apache.spark.rdd.RDD[(String, Long)] = ParallelCollectionRDD[21] at makeRDD at <console>:28

scala> rdd2.collect

res5: Array[(String, Long)] = Array((hellokitty,1), (egg,2), (pear,1), (apple,3))

-------------------

scala> val rdd1=sc.makeRDD(Array("apple","apple","pear","egg","hellokitty","egg","apple"))

rdd1: org.apache.spark.rdd.RDD[String] = ParallelCollectionRDD[28] at makeRDD at <console>:24

scala> val rdd2=rdd1.map(x=>(x,1))

rdd2: org.apache.spark.rdd.RDD[(String, Int)] = MapPartitionsRDD[29] at map at <console>:26

scala> rdd2.collect

res33: Array[(String, Int)] = Array((apple,1), (apple,1), (pear,1), (egg,1), (hellokitty,1), (egg,1), (apple,1))

scala> rdd2.partitions.size
res34: Int = 4

scala> rdd2.reduceByKey(_+_).collect
res36: Array[(String, Int)] = Array((hellokitty,1), (egg,2), (pear,1), (apple,3))

scala> rdd2.reduceByKey(_+_,2).partitions.size // the shuffle repartitions the data into 2 partitions
res37: Int = 2

-------------------------------

A shuffle operation can repartition the data, and you can specify the target number of partitions.

Performing a shuffle is expensive: data has to be written to disk and transferred over the network, and sometimes it also needs to be sorted. Common transformations such as repartition, join, cogroup, and any *By or *ByKey transformation require a shuffle.
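As a minimal sketch (reusing the word-count pattern from the transcript above; the data is illustrative), the optional numPartitions argument of a *ByKey transformation sets the number of post-shuffle partitions, and repartition forces a shuffle on its own:

// Illustrative sketch: controlling partition counts around a shuffle.
val words = sc.makeRDD(Array("apple", "apple", "pear", "egg"), 4)
val pairs = words.map(x => (x, 1))
val counted = pairs.reduceByKey(_ + _, 2)   // the shuffle writes into 2 partitions
counted.partitions.size                     // => 2
counted.repartition(3).partitions.size      // => 3 (repartition always shuffles)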

--------------------------------------

scala> val rdd2=rdd1.map(x=>(x,1))
rdd2: org.apache.spark.rdd.RDD[(String, Int)] = MapPartitionsRDD[29] at map at <console>:26

scala> rdd2.collect
res39: Array[(String, Int)] = Array((apple,1), (apple,1), (pear,1), (egg,1), (hellokitty,1), (egg,1), (apple,1))

scala> rdd2.combineByKey(x=>x,(c:Int,n:Int)=>c+n,(c1:Int,c2:Int)=>c1+c2).collect
res41: Array[(String, Int)] = Array((hellokitty,1), (egg,2), (pear,1), (apple,3))

scala> rdd1.countByValue()
res42: scala.collection.Map[String,Long] = Map(hellokitty -> 1, egg -> 2, pear -> 1, apple -> 3)

scala> rdd2.reduceByKey(_+_).collect
res44: Array[(String, Int)] = Array((hellokitty,1), (egg,2), (pear,1), (apple,3))

-------------------------------

scala> val rdd3=rdd1.map(x=>(1,x))

rdd3: org.apache.spark.rdd.RDD[(Int, String)] = MapPartitionsRDD[40] at map at <console>:26

scala> rdd3.collect

res45: Array[(Int, String)] = Array((1,apple), (1,apple), (1,pear), (1,egg), (1,hellokitty), (1,egg), (1,apple))

scala> rdd3.combineByKey(x=>List(x),(c:List[String],y:String)=>c:+y,(c1:List[String],c2:List[String])=>c1++c2).collect
res49: Array[(Int, List[String])] = Array((1,List(apple, apple, pear, egg, hellokitty, egg, apple)))

---------------------------------------------

scala> val rdd00=sc.makeRDD(List(("a",1),("b",1),("a",3),("ba",3),("b",1),("g",10)),2)

rdd00: org.apache.spark.rdd.RDD[(String, Int)] = ParallelCollectionRDD[44] at makeRDD at <console>:24

scala> val rdd3=rdd00.map(x=>(x._2,x._1))

rdd3: org.apache.spark.rdd.RDD[(Int, String)] = MapPartitionsRDD[45] at map at <console>:26

scala> rdd3.collect

res51: Array[(Int, String)] = Array((1,a), (1,b), (3,a), (3,ba), (1,b), (10,g))

scala> rdd3.groupByKey().collect

res53: Array[(Int, Iterable[String])] = Array((10,CompactBuffer(g)), (1,CompactBuffer(a, b, b)), (3,CompactBuffer(a, ba)))

scala> rdd3.combineByKey(x=>List(x),(c:List[String],y:String)=>c:+y,(c1:List[String],c2:List[String])=>c1++c2).collect

res54: Array[(Int, List[String])] = Array((10,List(g)), (1,List(a, b, b)), (3,List(a, ba)))

-----------------------

distinct(numPartitions: Int) removes duplicates and repartitions at the same time

scala> val bb=sc.makeRDD(Array(1,1,2,1,8,6,8,4,5,4),2)

bb: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[81] at makeRDD at <console>:25

scala> bb.distinct(1).partitions.size

res61: Int = 1

scala> bb.distinct(3).partitions.size

res62: Int = 3

----------------------

def randomSplit(weights: Array[Double],seed: Long): Array[org.apache.spark.rdd.RDD[Int]]

The randomSplit operation splits one RDD into multiple RDDs according to the weights array: the higher a weight, the greater the chance an element lands in the corresponding split. The weights are expected to sum to 1; if they do not, they are normalized.
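The transcript below splits an RDD named aa that was defined earlier (its definition is not shown). A self-contained minimal sketch (names and weights are illustrative), with a fixed seed for reproducibility:

// Sketch: split an RDD into two parts with randomSplit; a fixed seed makes the split reproducible.
val data = sc.makeRDD(1 to 100)
val Array(train, test) = data.randomSplit(Array(0.7, 0.3), seed = 42L)
// train.count + test.count == 100, in roughly a 70/30 ratio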

scala> val split=aa.randomSplit(Array(0.1,0.2,0.3,0.4))

split: Array[org.apache.spark.rdd.RDD[Int]] = Array(MapPartitionsRDD[165] at randomSplit at <console>:27, MapPartitionsRDD[166] at randomSplit at <console>:27, MapPartitionsRDD[167] at randomSplit at <console>:27, MapPartitionsRDD[168] at randomSplit at <console>:27)

scala> split(0).count

res94: Long = 11

scala> split(1).count

res95: Long = 19

scala> split(2).count

res96: Long = 34

scala> split(3).count

res97: Long = 36

-----------------------------------------------------

def glom(): org.apache.spark.rdd.RDD[Array[Int]]

glom gathers the elements of each partition into an array, yielding an RDD with as many arrays as there are partitions

scala> val bb=sc.makeRDD(1 to 10,3)

bb: org.apache.spark.rdd.RDD[Int] = ParallelCollectionRDD[203] at makeRDD at <console>:25

scala> bb.glom().collect

res127: Array[Array[Int]] = Array(Array(1, 2, 3), Array(4, 5, 6), Array(7, 8, 9, 10))
