Spark_JGroupByKey
package core.java;

import java.util.Arrays;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;

import scala.Tuple2;

public class JGroupByKey {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf();
        conf.setMaster("local");
        conf.setAppName("groupByKey");
        // Required on Windows so Spark can locate winutils.exe
        System.setProperty("hadoop.home.dir", "C:/hadoop-2.8.2");
        JavaSparkContext sc = new JavaSparkContext(conf);
        // Values reconstructed from the collected output below:
        // cl1 -> [90, 96, 89], cl2 -> [91, 60], cl3 -> [97, 90]
        List<Tuple2<String, Integer>> list = Arrays.asList(
                new Tuple2<String, Integer>("cl1", 90),
                new Tuple2<String, Integer>("cl2", 91),
                new Tuple2<String, Integer>("cl3", 97),
                new Tuple2<String, Integer>("cl1", 96),
                new Tuple2<String, Integer>("cl1", 89),
                new Tuple2<String, Integer>("cl3", 90),
                new Tuple2<String, Integer>("cl2", 60));
        // Build a pair RDD, then group all values under each key
        JavaPairRDD<String, Integer> listRDD = sc.parallelizePairs(list);
        JavaPairRDD<String, Iterable<Integer>> results = listRDD.groupByKey();
        System.out.println(results.collect());
        sc.close();
    }
}

//SLF4J: Class path contains multiple SLF4J bindings.
//SLF4J: Found binding in [jar:file:/E:/bigdata/spark-1.4.0-bin-hadoop2.6/lib/spark-assembly-1.4.0-hadoop2.6.0.jar!/org/slf4j/impl/StaticLoggerBinder.class]
//SLF4J: Found binding in [jar:file:/E:/bigdata/spark-1.4.0-bin-hadoop2.6/lib/spark-examples-1.4.0-hadoop2.6.0.jar!/org/slf4j/impl/StaticLoggerBinder.class]
//SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
//SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
//Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
//17/12/27 20:23:41 INFO SparkContext: Running Spark version 1.4.0
//17/12/27 20:23:43 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
//17/12/27 20:23:44 INFO SecurityManager: Changing view acls to:
//17/12/27 20:23:44 INFO SecurityManager: Changing modify acls to:
//17/12/27 20:23:44 INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(); users with modify permissions: Set()
//17/12/27 20:23:46 INFO Slf4jLogger: Slf4jLogger started
//17/12/27 20:23:47 INFO Remoting: Starting remoting
//17/12/27 20:23:48 INFO Remoting: Remoting started; listening on addresses :[akka.tcp://sparkDriver@172.18.3.6:58955]
//17/12/27 20:23:48 INFO Utils: Successfully started service 'sparkDriver' on port 58955.
//17/12/27 20:23:48 INFO SparkEnv: Registering MapOutputTracker
//17/12/27 20:23:49 INFO SparkEnv: Registering BlockManagerMaster
//17/12/27 20:23:49 INFO DiskBlockManager: Created local directory at C:\Users\\AppData\Local\Temp\spark-c1db5ccf-8e4b-4ef9-9a7f-c6ec66d46664\blockmgr-a60ebb60-8b7c-433c-b035-eded748b261b
//17/12/27 20:23:49 INFO MemoryStore: MemoryStore started with capacity 467.6 MB
//17/12/27 20:23:49 INFO HttpFileServer: HTTP File server directory is C:\Users\\AppData\Local\Temp\spark-c1db5ccf-8e4b-4ef9-9a7f-c6ec66d46664\httpd-f45db3a6-b75c-46a5-a1d1-3539b1698cd0
//17/12/27 20:23:49 INFO HttpServer: Starting HTTP Server
//17/12/27 20:23:49 INFO Utils: Successfully started service 'HTTP file server' on port 58959.
//17/12/27 20:23:49 INFO SparkEnv: Registering OutputCommitCoordinator
//17/12/27 20:23:50 WARN Utils: Service 'SparkUI' could not bind on port 4040. Attempting port 4041.
//17/12/27 20:23:50 INFO Utils: Successfully started service 'SparkUI' on port 4041.
//17/12/27 20:23:50 INFO SparkUI: Started SparkUI at http://172.18.3.6:4041
//17/12/27 20:23:50 INFO Executor: Starting executor ID driver on host localhost
//17/12/27 20:23:51 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 58978.
//17/12/27 20:23:51 INFO NettyBlockTransferService: Server created on 58978
//17/12/27 20:23:51 INFO BlockManagerMaster: Trying to register BlockManager
//17/12/27 20:23:51 INFO BlockManagerMasterEndpoint: Registering block manager localhost:58978 with 467.6 MB RAM, BlockManagerId(driver, localhost, 58978)
//17/12/27 20:23:51 INFO BlockManagerMaster: Registered BlockManager
//17/12/27 20:23:57 INFO SparkContext: Starting job: collect at JGroupByKey.java:27
//17/12/27 20:23:57 INFO DAGScheduler: Registering RDD 0 (parallelizePairs at JGroupByKey.java:25)
//17/12/27 20:23:57 INFO DAGScheduler: Got job 0 (collect at JGroupByKey.java:27) with 1 output partitions (allowLocal=false)
//17/12/27 20:23:57 INFO DAGScheduler: Final stage: ResultStage 1(collect at JGroupByKey.java:27)
//17/12/27 20:23:57 INFO DAGScheduler: Parents of final stage: List(ShuffleMapStage 0)
//17/12/27 20:23:57 INFO DAGScheduler: Missing parents: List(ShuffleMapStage 0)
//17/12/27 20:23:57 INFO DAGScheduler: Submitting ShuffleMapStage 0 (ParallelCollectionRDD[0] at parallelizePairs at JGroupByKey.java:25), which has no missing parents
//17/12/27 20:23:58 INFO MemoryStore: ensureFreeSpace(2832) called with curMem=0, maxMem=490356080
//17/12/27 20:23:58 INFO MemoryStore: Block broadcast_0 stored as values in memory (estimated size 2.8 KB, free 467.6 MB)
//17/12/27 20:23:58 INFO MemoryStore: ensureFreeSpace(1553) called with curMem=2832, maxMem=490356080
//17/12/27 20:23:58 INFO MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 1553.0 B, free 467.6 MB)
//17/12/27 20:23:58 INFO BlockManagerInfo: Added broadcast_0_piece0 in memory on localhost:58978 (size: 1553.0 B, free: 467.6 MB)
//17/12/27 20:23:58 INFO SparkContext: Created broadcast 0 from broadcast at DAGScheduler.scala:874
//17/12/27 20:23:58 INFO DAGScheduler: Submitting 1 missing tasks from ShuffleMapStage 0 (ParallelCollectionRDD[0] at parallelizePairs at JGroupByKey.java:25)
//17/12/27 20:23:58 INFO TaskSchedulerImpl: Adding task set 0.0 with 1 tasks
//17/12/27 20:23:58 INFO TaskSetManager: Starting task 0.0 in stage 0.0 (TID 0, localhost, PROCESS_LOCAL, 1627 bytes)
//17/12/27 20:23:58 INFO Executor: Running task 0.0 in stage 0.0 (TID 0)
//17/12/27 20:23:58 INFO Executor: Finished task 0.0 in stage 0.0 (TID 0). 879 bytes result sent to driver
//17/12/27 20:23:58 INFO TaskSetManager: Finished task 0.0 in stage 0.0 (TID 0) in 427 ms on localhost (1/1)
//17/12/27 20:23:58 INFO TaskSchedulerImpl: Removed TaskSet 0.0, whose tasks have all completed, from pool
//17/12/27 20:23:58 INFO DAGScheduler: ShuffleMapStage 0 (parallelizePairs at JGroupByKey.java:25) finished in 0.559 s
//17/12/27 20:23:58 INFO DAGScheduler: looking for newly runnable stages
//17/12/27 20:23:58 INFO DAGScheduler: running: Set()
//17/12/27 20:23:58 INFO DAGScheduler: waiting: Set(ResultStage 1)
//17/12/27 20:23:58 INFO DAGScheduler: failed: Set()
//17/12/27 20:23:58 INFO DAGScheduler: Missing parents for ResultStage 1: List()
//17/12/27 20:23:58 INFO DAGScheduler: Submitting ResultStage 1 (MapPartitionsRDD[2] at groupByKey at JGroupByKey.java:26), which is now runnable
//17/12/27 20:23:58 INFO MemoryStore: ensureFreeSpace(4000) called with curMem=4385, maxMem=490356080
//17/12/27 20:23:58 INFO MemoryStore: Block broadcast_1 stored as values in memory (estimated size 3.9 KB, free 467.6 MB)
//17/12/27 20:23:58 INFO MemoryStore: ensureFreeSpace(2129) called with curMem=8385, maxMem=490356080
//17/12/27 20:23:58 INFO MemoryStore: Block broadcast_1_piece0 stored as bytes in memory (estimated size 2.1 KB, free 467.6 MB)
//17/12/27 20:23:58 INFO BlockManagerInfo: Added broadcast_1_piece0 in memory on localhost:58978 (size: 2.1 KB, free: 467.6 MB)
//17/12/27 20:23:58 INFO SparkContext: Created broadcast 1 from broadcast at DAGScheduler.scala:874
//17/12/27 20:23:58 INFO DAGScheduler: Submitting 1 missing tasks from ResultStage 1 (MapPartitionsRDD[2] at groupByKey at JGroupByKey.java:26)
//17/12/27 20:23:58 INFO TaskSchedulerImpl: Adding task set 1.0 with 1 tasks
//17/12/27 20:23:58 INFO TaskSetManager: Starting task 0.0 in stage 1.0 (TID 1, localhost, PROCESS_LOCAL, 1165 bytes)
//17/12/27 20:23:58 INFO Executor: Running task 0.0 in stage 1.0 (TID 1)
//17/12/27 20:23:59 INFO ShuffleBlockFetcherIterator: Getting 1 non-empty blocks out of 1 blocks
//17/12/27 20:23:59 INFO ShuffleBlockFetcherIterator: Started 0 remote fetches in 57 ms
//17/12/27 20:23:59 INFO Executor: Finished task 0.0 in stage 1.0 (TID 1). 5294 bytes result sent to driver
//17/12/27 20:23:59 INFO TaskSetManager: Finished task 0.0 in stage 1.0 (TID 1) in 231 ms on localhost (1/1)
//17/12/27 20:23:59 INFO TaskSchedulerImpl: Removed TaskSet 1.0, whose tasks have all completed, from pool
//17/12/27 20:23:59 INFO DAGScheduler: ResultStage 1 (collect at JGroupByKey.java:27) finished in 0.236 s
//17/12/27 20:23:59 INFO DAGScheduler: Job 0 finished: collect at JGroupByKey.java:27, took 1.963917 s
//[(cl3,[97, 90]), (cl1,[90, 96, 89]), (cl2,[91, 60])]
//17/12/27 20:23:59 INFO SparkUI: Stopped Spark web UI at http://172.18.3.6:4041
//17/12/27 20:23:59 INFO DAGScheduler: Stopping DAGScheduler
//17/12/27 20:23:59 INFO MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped!
//17/12/27 20:23:59 INFO Utils: path = C:\Users\\AppData\Local\Temp\spark-c1db5ccf-8e4b-4ef9-9a7f-c6ec66d46664\blockmgr-a60ebb60-8b7c-433c-b035-eded748b261b, already present as root for deletion.
//17/12/27 20:23:59 INFO MemoryStore: MemoryStore cleared
//17/12/27 20:23:59 INFO BlockManager: BlockManager stopped
//17/12/27 20:23:59 INFO BlockManagerMaster: BlockManagerMaster stopped
//17/12/27 20:23:59 INFO OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: OutputCommitCoordinator stopped!
//17/12/27 20:23:59 INFO SparkContext: Successfully stopped SparkContext
//17/12/27 20:23:59 INFO RemoteActorRefProvider$RemotingTerminator: Shutting down remote daemon.
//17/12/27 20:23:59 INFO RemoteActorRefProvider$RemotingTerminator: Remote daemon shut down; proceeding with flushing remote transports.
//17/12/27 20:23:59 INFO Utils: Shutdown hook called
//17/12/27 20:23:59 INFO Utils: Deleting directory C:\Users\\AppData\Local\Temp\spark-c1db5ccf-8e4b-4ef9-9a7f-c6ec66d46664
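One caveat worth adding: groupByKey ships every value for a key across the shuffle, so when you only need an aggregate per key, reduceByKey is usually the better choice, since it combines values map-side before shuffling. A minimal sketch against the same listRDD as above (this assumes the pre-Java-8 anonymous-class style of the Spark 1.x Java API used in this post, and additionally needs import org.apache.spark.api.java.function.Function2):

        // Sum the values per key; partial sums are computed map-side,
        // so far less data crosses the shuffle than with groupByKey
        JavaPairRDD<String, Integer> sums = listRDD.reduceByKey(
                new Function2<Integer, Integer, Integer>() {
                    @Override
                    public Integer call(Integer a, Integer b) {
                        return a + b;
                    }
                });
        System.out.println(sums.collect());

With the sample values above this would print something like [(cl3,187), (cl1,275), (cl2,151)]: the same keys, but a single integer per key instead of the full value list.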