package core.java;

import java.util.Arrays;
import java.util.List;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;

import scala.Tuple2;

public class JGroupByKey {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf();
        conf.setMaster("local");
        conf.setAppName("groupByKey"); // was "union", a leftover from an earlier example
        // point Spark at a local Hadoop install so winutils.exe can be found on Windows
        System.setProperty("hadoop.home.dir", "C:/hadoop-2.8.2");
        JavaSparkContext sc = new JavaSparkContext(conf);
        // (class, score) pairs; the scores were dropped in the original post and are
        // reconstructed here from the collect() output shown in the log below
        List<Tuple2<String, Integer>> list = Arrays.asList(
                new Tuple2<String, Integer>("cl1", 90),
                new Tuple2<String, Integer>("cl2", 91),
                new Tuple2<String, Integer>("cl3", 97),
                new Tuple2<String, Integer>("cl1", 96),
                new Tuple2<String, Integer>("cl1", 89),
                new Tuple2<String, Integer>("cl3", 90),
                new Tuple2<String, Integer>("cl2", 60));
        JavaPairRDD<String, Integer> listRDD = sc.parallelizePairs(list);
        // groupByKey shuffles the pairs so that all values for a key
        // end up in a single Iterable on one partition
        JavaPairRDD<String, Iterable<Integer>> results = listRDD.groupByKey();
        System.out.println(results.collect());
        sc.close();
    }
}
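Console output from a local run (Spark 1.4.0 on Windows); the grouped result appears in the collect() line near the end of the log: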
//SLF4J: Class path contains multiple SLF4J bindings.
//SLF4J: Found binding in [jar:file:/E:/bigdata/spark-1.4.0-bin-hadoop2.6/lib/spark-assembly-1.4.0-hadoop2.6.0.jar!/org/slf4j/impl/StaticLoggerBinder.class]
//SLF4J: Found binding in [jar:file:/E:/bigdata/spark-1.4.0-bin-hadoop2.6/lib/spark-examples-1.4.0-hadoop2.6.0.jar!/org/slf4j/impl/StaticLoggerBinder.class]
//SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
//SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
//Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
//17/12/27 20:23:41 INFO SparkContext: Running Spark version 1.4.0
//17/12/27 20:23:43 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
//17/12/27 20:23:44 INFO SecurityManager: Changing view acls to:
//17/12/27 20:23:44 INFO SecurityManager: Changing modify acls to:
//17/12/27 20:23:44 INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(); users with modify permissions: Set()
//17/12/27 20:23:46 INFO Slf4jLogger: Slf4jLogger started
//17/12/27 20:23:47 INFO Remoting: Starting remoting
//17/12/27 20:23:48 INFO Remoting: Remoting started; listening on addresses :[akka.tcp://sparkDriver@172.18.3.6:58955]
//17/12/27 20:23:48 INFO Utils: Successfully started service 'sparkDriver' on port 58955.
//17/12/27 20:23:48 INFO SparkEnv: Registering MapOutputTracker
//17/12/27 20:23:49 INFO SparkEnv: Registering BlockManagerMaster
//17/12/27 20:23:49 INFO DiskBlockManager: Created local directory at C:\Users\\AppData\Local\Temp\spark-c1db5ccf-8e4b-4ef9-9a7f-c6ec66d46664\blockmgr-a60ebb60-8b7c-433c-b035-eded748b261b
//17/12/27 20:23:49 INFO MemoryStore: MemoryStore started with capacity 467.6 MB
//17/12/27 20:23:49 INFO HttpFileServer: HTTP File server directory is C:\Users\\AppData\Local\Temp\spark-c1db5ccf-8e4b-4ef9-9a7f-c6ec66d46664\httpd-f45db3a6-b75c-46a5-a1d1-3539b1698cd0
//17/12/27 20:23:49 INFO HttpServer: Starting HTTP Server
//17/12/27 20:23:49 INFO Utils: Successfully started service 'HTTP file server' on port 58959.
//17/12/27 20:23:49 INFO SparkEnv: Registering OutputCommitCoordinator
//17/12/27 20:23:50 WARN Utils: Service 'SparkUI' could not bind on port 4040. Attempting port 4041.
//17/12/27 20:23:50 INFO Utils: Successfully started service 'SparkUI' on port 4041.
//17/12/27 20:23:50 INFO SparkUI: Started SparkUI at http://172.18.3.6:4041
//17/12/27 20:23:50 INFO Executor: Starting executor ID driver on host localhost
//17/12/27 20:23:51 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 58978.
//17/12/27 20:23:51 INFO NettyBlockTransferService: Server created on 58978
//17/12/27 20:23:51 INFO BlockManagerMaster: Trying to register BlockManager
//17/12/27 20:23:51 INFO BlockManagerMasterEndpoint: Registering block manager localhost:58978 with 467.6 MB RAM, BlockManagerId(driver, localhost, 58978)
//17/12/27 20:23:51 INFO BlockManagerMaster: Registered BlockManager
//17/12/27 20:23:57 INFO SparkContext: Starting job: collect at JGroupByKey.java:27
//17/12/27 20:23:57 INFO DAGScheduler: Registering RDD 0 (parallelizePairs at JGroupByKey.java:25)
//17/12/27 20:23:57 INFO DAGScheduler: Got job 0 (collect at JGroupByKey.java:27) with 1 output partitions (allowLocal=false)
//17/12/27 20:23:57 INFO DAGScheduler: Final stage: ResultStage 1(collect at JGroupByKey.java:27)
//17/12/27 20:23:57 INFO DAGScheduler: Parents of final stage: List(ShuffleMapStage 0)
//17/12/27 20:23:57 INFO DAGScheduler: Missing parents: List(ShuffleMapStage 0)
//17/12/27 20:23:57 INFO DAGScheduler: Submitting ShuffleMapStage 0 (ParallelCollectionRDD[0] at parallelizePairs at JGroupByKey.java:25), which has no missing parents
//17/12/27 20:23:58 INFO MemoryStore: ensureFreeSpace(2832) called with curMem=0, maxMem=490356080
//17/12/27 20:23:58 INFO MemoryStore: Block broadcast_0 stored as values in memory (estimated size 2.8 KB, free 467.6 MB)
//17/12/27 20:23:58 INFO MemoryStore: ensureFreeSpace(1553) called with curMem=2832, maxMem=490356080
//17/12/27 20:23:58 INFO MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 1553.0 B, free 467.6 MB)
//17/12/27 20:23:58 INFO BlockManagerInfo: Added broadcast_0_piece0 in memory on localhost:58978 (size: 1553.0 B, free: 467.6 MB)
//17/12/27 20:23:58 INFO SparkContext: Created broadcast 0 from broadcast at DAGScheduler.scala:874
//17/12/27 20:23:58 INFO DAGScheduler: Submitting 1 missing tasks from ShuffleMapStage 0 (ParallelCollectionRDD[0] at parallelizePairs at JGroupByKey.java:25)
//17/12/27 20:23:58 INFO TaskSchedulerImpl: Adding task set 0.0 with 1 tasks
//17/12/27 20:23:58 INFO TaskSetManager: Starting task 0.0 in stage 0.0 (TID 0, localhost, PROCESS_LOCAL, 1627 bytes)
//17/12/27 20:23:58 INFO Executor: Running task 0.0 in stage 0.0 (TID 0)
//17/12/27 20:23:58 INFO Executor: Finished task 0.0 in stage 0.0 (TID 0). 879 bytes result sent to driver
//17/12/27 20:23:58 INFO TaskSetManager: Finished task 0.0 in stage 0.0 (TID 0) in 427 ms on localhost (1/1)
//17/12/27 20:23:58 INFO TaskSchedulerImpl: Removed TaskSet 0.0, whose tasks have all completed, from pool
//17/12/27 20:23:58 INFO DAGScheduler: ShuffleMapStage 0 (parallelizePairs at JGroupByKey.java:25) finished in 0.559 s
//17/12/27 20:23:58 INFO DAGScheduler: looking for newly runnable stages
//17/12/27 20:23:58 INFO DAGScheduler: running: Set()
//17/12/27 20:23:58 INFO DAGScheduler: waiting: Set(ResultStage 1)
//17/12/27 20:23:58 INFO DAGScheduler: failed: Set()
//17/12/27 20:23:58 INFO DAGScheduler: Missing parents for ResultStage 1: List()
//17/12/27 20:23:58 INFO DAGScheduler: Submitting ResultStage 1 (MapPartitionsRDD[2] at groupByKey at JGroupByKey.java:26), which is now runnable
//17/12/27 20:23:58 INFO MemoryStore: ensureFreeSpace(4000) called with curMem=4385, maxMem=490356080
//17/12/27 20:23:58 INFO MemoryStore: Block broadcast_1 stored as values in memory (estimated size 3.9 KB, free 467.6 MB)
//17/12/27 20:23:58 INFO MemoryStore: ensureFreeSpace(2129) called with curMem=8385, maxMem=490356080
//17/12/27 20:23:58 INFO MemoryStore: Block broadcast_1_piece0 stored as bytes in memory (estimated size 2.1 KB, free 467.6 MB)
//17/12/27 20:23:58 INFO BlockManagerInfo: Added broadcast_1_piece0 in memory on localhost:58978 (size: 2.1 KB, free: 467.6 MB)
//17/12/27 20:23:58 INFO SparkContext: Created broadcast 1 from broadcast at DAGScheduler.scala:874
//17/12/27 20:23:58 INFO DAGScheduler: Submitting 1 missing tasks from ResultStage 1 (MapPartitionsRDD[2] at groupByKey at JGroupByKey.java:26)
//17/12/27 20:23:58 INFO TaskSchedulerImpl: Adding task set 1.0 with 1 tasks
//17/12/27 20:23:58 INFO TaskSetManager: Starting task 0.0 in stage 1.0 (TID 1, localhost, PROCESS_LOCAL, 1165 bytes)
//17/12/27 20:23:58 INFO Executor: Running task 0.0 in stage 1.0 (TID 1)
//17/12/27 20:23:59 INFO ShuffleBlockFetcherIterator: Getting 1 non-empty blocks out of 1 blocks
//17/12/27 20:23:59 INFO ShuffleBlockFetcherIterator: Started 0 remote fetches in 57 ms
//17/12/27 20:23:59 INFO Executor: Finished task 0.0 in stage 1.0 (TID 1). 5294 bytes result sent to driver
//17/12/27 20:23:59 INFO TaskSetManager: Finished task 0.0 in stage 1.0 (TID 1) in 231 ms on localhost (1/1)
//17/12/27 20:23:59 INFO TaskSchedulerImpl: Removed TaskSet 1.0, whose tasks have all completed, from pool
//17/12/27 20:23:59 INFO DAGScheduler: ResultStage 1 (collect at JGroupByKey.java:27) finished in 0.236 s
//17/12/27 20:23:59 INFO DAGScheduler: Job 0 finished: collect at JGroupByKey.java:27, took 1.963917 s
//[(cl3,[97, 90]), (cl1,[90, 96, 89]), (cl2,[91, 60])]
//17/12/27 20:23:59 INFO SparkUI: Stopped Spark web UI at http://172.18.3.6:4041
//17/12/27 20:23:59 INFO DAGScheduler: Stopping DAGScheduler
//17/12/27 20:23:59 INFO MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped!
//17/12/27 20:23:59 INFO Utils: path = C:\Users\\AppData\Local\Temp\spark-c1db5ccf-8e4b-4ef9-9a7f-c6ec66d46664\blockmgr-a60ebb60-8b7c-433c-b035-eded748b261b, already present as root for deletion.
//17/12/27 20:23:59 INFO MemoryStore: MemoryStore cleared
//17/12/27 20:23:59 INFO BlockManager: BlockManager stopped
//17/12/27 20:23:59 INFO BlockManagerMaster: BlockManagerMaster stopped
//17/12/27 20:23:59 INFO OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: OutputCommitCoordinator stopped!
//17/12/27 20:23:59 INFO SparkContext: Successfully stopped SparkContext
//17/12/27 20:23:59 INFO RemoteActorRefProvider$RemotingTerminator: Shutting down remote daemon.
//17/12/27 20:23:59 INFO RemoteActorRefProvider$RemotingTerminator: Remote daemon shut down; proceeding with flushing remote transports.
//17/12/27 20:23:59 INFO Utils: Shutdown hook called
//17/12/27 20:23:59 INFO Utils: Deleting directory C:\Users\\AppData\Local\Temp\spark-c1db5ccf-8e4b-4ef9-9a7f-c6ec66d46664
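
The collect() line near the end of the log shows the grouped result: every value for a key is gathered into one Iterable, e.g. cl1 → [90, 96, 89]. When the grouped values are only needed to compute a per-key aggregate, reduceByKey is usually the better choice, because it combines values on the map side before the shuffle instead of shipping every value to the reducer. A minimal sketch against the same listRDD as above (the name sums is hypothetical; requires import org.apache.spark.api.java.function.Function2;):

// sum the scores per key; values are merged pairwise within each
// partition first, then across partitions after the shuffle
JavaPairRDD<String, Integer> sums = listRDD.reduceByKey(
        new Function2<Integer, Integer, Integer>() {
            public Integer call(Integer a, Integer b) {
                return a + b;
            }
        });
System.out.println(sums.collect());
// expected output (key order may vary): [(cl3,187), (cl1,275), (cl2,151)]

On a local test the difference is invisible, but on a real cluster the partial map-side aggregation of reduceByKey can cut shuffle traffic substantially.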
