spark源码阅读之network(2)
在上一节的解读中发现spark的源码中大量使用netty的buffer部分的api,本节将介绍netty的一些核心api,比如channel:
privatestaticclassClientPool{TransportClient[] clients;Object[] locks;publicClientPool(int size){clients =newTransportClient[size];locks =newObject[size];for(int i =0; i < size; i++){locks[i]=newObject();}}
publicTransportClient createClient(String remoteHost,int remotePort)throwsIOException{// Get connection from the connection pool first.// If it is not found or not active, create a new one.finalInetSocketAddress address =newInetSocketAddress(remoteHost, remotePort);// Create the ClientPool if we don't have it yet.ClientPool clientPool = connectionPool.get(address);if(clientPool ==null){connectionPool.putIfAbsent(address,newClientPool(numConnectionsPerPeer));clientPool = connectionPool.get(address);}int clientIndex = rand.nextInt(numConnectionsPerPeer);TransportClient cachedClient = clientPool.clients[clientIndex];if(cachedClient !=null&& cachedClient.isActive()){logger.trace("Returning cached connection to {}: {}", address, cachedClient);return cachedClient;}// If we reach here, we don't have an existing connection open. Let's create a new one.// Multiple threads might race here to create new connections. Keep only one of them active.synchronized(clientPool.locks[clientIndex]){cachedClient = clientPool.clients[clientIndex];if(cachedClient !=null){if(cachedClient.isActive()){logger.trace("Returning cached connection to {}: {}", address, cachedClient);return cachedClient;}else{logger.info("Found inactive connection to {}, creating a new one.", address);}}clientPool.clients[clientIndex]= createClient(address);return clientPool.clients[clientIndex];}}
*/publicTransportClient createUnmanagedClient(String remoteHost,int remotePort)throwsIOException{finalInetSocketAddress address =newInetSocketAddress(remoteHost, remotePort);return createClient(address);}/** Create a completely new {@link TransportClient} to the remote address. */privateTransportClient createClient(InetSocketAddress address)throwsIOException{logger.debug("Creating new connection to "+ address);Bootstrap bootstrap =newBootstrap();bootstrap.group(workerGroup).channel(socketChannelClass)// Disable Nagle's Algorithm since we don't want packets to wait.option(ChannelOption.TCP_NODELAY,true).option(ChannelOption.SO_KEEPALIVE,true).option(ChannelOption.CONNECT_TIMEOUT_MILLIS, conf.connectionTimeoutMs()).option(ChannelOption.ALLOCATOR, pooledAllocator);finalAtomicReference<TransportClient> clientRef =newAtomicReference<TransportClient>();finalAtomicReference<Channel> channelRef =newAtomicReference<Channel>();bootstrap.handler(newChannelInitializer<SocketChannel>(){@Overridepublicvoid initChannel(SocketChannel ch){TransportChannelHandler clientHandler = context.initializePipeline(ch);clientRef.set(clientHandler.getClient());channelRef.set(ch);}});// Connect to the remote serverlong preConnect =System.nanoTime();ChannelFuture cf = bootstrap.connect(address);if(!cf.awaitUninterruptibly(conf.connectionTimeoutMs())){thrownewIOException(String.format("Connecting to %s timed out (%s ms)", address, conf.connectionTimeoutMs()));}elseif(cf.cause()!=null){thrownewIOException(String.format("Failed to connect to %s", address), cf.cause());}TransportClient client = clientRef.get();Channel channel = channelRef.get();assert client !=null:"Channel future completed successfully with null client";// Execute any client bootstraps synchronously before marking the Client as successful.long preBootstrap =System.nanoTime();logger.debug("Connection to {} successful, running bootstraps...", address);try{for(TransportClientBootstrap clientBootstrap : 
clientBootstraps){clientBootstrap.doBootstrap(client, channel);}}catch(Exception e){// catch non-RuntimeExceptions too as bootstrap may be written in Scalalong bootstrapTimeMs =(System.nanoTime()- preBootstrap)/1000000;logger.error("Exception while bootstrapping client after "+ bootstrapTimeMs +" ms", e);client.close();throwThrowables.propagate(e);}long postBootstrap =System.nanoTime();logger.debug("Successfully created connection to {} after {} ms ({} ms spent in bootstraps)",address,(postBootstrap - preConnect)/1000000,(postBootstrap - preBootstrap)/1000000);return client;}
privatefinalChannel channel;privatefinalTransportResponseHandler handler;@NullableprivateString clientId;
publicvoid fetchChunk(long streamId,finalint chunkIndex,finalChunkReceivedCallback callback){finalString serverAddr =NettyUtils.getRemoteAddress(channel);finallong startTime =System.currentTimeMillis();logger.debug("Sending fetch chunk request {} to {}", chunkIndex, serverAddr);finalStreamChunkId streamChunkId =newStreamChunkId(streamId, chunkIndex);handler.addFetchRequest(streamChunkId, callback);channel.writeAndFlush(newChunkFetchRequest(streamChunkId)).addListener(newChannelFutureListener(){@Overridepublicvoid operationComplete(ChannelFuture future)throwsException{if(future.isSuccess()){long timeTaken =System.currentTimeMillis()- startTime;logger.trace("Sending request {} to {} took {} ms", streamChunkId, serverAddr,timeTaken);}else{String errorMsg =String.format("Failed to send request %s to %s: %s", streamChunkId,serverAddr, future.cause());logger.error(errorMsg, future.cause());handler.removeFetchRequest(streamChunkId);channel.close();try{callback.onFailure(chunkIndex,newIOException(errorMsg, future.cause()));}catch(Exception e){logger.error("Uncaught exception in RPC response callback handler!", e);}}}});}
publicvoid stream(finalString streamId,finalStreamCallback callback){finalString serverAddr =NettyUtils.getRemoteAddress(channel);finallong startTime =System.currentTimeMillis();logger.debug("Sending stream request for {} to {}", streamId, serverAddr);// Need to synchronize here so that the callback is added to the queue and the RPC is// written to the socket atomically, so that callbacks are called in the right order// when responses arrive.synchronized(this){handler.addStreamCallback(callback);channel.writeAndFlush(newStreamRequest(streamId)).addListener(newChannelFutureListener(){@Overridepublicvoid operationComplete(ChannelFuture future)throwsException{if(future.isSuccess()){long timeTaken =System.currentTimeMillis()- startTime;logger.trace("Sending request for {} to {} took {} ms", streamId, serverAddr,timeTaken);}else{String errorMsg =String.format("Failed to send request for %s to %s: %s", streamId,serverAddr, future.cause());logger.error(errorMsg, future.cause());channel.close();try{callback.onFailure(streamId,newIOException(errorMsg, future.cause()));}catch(Exception e){logger.error("Uncaught exception in RPC response callback handler!", e);}}}});}}
publicbyte[] sendRpcSync(byte[] message,long timeoutMs){finalSettableFuture<byte[]> result =SettableFuture.create();sendRpc(message,newRpcResponseCallback(){@Overridepublicvoid onSuccess(byte[] response){result.set(response);}@Overridepublicvoid onFailure(Throwable e){result.setException(e);}});try{return result.get(timeoutMs,TimeUnit.MILLISECONDS);}catch(ExecutionException e){throwThrowables.propagate(e.getCause());}catch(Exception e){throwThrowables.propagate(e);}}
spark源码阅读之network(2)的更多相关文章
- spark源码阅读之network(1)
spark将在1.6中替换掉akka,而采用netty实现整个集群的rpc的框架,netty的内存管理和NIO支持将有效的提高spark集群的网络传输能力,为了看懂这块代码,在网上找了两本书看< ...
- spark源码阅读之network(3)
TransportContext用来创建TransportServer和TransportClientFactory,同时使用TransportChannelHandler用来配置channel的pi ...
- Spark源码阅读之存储体系--存储体系概述与shuffle服务
一.概述 根据<深入理解Spark:核心思想与源码分析>一书,结合最新的spark源代码master分支进行源码阅读,对新版本的代码加上自己的一些理解,如有错误,希望指出. 1.块管理器B ...
- win7+idea+maven搭建spark源码阅读环境
1.参考. 利用IDEA工具编译Spark源码(1.60~2.20) https://blog.csdn.net/He11o_Liu/article/details/78739699 Maven编译打 ...
- spark源码阅读
根据spark2.2的编译顺序来确定源码阅读顺序,只阅读核心的基本部分. 1.common目录 ①Tags②Sketch③Networking④Shuffle Streaming Service⑤Un ...
- emacs+ensime+sbt打造spark源码阅读环境
欢迎转载,转载请注明出处,徽沪一郎. 概述 Scala越来越流行, Spark也愈来愈红火, 对spark的代码进行走读也成了一个很普遍的行为.不巧的是,当前java社区中很流行的ide如eclips ...
- spark源码阅读---Utils.getCallSite
1 作用 当该方法在spark内部代码中调用时,会返回当前调用spark代码的用户类的名称,以及其所调用的spark方法.所谓用户类,就是我们这些用户使用spark api的类. 2 内部实现 2.1 ...
- spark源码阅读--SparkContext启动过程
##SparkContext启动过程 基于spark 2.1.0 scala 2.11.8 spark源码的体系结构实在是很庞大,从使用spark-submit脚本提交任务,到向yarn申请容器,启 ...
- Spark源码阅读(1): Stage划分
Spark中job由action动作生成,那么stage是如何划分的呢?一般的解答是根据宽窄依赖划分.那么我们深入源码看看吧 一个action 例如count,会在多次runJob中传递,最终会到一个 ...
随机推荐
- 读论文系列:Object Detection ICCV2015 Fast RCNN
Fast RCNN是对RCNN的性能优化版本,在VGG16上,Fast R-CNN训练速度是RCNN的9倍, 测试速度是RCNN213倍:训练速度是SPP-net的3倍,测试速度是SPP-net的3倍 ...
- CentOS虚拟机中安装VMWare Tools
1.单击VMWare的[虚拟机]菜单,选择[安装VMWare Tools]命令 2.接着CentOS系统会自动挂载VMWare Tools,并自动打开,如果没有打开可以自己去图形界面打开VMWare ...
- ubuntu中配置samba方法
1.在保证能上网的前提下,安装samba软件包,中途出现是否执行,一直点击回车键 #sudo apt-get install samba #sudo apt-get install smbclient ...
- 织梦 dede 笔记
将项目转移到另一服务器 方法: https://www.genban.org/news/dedecms-13096.html 在实际中,我走的是第二种方法 方法一: 1 后台>系统>备份 ...
- NIOS EDS最容易出错的地方
越来越多的人使用NIOS II.毕竟,NIOS II是世界上功能最多的软核处理器. NIOS EDS通常是在装QUARTUS的时候一起装上的.通常我们在用的时候都是以模板建立工程的. 在很多情况下,我 ...
- 【转】使用JMeter测试你的EJB
对EJB进行一些性能基准测试是非常有必要和有帮助的,测试的方法和工具有很多,不过我最近发现,Apache JMeter是进行基准测试的一个优秀工具.可惜的是,JMeter没有提供一个可测试任意EJB的 ...
- HDU 3018 Ant Trip(欧拉回路,要几笔)
Ant Trip Time Limit: 2000/1000 MS (Java/Others) Memory Limit: 32768/32768 K (Java/Others)Total Su ...
- django-settings.py配置
django settings 详细资料 ############ 开始项目 python3.5 pip -m install django==1.11.7 指定版本安装 pip3 install d ...
- MFC学习(七) 单文档程序
1 MFC单文档程序的主要类 (1)文档类(Document) 即应用程序处理的数据对象,文档一般从 MFC 中 CDocument 中派生.CDocument 类用于相应数据文件的读取以及存储 Cv ...
- DNS 解析流程
DNS( Domain Name System)是“域名系统”的英文缩写,是一种组织成域层次结构的计算机和网络服务命名系统,它用于TCP/IP网络,它所提供的服务是用来将主机名和域名转换为IP地址的工 ...