spark源码阅读之network(2)
在上一节的解读中发现 Spark 的源码中大量使用了 Netty 的 buffer 部分的 API,本节将解读 Netty 更核心的一些 API 的用法,比如 channel:
privatestaticclassClientPool{TransportClient[] clients;Object[] locks;publicClientPool(int size){clients =newTransportClient[size];locks =newObject[size];for(int i =0; i < size; i++){locks[i]=newObject();}}
publicTransportClient createClient(String remoteHost,int remotePort)throwsIOException{// Get connection from the connection pool first.// If it is not found or not active, create a new one.finalInetSocketAddress address =newInetSocketAddress(remoteHost, remotePort);// Create the ClientPool if we don't have it yet.ClientPool clientPool = connectionPool.get(address);if(clientPool ==null){connectionPool.putIfAbsent(address,newClientPool(numConnectionsPerPeer));clientPool = connectionPool.get(address);}int clientIndex = rand.nextInt(numConnectionsPerPeer);TransportClient cachedClient = clientPool.clients[clientIndex];if(cachedClient !=null&& cachedClient.isActive()){logger.trace("Returning cached connection to {}: {}", address, cachedClient);return cachedClient;}// If we reach here, we don't have an existing connection open. Let's create a new one.// Multiple threads might race here to create new connections. Keep only one of them active.synchronized(clientPool.locks[clientIndex]){cachedClient = clientPool.clients[clientIndex];if(cachedClient !=null){if(cachedClient.isActive()){logger.trace("Returning cached connection to {}: {}", address, cachedClient);return cachedClient;}else{logger.info("Found inactive connection to {}, creating a new one.", address);}}clientPool.clients[clientIndex]= createClient(address);return clientPool.clients[clientIndex];}}
*/publicTransportClient createUnmanagedClient(String remoteHost,int remotePort)throwsIOException{finalInetSocketAddress address =newInetSocketAddress(remoteHost, remotePort);return createClient(address);}/** Create a completely new {@link TransportClient} to the remote address. */privateTransportClient createClient(InetSocketAddress address)throwsIOException{logger.debug("Creating new connection to "+ address);Bootstrap bootstrap =newBootstrap();bootstrap.group(workerGroup).channel(socketChannelClass)// Disable Nagle's Algorithm since we don't want packets to wait.option(ChannelOption.TCP_NODELAY,true).option(ChannelOption.SO_KEEPALIVE,true).option(ChannelOption.CONNECT_TIMEOUT_MILLIS, conf.connectionTimeoutMs()).option(ChannelOption.ALLOCATOR, pooledAllocator);finalAtomicReference<TransportClient> clientRef =newAtomicReference<TransportClient>();finalAtomicReference<Channel> channelRef =newAtomicReference<Channel>();bootstrap.handler(newChannelInitializer<SocketChannel>(){@Overridepublicvoid initChannel(SocketChannel ch){TransportChannelHandler clientHandler = context.initializePipeline(ch);clientRef.set(clientHandler.getClient());channelRef.set(ch);}});// Connect to the remote serverlong preConnect =System.nanoTime();ChannelFuture cf = bootstrap.connect(address);if(!cf.awaitUninterruptibly(conf.connectionTimeoutMs())){thrownewIOException(String.format("Connecting to %s timed out (%s ms)", address, conf.connectionTimeoutMs()));}elseif(cf.cause()!=null){thrownewIOException(String.format("Failed to connect to %s", address), cf.cause());}TransportClient client = clientRef.get();Channel channel = channelRef.get();assert client !=null:"Channel future completed successfully with null client";// Execute any client bootstraps synchronously before marking the Client as successful.long preBootstrap =System.nanoTime();logger.debug("Connection to {} successful, running bootstraps...", address);try{for(TransportClientBootstrap clientBootstrap : 
clientBootstraps){clientBootstrap.doBootstrap(client, channel);}}catch(Exception e){// catch non-RuntimeExceptions too as bootstrap may be written in Scalalong bootstrapTimeMs =(System.nanoTime()- preBootstrap)/1000000;logger.error("Exception while bootstrapping client after "+ bootstrapTimeMs +" ms", e);client.close();throwThrowables.propagate(e);}long postBootstrap =System.nanoTime();logger.debug("Successfully created connection to {} after {} ms ({} ms spent in bootstraps)",address,(postBootstrap - preConnect)/1000000,(postBootstrap - preBootstrap)/1000000);return client;}
privatefinalChannel channel;privatefinalTransportResponseHandler handler;@NullableprivateString clientId;
publicvoid fetchChunk(long streamId,finalint chunkIndex,finalChunkReceivedCallback callback){finalString serverAddr =NettyUtils.getRemoteAddress(channel);finallong startTime =System.currentTimeMillis();logger.debug("Sending fetch chunk request {} to {}", chunkIndex, serverAddr);finalStreamChunkId streamChunkId =newStreamChunkId(streamId, chunkIndex);handler.addFetchRequest(streamChunkId, callback);channel.writeAndFlush(newChunkFetchRequest(streamChunkId)).addListener(newChannelFutureListener(){@Overridepublicvoid operationComplete(ChannelFuture future)throwsException{if(future.isSuccess()){long timeTaken =System.currentTimeMillis()- startTime;logger.trace("Sending request {} to {} took {} ms", streamChunkId, serverAddr,timeTaken);}else{String errorMsg =String.format("Failed to send request %s to %s: %s", streamChunkId,serverAddr, future.cause());logger.error(errorMsg, future.cause());handler.removeFetchRequest(streamChunkId);channel.close();try{callback.onFailure(chunkIndex,newIOException(errorMsg, future.cause()));}catch(Exception e){logger.error("Uncaught exception in RPC response callback handler!", e);}}}});}
publicvoid stream(finalString streamId,finalStreamCallback callback){finalString serverAddr =NettyUtils.getRemoteAddress(channel);finallong startTime =System.currentTimeMillis();logger.debug("Sending stream request for {} to {}", streamId, serverAddr);// Need to synchronize here so that the callback is added to the queue and the RPC is// written to the socket atomically, so that callbacks are called in the right order// when responses arrive.synchronized(this){handler.addStreamCallback(callback);channel.writeAndFlush(newStreamRequest(streamId)).addListener(newChannelFutureListener(){@Overridepublicvoid operationComplete(ChannelFuture future)throwsException{if(future.isSuccess()){long timeTaken =System.currentTimeMillis()- startTime;logger.trace("Sending request for {} to {} took {} ms", streamId, serverAddr,timeTaken);}else{String errorMsg =String.format("Failed to send request for %s to %s: %s", streamId,serverAddr, future.cause());logger.error(errorMsg, future.cause());channel.close();try{callback.onFailure(streamId,newIOException(errorMsg, future.cause()));}catch(Exception e){logger.error("Uncaught exception in RPC response callback handler!", e);}}}});}}
publicbyte[] sendRpcSync(byte[] message,long timeoutMs){finalSettableFuture<byte[]> result =SettableFuture.create();sendRpc(message,newRpcResponseCallback(){@Overridepublicvoid onSuccess(byte[] response){result.set(response);}@Overridepublicvoid onFailure(Throwable e){result.setException(e);}});try{return result.get(timeoutMs,TimeUnit.MILLISECONDS);}catch(ExecutionException e){throwThrowables.propagate(e.getCause());}catch(Exception e){throwThrowables.propagate(e);}}
spark源码阅读之network(2)的更多相关文章
- spark源码阅读之network(1)
spark将在1.6中替换掉akka,而采用netty实现整个集群的rpc的框架,netty的内存管理和NIO支持将有效的提高spark集群的网络传输能力,为了看懂这块代码,在网上找了两本书看< ...
- spark源码阅读之network(3)
TransportContext用来创建TransportServer和TransportclientFactory,同时使用TransportChannelHandler用来配置channel的pi ...
- Spark源码阅读之存储体系--存储体系概述与shuffle服务
一.概述 根据<深入理解Spark:核心思想与源码分析>一书,结合最新的spark源代码master分支进行源码阅读,对新版本的代码加上自己的一些理解,如有错误,希望指出. 1.块管理器B ...
- win7+idea+maven搭建spark源码阅读环境
1.参考. 利用IDEA工具编译Spark源码(1.60~2.20) https://blog.csdn.net/He11o_Liu/article/details/78739699 Maven编译打 ...
- spark源码阅读
根据spark2.2的编译顺序来确定源码阅读顺序,只阅读核心的基本部分. 1.common目录 ①Tags②Sketch③Networking④Shuffle Streaming Service⑤Un ...
- emacs+ensime+sbt打造spark源码阅读环境
欢迎转载,转载请注明出处,徽沪一郎. 概述 Scala越来越流行, Spark也愈来愈红火, 对spark的代码进行走读也成了一个很普遍的行为.不巧的是,当前java社区中很流行的ide如eclips ...
- spark源码阅读---Utils.getCallSite
1 作用 当该方法在spark内部代码中调用时,会返回当前调用spark代码的用户类的名称,以及其所调用的spark方法.所谓用户类,就是我们这些用户使用spark api的类. 2 内部实现 2.1 ...
- spark源码阅读--SparkContext启动过程
##SparkContext启动过程 基于spark 2.1.0 scala 2.11.8 spark源码的体系结构实在是很庞大,从使用spark-submit脚本提交任务,到向yarn申请容器,启 ...
- Spark源码阅读(1): Stage划分
Spark中job由action动作生成,那么stage是如何划分的呢?一般的解答是根据宽窄依赖划分.那么我们深入源码看看吧 一个action 例如count,会在多次runJob中传递,最终会到一个 ...
随机推荐
- assembly 需要 unload 和 update 的时候怎么办?
我正在开发公司的业务组件平台,组件池的灵活性要求很高,业务组件都是可以立即更新和及时装配的;目前完成这些功能,有待测试.用appDomain.unload 拆卸assembly 可以,只是用起来比较麻 ...
- 使用DeflateStream压缩与解压
具体可以了解下:http://msdn.microsoft.com/zh-cn/library/system.io.compression.deflatestream(v=vs.110).aspx / ...
- ajax同步异步
test.html <a href="javascript:void(0)" onmouseover="testAsync()"> asy.js f ...
- Linux I/O 映射(ioremap)和writel/readl
在裸奔代码中,如果要控制gpio,直接控制gpio寄存器地址即可: 在linux系统中,所有操作的地址都是虚拟地址,都是由linux内核去管理,所以需要将物理地址转换成内核可识别的虚拟地址. 1.io ...
- catkin 工作空间 - Package 组成
package 是 ROS 软件的基本组织形式,ROS 就是由一个个的 package 组成的 package 是 catkin 的编译基本单元 一个 package 可以包含多个可执行文件(节点) ...
- springboot成神之——spring jdbc的使用
本文介绍spring jdbc的使用 目录结构 pom配置 properties配置 model层User类 Dao层QueryForListDao config层AppConfiguration 程 ...
- 微信小程序之本地缓存
目前,微信给每个小程序提供了10M的本地缓存空间(哎哟妈呀好大) 有了本地缓存,你的小程序可以做到: 离线应用(已测试在无网络的情况下,可以操作缓存数据) 流畅的用户体验 减少网络请求,节省服务器资源 ...
- oracle误删数据的解决方法
之前不小心误删了一条数据,索性我还记得id,通过select * from 表名 as of timestamp to_timestamp('2017-6-23 9:10:00','yyyy-mm-d ...
- [android] setOnTouchEvent 设置返回值为true 和 false的区别
今天在做自定义的可选文本的 TextView 类时,用到了 View 类的 setOnTouchListener(OnTouchListener l)事件监听,在构造 OnTouchListener ...
- Android输入法部分遮挡UI的问题(与EditText框相切)
首先,我们来看看遇到问题的图片 遇到的问题是,当点击输入框之后,输入法会切到红线的位置,理想状态应该是在绿线位置 那么,是什么原因造成的呢? 问题其实很简单,是因为drawableleft图片比该输入 ...