Spark Streaming 4: DStreamGraph and JobScheduler

This post reads through two core pieces of Spark Streaming's scheduling layer: DStreamGraph, which keeps track of all the input and output DStreams of an application and generates the jobs for each batch, and JobScheduler, which submits those jobs to a thread pool and tracks their completion.

final private[streaming] class DStreamGraph extends Serializable with Logging {

  // All registered input and output DStreams of the application
  private val inputStreams = new ArrayBuffer[InputDStream[_]]()
  private val outputStreams = new ArrayBuffer[DStream[_]]()

  var rememberDuration: Duration = null
  var checkpointInProgress = false

  var zeroTime: Time = null
  var startTime: Time = null
  var batchDuration: Duration = null

  // Called from the InputDStream constructor, so every input stream
  // registers itself with the graph on creation
  def addInputStream(inputStream: InputDStream[_]) {
    this.synchronized {
      inputStream.setGraph(this)
      inputStreams += inputStream
    }
  }

  // Called when an output operation (e.g. print(), foreachRDD()) is
  // applied to a DStream and the resulting stream is registered
  def addOutputStream(outputStream: DStream[_]) {
    this.synchronized {
      outputStream.setGraph(this)
      outputStreams += outputStream
    }
  }

  def getInputStreams() = this.synchronized { inputStreams.toArray }

  def getOutputStreams() = this.synchronized { outputStreams.toArray }

  // Only the receiver-based input streams; these are the ones that need
  // receivers launched on executors by the ReceiverTracker
  def getReceiverInputStreams() = this.synchronized {
    inputStreams.filter(_.isInstanceOf[ReceiverInputDStream[_]])
      .map(_.asInstanceOf[ReceiverInputDStream[_]])
      .toArray
  }

  // For a given batch time, ask every output stream to generate its job.
  // Each output stream contributes at most one job per batch (hence flatMap
  // over the Option returned by generateJob).
  def generateJobs(time: Time): Seq[Job] = {
    logDebug("Generating jobs for time " + time)
    val jobs = this.synchronized {
      outputStreams.flatMap(outputStream => outputStream.generateJob(time))
    }
    logDebug("Generated " + jobs.length + " jobs for time " + time)
    jobs
  }

  // Custom serialization: the checkpointInProgress flag tells the DStreams
  // being written that this serialization is part of a checkpoint
  @throws(classOf[IOException])
  private def writeObject(oos: ObjectOutputStream): Unit = Utils.tryOrIOException {
    logDebug("DStreamGraph.writeObject used")
    this.synchronized {
      checkpointInProgress = true
      logDebug("Enabled checkpoint mode")
      oos.defaultWriteObject()
      checkpointInProgress = false
      logDebug("Disabled checkpoint mode")
    }
  }

  @throws(classOf[IOException])
  private def readObject(ois: ObjectInputStream): Unit = Utils.tryOrIOException {
    logDebug("DStreamGraph.readObject used")
    this.synchronized {
      checkpointInProgress = true
      ois.defaultReadObject()
      checkpointInProgress = false
    }
  }
}
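
Neither addInputStream nor addOutputStream is called by user code directly; they are triggered as a side effect of building the stream pipeline. A minimal sketch of how that happens (hypothetical app name and port, assuming a local Spark Streaming 1.x setup):

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

object GraphDemo { // hypothetical example app
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[2]").setAppName("GraphDemo")
    val ssc = new StreamingContext(conf, Seconds(1))

    // The InputDStream constructor calls graph.addInputStream(this),
    // so this line registers a ReceiverInputDStream in inputStreams
    val lines = ssc.socketTextStream("localhost", 9999)

    // print() is an output operation: it wraps the stream in a
    // ForEachDStream and registers it via graph.addOutputStream,
    // which is what makes generateJobs() emit a job for this batch
    lines.flatMap(_.split(" ")).print()

    ssc.start()
    ssc.awaitTermination()
  }
}

With exactly one output operation registered, generateJobs(time) returns one job per batch interval.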
/**
 * This class schedules jobs to be run on Spark. It uses the JobGenerator to generate
 * the jobs and runs them using a thread pool.
 */
private[streaming]
class JobScheduler(val ssc: StreamingContext) extends Logging {

  // JobSets indexed by batch time; a JobSet holds all jobs of one batch
  private val jobSets = new ConcurrentHashMap[Time, JobSet]

  // By default a single thread runs jobs, so batches are processed strictly
  // one after another; spark.streaming.concurrentJobs raises the pool size
  private val numConcurrentJobs = ssc.conf.getInt("spark.streaming.concurrentJobs", 1)
  private val jobExecutor = Executors.newFixedThreadPool(numConcurrentJobs)
  private val jobGenerator = new JobGenerator(this)
  val clock = jobGenerator.clock
  val listenerBus = new StreamingListenerBus()

  // These two are created only when the scheduler starts.
  // eventActor not being null means the scheduler has been started and not stopped.
  var receiverTracker: ReceiverTracker = null
  private var eventActor: ActorRef = null

  def start(): Unit = synchronized {
    if (eventActor != null) return // scheduler has already been started

    logDebug("Starting JobScheduler")
    eventActor = ssc.env.actorSystem.actorOf(Props(new Actor {
      def receive = {
        case event: JobSchedulerEvent => processEvent(event)
      }
    }), "JobScheduler")

    listenerBus.start()
    receiverTracker = new ReceiverTracker(ssc)
    receiverTracker.start()
    jobGenerator.start()
    logInfo("Started JobScheduler")
  }

  // Called by the JobGenerator once per batch with that batch's JobSet
  def submitJobSet(jobSet: JobSet) {
    if (jobSet.jobs.isEmpty) {
      logInfo("No jobs added for time " + jobSet.time)
    } else {
      jobSets.put(jobSet.time, jobSet)
      jobSet.jobs.foreach(job => jobExecutor.execute(new JobHandler(job)))
      logInfo("Added jobs for time " + jobSet.time)
    }
  }

  // Runs one job on the jobExecutor pool and reports start/completion
  // back to the scheduler's event loop
  private class JobHandler(job: Job) extends Runnable {
    def run() {
      eventActor ! JobStarted(job)
      job.run()
      eventActor ! JobCompleted(job)
    }
  }

  private def handleJobCompletion(job: Job) {
    job.result match {
      case Success(_) =>
        val jobSet = jobSets.get(job.time)
        jobSet.handleJobCompletion(job)
        logInfo("Finished job " + job.id + " from job set of time " + jobSet.time)
        if (jobSet.hasCompleted) {
          jobSets.remove(jobSet.time)
          jobGenerator.onBatchCompletion(jobSet.time)
          logInfo("Total delay: %.3f s for time %s (execution: %.3f s)".format(
            jobSet.totalDelay / 1000.0, jobSet.time.toString,
            jobSet.processingDelay / 1000.0
          ))
          listenerBus.post(StreamingListenerBatchCompleted(jobSet.toBatchInfo))
        }
      case Failure(e) =>
        reportError("Error running job " + job, e)
    }
  }
}
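
One practical consequence of the constructor above: since jobExecutor defaults to a single thread, jobs from different output operations, and from different batches, run strictly one at a time. A sketch of overlapping them instead (the property name comes straight from the constructor above; the concrete values are illustrative):

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

// spark.streaming.concurrentJobs sizes the fixed thread pool that
// JobHandler instances run on. Raising it lets a slow batch overlap
// with the next one, at the cost of weaker batch-ordering guarantees.
val conf = new SparkConf()
  .setMaster("local[4]")             // illustrative values
  .setAppName("ConcurrentJobsDemo")  // hypothetical app name
  .set("spark.streaming.concurrentJobs", "2")

val ssc = new StreamingContext(conf, Seconds(1))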