SparkContext is the entry point to Spark: it is used to connect to the cluster, create RDDs, broadcast variables, and so on.
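Before diving into the source, here is how a minimal driver program constructs one (a sketch; the master URL and app name are placeholder values for illustration):

import org.apache.spark.{SparkConf, SparkContext}

// Sketch: build a SparkConf, then construct the SparkContext from it.
// "local[2]" and "SparkContextDemo" are placeholders.
val conf = new SparkConf().setMaster("local[2]").setAppName("SparkContextDemo")
val sc = new SparkContext(conf)
// ... create RDDs, broadcast variables, run jobs ...
sc.stop()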

class SparkContext(config: SparkConf) extends Logging with ExecutorAllocationClient {

  private val creationSite: CallSite = Utils.getCallSite()

  // If a second SparkContext is declared, log a warning instead of throwing an
  // exception, so the driver does not exit; this guards against two
  // SparkContexts running at the same time.
  private val allowMultipleContexts: Boolean =
    config.getBoolean("spark.driver.allowMultipleContexts", false)

  SparkContext.markPartiallyConstructed(this, allowMultipleContexts)

  private[spark] var preferredNodeLocationData: Map[String, Set[SplitInfo]] = Map()

  val startTime = System.currentTimeMillis()

  // When a job is submitted via spark-submit, the system environment is loaded
  // into a default SparkConf.
  def this() = this(new SparkConf())

  def this(master: String, appName: String, conf: SparkConf) =
    this(SparkContext.updatedConf(conf, master, appName))

  // preferredNodeLocationData was used at startup to look up nodes and launch
  // the corresponding containers.
  def this(
      master: String,
      appName: String,
      sparkHome: String = null,
      jars: Seq[String] = Nil,
      environment: Map[String, String] = Map(),
      preferredNodeLocationData: Map[String, Set[SplitInfo]] = Map()) =
  {
    this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment))
    if (preferredNodeLocationData.nonEmpty) {
      logWarning("Passing in preferred locations has no effect at all, see SPARK-8949")
    }
    this.preferredNodeLocationData = preferredNodeLocationData
  }

  // Convenience constructors.
  private[spark] def this(master: String, appName: String) =
    this(master, appName, null, Nil, Map(), Map())

  private[spark] def this(master: String, appName: String, sparkHome: String) =
    this(master, appName, sparkHome, Nil, Map(), Map())

  private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) =
    this(master, appName, sparkHome, jars, Map(), Map())

  private[spark] def conf: SparkConf = _conf

  // Return a clone of the conf, so it cannot be modified at runtime.
  def getConf: SparkConf = conf.clone()

  def jars: Seq[String] = _jars
  def files: Seq[String] = _files
  def master: String = _conf.get("spark.master")
  def appName: String = _conf.get("spark.app.name")

  private[spark] def isEventLogEnabled: Boolean = _conf.getBoolean("spark.eventLog.enabled", false)
  private[spark] def eventLogDir: Option[URI] = _eventLogDir
  private[spark] def eventLogCodec: Option[String] = _eventLogCodec

  // Create the scheduler.
  val (sched, ts) = SparkContext.createTaskScheduler(this, master)
  _schedulerBackend = sched
  _taskScheduler = ts
  _dagScheduler = new DAGScheduler(this)
  // Tell the heartbeat receiver that the TaskScheduler has been created.
  _heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet)

  // Start the TaskScheduler.
  _taskScheduler.start()

  _applicationId = _taskScheduler.applicationId()
  _applicationAttemptId = taskScheduler.applicationAttemptId()
  _conf.set("spark.app.id", _applicationId)
  _env.blockManager.initialize(_applicationId)

  // Create a new RDD of Longs whose elements advance from start towards end by `step`.
  def range(
      start: Long,
      end: Long,
      step: Long = 1,
      numSlices: Int = defaultParallelism): RDD[Long] = withScope {
    assertNotStopped()
    // when step is 0, range will run infinitely
    require(step != 0, "step cannot be 0")
    val numElements: BigInt = {
      val safeStart = BigInt(start)
      val safeEnd = BigInt(end)
      // `^` is boolean XOR: true when the direction of the range disagrees
      // with the sign of step.
      if ((safeEnd - safeStart) % step == 0 || safeEnd > safeStart ^ step > 0) {
        (safeEnd - safeStart) / step
      } else {
        // Otherwise the division truncates, so one more element fits.
        (safeEnd - safeStart) / step + 1
      }
    }
    parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex((i, _) => {
      val partitionStart = (i * numElements) / numSlices * step + start
      val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start
      def getSafeMargin(bi: BigInt): Long =
        if (bi.isValidLong) {
          bi.toLong
        } else if (bi > 0) {
          Long.MaxValue
        } else {
          Long.MinValue
        }
      val safePartitionStart = getSafeMargin(partitionStart)
      val safePartitionEnd = getSafeMargin(partitionEnd)

      new Iterator[Long] {
        private[this] var number: Long = safePartitionStart
        private[this] var overflow: Boolean = false

        override def hasNext =
          if (!overflow) {
            if (step > 0) {
              number < safePartitionEnd
            } else {
              number > safePartitionEnd
            }
          } else false

        override def next() = {
          val ret = number
          number += step
          // Detect Long overflow: adding a positive step must not decrease
          // the value, and adding a negative step must not increase it.
          if (number < ret ^ step < 0) {
            overflow = true
          }
          ret
        }
      }
    })
  }
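A quick usage sketch (assuming sc is a live SparkContext):

// Produces 0, 2, 4, 6, 8 — the end bound is exclusive.
val evens = sc.range(0L, 10L, step = 2, numSlices = 2)
evens.collect()  // Array(0, 2, 4, 6, 8)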
  // Create an RDD from a Scala collection (an alias for parallelize).
  def makeRDD[T: ClassTag](
      seq: Seq[T],
      numSlices: Int = defaultParallelism): RDD[T] = withScope {
    parallelize(seq, numSlices)
  }
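Since makeRDD simply delegates to parallelize, usage is the same (a small sketch):

// Distribute a local collection across 2 partitions.
val rdd = sc.makeRDD(Seq(1, 2, 3), numSlices = 2)
rdd.collect()  // Array(1, 2, 3)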
  // Read a text file from the local file system or HDFS and return an RDD of Strings.
  def textFile(
      path: String,
      minPartitions: Int = defaultMinPartitions): RDD[String] = withScope {
    assertNotStopped()
    hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text],
      minPartitions).map(pair => pair._2.toString)
  }
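For example (a sketch; the path is a placeholder and may point at the local file system or HDFS):

// One String element per line of the input file.
val lines = sc.textFile("hdfs:///tmp/data.txt")
lines.map(_.length).collect()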
  // Load a binary file as a flat sequence of fixed-length records.
  @Experimental
  def binaryRecords(
      path: String,
      recordLength: Int,
      conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope {
    assertNotStopped()
    conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength)
    val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path,
      classOf[FixedLengthBinaryInputFormat],
      classOf[LongWritable],
      classOf[BytesWritable],
      conf = conf)
    val data = br.map { case (k, v) =>
      val bytes = v.getBytes
      assert(bytes.length == recordLength, "Byte array does not have correct length")
      bytes
    }
    data
  }
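A usage sketch, assuming the file holds fixed 8-byte records (the path and record length are placeholders):

// Each element is one Array[Byte] of exactly recordLength bytes.
val records = sc.binaryRecords("hdfs:///tmp/records.bin", recordLength = 8)
records.count()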
  // Get an RDD for a Hadoop SequenceFile with the given key and value types.
  def sequenceFile[K, V](path: String,
      keyClass: Class[K],
      valueClass: Class[V],
      minPartitions: Int
      ): RDD[(K, V)] = withScope {
    assertNotStopped()
    val inputFormatClass = classOf[SequenceFileInputFormat[K, V]]
    hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions)
  }
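For example (a sketch; the path is a placeholder, and the key/value classes must match what was written into the SequenceFile):

import org.apache.hadoop.io.{IntWritable, Text}

val pairs = sc.sequenceFile("hdfs:///tmp/seq", classOf[IntWritable], classOf[Text], 4)
pairs.count()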
  // Broadcast a read-only variable to every node in the cluster.
  def broadcast[T: ClassTag](value: T): Broadcast[T] = {
    assertNotStopped()
    if (classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass)) {
      logWarning("Can not directly broadcast RDDs; instead, call collect() and "
        + "broadcast the result (see SPARK-5063)")
    }
    val bc = env.broadcastManager.newBroadcast[T](value, isLocal)
    val callSite = getCallSite
    logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm)
    cleaner.foreach(_.registerBroadcastForCleanup(bc))
    bc
  }
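A typical usage sketch: ship a small lookup table to every executor once, instead of once per task:

// The broadcast value is read on executors via .value.
val lookup = sc.broadcast(Map("a" -> 1, "b" -> 2))
sc.parallelize(Seq("a", "b", "c")).map(k => lookup.value.getOrElse(k, 0)).collect()
// Array(1, 2, 0)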
