./bin/spark-shell --master yarn
2019-07-01 12:20:13 WARN  NativeCodeLoader:62 - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Setting default log level to "WARN".
To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).
2019-07-01 12:20:29 WARN Client:66 - Neither spark.yarn.jars nor spark.yarn.archive is set, falling back to uploading libraries under SPARK_HOME.
2019-07-01 12:20:55 ERROR SparkContext:91 - Error initializing SparkContext.
org.apache.spark.SparkException: Yarn application has already ended! It might have been killed or unable to launch application master.
at org.apache.spark.scheduler.cluster.YarnClientSchedulerBackend.waitForApplication(YarnClientSchedulerBackend.scala:89)
at org.apache.spark.scheduler.cluster.YarnClientSchedulerBackend.start(YarnClientSchedulerBackend.scala:63)
at org.apache.spark.scheduler.TaskSchedulerImpl.start(TaskSchedulerImpl.scala:164)
at org.apache.spark.SparkContext.<init>(SparkContext.scala:500)
at org.apache.spark.SparkContext$.getOrCreate(SparkContext.scala:2493)
at org.apache.spark.sql.SparkSession$Builder$$anonfun$7.apply(SparkSession.scala:934)
at org.apache.spark.sql.SparkSession$Builder$$anonfun$7.apply(SparkSession.scala:925)
at scala.Option.getOrElse(Option.scala:121)
at org.apache.spark.sql.SparkSession$Builder.getOrCreate(SparkSession.scala:925)
at org.apache.spark.repl.Main$.createSparkSession(Main.scala:103)
at $line3.$read$$iw$$iw.<init>(<console>:15)
at $line3.$read$$iw.<init>(<console>:43)
at $line3.$read.<init>(<console>:45)
at $line3.$read$.<init>(<console>:49)
at $line3.$read$.<clinit>(<console>)
at $line3.$eval$.$print$lzycompute(<console>:7)
at $line3.$eval$.$print(<console>:6)
at $line3.$eval.$print(<console>)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at scala.tools.nsc.interpreter.IMain$ReadEvalPrint.call(IMain.scala:786)
at scala.tools.nsc.interpreter.IMain$Request.loadAndRun(IMain.scala:1047)
at scala.tools.nsc.interpreter.IMain$WrappedRequest$$anonfun$loadAndRunReq$1.apply(IMain.scala:638)
at scala.tools.nsc.interpreter.IMain$WrappedRequest$$anonfun$loadAndRunReq$1.apply(IMain.scala:637)
at scala.reflect.internal.util.ScalaClassLoader$class.asContext(ScalaClassLoader.scala:31)
at scala.reflect.internal.util.AbstractFileClassLoader.asContext(AbstractFileClassLoader.scala:19)
at scala.tools.nsc.interpreter.IMain$WrappedRequest.loadAndRunReq(IMain.scala:637)
at scala.tools.nsc.interpreter.IMain.interpret(IMain.scala:569)
at scala.tools.nsc.interpreter.IMain.interpret(IMain.scala:565)
at scala.tools.nsc.interpreter.ILoop.interpretStartingWith(ILoop.scala:807)
at scala.tools.nsc.interpreter.ILoop.command(ILoop.scala:681)
at scala.tools.nsc.interpreter.ILoop.processLine(ILoop.scala:395)
at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1$$anonfun$apply$mcV$sp$1$$anonfun$apply$mcV$sp$2.apply(SparkILoop.scala:79)
at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1$$anonfun$apply$mcV$sp$1$$anonfun$apply$mcV$sp$2.apply(SparkILoop.scala:79)
at scala.collection.immutable.List.foreach(List.scala:381)
at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1$$anonfun$apply$mcV$sp$1.apply$mcV$sp(SparkILoop.scala:79)
at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1$$anonfun$apply$mcV$sp$1.apply(SparkILoop.scala:79)
at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1$$anonfun$apply$mcV$sp$1.apply(SparkILoop.scala:79)
at scala.tools.nsc.interpreter.ILoop.savingReplayStack(ILoop.scala:91)
at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1.apply$mcV$sp(SparkILoop.scala:78)
at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1.apply(SparkILoop.scala:78)
at org.apache.spark.repl.SparkILoop$$anonfun$initializeSpark$1.apply(SparkILoop.scala:78)
at scala.tools.nsc.interpreter.IMain.beQuietDuring(IMain.scala:214)
at org.apache.spark.repl.SparkILoop.initializeSpark(SparkILoop.scala:77)
at org.apache.spark.repl.SparkILoop.loadFiles(SparkILoop.scala:110)
at scala.tools.nsc.interpreter.ILoop$$anonfun$process$1.apply$mcZ$sp(ILoop.scala:920)
at scala.tools.nsc.interpreter.ILoop$$anonfun$process$1.apply(ILoop.scala:909)
at scala.tools.nsc.interpreter.ILoop$$anonfun$process$1.apply(ILoop.scala:909)
at scala.reflect.internal.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:97)
at scala.tools.nsc.interpreter.ILoop.process(ILoop.scala:909)
at org.apache.spark.repl.Main$.doMain(Main.scala:76)
at org.apache.spark.repl.Main$.main(Main.scala:56)
at org.apache.spark.repl.Main.main(Main.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:894)
at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:198)
at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:228)
at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:137)
at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
2019-07-01 12:20:55 WARN YarnSchedulerBackend$YarnSchedulerEndpoint:66 - Attempted to request executors before the AM has registered!
2019-07-01 12:20:55 WARN MetricsSystem:66 - Stopping a MetricsSystem that is not running
org.apache.spark.SparkException: Yarn application has already ended! It might have been killed or unable to launch application master.
at org.apache.spark.scheduler.cluster.YarnClientSchedulerBackend.waitForApplication(YarnClientSchedulerBackend.scala:89)
at org.apache.spark.scheduler.cluster.YarnClientSchedulerBackend.start(YarnClientSchedulerBackend.scala:63)
at org.apache.spark.scheduler.TaskSchedulerImpl.start(TaskSchedulerImpl.scala:164)
at org.apache.spark.SparkContext.<init>(SparkContext.scala:500)
at org.apache.spark.SparkContext$.getOrCreate(SparkContext.scala:2493)
at org.apache.spark.sql.SparkSession$Builder$$anonfun$7.apply(SparkSession.scala:934)
at org.apache.spark.sql.SparkSession$Builder$$anonfun$7.apply(SparkSession.scala:925)
at scala.Option.getOrElse(Option.scala:121)

  The root cause is tied to the JDK version check in Spark 2.x. Downgrading the JDK does not help, because Spark 2.x requires JDK 1.8 or later, so first switch the cluster back to a 1.8+ JDK. On JDK 8, however, YARN's NodeManager memory checks can kill the ApplicationMaster container (JDK 8 reserves noticeably more virtual memory than older JDKs), which is exactly what produces the "Yarn application has already ended" error above. Adding the following settings to YARN's configuration file resolves it.
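  Before editing the YARN configuration, it is worth confirming which JDK every node actually uses. A minimal sketch (the JDK path below is only an example; substitute your actual install location):

java -version
# Must report 1.8 or later. If it does not, point Hadoop and Spark at a JDK 1.8 install:
# add the line below to $HADOOP_HOME/etc/hadoop/hadoop-env.sh and $SPARK_HOME/conf/spark-env.sh,
# replacing the example path with your real JDK directory.
export JAVA_HOME=/usr/lib/jvm/java-1.8.0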

vi yarn-site.xml

# Add the following properties inside the <configuration> element
# (on a standard install, yarn-site.xml lives under $HADOOP_HOME/etc/hadoop/)

<property>
  <name>yarn.nodemanager.pmem-check-enabled</name>
  <value>false</value>
</property>
<property>
  <name>yarn.nodemanager.vmem-check-enabled</name>
  <value>false</value>
</property>
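  Disabling both checks is the blunt fix. If you would rather keep the physical-memory check, a gentler alternative is to raise only the virtual-to-physical memory ratio (yarn.nodemanager.vmem-pmem-ratio, which defaults to 2.1); the value below is illustrative, not a tuned recommendation:

<property>
  <name>yarn.nodemanager.vmem-pmem-ratio</name>
  <value>4</value>
</property>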

  Last of all (it bears repeating): do not forget to restart Hadoop, otherwise rerunning spark-shell will still fail with the same error.
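  On a standard install the restart is just the bundled sbin scripts (assuming HADOOP_HOME points at your Hadoop directory); once the ResourceManager and NodeManagers are back up, rerun ./bin/spark-shell --master yarn:

$HADOOP_HOME/sbin/stop-yarn.sh
$HADOOP_HOME/sbin/start-yarn.sh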

2019-07-01 12:31:36 WARN  NativeCodeLoader:62 - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Setting default log level to "WARN".
To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).
2019-07-01 12:32:02 WARN Client:66 - Neither spark.yarn.jars nor spark.yarn.archive is set, falling back to uploading libraries under SPARK_HOME.
Spark context Web UI available at http://master:4040
Spark context available as 'sc' (master = yarn, app id = application_1561955386005_0001).
Spark session available as 'spark'.
Welcome to
      ____              __
     / __/__  ___ _____/ /__
    _\ \/ _ \/ _ `/ __/ '_/
   /___/ .__/\_,_/_/ /_/\_\   version 2.3.3
      /_/

Using Scala version 2.11.8 (Java HotSpot(TM) 64-Bit Server VM, Java 1.8.0_211)
Type in expressions to have them evaluated.
Type :help for more information.
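  With the session up, a quick sanity check confirms that jobs really execute on the cluster; the expression below is a minimal example, and the printed result is simply the arithmetic sum of 1 through 1000:

scala> sc.parallelize(1 to 1000).sum()
res0: Double = 500500.0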