# https://www.scala-lang.org/download/2.12.4.html
# ================================================================== Install Scala

tar -zxvf ~/scala-2.12.4.tgz -C /usr/local
rm -r ~/scala-2.12.4.tgz
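# Optional sanity check (a sketch; uses the absolute path because /etc/profile
# has not been updated yet at this point):
/usr/local/scala-2.12.4/bin/scala -version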

# http://archive.apache.org/dist/spark/spark-2.3.0/

# ================================================================== Install Spark

tar -zxf ~/spark-2.3.0-bin-hadoop2.7.tgz -C /usr/local
mv /usr/local/spark-2.3.0-bin-hadoop2.7 /usr/local/spark-2.3.0
rm -r ~/spark-2.3.0-bin-hadoop2.7.tgz

# Environment variables
# ==================================================================node1 node2 node3

vi /etc/profile

# Add the following below the line "export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL"

export JAVA_HOME=/usr/java/jdk1.8.0_111
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export SCALA_HOME=/usr/local/scala-2.12.4
export KAFKA_HOME=/usr/local/kafka_2.12-0.10.2.1
export FLUME_HOME=/usr/local/flume-1.8.0
export SPARK_HOME=/usr/local/spark-2.3.0

export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin:$FLUME_HOME/bin:$SPARK_HOME/bin:$SPARK_HOME/sbin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native

# ==================================================================node1

# Apply the environment variables
source /etc/profile

# Check the result
echo $SPARK_HOME
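# Optionally verify the Spark binaries themselves (a quick sketch; prints the version banner):
$SPARK_HOME/bin/spark-submit --version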

# ==================================================================node1

cp $SPARK_HOME/conf/docker.properties.template $SPARK_HOME/conf/docker.properties
vi $SPARK_HOME/conf/docker.properties
spark.mesos.executor.home: /usr/local/spark-2.3.0

cp $SPARK_HOME/conf/fairscheduler.xml.template $SPARK_HOME/conf/fairscheduler.xml
cp $SPARK_HOME/conf/log4j.properties.template $SPARK_HOME/conf/log4j.properties
cp $SPARK_HOME/conf/metrics.properties.template $SPARK_HOME/conf/metrics.properties

cp $SPARK_HOME/conf/slaves.template $SPARK_HOME/conf/slaves
vi $SPARK_HOME/conf/slaves
node1
node2
node3
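# start-slaves.sh reaches the hosts listed above over SSH, so passwordless login
# from node1 should already work (a quick hedged check):
ssh node2 hostname
ssh node3 hostname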
cp $SPARK_HOME/conf/spark-defaults.conf.template $SPARK_HOME/conf/spark-defaults.conf
vi $SPARK_HOME/conf/spark-defaults.conf
spark.eventLog.enabled true
spark.eventLog.dir hdfs://appcluster/spark/eventslog
# Directory the History Server UI scans; requires event logging to be enabled and its directory set (the two settings above)
spark.history.fs.logDirectory hdfs://appcluster/spark
spark.eventLog.compress true

# To let the YARN ResourceManager link to the Spark History Server, add:
# spark.yarn.historyServer.address http://node1:19888
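# With event logging configured as above, the bundled History Server can be started
# later (a sketch; assumes the HDFS event log directory created in the startup
# section below already exists):
# $SPARK_HOME/sbin/start-history-server.sh
# Its web UI defaults to port 18080: http://node1:18080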
cp $SPARK_HOME/conf/spark-env.sh.template $SPARK_HOME/conf/spark-env.sh
vi $SPARK_HOME/conf/spark-env.sh

export SPARK_MASTER_PORT=7077          # Port jobs are submitted to; default 7077
export SPARK_MASTER_WEBUI_PORT=8070    # Master web UI port; changed from the default 8080 to 8070
export SPARK_WORKER_CORES=1            # Number of cores each worker may use
export SPARK_WORKER_MEMORY=1g          # Amount of memory each worker may use
export SPARK_WORKER_PORT=7078          # Worker port (optional)
export SPARK_WORKER_WEBUI_PORT=8071    # Worker web UI port (optional)
export SPARK_WORKER_INSTANCES=1        # Worker instances per node (optional)

export JAVA_HOME=/usr/java/jdk1.8.0_111
export SCALA_HOME=/usr/local/scala-2.12.4
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop
export SPARK_PID_DIR=/usr/local/spark-2.3.0/pids
export SPARK_LOCAL_DIR=/usr/local/spark-2.3.0/tmp
export LD_LIBRARY_PATH=$HADOOP_HOME/lib/native
# Standby-master HA through ZooKeeper
export SPARK_DAEMON_JAVA_OPTS="-Dspark.deploy.recoveryMode=ZOOKEEPER -Dspark.deploy.zookeeper.url=node1:2181,node2:2181,node3:2181 -Dspark.deploy.zookeeper.dir=/spark"

vi $SPARK_HOME/sbin/start-master.sh
SPARK_MASTER_WEBUI_PORT=8070

cp $HADOOP_HOME/etc/hadoop/hdfs-site.xml $SPARK_HOME/conf/

# Silence the NativeCodeLoader warning
vi $HADOOP_HOME/etc/hadoop/log4j.properties
log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR

scp -r $HADOOP_HOME/etc/hadoop/log4j.properties node2:$HADOOP_HOME/etc/hadoop/
scp -r $HADOOP_HOME/etc/hadoop/log4j.properties node3:$HADOOP_HOME/etc/hadoop/
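# Once both masters are running, the recovery state kept by SPARK_DAEMON_JAVA_OPTS
# can be inspected in ZooKeeper (a sketch; the path follows
# -Dspark.deploy.zookeeper.dir=/spark set above):
# zkCli.sh
# ls /spark
# expect children such as leader_election and master_status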

# ==================================================================node1

scp -r $SPARK_HOME node2:/usr/local/
scp -r $SPARK_HOME node3:/usr/local/

# ==================================================================node2 node3

# Apply the environment variables
source /etc/profile

# Check the result
echo $SPARK_HOME

# Start

# ==================================================================node1 node2 node3
# Start ZooKeeper and HDFS first
zkServer.sh start
zkServer.sh status

# ==================================================================node1
zkCli.sh
create /spark ''

$HADOOP_HOME/sbin/start-all.sh
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# ==================================================================node2
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc
$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager
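# A quick hedged check that the expected daemons came up (run on each node; the
# exact list depends on the node's role: NameNode/DataNode/QuorumPeerMain/
# DFSZKFailoverController/ResourceManager/NodeManager):
jps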

# Start Spark

# ==================================================================node1
$SPARK_HOME/sbin/start-master.sh
$SPARK_HOME/sbin/start-slaves.sh

# ==================================================================node2
$SPARK_HOME/sbin/start-master.sh
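# The standalone master web UI also serves JSON, which is a convenient way to
# confirm which master is ALIVE and which is STANDBY (a sketch; assumes curl is
# installed and the 8070 web UI port configured above):
curl -s http://node1:8070/json/ | grep status
curl -s http://node2:8070/json/ | grep status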
# ==================================================================node1

# Check the safe mode status:
hdfs dfsadmin -safemode get
# Enter safe mode:
# hdfs dfsadmin -safemode enter
# Leave safe mode:
# hdfs dfsadmin -safemode leave

hdfs dfs -mkdir -p /spark/eventslog

$SPARK_HOME/bin/spark-shell
# http://node1:4040
# http://node1:8070

> :quit

# test

# The output directory must not already exist on HDFS
# hdfs dfs -mkdir -p /spark/output
# hdfs dfs -rmr /spark/output

vi ~/sparkdata.txt
hello man
what are you doing now
my running
hello
kevin
hi man

hdfs dfs -mkdir -p /usr/file/input
hdfs dfs -put ~/sparkdata.txt /usr/file/input
hdfs dfs -ls /usr/file/input

# In the spark-shell: word count on a local file
val file1 = sc.textFile("file:///root/sparkdata.txt")
val count1 = file1.flatMap(line => line.split(" ")).map(word => (word, 1)).reduceByKey(_ + _)
count1.saveAsTextFile("hdfs://node1:8020/spark/output1")

# Word count on the HDFS copy
val file = sc.textFile("hdfs://appcluster/usr/file/input/sparkdata.txt")
val count = file.flatMap(line => line.split(" ")).map(word => (word, 1)).reduceByKey(_ + _)
count.saveAsTextFile("hdfs://node1:8020/spark/output")

hdfs dfs -ls /spark/output
hdfs dfs -cat /spark/output/part-00000
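# The bundled examples make another quick smoke test (a sketch; the examples jar
# path assumes the stock 2.3.0 Scala 2.11 build and may vary):
$SPARK_HOME/bin/run-example SparkPi 10
# or submit explicitly to the HA standalone masters configured above:
$SPARK_HOME/bin/spark-submit --master spark://node1:7077,node2:7077 --class org.apache.spark.examples.SparkPi $SPARK_HOME/examples/jars/spark-examples_2.11-2.3.0.jar 10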

# Stop the processes started above

# ==================================================================node1
$SPARK_HOME/sbin/stop-slaves.sh
$SPARK_HOME/sbin/stop-master.sh
$HADOOP_HOME/sbin/stop-all.sh

# ==================================================================node1 node2 node3
# Stop ZooKeeper
zkServer.sh stop

# ==================================================================node2
$HADOOP_HOME/sbin/yarn-daemon.sh stop resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc

# ==================================================================node1
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc

shutdown -h now
# Take a VM snapshot: spark
