https://www.scala-lang.org/download/2.12.4.html
# ==================================================================install Scala

tar -zxvf ~/scala-2.12.4.tgz -C /usr/local
rm -r ~/scala-2.12.4.tgz

# http://archive.apache.org/dist/spark/spark-2.3.0/

# ==================================================================install Spark

tar -zxf ~/spark-2.3.0-bin-hadoop2.7.tgz -C /usr/local
mv /usr/local/spark-2.3.0-bin-hadoop2.7 /usr/local/spark-2.3.0
rm -r ~/spark-2.3.0-bin-hadoop2.7.tgz

# environment variables
# ==================================================================node1 node2 node3

vi /etc/profile

# add the following below the line "export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL"

export JAVA_HOME=/usr/java/jdk1.8.0_111
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export SCALA_HOME=/usr/local/scala-2.12.4
export KAFKA_HOME=/usr/local/kafka_2.12-0.10.2.1
export FLUME_HOME=/usr/local/flume-1.8.0
export SPARK_HOME=/usr/local/spark-2.3.0

export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin:$FLUME_HOME/bin:$SPARK_HOME/bin:$SPARK_HOME/sbin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native

# ==================================================================node1

# reload the environment variables
source /etc/profile

# check the result
echo $SPARK_HOME
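
# Optional check that the new PATH resolves the Scala and Spark binaries
# (a minimal sketch; both commands ship with the packages installed above):
scala -version
$SPARK_HOME/bin/spark-submit --version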

# ==================================================================node1

cp $SPARK_HOME/conf/docker.properties.template $SPARK_HOME/conf/docker.properties
vi $SPARK_HOME/conf/docker.properties

spark.mesos.executor.home: /usr/local/spark-2.3.0

cp $SPARK_HOME/conf/fairscheduler.xml.template $SPARK_HOME/conf/fairscheduler.xml
cp $SPARK_HOME/conf/log4j.properties.template $SPARK_HOME/conf/log4j.properties
cp $SPARK_HOME/conf/metrics.properties.template $SPARK_HOME/conf/metrics.properties

cp $SPARK_HOME/conf/slaves.template $SPARK_HOME/conf/slaves
vi $SPARK_HOME/conf/slaves

node1
node2
node3

cp $SPARK_HOME/conf/spark-defaults.conf.template $SPARK_HOME/conf/spark-defaults.conf
vi $SPARK_HOME/conf/spark-defaults.conf

spark.eventLog.enabled true
spark.eventLog.dir hdfs://appcluster/spark/eventslog
# directory read by the history/monitoring page; event logging must be enabled and its directory set, i.e. the two settings above
spark.history.fs.logDirectory hdfs://appcluster/spark
spark.eventLog.compress true

# If the YARN ResourceManager should link to the Spark History Server, also add the line:
# spark.yarn.historyServer.address http://node1:19888

cp $SPARK_HOME/conf/spark-env.sh.template $SPARK_HOME/conf/spark-env.sh
vi $SPARK_HOME/conf/spark-env.sh

export SPARK_MASTER_PORT=7077                 # port for submitting jobs (default 7077)
export SPARK_MASTER_WEBUI_PORT=8070           # master web UI port, changed from the default 8080 to 8070
export SPARK_WORKER_CORES=1                   # number of cores each worker can use
export SPARK_WORKER_MEMORY=1g                 # amount of memory each worker can use
export SPARK_WORKER_PORT=7078                 # worker port (optional)
export SPARK_WORKER_WEBUI_PORT=8071           # worker web UI port (optional)
export SPARK_WORKER_INSTANCES=1               # number of worker instances per node (optional)
export JAVA_HOME=/usr/java/jdk1.8.0_111
export SCALA_HOME=/usr/local/scala-2.12.4
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop
export SPARK_PID_DIR=/usr/local/spark-2.3.0/pids
export SPARK_LOCAL_DIR=/usr/local/spark-2.3.0/tmp
export LD_LIBRARY_PATH=$HADOOP_HOME/lib/native
export SPARK_DAEMON_JAVA_OPTS="-Dspark.deploy.recoveryMode=ZOOKEEPER -Dspark.deploy.zookeeper.url=node1:2181,node2:2181,node3:2181 -Dspark.deploy.zookeeper.dir=/spark"

vi $SPARK_HOME/sbin/start-master.sh

SPARK_MASTER_WEBUI_PORT=8070

cp $HADOOP_HOME/etc/hadoop/hdfs-site.xml $SPARK_HOME/conf/

vi $HADOOP_HOME/etc/hadoop/log4j.properties

log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR

scp -r $HADOOP_HOME/etc/hadoop/log4j.properties node2:$HADOOP_HOME/etc/hadoop/
scp -r $HADOOP_HOME/etc/hadoop/log4j.properties node3:$HADOOP_HOME/etc/hadoop/
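
# Note: with spark.deploy.recoveryMode=ZOOKEEPER set above, the two masters (node1 and node2,
# started later in this guide) form an active/standby pair, and a client can list both master
# URLs so it fails over automatically. A minimal sketch, not part of the original steps:
# $SPARK_HOME/bin/spark-shell --master spark://node1:7077,node2:7077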

# ==================================================================node1

scp -r $SPARK_HOME node2:/usr/local/
scp -r $SPARK_HOME node3:/usr/local/

# ==================================================================node2 node3

# reload the environment variables
source /etc/profile

# check the result
echo $SPARK_HOME

# start

# ==================================================================node1 node2 node3
# start ZooKeeper and HDFS first
zkServer.sh start
zkServer.sh status

# ==================================================================node1
zkCli.sh
create /spark ''
quit

$HADOOP_HOME/sbin/start-all.sh

$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# ==================================================================node2
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc
$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager
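
# Optional sanity check before starting Spark: confirm that one NameNode is active.
# A sketch only -- it assumes the NameNode IDs nn1/nn2 from the earlier Hadoop HA setup:
# hdfs haadmin -getServiceState nn1
# hdfs haadmin -getServiceState nn2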

# start Spark

# ==================================================================node1
$SPARK_HOME/sbin/start-master.sh

$SPARK_HOME/sbin/start-slaves.sh

# ==================================================================node2
$SPARK_HOME/sbin/start-master.sh
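
# Optional check of the master pair: the web UIs at http://node1:8070 and http://node2:8070
# should show one master as ALIVE and the other as STANDBY. A minimal sketch to confirm the
# Master process is running on each node:
jps | grep Master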
# ==================================================================node1

# get the current safe mode status:
hdfs dfsadmin -safemode get
# turn safe mode on:
# hdfs dfsadmin -safemode enter
# turn safe mode off:
# hdfs dfsadmin -safemode leave

hdfs dfs -mkdir -p /spark/eventslog

$SPARK_HOME/bin/spark-shell

# http://node1:4040
# http://node1:8070

:quit
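
# The spark-defaults.conf above enables event logging and sets spark.history.fs.logDirectory,
# but these steps never start the history server. A minimal sketch, assuming the eventslog
# directory created above and the default history server web port 18080:
$SPARK_HOME/sbin/start-history-server.sh
# http://node1:18080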

# test

# make sure this directory does not already exist on HDFS
# hdfs dfs -mkdir -p /spark/output
# hdfs dfs -rmr /spark/output

vi ~/sparkdata.txt

hello man
what are you doing now
my running
hello
kevin
hi man

hdfs dfs -mkdir -p /usr/file/input

hdfs dfs -put ~/sparkdata.txt /usr/file/input
hdfs dfs -ls /usr/file/input

# run the following in spark-shell (sc is the built-in SparkContext):
val file1 = sc.textFile("file:///root/sparkdata.txt")
val count1=file1.flatMap(line => line.split(" ")).map(word => (word,1)).reduceByKey(_+_)
count1.saveAsTextFile("hdfs://node1:8020/spark/output1")

val file=sc.textFile("hdfs://appcluster/usr/file/input/sparkdata.txt")
val count=file.flatMap(line => line.split(" ")).map(word => (word,1)).reduceByKey(_+_)
count.saveAsTextFile("hdfs://node1:8020/spark/output")

hdfs dfs -ls /spark/output

hdfs dfs -cat /spark/output/part-00000
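
# Optional smoke test of the standalone cluster outside spark-shell, using the bundled SparkPi
# example. A sketch only -- the examples jar path assumes the stock spark-2.3.0-bin-hadoop2.7 layout:
$SPARK_HOME/bin/spark-submit \
  --class org.apache.spark.examples.SparkPi \
  --master spark://node1:7077 \
  $SPARK_HOME/examples/jars/spark-examples_2.11-2.3.0.jar 10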

# stop the processes that were started

# ==================================================================node1
$SPARK_HOME/sbin/stop-slaves.sh

$SPARK_HOME/sbin/stop-master.sh

$HADOOP_HOME/sbin/stop-all.sh

# ==================================================================node1 node2 node3
# stop ZooKeeper
zkServer.sh stop

# ==================================================================node2
$HADOOP_HOME/sbin/yarn-daemon.sh stop resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc

# ==================================================================node1
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc

shutdown -h now
# take a VM snapshot: spark
