https://www.scala-lang.org/download/2.12.4.html
# ==================================================================Install Scala

tar -zxvf ~/scala-2.12.4.tgz -C /usr/local
rm -r ~/scala-2.12.4.tgz

# http://archive.apache.org/dist/spark/spark-2.3.0/

# ==================================================================Install Spark

tar -zxf ~/spark-2.3.0-bin-hadoop2.7.tgz -C /usr/local
mv /usr/local/spark-2.3.0-bin-hadoop2.7 /usr/local/spark-2.3.0
rm -r ~/spark-2.3.0-bin-hadoop2.7.tgz

# Environment variables
# ==================================================================node1 node2 node3

vi /etc/profile

# Add the following below the line "export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL"

export JAVA_HOME=/usr/java/jdk1.8.0_111
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export SCALA_HOME=/usr/local/scala-2.12.4
export KAFKA_HOME=/usr/local/kafka_2.12-0.10.2.1
export FLUME_HOME=/usr/local/flume-1.8.0
export SPARK_HOME=/usr/local/spark-2.3.0

export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin:$FLUME_HOME/bin:$SPARK_HOME/bin:$SPARK_HOME/sbin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native

# ==================================================================node1

# Apply the environment variables
source /etc/profile
# Verify the configuration
echo $SPARK_HOME
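
# The same check from a Scala REPL, to confirm the variables are visible to JVM
# processes (a minimal sketch; run `scala` after sourcing the profile):
sys.env.get("SPARK_HOME")  // expect Some(/usr/local/spark-2.3.0)
sys.env.get("SCALA_HOME")  // expect Some(/usr/local/scala-2.12.4)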

# ==================================================================node1

cp $SPARK_HOME/conf/docker.properties.template $SPARK_HOME/conf/docker.properties
vi $SPARK_HOME/conf/docker.properties
spark.mesos.executor.home: /usr/local/spark-2.3.0

cp $SPARK_HOME/conf/fairscheduler.xml.template $SPARK_HOME/conf/fairscheduler.xml
cp $SPARK_HOME/conf/log4j.properties.template $SPARK_HOME/conf/log4j.properties
cp $SPARK_HOME/conf/metrics.properties.template $SPARK_HOME/conf/metrics.properties
cp $SPARK_HOME/conf/slaves.template $SPARK_HOME/conf/slaves
vi $SPARK_HOME/conf/slaves
node1
node2
node3

cp $SPARK_HOME/conf/spark-defaults.conf.template $SPARK_HOME/conf/spark-defaults.conf
vi $SPARK_HOME/conf/spark-defaults.conf
spark.eventLog.enabled true
spark.eventLog.dir hdfs://appcluster/spark/eventslog
# Directory the history server monitors; event logging must first be enabled and pointed at a directory (the two settings above), used together with them
spark.history.fs.logDirectory hdfs://appcluster/spark
spark.eventLog.compress true

# To let the YARN ResourceManager link to the Spark History Server, add one line:
# spark.yarn.historyServer.address http://node1:19888

cp $SPARK_HOME/conf/spark-env.sh.template $SPARK_HOME/conf/spark-env.sh
vi $SPARK_HOME/conf/spark-env.sh

export SPARK_MASTER_PORT=7077 # port for submitting jobs; default is 7077
export SPARK_MASTER_WEBUI_PORT=8070 # master web UI port; changed from the default 8080 to 8070
export SPARK_WORKER_CORES=1 # number of cores each worker node may use
export SPARK_WORKER_MEMORY=1g # amount of memory each worker node may use
export SPARK_WORKER_PORT=7078 # port of each worker node (optional)
export SPARK_WORKER_WEBUI_PORT=8071 # web UI port of each worker node (optional)
export SPARK_WORKER_INSTANCES=1 # number of worker instances per node (optional)

export JAVA_HOME=/usr/java/jdk1.8.0_111
export SCALA_HOME=/usr/local/scala-2.12.4
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export YARN_CONF_DIR=$HADOOP_HOME/etc/hadoop
export SPARK_PID_DIR=/usr/local/spark-2.3.0/pids
export SPARK_LOCAL_DIR=/usr/local/spark-2.3.0/tmp
export LD_LIBRARY_PATH=$HADOOP_HOME/lib/native
export SPARK_DAEMON_JAVA_OPTS="-Dspark.deploy.recoveryMode=ZOOKEEPER -Dspark.deploy.zookeeper.url=node1:2181,node2:2181,node3:2181 -Dspark.deploy.zookeeper.dir=/spark"
# With recoveryMode=ZOOKEEPER, the standby master started later on node2 takes over automatically if the node1 master fails.

vi $SPARK_HOME/sbin/start-master.sh
SPARK_MASTER_WEBUI_PORT=8070

cp $HADOOP_HOME/etc/hadoop/hdfs-site.xml $SPARK_HOME/conf/

# Silence the NativeCodeLoader warning
vi $HADOOP_HOME/etc/hadoop/log4j.properties
log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR

scp -r $HADOOP_HOME/etc/hadoop/log4j.properties node2:$HADOOP_HOME/etc/hadoop/
scp -r $HADOOP_HOME/etc/hadoop/log4j.properties node3:$HADOOP_HOME/etc/hadoop/

# ==================================================================node1

scp -r $SPARK_HOME node2:/usr/local/
scp -r $SPARK_HOME node3:/usr/local/

# ==================================================================node2 node3

# Apply the environment variables
source /etc/profile
# Verify the configuration
echo $SPARK_HOME

# Startup

# ==================================================================node1 node2 node3
# Start ZooKeeper and HDFS first
zkServer.sh start
zkServer.sh status

# ==================================================================node1
zkCli.sh
create /spark ''

$HADOOP_HOME/sbin/start-all.sh
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# ==================================================================node2
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc
$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager

# Start Spark

# ==================================================================node1
$SPARK_HOME/sbin/start-master.sh
$SPARK_HOME/sbin/start-slaves.sh

# ==================================================================node2
$SPARK_HOME/sbin/start-master.sh

# ==================================================================node1
# Get the safe mode status:
hdfs dfsadmin -safemode get
# hdfs dfsadmin -safemode enter # turn safe mode on
# hdfs dfsadmin -safemode leave # turn safe mode off

hdfs dfs -mkdir -p /spark/eventslog

$SPARK_HOME/bin/spark-shell

# Application web UI: http://node1:4040
# Master web UI: http://node1:8070
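
# Before quitting, a few quick sanity checks inside spark-shell (a minimal sketch;
# the expected values assume the configuration above took effect):
sc.master                            // master URL the shell connected to
sc.getConf.get("spark.eventLog.dir") // expect hdfs://appcluster/spark/eventslog
sc.parallelize(1 to 100).sum()       // expect 5050.0 if the cluster executes work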
> :quit

# Test

# Make sure this directory does not already exist on HDFS
# hdfs dfs -mkdir -p /spark/output
# hdfs dfs -rmr /spark/output

vi ~/sparkdata.txt
hello man
what are you doing now
my running
hello
kevin
hi man

hdfs dfs -mkdir -p /usr/file/input
hdfs dfs -put ~/sparkdata.txt /usr/file/input
hdfs dfs -ls /usr/file/input

# Word count over the local file, writing results to HDFS (run inside spark-shell):
val file1 = sc.textFile("file:///root/sparkdata.txt")
val count1 = file1.flatMap(line => line.split(" ")).map(word => (word, 1)).reduceByKey(_ + _)
count1.saveAsTextFile("hdfs://node1:8020/spark/output1")

# Word count reading the input from HDFS:
val file = sc.textFile("hdfs://appcluster/usr/file/input/sparkdata.txt")
val count = file.flatMap(line => line.split(" ")).map(word => (word, 1)).reduceByKey(_ + _)
count.saveAsTextFile("hdfs://node1:8020/spark/output")

hdfs dfs -ls /spark/output
hdfs dfs -cat /spark/output/part-00000
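
# To inspect the result in the shell instead of reading part files from HDFS
# (a sketch reusing the RDDs defined above):
count.collect().foreach(println)                            // print every (word, count) pair
count.sortBy(_._2, ascending = false).take(3).foreach(println) // the three most frequent words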

# Stop the running processes

# ==================================================================node1
$SPARK_HOME/sbin/stop-slaves.sh
$SPARK_HOME/sbin/stop-master.sh
$HADOOP_HOME/sbin/stop-all.sh

# ==================================================================node1 node2 node3
# Stop ZooKeeper
zkServer.sh stop

# ==================================================================node2
$HADOOP_HOME/sbin/yarn-daemon.sh stop resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc

# ==================================================================node1
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc

shutdown -h now
# Take a VM snapshot: spark
