cd /home

mkdir -p shixi_enzhao/suanec
cd shixi_enzhao/suanec
mkdir installs libs scripts tmp wsp
yum install vim-enhanced
yum install lrzsz
cd /home/shixi_enzhao/suanec/installs
pip install --upgrade pip
# wget -c -t 10 https://repo.continuum.io/archive/Anaconda2-4.4.0-Linux-x86_64.sh
# Oracle's JDK link requires accepting the license via a cookie header,
# otherwise the download returns an HTML page instead of the tarball.
wget -c -t 10 --no-cookies --header "Cookie: oraclelicense=accept-securebackup-cookie" http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.tar.gz
wget -c -t 10 https://d3kbcqa49mib13.cloudfront.net/spark-2.0.2-bin-hadoop2.7.tgz
wget -c -t 10 http://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz
wget -c -t 10 https://downloads.lightbend.com/scala/2.11.8/scala-2.11.8.tgz
# wget -c -t 10
# wget -c -t 10

ipMaster=10.85.125.175
ipSlaver1=10.85.125.176
mainPath=/home/hadoop/
useradd -d /home/hadoop -m hadoop    # password is set on the next line; useradd -p expects a pre-hashed value
echo 'hadoop' | passwd --stdin hadoop
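
# Optional check: confirm the hadoop user exists; the SSH key steps below
# should run as that user (e.g. via `su - hadoop`).
id hadoop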
ssh localhost                       # first login creates ~/.ssh; exit again afterwards
cd ~/.ssh
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat id_rsa.pub >> authorized_keys
ssh root@10.85.125.175              # on the other node, append its key the same way:
cat ~/.ssh/id_rsa.pub >> authorized_keys

chmod 755 ~
chmod 700 ~/.ssh
chmod 600 ~/.ssh/authorized_keys
scp authorized_keys hadoop@${ipSlaver1}:/home/hadoop/.ssh/authorized_keys
echo "${ipMaster} iZ2zeh3rmvn71widtik73gZ" | sudo tee -a /etc/hosts    # sudo echo ... >> fails: the redirect runs as the unprivileged user
echo "${ipSlaver1} iZ2zeh3rmvn71widtik72kZ" | sudo tee -a /etc/hosts
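
# Quick checks: the host entries resolve, and passwordless SSH to the
# slave works (BatchMode fails fast instead of prompting for a password).
getent hosts iZ2zeh3rmvn71widtik73gZ iZ2zeh3rmvn71widtik72kZ
ssh -o BatchMode=yes hadoop@${ipSlaver1} hostname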
cd ~
mkdir -p ~/libs/hadoop2.7
cd ~/installs
if [ "$(ls | grep -c hadoop)" -eq 0 ]; then
  wget -c -t 10 http://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz
fi
cp *.*gz ~/libs
cd ~/libs
for tarGz in *.*gz; do tar -xvf "$tarGz"; done
rm *.*gz
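
# Sanity check: ~/libs should now contain the unpacked directories,
# e.g. jdk1.8.0_131, hadoop-2.7.3, scala-2.11.8, spark-2.0.2-bin-hadoop2.7.
ls ~/libs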
chown -R hadoop:hadoop ~/libs/
echo 'export JAVA_HOME=~/libs/jdk1.8.0_131' > ~/libs/env.sh
echo 'export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar' >> ~/libs/env.sh
echo 'export PATH=$PATH:$JAVA_HOME/bin' >> ~/libs/env.sh
echo 'export HADOOP_HOME=~/libs/hadoop-2.7.3' >> ~/libs/env.sh
echo 'export PATH=$PATH:$HADOOP_HOME/bin' >> ~/libs/env.sh
echo 'export PATH=$PATH:$HADOOP_HOME/sbin' >> ~/libs/env.sh
echo 'export HADOOP_MAPRED_HOME=$HADOOP_HOME' >> ~/libs/env.sh
echo 'export HADOOP_COMMON_HOME=$HADOOP_HOME' >> ~/libs/env.sh
echo 'export HADOOP_HDFS_HOME=$HADOOP_HOME' >> ~/libs/env.sh
echo 'export YARN_HOME=$HADOOP_HOME' >> ~/libs/env.sh
echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native' >> ~/libs/env.sh
echo 'export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib:$HADOOP_HOME/lib/native"' >> ~/libs/env.sh
echo 'export JAVA_LIBRARY_PATH=$HADOOP_HOME/lib/native:$JAVA_LIBRARY_PATH' >> ~/libs/env.sh
echo 'export SCALA_HOME=~/libs/scala-2.11.8' >> ~/libs/env.sh
echo 'export SPARK_HOME=~/libs/spark-2.0.2-bin-hadoop2.7' >> ~/libs/env.sh
echo 'export PATH=$PATH:$SPARK_HOME/bin:$SCALA_HOME/bin' >> ~/libs/env.sh
echo 'alias hadoop-stop=${HADOOP_HOME}/sbin/stop-all.sh' >> ~/libs/env.sh
echo 'alias hadoop-start=${HADOOP_HOME}/sbin/start-all.sh' >> ~/libs/env.sh
echo 'alias hadoop-restart="hadoop-stop;hadoop-start"' >> ~/libs/env.sh
source ~/libs/env.sh
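
# Quick verification that env.sh took effect; each command prints a version.
java -version
hadoop version
scala -version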

mkdir -p $HADOOP_HOME/tmp $HADOOP_HOME/data/hdfs/namenode $HADOOP_HOME/data/hdfs/datanode
cd $HADOOP_HOME/etc/hadoop
echo 'export JAVA_HOME='$JAVA_HOME >> $HADOOP_HOME/etc/hadoop/hadoop-env.sh
echo 'export HADOOP_SSH_OPTS="-i /home/hadoop/.ssh/id_rsa"' >> $HADOOP_HOME/etc/hadoop/hadoop-env.sh
cp $HADOOP_HOME/etc/hadoop/core-site.xml $HADOOP_HOME/etc/hadoop/core-site.xml.origin
echo '
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>

<property>
<name>fs.default.name</name>
<value>hdfs://iZ2zeh3rmvn71widtik73gZ:9000</value>
</property>
<!-- default filesystem URI (fs.default.name is the deprecated alias of fs.defaultFS) -->

<property>
<name>hadoop.native.lib</name>
<value>false</value>
<description>Disable loading the native Hadoop library to avoid startup warnings.</description>
</property>
<!-- whether to use the native Hadoop library -->
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>'${HADOOP_HOME}'/tmp</value>
<description>A base for other temporary directories.</description>
</property>
<!-- base path for other temporary directories -->
</configuration>

' > $HADOOP_HOME/etc/hadoop/core-site.xml
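
# Optional: validate the generated XML (assumes xmllint from libxml2 is
# available; install it via yum if not).
xmllint --noout $HADOOP_HOME/etc/hadoop/core-site.xml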

cp $HADOOP_HOME/etc/hadoop/hdfs-site.xml $HADOOP_HOME/etc/hadoop/hdfs-site.xml.origin
echo '
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
<!-- <property>
<name>dfs.namenode.http-address</name>
<value>iZ2zeh3rmvn71widtik73gZ:50070</value>
<description> where to fetch NameNode images and edits; note the hostname </description>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>iZ2zeh3rmvn71widtik72kZ:50090</value>
<description> fetch SecondNameNode fsimage </description>
</property> -->

<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<!-- replication factor -->

<property>
<name>dfs.namenode.name.dir</name>
<value>file:'${HADOOP_HOME}'/data/hdfs/namenode</value>
</property>
<!-- NameNode data directory -->

<property>
<name>dfs.datanode.data.dir</name>
<value>file:'${HADOOP_HOME}'/data/hdfs/datanode</value>
</property>
<!-- DataNode data directory -->
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>'${HADOOP_HOME}'/tmp</value>
<description>A base for other temporary directories.</description>
</property>
<!-- base path for other temporary directories -->
</configuration>

' > $HADOOP_HOME/etc/hadoop/hdfs-site.xml

echo '
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<!-- framework that executes MapReduce jobs -->
<!-- <property>
<name>mapreduce.jobhistory.address</name>
<value>iZ2zeh3rmvn71widtik73gZ:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>iZ2zeh3rmvn71widtik73gZ:19888</value>
</property> -->
</configuration>

' > $HADOOP_HOME/etc/hadoop/mapred-site.xml
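
# Note: Hadoop 2.7.3 ships only mapred-site.xml.template (no mapred-site.xml),
# so there was nothing to back up before writing the file above; keep a copy
# of the template for reference if desired.
cp $HADOOP_HOME/etc/hadoop/mapred-site.xml.template $HADOOP_HOME/etc/hadoop/mapred-site.xml.origin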

cp $HADOOP_HOME/etc/hadoop/yarn-site.xml $HADOOP_HOME/etc/hadoop/yarn-site.xml.origin
echo '
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<configuration>

<!-- Site specific YARN configuration properties -->

<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<!-- auxiliary services run by the NodeManager; must include mapreduce_shuffle for MapReduce jobs to run -->

<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<!-- implementation class backing the mapreduce_shuffle service -->

<property>
<name>yarn.nodemanager.container-manager.thread-count</name>
<value>8</value>
<final>true</final>
</property>
<!-- threads used by the container manager (default 20) -->

<property>
<name>yarn.nodemanager.resource.cpu-vcores</name>
<value>8</value>
<final>true</final>
</property>
<!-- CPU cores available for containers -->

<property>
<name>yarn.scheduler.minimum-allocation-mb</name>
<value>2048</value>
<final>true</final>
</property>
<!-- minimum memory per container, in MB (default 1024) -->

<property>
<name>yarn.scheduler.maximum-allocation-mb</name>
<value>102400</value>
<final>true</final>
</property>
<!-- maximum memory per container, in MB (default 8192) -->

<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>32768</value>
<final>true</final>
</property>
<!-- physical memory available for containers, in MB -->

<property>
<name>yarn.resourcemanager.hostname</name>
<value>iZ2zeh3rmvn71widtik73gZ</value>
</property>

</configuration>

' > $HADOOP_HOME/etc/hadoop/yarn-site.xml
echo 'iZ2zeh3rmvn71widtik72kZ' >> $HADOOP_HOME/etc/hadoop/slaves
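
# The stock slaves file already lists localhost; review it and drop that entry
# if the master should not also run a DataNode/NodeManager.
cat $HADOOP_HOME/etc/hadoop/slaves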

# Spark: reuse the Hadoop slaves list and point Spark at the Hadoop configuration.
cp $HADOOP_HOME/etc/hadoop/slaves ${SPARK_HOME}/conf
echo "export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop" >> ${SPARK_HOME}/conf/spark-env.sh

scp -r ~/libs hadoop@10.85.125.176:~/    # no space after the colon, or scp copies into the local ~/ instead
ssh hadoop@10.85.125.176                 # log in once to confirm the copy landed, then exit
hadoop namenode -format
${HADOOP_HOME}/sbin/start-all.sh
${SPARK_HOME}/sbin/start-all.sh
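
# Verify the daemons came up (assuming the master + one slave layout above):
# on the master, jps should list NameNode, SecondaryNameNode, ResourceManager
# and the Spark Master; on the slave, DataNode, NodeManager and Worker.
# Web UIs on the usual ports: HDFS :50070, YARN :8088, Spark standalone :8080.
jps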
