cd /home
mkdir -p shixi_enzhao/suanec
cd shixi_enzhao/suanec
mkdir -p installs libs scripts tmp wsp
yum install -y vim-enhanced lrzsz
cd /home/shixi_enzhao/suanec/installs
pip install --upgrade pip
# wget -c -t 10 https://repo.continuum.io/archive/Anaconda2-4.4.0-Linux-x86_64.sh
# Oracle's JDK download requires accepting the license; the cookie header below is the usual workaround
wget -c -t 10 --no-check-certificate --header "Cookie: oraclelicense=accept-securebackup-cookie" http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.tar.gz
wget -c -t 10 https://d3kbcqa49mib13.cloudfront.net/spark-2.0.2-bin-hadoop2.7.tgz
wget -c -t 10 http://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz
wget -c -t 10 https://downloads.lightbend.com/scala/2.11.8/scala-2.11.8.tgz
# wget -c -t 10 <additional package URL>
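wget -c resumes partial downloads, so a truncated archive can linger unnoticed; a quick size check before going on is cheap insurance:
ls -lh *.tar.gz *.tgz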

ipMaster=10.85.125.175
ipSlaver1=10.85.125.176
mainPath=/home/hadoop/
# useradd -p expects an already-encrypted password, so set the password with passwd --stdin instead
useradd -d /home/hadoop -m hadoop
echo 'hadoop' | passwd --stdin hadoop
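Confirm the account came out as intended (both commands are stock system tools, nothing assumed):
id hadoop
getent passwd hadoop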
ssh localhost                    # first login as hadoop creates ~/.ssh with the right ownership
cd ~/.ssh
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
cat id_rsa.pub >> authorized_keys
ssh root@10.85.125.175           # repeat the key setup on the other node as well
cat ~/.ssh/id_rsa.pub >> authorized_keys

chmod 755 ~
chmod 700 ~/.ssh
chmod 600 ~/.ssh/authorized_keys
scp authorized_keys hadoop@${ipSlaver1}:/home/hadoop/.ssh/authorized_keys
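Passwordless SSH is the one thing Hadoop's start scripts cannot work around, so verify it now; BatchMode=yes makes ssh fail fast instead of falling back to a password prompt:
ssh -o BatchMode=yes hadoop@${ipSlaver1} hostname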
# sudo does not apply to the >> redirection, so use tee -a instead
echo "${ipMaster} iZ2zeh3rmvn71widtik73gZ" | sudo tee -a /etc/hosts
echo "${ipSlaver1} iZ2zeh3rmvn71widtik72kZ" | sudo tee -a /etc/hosts
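Check that both hostnames now resolve locally; getent consults /etc/hosts the same way the daemons will:
getent hosts iZ2zeh3rmvn71widtik73gZ iZ2zeh3rmvn71widtik72kZ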
cd ~
mkdir -p ~/libs/hadoop2.7
cd ~/installs
if [ "$(ls | grep -c hadoop)" -eq 0 ]; then
  wget -c -t 10 http://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz
fi
cp *.*gz ~/libs
cd ~/libs
for tarGz in *.*gz; do tar -xvf "$tarGz"; done
rm *.*gz
chown -R hadoop:hadoop ~/libs/
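At this point ~/libs should hold the four unpacked trees; a quick listing catches a failed extraction early:
ls ~/libs
# expect: hadoop-2.7.3  jdk1.8.0_131  scala-2.11.8  spark-2.0.2-bin-hadoop2.7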
# write the shared environment file in one pass; the quoted heredoc keeps every $VAR literal, exactly as the original chain of single-quoted echo >> lines did
cat > ~/libs/env.sh <<'EOF'
export JAVA_HOME=~/libs/jdk1.8.0_131
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin
export HADOOP_HOME=~/libs/hadoop-2.7.3
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib:$HADOOP_HOME/lib/native"
export JAVA_LIBRARY_PATH=$HADOOP_HOME/lib/native:$JAVA_LIBRARY_PATH
export SCALA_HOME=~/libs/scala-2.11.8
export SPARK_HOME=~/libs/spark-2.0.2-bin-hadoop2.7
export PATH=$PATH:$SPARK_HOME/bin:$SCALA_HOME/bin
alias hadoop-stop='${HADOOP_HOME}/sbin/stop-all.sh'
alias hadoop-start='${HADOOP_HOME}/sbin/start-all.sh'
alias hadoop-restart="hadoop-stop;hadoop-start"
EOF
source ~/libs/env.sh
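source only affects the current shell; appending the same line to ~/.bashrc makes the environment survive logins, and a few version probes confirm the toolchain is wired up:
echo 'source ~/libs/env.sh' >> ~/.bashrc
java -version
hadoop version
scala -version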

mkdir -p $HADOOP_HOME/tmp $HADOOP_HOME/data/hdfs/namenode $HADOOP_HOME/data/hdfs/datanode
cd $HADOOP_HOME/etc/hadoop
echo 'export JAVA_HOME='$JAVA_HOME >> $HADOOP_HOME/etc/hadoop/hadoop-env.sh
echo 'export HADOOP_SSH_OPTS="-i /home/hadoop/.ssh/id_rsa"' >> $HADOOP_HOME/etc/hadoop/hadoop-env.sh
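A quick grep confirms both lines landed in hadoop-env.sh:
grep -n 'JAVA_HOME\|HADOOP_SSH_OPTS' $HADOOP_HOME/etc/hadoop/hadoop-env.sh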
cp $HADOOP_HOME/etc/hadoop/core-site.xml $HADOOP_HOME/etc/hadoop/core-site.xml.origin
echo '
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>

<property>
<name>fs.defaultFS</name>
<value>hdfs://iZ2zeh3rmvn71widtik73gZ:9000</value>
</property>
<!-- default file system URI; fs.default.name is the deprecated 1.x spelling -->

<property>
<name>hadoop.native.lib</name>
<value>false</value>
<description>disabled so the missing-native-library warning is suppressed</description>
</property>
<!-- whether to use the native Hadoop libraries -->
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>'${HADOOP_HOME}'/tmp</value>
<description>A base for other temporary directories.</description>
</property>
<!-- base path for other temporary directories -->
</configuration>

' > $HADOOP_HOME/etc/hadoop/core-site.xml
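Note the quoting trick above: the string deliberately breaks out of single quotes around ${HADOOP_HOME}, so the shell bakes the absolute path into the XML. Verify the substitution, and, if libxml2's xmllint is installed, well-formedness too:
grep -A 1 'hadoop.tmp.dir' $HADOOP_HOME/etc/hadoop/core-site.xml
xmllint --noout $HADOOP_HOME/etc/hadoop/core-site.xml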

cp $HADOOP_HOME/etc/hadoop/hdfs-site.xml $HADOOP_HOME/etc/hadoop/hdfs-site.xml.origin
echo '
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
<!-- <property>
<name>dfs.namenode.http-address</name>
<value>iZ2zeh3rmvn71widtik73gZ:50070</value>
<description> fetch NameNode images and edits; mind the hostname </description>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>iZ2zeh3rmvn71widtik72kZ:50090</value>
<description> fetch SecondNameNode fsimage </description>
</property> -->

<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<!-- replication factor -->

<property>
<name>dfs.namenode.name.dir</name>
<value>file:'${HADOOP_HOME}'/data/hdfs/namenode</value>
</property>
<!-- NameNode data directory -->

<property>
<name>dfs.datanode.data.dir</name>
<value>file:'${HADOOP_HOME}'/data/hdfs/datanode</value>
</property>
<!-- DataNode data directory -->
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>'${HADOOP_HOME}'/tmp</value>
<description>A base for other temporary directories.</description>
</property>
<!-- base path for other temporary directories -->
</configuration>

' > $HADOOP_HOME/etc/hadoop/hdfs-site.xml
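The same substitution applies here; make sure the NameNode/DataNode paths came out absolute rather than as a literal ${HADOOP_HOME}:
grep 'data/hdfs' $HADOOP_HOME/etc/hadoop/hdfs-site.xml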

echo '
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->

<!-- Put site-specific property overrides in this file. -->

<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<!-- framework used to execute MapReduce jobs -->
<!-- <property>
<name>mapreduce.jobhistory.address</name>
<value>iZ2zeh3rmvn71widtik73gZ:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>iZ2zeh3rmvn71widtik73gZ:19888</value>
</property> -->
</configuration>

' > $HADOOP_HOME/etc/hadoop/mapred-site.xml
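Unlike the other three files, Hadoop 2.7.3 ships only mapred-site.xml.template, which is why there was nothing to back up before the echo above; to keep a reference copy anyway:
cp $HADOOP_HOME/etc/hadoop/mapred-site.xml.template $HADOOP_HOME/etc/hadoop/mapred-site.xml.origin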

cp $HADOOP_HOME/etc/hadoop/yarn-site.xml $HADOOP_HOME/etc/hadoop/yarn-site.xml.origin
echo '
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<configuration>

<!-- Site specific YARN configuration properties -->

<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<!-- auxiliary service run on the NodeManager; must be set to mapreduce_shuffle for MapReduce jobs to run -->

<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<!-- implementation class backing the mapreduce_shuffle service -->

<property>
<name>yarn.nodemanager.container-manager.thread-count</name>
<value>8</value>
<final>true</final>
</property>
<!-- number of threads used by the container manager (default 20) -->

<property>
<name>yarn.nodemanager.resource.cpu-vcores</name>
<value>8</value>
<final>true</final>
</property>
<!-- number of CPU vcores allocated to containers -->

<property>
<name>yarn.scheduler.minimum-allocation-mb</name>
<value>2048</value>
<final>true</final>
</property>
<!-- minimum memory per container allocation, in MB (default 1024) -->

<property>
<name>yarn.scheduler.maximum-allocation-mb</name>
<value>102400</value>
<final>true</final>
</property>
<!-- maximum memory per container allocation, in MB (default 8192) -->

<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>32768</value>
<final>true</final>
</property>
<!-- physical memory available to containers on this node -->

<property>
<name>yarn.resourcemanager.hostname</name>
<value>iZ2zeh3rmvn71widtik73gZ</value>
</property>

</configuration>

' > $HADOOP_HOME/etc/hadoop/yarn-site.xml
echo 'iZ2zeh3rmvn71widtik72kZ' >> $HADOOP_HOME/etc/hadoop/slaves
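The slaves file is what start-all.sh walks over SSH, so it should list exactly the worker hostnames:
cat $HADOOP_HOME/etc/hadoop/slaves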

# spark
cp $HADOOP_HOME/etc/hadoop/slaves ${SPARK_HOME}/conf
echo 'export HADOOP_CONF_DIR='${HADOOP_HOME}/etc/hadoop >> ${SPARK_HOME}/conf/spark-env.sh
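Spark 2.x also honors SPARK_MASTER_HOST in spark-env.sh; pinning it to the master hostname is optional but avoids bind-address surprises (the value below assumes the same master host as above):
echo 'SPARK_MASTER_HOST=iZ2zeh3rmvn71widtik73gZ' >> ${SPARK_HOME}/conf/spark-env.sh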

scp -r ~/libs hadoop@10.85.125.176:~/     # no space after the colon, or scp treats ~/ as a second local source
ssh hadoop@10.85.125.176                  # log in once to confirm the copy landed, then exit
hdfs namenode -format                     # on the master only; `hadoop namenode -format` still works but is deprecated
${HADOOP_HOME}/sbin/start-all.sh
${SPARK_HOME}/sbin/start-all.sh
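If everything came up, jps on the master should show NameNode, SecondaryNameNode, ResourceManager, and Spark's Master, while the slave should show DataNode, NodeManager, and Worker; the usual web UIs for these versions are HDFS on :50070, YARN on :8088, and Spark on :8080:
jps
ssh hadoop@${ipSlaver1} jps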
