CentOS 7: Configuring Hadoop and Spark
cd /home
mkdir -p shixi_enzhao/suanec/{installs,libs,scripts,tmp,wsp}
cd shixi_enzhao/suanec
yum install -y vim-enhanced
yum install -y lrzsz    # rz/sz for file transfer over SSH
cd /home/shixi_enzhao/suanec/installs
pip install --upgrade pip
# wget -c -t 10 https://repo.continuum.io/archive/Anaconda2-4.4.0-Linux-x86_64.sh
wget -c -t 10 http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.tar.gz
wget -c -t 10 https://d3kbcqa49mib13.cloudfront.net/spark-2.0.2-bin-hadoop2.7.tgz
wget -c -t 10 http://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz
wget -c -t 10 https://downloads.lightbend.com/scala/2.11.8/scala-2.11.8.tgz
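Before moving on, it is worth confirming the archives downloaded completely (wget -c resumes partial downloads, so a truncated file can linger). A minimal check, not part of the original recipe:

ls -lh *.tar.gz *.tgz
for f in *.tar.gz *.tgz; do tar -tzf "$f" > /dev/null && echo "$f OK"; done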
ipMaster=10.85.125.175
ipSlaver1=10.85.125.176
mainPath=/home/hadoop/
useradd -d /home/hadoop -m hadoop    # -p expects an encrypted hash, so set the password separately
echo 'hadoop' | passwd --stdin hadoop
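The key-generation steps below assume you are working as the new hadoop account on the master; a quick switch and sanity check (assumed, not in the original):

su - hadoop
id    # expect uid/gid for hadoop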
ssh localhost                               # first login creates ~/.ssh; answer yes, then exit
cd ~/.ssh
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa    # empty passphrase, so daemons can start non-interactively
cat id_rsa.pub >> authorized_keys
ssh root@10.85.125.175                      # repeat the key steps on the master if working from another node
cat ~/.ssh/id_rsa.pub >> authorized_keys
chmod 755 ~
chmod 700 ~/.ssh
chmod 600 ~/.ssh/authorized_keys
scp authorized_keys hadoop@${ipSlaver1}:/home/hadoop/.ssh/authorized_keys
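Passwordless SSH is a hard requirement for the start/stop scripts, so verify it now (using the variables defined above):

ssh hadoop@${ipSlaver1} hostname    # should print the slave's hostname with no password prompt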
# sudo does not apply to a shell redirect, so use tee to append as root
echo "${ipMaster} iZ2zeh3rmvn71widtik73gZ" | sudo tee -a /etc/hosts
echo "${ipSlaver1} iZ2zeh3rmvn71widtik72kZ" | sudo tee -a /etc/hosts
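A quick check that both hostnames now resolve (generic command, not from the original):

getent hosts iZ2zeh3rmvn71widtik73gZ iZ2zeh3rmvn71widtik72kZ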
cd ~
mkdir -p ~/libs
cd ~/installs
if [ "$(ls | grep -c hadoop)" -eq 0 ]; then
  wget -c -t 10 http://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz
fi
cp *.*gz ~/libs
cd ~/libs
for tarGz in *.*gz; do tar -xvf "$tarGz"; done
rm *.*gz
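After extraction, ~/libs should contain the four unpacked directories that env.sh below refers to (assuming the versions downloaded above):

ls ~/libs    # expect: hadoop-2.7.3  jdk1.8.0_131  scala-2.11.8  spark-2.0.2-bin-hadoop2.7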
chown -R hadoop:hadoop ~/libs/
cat > ~/libs/env.sh <<'EOF'
export JAVA_HOME=~/libs/jdk1.8.0_131
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin
export HADOOP_HOME=~/libs/hadoop-2.7.3
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_HOME/lib:$HADOOP_HOME/lib/native"
export JAVA_LIBRARY_PATH=$HADOOP_HOME/lib/native:$JAVA_LIBRARY_PATH
export SCALA_HOME=~/libs/scala-2.11.8
export SPARK_HOME=~/libs/spark-2.0.2-bin-hadoop2.7
export PATH=$PATH:$SPARK_HOME/bin:$SCALA_HOME/bin
alias hadoop-stop='${HADOOP_HOME}/sbin/stop-all.sh'
alias hadoop-start='${HADOOP_HOME}/sbin/start-all.sh'
alias hadoop-restart='hadoop-stop;hadoop-start'
EOF
source ~/libs/env.sh
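env.sh only affects the current shell; to make it permanent for the hadoop account, source it from the login profile, then confirm each tool resolves (version checks assume the downloads above):

echo 'source ~/libs/env.sh' >> ~/.bashrc
java -version      # expect 1.8.0_131
hadoop version     # expect Hadoop 2.7.3
scala -version     # expect 2.11.8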
mkdir -p $HADOOP_HOME/tmp $HADOOP_HOME/data/hdfs/{namenode,datanode}
cd $HADOOP_HOME/etc/hadoop
echo 'export JAVA_HOME='$JAVA_HOME >> $HADOOP_HOME/etc/hadoop/hadoop-env.sh    # daemons started over SSH do not inherit the login environment, so pin JAVA_HOME here
echo 'export HADOOP_SSH_OPTS="-i /home/hadoop/.ssh/id_rsa"' >> $HADOOP_HOME/etc/hadoop/hadoop-env.sh
cp $HADOOP_HOME/etc/hadoop/core-site.xml $HADOOP_HOME/etc/hadoop/core-site.xml.origin
echo '
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://iZ2zeh3rmvn71widtik73gZ:9000</value>
</property>
<!-- Default file system URI (fs.defaultFS supersedes the deprecated fs.default.name) -->
<property>
<name>hadoop.native.lib</name>
<value>false</value>
<description>Disable the native hadoop library to silence its load warning.</description>
</property>
<!-- Flag controlling use of the native hadoop library -->
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>'${HADOOP_HOME}'/tmp</value>
<description>A base for other temporary directories.</description>
</property>
<!-- Base path for other temporary directories -->
</configuration>
' > $HADOOP_HOME/etc/hadoop/core-site.xml
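Note the quoting trick above: the echo string is single-quoted, so '${HADOOP_HOME}' briefly leaves the quoted region and lets the shell splice the real path into the XML. A quick well-formedness check on the result (assumes libxml2's xmllint is available, e.g. via yum install -y libxml2):

xmllint --noout $HADOOP_HOME/etc/hadoop/core-site.xml && echo 'core-site.xml OK'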
cp $HADOOP_HOME/etc/hadoop/hdfs-site.xml $HADOOP_HOME/etc/hadoop/hdfs-site.xml.origin
echo '
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<!-- <property>
<name>dfs.namenode.http-address</name>
<value>iZ2zeh3rmvn71widtik73gZ:50070</value>
<description> fetch NameNode images and edits; mind the hostname </description>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>iZ2zeh3rmvn71widtik72kZ:50090</value>
<description> fetch SecondaryNameNode fsimage </description>
</property> -->
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<!-- Number of block replicas -->
<property>
<name>dfs.namenode.name.dir</name>
<value>file:'${HADOOP_HOME}'/data/hdfs/namenode</value>
</property>
<!-- Where the NameNode stores its data -->
<property>
<name>dfs.datanode.data.dir</name>
<value>file:'${HADOOP_HOME}'/data/hdfs/datanode</value>
</property>
<!-- Where the DataNode stores its data -->
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>'${HADOOP_HOME}'/tmp</value>
<description>A base for other temporary directories.</description>
</property>
<!-- Base path for other temporary directories -->
</configuration>
' > $HADOOP_HOME/etc/hadoop/hdfs-site.xml
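Unlike the other files, Hadoop 2.7.3 ships only mapred-site.xml.template, and the echo below creates mapred-site.xml from scratch, so the untouched template already serves as the pristine reference. If you want a matching .origin backup for consistency (optional, assumed naming):

cp $HADOOP_HOME/etc/hadoop/mapred-site.xml.template $HADOOP_HOME/etc/hadoop/mapred-site.xml.origin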
echo '
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<!-- Framework used to run MapReduce jobs -->
<!-- <property>
<name>mapreduce.jobhistory.address</name>
<value>iZ2zeh3rmvn71widtik73gZ:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>iZ2zeh3rmvn71widtik73gZ:19888</value>
</property> -->
</configuration>
' > $HADOOP_HOME/etc/hadoop/mapred-site.xml
cp $HADOOP_HOME/etc/hadoop/yarn-site.xml $HADOOP_HOME/etc/hadoop/yarn-site.xml.origin
echo '
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<configuration>
<!-- Site specific YARN configuration properties -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<!-- Auxiliary service run by the NodeManager; must be mapreduce_shuffle for MapReduce jobs to run -->
<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<!-- Implementation class backing the mapreduce_shuffle service -->
<property>
<name>yarn.nodemanager.container-manager.thread-count</name>
<value>8</value>
<final>true</final>
</property>
<!-- Number of threads used by the container manager (default 20) -->
<property>
<name>yarn.nodemanager.resource.cpu-vcores</name>
<value>8</value>
<final>true</final>
</property>
<!-- Number of CPU cores that can be allocated to containers -->
<property>
<name>yarn.scheduler.minimum-allocation-mb</name>
<value>2048</value>
<final>true</final>
</property>
<!-- Minimum memory allocation per container, in MB (default 1024) -->
<property>
<name>yarn.scheduler.maximum-allocation-mb</name>
<value>102400</value>
<final>true</final>
</property>
<!-- Maximum memory allocation per container, in MB (default 8192) -->
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>32768</value>
<final>true</final>
</property>
<!-- Total physical memory available to containers, in MB -->
<property>
<name>yarn.resourcemanager.hostname</name>
<value>iZ2zeh3rmvn71widtik73gZ</value>
</property>
</configuration>
' > $HADOOP_HOME/etc/hadoop/yarn-site.xml
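The thread, vcore, and memory figures above should match the actual hardware of the nodes; a quick comparison (generic commands, not from the original setup):

nproc     # compare with yarn.nodemanager.resource.cpu-vcores
free -m   # compare with yarn.nodemanager.resource.memory-mb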
echo 'iZ2zeh3rmvn71widtik72kZ' >> $HADOOP_HOME/etc/hadoop/slaves    # appended after the default "localhost" entry, so the master also runs a worker
# spark
cp $HADOOP_HOME/etc/hadoop/slaves ${SPARK_HOME}/conf
echo 'export HADOOP_CONF_DIR='${HADOOP_HOME}/etc/hadoop >> ${SPARK_HOME}/conf/spark-env.sh
scp -r ~/libs hadoop@10.85.125.176:~/    # copy the whole stack to the slave (no space after the colon)
ssh hadoop@10.85.125.176                 # optionally log in to the slave to verify the copy, then exit
hdfs namenode -format                    # format once, on the master only (plain ASCII hyphen before "format")
$HADOOP_HOME/sbin/start-all.sh           # start HDFS and YARN
${SPARK_HOME}/sbin/start-all.sh          # start the Spark master and workers
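If everything came up, jps on each node should show the expected daemons, and the standard web UIs for Hadoop 2.7 / Spark 2.0 should respond (hostnames as configured above):

jps    # master: NameNode, ResourceManager, Master, plus DataNode/NodeManager/Worker while localhost is in slaves; slave: DataNode, NodeManager, Worker
# HDFS NameNode UI:       http://iZ2zeh3rmvn71widtik73gZ:50070
# YARN ResourceManager:   http://iZ2zeh3rmvn71widtik73gZ:8088
# Spark master UI:        http://iZ2zeh3rmvn71widtik73gZ:8080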