三台虚拟机,centos6.5

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.59.130 m1
192.168.59.131 s1
192.168.59.132 s2

修改主机名

[root@m1 hadoop]# cat /etc/sysconfig/network
NETWORKING=yes
HOSTNAME=m1

修改主机映射

[root@m1 hadoop]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.59.130 m1
192.168.59.131 s1
192.168.59.132 s2

ssh免密码登陆(注意! 要求每台机子互相都能ssh包括本机)

ssh-keygen -t rsa
ssh-copy-id -i ~/.ssh/id_rsa.pub m1
ssh-copy-id -i ~/.ssh/id_rsa.pub s1
ssh-copy-id -i ~/.ssh/id_rsa.pub s2
（在每台机器上都执行一遍,保证任意两台机器之间、以及对本机都能免密码 ssh）

安装jdk

http://www.cnblogs.com/xiaojf/p/6568426.html

安装hadoop2.7.3

解压,重命名

[root@m1 soft]# ll
total
drwxr-xr-x. root root Aug hadoop
drwxr-xr-x. root root Mar : jar
drwxr-xr-x. uucp Dec : jdk
drwxr-xr-x. root root Mar : kafka
drwxrwxr-x. Mar scala-2.11.
drwxr-xr-x. root root Mar : tmp
drwxr-xr-x. Aug zookeeper-3.4.

创建目录存放日志文件还要有数据文件

mkdir -p /usr/local/soft/tmp/hadoop/tmp
mkdir -p /usr/local/soft/tmp/hadoop/dfs/name
mkdir -p /usr/local/soft/tmp/hadoop/dfs/data

修改配置文件

[root@m1 soft]# cd /usr/local/soft/hadoop/etc/hadoop/
[root@m1 hadoop]# ll
total
-rw-r--r--. root root Aug capacity-scheduler.xml
-rw-r--r--. root root Aug configuration.xsl
-rw-r--r--. root root Aug container-executor.cfg
-rw-r--r--. root root Aug core-site.xml
-rw-r--r--. root root Aug hadoop-env.cmd
-rw-r--r--. root root Aug hadoop-env.sh
-rw-r--r--. root root Aug hadoop-metrics2.properties
-rw-r--r--. root root Aug hadoop-metrics.properties
-rw-r--r--. root root Aug hadoop-policy.xml
-rw-r--r--. root root Aug hdfs-site.xml
-rw-r--r--. root root Aug httpfs-env.sh
-rw-r--r--. root root Aug httpfs-log4j.properties
-rw-r--r--. root root Aug httpfs-signature.secret
-rw-r--r--. root root Aug httpfs-site.xml
-rw-r--r--. root root Aug kms-acls.xml
-rw-r--r--. root root Aug kms-env.sh
-rw-r--r--. root root Aug kms-log4j.properties
-rw-r--r--. root root Aug kms-site.xml
-rw-r--r--. root root Aug log4j.properties
-rw-r--r--. root root Aug mapred-env.cmd
-rw-r--r--. root root Aug mapred-env.sh
-rw-r--r--. root root Aug mapred-queues.xml.template
-rw-r--r--. root root Aug mapred-site.xml.template
-rw-r--r--. root root Aug slaves
-rw-r--r--. root root Aug ssl-client.xml.example
-rw-r--r--. root root Aug ssl-server.xml.example
-rw-r--r--. root root Aug yarn-env.cmd
-rw-r--r--. root root Aug yarn-env.sh
-rw-r--r--. root root Aug yarn-site.xml

yarn-env.sh

[root@m1 hadoop]# vi yarn-env.sh 
# Licensed to the Apache Software Foundation (ASF) under one

# or more contributor license agreements.  See the NOTICE file

# distributed with this work for additional information

# regarding copyright ownership.  The ASF licenses this file

# to you under the Apache License, Version 2.0 (the

# "License"); you may not use this file except in compliance

# with the License.  You may obtain a copy of the License at

#

#     http://www.apache.org/licenses/LICENSE-2.0

#

# Unless required by applicable law or agreed to in writing, software

# distributed under the License is distributed on an "AS IS" BASIS,

# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

# See the License for the specific language governing permissions and

# limitations under the License.

# Set Hadoop-specific environment variables here.

# The only required environment variable is JAVA_HOME. All others are

# optional.  When running a distributed configuration it is best to

# set JAVA_HOME in this file, so that it is correctly defined on

# remote nodes.

# The java implementation to use.

export JAVA_HOME=/usr/local/soft/jdk

slaves

[root@m1 hadoop]# vi slaves 
s1
s2

core-site.xml

<configuration>

      <property>

        <name>fs.defaultFS</name>

       <value>hdfs://m1:9000</value>

    </property>

    <property>

        <name>io.file.buffer.size</name>

        <value>131072</value>

    </property>

    <property>

        <name>hadoop.tmp.dir</name>

       <value>file:/usr/local/soft/tmp/hadoop/tmp</value>

        <description>Abase for other temporary  directories.</description>

    </property>

</configuration>

hdfs-site.xml

<configuration>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>m1:9001</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/usr/local/soft/tmp/hadoop/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/usr/local/soft/tmp/hadoop/dfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
</configuration>

mapred-site.xml

<configuration>

      <property>                                                                 

        <name>mapreduce.framework.name</name>

                <value>yarn</value>

           </property>

          <property>

                 <name>mapreduce.jobhistory.address</name>

                  <value>m1:10020</value>

          </property>

          <property>

               <name>mapreduce.jobhistory.webapp.address</name>

               <value>m1:19888</value>

       </property>

</configuration>

yarn-site.xml

<configuration>

<!-- Site specific YARN configuration properties -->

        <property>

              <name>yarn.nodemanager.aux-services</name>

              <value>mapreduce_shuffle</value>

        </property>

        <property>                                                              

<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>

              <value>org.apache.hadoop.mapred.ShuffleHandler</value>

        </property>

        <property>

              <name>yarn.resourcemanager.address</name>

              <value>m1:8032</value>

       </property>

       <property>

               <name>yarn.resourcemanager.scheduler.address</name>

              <value>m1:8030</value>

       </property>

       <property>

           <name>yarn.resourcemanager.resource-tracker.address</name>

             <value>m1:8031</value>

      </property>

      <property>

             <name>yarn.resourcemanager.admin.address</name>

              <value>m1:8033</value>

       </property>

       <property>

              <name>yarn.resourcemanager.webapp.address</name>

              <value>m1:8088</value>

       </property>

</configuration>

设置Hadoop环境变量

export HADOOP_HOME=/usr/local/soft/hadoop
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin

分发代码

[root@m1 soft]# scp -r hadoop root@s2:/usr/local/soft/

namenode format

[root@m1 soft]# hdfs namenode -format
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it. // :: INFO namenode.NameNode: STARTUP_MSG:
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG: host = m1/192.168.59.130
STARTUP_MSG: args = [-format]
STARTUP_MSG: version = 2.7.3

启动

[root@m1 soft]# start-all.sh
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [m1]
m1: starting namenode, logging to /usr/local/soft/hadoop/logs/hadoop-root-namenode-m1.out
s1: starting datanode, logging to /usr/local/soft/hadoop/logs/hadoop-root-datanode-s1.out
s2: starting datanode, logging to /usr/local/soft/hadoop/logs/hadoop-root-datanode-s2.out
Starting secondary namenodes [master]
master: ssh: Could not resolve hostname master: Name or service not known
（注意: 这里 secondary namenode 在 master 上启动失败,是因为实际生效的 hdfs-site.xml 中 dfs.namenode.secondary.http-address 配置的主机名是 master 而不是 m1。按上文配置为 m1:9001 并重新分发配置,或在 /etc/hosts 中加入 master 的映射,即可解决。）
starting yarn daemons
starting resourcemanager, logging to /usr/local/soft/hadoop/logs/yarn-root-resourcemanager-m1.out
s1: starting nodemanager, logging to /usr/local/soft/hadoop/logs/yarn-root-nodemanager-s1.out
s2: starting nodemanager, logging to /usr/local/soft/hadoop/logs/yarn-root-nodemanager-s2.out

验证

[root@m1 soft]# hadoop dfs -ls /
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it. [root@m1 soft]# hadoop dfs -mkdir /xiaojf
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it. [root@m1 soft]# hadoop dfs -ls /
DEPRECATED: Use of this script to execute hdfs command is deprecated.
Instead use the hdfs command for it. Found 1 items
drwxr-xr-x - root supergroup -- : /xiaojf

完成

hadoop 2.7.3 集群安装的更多相关文章

  1. hadoop 2.2.0集群安装详细步骤(简单配置,无HA)

    安装环境操作系统:CentOS 6.5 i586(32位)java环境:JDK 1.7.0.51hadoop版本:社区版本2.2.0,hadoop-2.2.0.tar.gz 安装准备设置集群的host ...

  2. hadoop 2.2.0集群安装

    相关阅读: hbase 0.98.1集群安装 本文将基于hadoop 2.2.0解说其在linux集群上的安装方法,并对一些重要的设置项进行解释,本文原文链接:http://blog.csdn.net ...

  3. Hadoop 2.6.1 集群安装配置教程

    集群环境: 192.168.56.10 master 192.168.56.11 slave1 192.168.56.12 slave2 下载安装包/拷贝安装包 # 存放路径: cd /usr/loc ...

  4. Hive之 hive-1.2.1 + hadoop 2.7.4 集群安装

    一. 相关概念 Hive Metastore有三种配置方式,分别是: Embedded Metastore Database (Derby) 内嵌模式Local Metastore Server 本地 ...

  5. Hadoop完全高可用集群安装

    架构图(HA模型没有SNN节点) 用vm规划了8台机器,用到了7台,SNN节点没用   NN DN SN ZKFC ZK JNN RM NM node1 *     *         node2 * ...

  6. Hadoop 2.4.x集群安装配置问题总结

    配置文件:/etc/profile export JAVA_HOME=/usr/java/latest export HADOOP_PREFIX=/opt/hadoop-2.4.1 export HA ...

  7. CentOS系统下Hadoop 2.4.1集群安装配置(简易版)

    安装配置 1.软件下载 JDK下载:jdk-7u65-linux-i586.tar.gz http://www.oracle.com/technetwork/java/javase/downloads ...

  8. Hadoop 2.5.1集群安装配置

    本文的安装只涉及了hadoop-common.hadoop-hdfs.hadoop-mapreduce和hadoop-yarn,并不包含HBase.Hive和Pig等. http://blog.csd ...

  9. hadoop 1.0.1集群安装及配置

    1.hadoop下载地址:http://www.apache.org/dyn/closer.cgi/hadoop/core/ 2.下载java6软件包,分别在三台安装 3.三台虚拟机,一台作为mast ...

随机推荐

  1. ios 苹果手机硬件摘要

    IPhone4 * 2010年发布. * A4单核处理器. * 3.5英寸Retina显示屏(视网膜屏幕),960x640像素分辨率. * 后置摄像头500万像素. * 前置摄像头30万像素. IPh ...

  2. OutOfMemoryError内存不足

    java.lang.OutOfMemoryError内存不足错误.当可用内存不足以让Java虚拟机分配给一个对象时抛出该错误. 造成此错误的原因有一下几个: 1.内存中加载的数据量过于庞大,如一次从数 ...

  3. android-studio-bundle-141.2178183首次执行Hello World的时候出现ADB not responding. If you'd like to retry, then please manually kill "adb.e的错误

    这是由于有其他程序在占用adb.exe要使用的端口5037,打开命令提示符cmd,输入指令netstat -aon|findstr 5037查看在使用此端口的程序,如 8036即为占用该端口号的PID ...

  4. java源码剖析: 对象内存布局、JVM锁以及优化

    一.目录 1.启蒙知识预热:CAS原理+JVM对象头内存存储结构 2.JVM中锁优化:锁粗化.锁消除.偏向锁.轻量级锁.自旋锁. 3.总结:偏向锁.轻量级锁,重量级锁的优缺点. 二.启蒙知识预热 开启 ...

  5. border-raduis 在IE8中的兼容性问题

    border-raduis 是css3新增加的属性,我们运用起来也很方便,效果很好,但是在IE8以及之前的ie版本并不支持这个属性,怎么解决这个问题呢? 1.切成背景 这也是我经常用到的方法,虽然说有 ...

  6. Java ssh 框架 hibernate 详细理解

    Hibernate框架技术相信对大多数的 java 程序员并不陌生,数据表之间的关系如何通过Hibernate来建立,需要我们认真的分析数据表中数据项之间的交互: 数据库表的之间的关系有: (1)一对 ...

  7. Python 面向对象之一

    Python 面向对象之 类与属性 今天接触了一下面向对象,发现面向对象和之前理解的简直就是天壤之别,在学Linux的时候,一切皆文件,现在学面向对象了,so,一切皆对象. 之前不是一直在学的用面向函 ...

  8. 【lucene系列学习四】log4j日志文件实现多线程的测试

    参考资料:http://nudtgk2000.iteye.com/blog/1716379 首先,在http://www.apache.org/dyn/closer.cgi/logging/log4j ...

  9. poj3020二分图匹配

    The Global Aerial Research Centre has been allotted the task of building the fifth generation of mob ...

  10. 在android中,如何去掉webview读取网页后点击网页上的按钮出现的方框

    参考:http://blog.sina.com.cn/s/blog_64056edc0100xt3l.html <style type="text/css"> .bor ...