A Detailed Guide to Building a Hadoop Platform from Scratch

Create three virtual machines running CentOS 7.

Basic Environment Configuration

Note: DHCP is not recommended, because the IP addresses would change over time.

Configure the IP addresses

1.master

[root@master ~]# nmcli connection add ifname ens32 con-name ens32 autoconnect yes type ethernet ipv4.method manual ipv4.addresses 192.168.130.101/24 ipv4.dns 114.114.114.114 ipv4.gateway 192.168.130.2
[root@master ~]# nmcli con up ens32

2.slave1

[root@slave1 ~]# nmcli connection add ifname ens33 con-name ens33 autoconnect yes type ethernet ipv4.method manual ipv4.addresses 192.168.130.102/24 ipv4.dns 114.114.114.114 ipv4.gateway 192.168.130.2
[root@slave1 ~]# nmcli con up ens33

3.slave2

[root@slave2 ~]# nmcli connection add ifname ens33 con-name ens33 autoconnect yes type ethernet ipv4.method manual ipv4.addresses 192.168.130.103/24 ipv4.dns 114.114.114.114 ipv4.gateway 192.168.130.2
[root@slave2 ~]# nmcli con up ens33
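Before moving on, it is worth confirming that each static address actually took effect; the ping test below only proves outbound connectivity. A minimal check, using the interface names from the commands above (slave1 shown; adjust the interface and expected address per node):

ip -4 addr show ens33    # should list 192.168.130.102/24
ip route                 # the default route should point to 192.168.130.2
cat /etc/resolv.conf     # should contain nameserver 114.114.114.114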

Ping Baidu to verify connectivity

1.master

[root@master ~]# ping baidu.com
PING baidu.com (39.156.66.10) 56(84) bytes of data.
64 bytes from 39.156.66.10 (39.156.66.10): icmp_seq=1 ttl=128 time=28.5 ms
^C
--- baidu.com ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 28.587/28.587/28.587/0.000 ms
[root@master ~]#

2.slave1

[root@slave1 ~]# ping baidu.com
PING baidu.com (110.242.68.66) 56(84) bytes of data.
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=1 ttl=128 time=34.5 ms
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=2 ttl=128 time=34.9 ms
^C
--- baidu.com ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1002ms
rtt min/avg/max/mdev = 34.512/34.708/34.904/0.196 ms
[root@slave1 ~]#

3.slave2

[root@slave2 ~]# ping baidu.com
PING baidu.com (110.242.68.66) 56(84) bytes of data.
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=1 ttl=128 time=33.0 ms
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=2 ttl=128 time=35.2 ms
^C
--- baidu.com ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1002ms
rtt min/avg/max/mdev = 33.035/34.138/35.241/1.103 ms
[root@slave2 ~]#

Disable the firewall and SELinux

1.master

[root@master ~]# systemctl stop firewalld
[root@master ~]# systemctl disable firewalld
[root@master ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
Active: inactive (dead)
Docs: man:firewalld(1)
[root@master ~]#

2.slave1

[root@slave1 ~]# systemctl stop firewalld
[root@slave1 ~]# systemctl disable firewalld
[root@slave1 ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
Active: inactive (dead)
Docs: man:firewalld(1)
[root@slave1 ~]#
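Run the same firewalld commands on slave2 as well. The section title also mentions SELinux, but the original does not show those commands; a common way to disable it on all three nodes is the sketch below (run as root):

setenforce 0                                                         # switch to permissive mode immediately
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config  # persist the change across reboots
getenforce                                                           # verify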

Create the hadoop user

1.master

[root@master ~]# id hadoop
uid=1000(hadoop) gid=1000(hadoop) groups=1000(hadoop),10(wheel)
[root@master ~]#

2.slave1

[root@slave1 ~]# id hadoop
uid=1000(hadoop) gid=1000(hadoop) groups=1000(hadoop),10(wheel)
[root@slave1 ~]#

3.slave2

[root@slave2 ~]# id hadoop
uid=1000(hadoop) gid=1000(hadoop) groups=1000(hadoop),10(wheel)
[root@slave2 ~]#
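The `id hadoop` output above shows the user already exists on every node; the creation commands themselves are not included. A minimal sketch consistent with that output (uid/gid 1000, member of wheel), run as root on each node:

useradd -m hadoop         # on a fresh CentOS 7 install this typically gets uid/gid 1000
usermod -aG wheel hadoop  # add to the wheel group, as the id output shows
id hadoop                 # confirm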

Set a password for the hadoop user

1.master

[root@master ~]# echo password|passwd --stdin hadoop
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.
[root@master ~]#

2.slave1

[root@slave1 ~]# echo 'password' |passwd --stdin  hadoop
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.
[root@slave1 ~]#

3.slave2

[root@slave2 ~]# echo 'password' |passwd --stdin hadoop
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.
[root@slave2 ~]#

Install the JDK

Remove the pre-installed JDK packages

1.master

[root@master ~]# rpm -qa |grep java
java-1.8.0-openjdk-headless-1.8.0.131-11.b12.el7.x86_64
javapackages-tools-3.4.1-11.el7.noarch
tzdata-java-2017b-1.el7.noarch
java-1.7.0-openjdk-headless-1.7.0.141-2.6.10.5.el7.x86_64
java-1.7.0-openjdk-1.7.0.141-2.6.10.5.el7.x86_64
java-1.8.0-openjdk-1.8.0.131-11.b12.el7.x86_64
python-javapackages-3.4.1-11.el7.noarch
[root@master ~]# rpm -e --nodeps $(rpm -qa|grep java)
[root@master ~]# rpm -qa |grep java
[root@master ~]#

2.slave1

[root@slave1 ~]# rpm -qa |grep java
javapackages-tools-3.4.1-11.el7.noarch
tzdata-java-2017b-1.el7.noarch
python-javapackages-3.4.1-11.el7.noarch
[root@slave1 ~]# rpm -e --nodeps $(rpm -qa|grep java)
[root@slave1 ~]# rpm -qa |grep java
[root@slave1 ~]#

3.slave2

[root@slave2 ~]# rpm -qa |grep java
javapackages-tools-3.4.1-11.el7.noarch
tzdata-java-2017b-1.el7.noarch
python-javapackages-3.4.1-11.el7.noarch
[root@slave2 ~]# rpm -e --nodeps $(rpm -qa|grep java)
[root@slave2 ~]# rpm -qa |grep java
[root@slave2 ~]#

Install the new JDK

Step 1: Extract the installation package

1.master

[root@master software]# tar -zxvf jdk-8u152-linux-x64.tar.gz  -C /usr/local/src/

2.slave1



3.slave2



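The slave1 and slave2 steps are left blank above. Two equivalent options, sketched below: extract the same archive locally on each slave, or copy the already-extracted (and later renamed) directory from master, which is what the DataNode startup note further down falls back to. Paths follow the master example; adjust as needed:

# option 1: extract the archive locally on each slave
tar -zxvf jdk-8u152-linux-x64.tar.gz -C /usr/local/src/

# option 2: copy the renamed directory from master (run on master)
scp -r /usr/local/src/jdk root@slave1:/usr/local/src/
scp -r /usr/local/src/jdk root@slave2:/usr/local/src/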
Step 2: Configure the JDK environment variables

1.master

[root@master src]# ls
jdk1.8.0_152
[root@master src]# mv jdk1.8.0_152/ jdk // rename: the version number makes the path too long
[root@master jdk]# vim /etc/profile
// press G (uppercase) to jump to the last line, then append the two lines below
export JAVA_HOME=/usr/local/src/jdk
export PATH=$PATH:$JAVA_HOME/bin
[root@master jdk]# source /etc/profile
// reload the environment variables
[root@master jdk]# java -version
java version "1.8.0_152"
Java(TM) SE Runtime Environment (build 1.8.0_152-b16)
Java HotSpot(TM) 64-Bit Server VM (build 25.152-b16, mixed mode)
[root@master jdk]#

Hadoop

Install the Hadoop package

1.master

[root@master software]# tar -xzf hadoop-2.7.1.tar.gz  -C /usr/local/src/
[root@master software]# cd /usr/local/src/
[root@master src]# mv hadoop-2.7.1/ hadoop

Update the environment variables

[root@master hadoop]# tail -n 3 /etc/profile
# hadoop
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
[root@master hadoop]# source /etc/profile // reload the environment variables
[root@master hadoop]# hadoop
Usage: hadoop [--config confdir] [COMMAND | CLASSNAME]
  CLASSNAME            run the class named CLASSNAME
 or
  where COMMAND is one of:
  fs                   run a generic filesystem user client
  version              print the version
  jar <jar>            run a jar file
                       note: please use "yarn jar" to launch
                             YARN applications, not this command.
  checknative [-a|-h]  check native hadoop and compression libraries availability
  distcp <srcurl> <desturl> copy file or directories recursively
  archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive
  classpath            prints the class path needed to get the
                       Hadoop jar and the required libraries
  credential           interact with credential providers
  daemonlog            get/set the log level for each daemon
  trace                view and modify Hadoop tracing settings

Most commands print help when invoked w/o parameters.
[root@master hadoop]#

Give the hadoop user ownership

[root@master hadoop]# chown -R hadoop:hadoop /usr/local/src/
[root@master hadoop]# ll /usr/local/src/
total 0
drwxr-xr-x 9 hadoop hadoop 149 Jun 29 2015 hadoop
drwxr-xr-x 8 hadoop hadoop 255 Sep 14 2017 jdk

Configure hadoop-env.sh

[root@master hadoop]# vi etc/hadoop/hadoop-env.sh
[root@master hadoop]# cat etc/hadoop/hadoop-env.sh |grep JAVA
# The only required environment variable is JAVA_HOME. All others are
# set JAVA_HOME in this file, so that it is correctly defined on
export JAVA_HOME=/usr/local/src/jdk

Configure the Cluster Environment

Hostname resolution

1.master

[root@master hadoop]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.130.101 master
192.168.130.102 slave1
192.168.130.103 slave2
[root@master hadoop]#

2.slave1

[root@slave1 ~]# vim /etc/hosts
[root@slave1 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.130.101 master
192.168.130.102 slave1
192.168.130.103 slave2
[root@slave1 ~]#

3.slave2

[root@slave2 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.130.101 master
192.168.130.102 slave1
192.168.130.103 slave2
[root@slave2 ~]#
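A quick sanity check that the hostnames resolve on every node (a sketch; run on each machine):

ping -c 1 master
ping -c 1 slave1
ping -c 1 slave2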

Passwordless SSH login

Generate a key pair on each node and append its own public key to authorized_keys
1. master
[root@master ~]# su - hadoop
Last login: Thu Apr 25 17:45:05 CST 2024 on pts/0
[hadoop@master ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Created directory '/home/hadoop/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:2b+pnJuChp6RkHkx9XYqUgEpyhCf8WHBz3dtE67E9lA hadoop@master
The key's randomart image is:
+---[RSA 2048]----+
|....=+o |
|...=oo o E |
|o.o.+o. o..+ . |
|.. o +o..=* = |
| + o ..S+.= . |
| o o . ... |
| o. . . |
| .oo .. o o |
| .o. .*oo |
+----[SHA256]-----+
[hadoop@master ~]$
[hadoop@master ~]$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys
[hadoop@master ~]$ ls ~/.ssh/
authorized_keys id_rsa id_rsa.pub
[hadoop@master ~]$ chmod 600 ~/.ssh/authorized_keys
[hadoop@master ~]$ ll ~/.ssh/
total 12
-rw------- 1 hadoop hadoop 3358 Apr 25 18:30 authorized_keys
-rw------- 1 hadoop hadoop 1679 Apr 25 17:45 id_rsa
-rw-r--r-- 1 hadoop hadoop 395 Apr 25 17:45 id_rsa.pub
[root@master .ssh]# cat /etc/ssh/sshd_config |grep Pub
PubkeyAuthentication yes // (open the file with vim) uncomment this line
[root@master .ssh]# systemctl restart sshd
[root@master .ssh]#
2. slave1
[root@slave1 ~]# su - hadoop
Last login: Tue Apr 7 15:37:22 CST 2020 on :0
[hadoop@slave1 ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Created directory '/home/hadoop/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:H5ouJBZdnawOEbyx1xFU3tKt8NEm5XlmnpzghOtQMcY hadoop@slave1
The key's randomart image is:
+---[RSA 2048]----+
| ... ++B. .|
| + . *E+o =.|
| . * o ++o=.O|
| . = o o ++oO+|
| . +S... .o+.|
| o . .+o. |
| . o o .. |
| .. |
| .. |
+----[SHA256]-----+
[hadoop@slave1 ~]$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys
[hadoop@slave1 ~]$ ls ~/.ssh/
authorized_keys id_rsa id_rsa.pub
[hadoop@slave1 ~]$ chmod 600 ~/.ssh/authorized_keys
[root@slave1 ~]# systemctl restart sshd
3. slave2
[root@slave2 ~]# su - hadoop
Last login: Tue Apr 7 15:37:22 CST 2020 on :0
[hadoop@slave2 ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Created directory '/home/hadoop/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:Brs8qBwc6izUBbj10eS/AWBZ6Dtxs1EZ8mVc9fM97Yg hadoop@slave2
The key's randomart image is:
+---[RSA 2048]----+
| . o*+ .+o.... |
| . o.+o.ooo. .|
| o + oo.. ..|
| . = *o =|
| o . = So .+|
| + o = + o . o.|
|o o . = . E . .|
|+. o . |
|.oo |
+----[SHA256]-----+
[root@slave2 ~]# su - hadoop
Last login: Thu Apr 25 17:45:29 CST 2024 on pts/0
[hadoop@slave2 ~]$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys
[hadoop@slave2 ~]$ ls ~/.ssh/
authorized_keys id_rsa id_rsa.pub
[hadoop@slave2 ~]$ chmod 600 ~/.ssh/authorized_keys
[root@slave2 ~]# vim /etc/ssh/sshd_config
[root@slave2 ~]# cat /etc/ssh/sshd_config |grep Pub
PubkeyAuthentication yes
[root@slave2 ~]# systemctl restart sshd
Exchange SSH keys between the nodes
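The original stops at this heading. The usual way to finish the step is to copy each node's public key to the other two nodes so the hadoop user can ssh between them without a password; a sketch using ssh-copy-id (run as hadoop on master, then repeat the analogous commands on slave1 and slave2):

ssh-copy-id hadoop@slave1    # appends master's public key to slave1's authorized_keys
ssh-copy-id hadoop@slave2
ssh-copy-id hadoop@master    # optional: lets master ssh to itself without a prompt

# verify passwordless login
ssh hadoop@slave1 hostname
ssh hadoop@slave2 hostname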

Hadoop configuration files (all edited on master)

hdfs-site.xml

[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# vim hdfs-site.xml
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/usr/local/src/hadoop/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/usr/local/src/hadoop/dfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
</configuration>

core-site.xml

[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# vim core-site.xml
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://192.168.130.101:9000</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>file:/usr/local/src/hadoop/tmp</value>
</property>
</configuration>

mapred-site.xml

[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# cp mapred-site.xml.template mapred-site.xml
[root@master hadoop]# vim mapred-site.xml
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>master:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>master:19888</value>
</property>
</configuration>

yarn-site.xml

[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# vim yarn-site.xml
<configuration>
<!-- Site specific YARN configuration properties -->
<property>
<name>yarn.resourcemanager.address</name>
<value>master:8032</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>master:8030</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>master:8031</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>master:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>master:8088</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
</configuration>

Other files and directories

[root@master hadoop]# vim masters
192.168.130.101
[root@master hadoop]# vim slaves
slave1
slave2
[root@master hadoop]# mkdir /usr/local/src/hadoop/tmp
[root@master hadoop]# mkdir /usr/local/src/hadoop/dfs/name -p
[root@master hadoop]# mkdir /usr/local/src/hadoop/dfs/data -p
[root@master hadoop]# chown -R hadoop:hadoop /usr/local/src/hadoop/

Copy to the other nodes

1.slave1

[root@master ~]# scp -r /usr/local/src/hadoop/ root@slave1:/usr/local/src/
The authenticity of host 'slave1 (192.168.130.102)' can't be established.
ECDSA key fingerprint is SHA256:vnHclJTJVtDbeULN8jdOLhTCmqxJNqUQshH9g9LfJ3k.
ECDSA key fingerprint is MD5:31:03:3d:83:46:aa:c4:d0:c9:fc:5f:f1:cf:2d:fd:e2.
Are you sure you want to continue connecting (yes/no)? yes
* * * * * * *

2.slave2

[root@master ~]# scp -r /usr/local/src/hadoop/ root@slave2:/usr/local/src/
The authenticity of host 'slave2 (192.168.130.103)' can't be established.
ECDSA key fingerprint is SHA256:vnHclJTJVtDbeULN8jdOLhTCmqxJNqUQshH9g9LfJ3k.
ECDSA key fingerprint is MD5:31:03:3d:83:46:aa:c4:d0:c9:fc:5f:f1:cf:2d:fd:e2.
Are you sure you want to continue connecting (yes/no)? yes
* * * * * * *
Configure the environment variables on the other nodes (slave1, slave2)

1.slave1

[root@slave1 .ssh]# tail -n 8 /etc/profile
unset -f pathmunge
# jdk
export JAVA_HOME=/usr/local/src/jdk
export PATH=$PATH:$JAVA_HOME/bin
# hadoop
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
[root@slave1 .ssh]#

2.slave2

[root@slave2 ~]# tail -n 8 /etc/profile

# jdk
export JAVA_HOME=/usr/local/src/jdk
export PATH=$PATH:$JAVA_HOME/bin
# hadoop
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
[root@slave2 ~]#

Start the Hadoop Cluster

Format the NameNode (on master)

[hadoop@master hadoop]$ bin/hdfs namenode -format
*****
24/04/25 19:41:28 INFO util.ExitUtil: Exiting with status 0
24/04/25 19:41:28 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at master/192.168.130.101
************************************************************/

Start the NameNode (on master)

[hadoop@master hadoop]$ hadoop-daemon.sh start namenode
starting namenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-namenode-master.out
[hadoop@master hadoop]$ jps // check the running processes
4746 NameNode
4782 Jps
[hadoop@master hadoop]$

Start the DataNode on slave1

[root@slave1 hadoop]# chown -R  hadoop:hadoop /usr/local/src/
[root@slave1 hadoop]# su - hadoop
[hadoop@slave1 ~]$ source /etc/profile
[hadoop@slave1 src]$ hadoop-daemon.sh start datanode
starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave1.out
[hadoop@slave1 src]$ jps
4990 Jps
4943 DataNode
[hadoop@slave1 src]$

Note:

If the following error appears:

[hadoop@slave1 hadoop]$ hadoop-daemon.sh start datanode
starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave1.out
/usr/local/src/hadoop/bin/hdfs: line 304: /usr/local/src/jdk/bin/java: No such file or directory
// then, on master, run `scp -r jdk/ hadoop@slave1:/usr/local/src/jdk` to copy the JDK to slave1

Start the DataNode on slave2

[hadoop@slave2 hadoop]$ source /etc/profile
[hadoop@slave2 hadoop]$ hadoop-daemon.sh start datanode
starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave2.out
[hadoop@slave2 hadoop]$ jps
3598 Jps
3551 DataNode
[hadoop@slave2 hadoop]$

Start the SecondaryNameNode (on master)

[hadoop@master src]$  hadoop-daemon.sh start secondarynamenode
starting secondarynamenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-secondarynamenode-master.out
[hadoop@master src]$ jps
5009 Jps
4746 NameNode
4974 SecondaryNameNode
[hadoop@master src]$

Check the HDFS report

[hadoop@master src]$ hdfs dfsadmin -report
Configured Capacity: 94434762752 (87.95 GB)
Present Capacity: 82971066368 (77.27 GB)
DFS Remaining: 82971058176 (77.27 GB)
DFS Used: 8192 (8 KB)
DFS Used%: 0.00%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0
Missing blocks (with replication factor 1): 0
-------------------------------------------------
Live datanodes (2):

Name: 192.168.130.103:50010 (slave2)
Hostname: slave2
Decommission Status : Normal
Configured Capacity: 47217381376 (43.97 GB)
DFS Used: 4096 (4 KB)
Non DFS Used: 5731614720 (5.34 GB)
DFS Remaining: 41485762560 (38.64 GB)
DFS Used%: 0.00%
DFS Remaining%: 87.86%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Apr 25 19:57:20 CST 2024

Name: 192.168.130.102:50010 (slave1)
Hostname: slave1
Decommission Status : Normal
Configured Capacity: 47217381376 (43.97 GB)
DFS Used: 4096 (4 KB)
Non DFS Used: 5732081664 (5.34 GB)
DFS Remaining: 41485295616 (38.64 GB)
DFS Used%: 0.00%
DFS Remaining%: 87.86%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Apr 25 19:57:20 CST 2024
[hadoop@master src]$

Check node status in a browser

  1. http://master:50070/ (NameNode and DataNode status)
  2. http://master:50090 (SecondaryNameNode status)

Start all services

[hadoop@master hadoop]$ start-yarn.sh
[hadoop@master hadoop]$ start-dfs.sh
[hadoop@master hadoop]$ jps
34257 NameNode
34449 SecondaryNameNode
34494 Jps
32847 ResourceManager
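With all daemons up, a small HDFS smoke test confirms the cluster is usable end to end (a sketch; the paths are arbitrary examples):

hdfs dfs -mkdir -p /user/hadoop/input
hdfs dfs -put /etc/hosts /user/hadoop/input/
hdfs dfs -ls /user/hadoop/input
hdfs dfs -cat /user/hadoop/input/hosts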
Next: [学习hadoop的第三天——hive搭建-CSDN博客](https://blog.csdn.net/m0_74752717/article/details/137449938?spm=1001.2014.3001.5501)
