Big Data Platform Setup Manual — Hadoop
A detailed, from-scratch guide to building a Hadoop platform.
Create three virtual machines running CentOS 7.
Basic environment configuration
Note: DHCP is not recommended, because the IP address can change.
Configure IP addresses
1.master
[root@master ~]# nmcli connection add ifname ens32 con-name ens32 autoconnect yes type ethernet ipv4.method manual ipv4.addresses 192.168.130.101/24 ipv4.dns 114.114.114.114 ipv4.gateway 192.168.130.2
[root@master ~]# nmcli con up ens32
2.slave1
[root@slave1 ~]# nmcli connection add ifname ens33 con-name ens33 autoconnect yes type ethernet ipv4.method manual ipv4.addresses 192.168.130.102/24 ipv4.dns 114.114.114.114 ipv4.gateway 192.168.130.2
[root@slave1 ~]# nmcli con up ens33
3.slave2
[root@slave2 ~]# nmcli connection add ifname ens33 con-name ens33 autoconnect yes type ethernet ipv4.method manual ipv4.addresses 192.168.130.103/24 ipv4.dns 114.114.114.114 ipv4.gateway 192.168.130.2
[root@slave2 ~]# nmcli con up ens33
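To confirm the static addresses took effect, a quick check along these lines can be run on each node (a sketch; the interface name is the one created above, ens32 on master and ens33 on the slaves):
nmcli connection show    # the new profile should be listed and active
ip addr show ens33       # the configured 192.168.130.x/24 address should appear on the interface
ip route                 # the default route should point at 192.168.130.2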
Test connectivity by pinging Baidu
1.master
[root@master ~]# ping baidu.com
PING baidu.com (39.156.66.10) 56(84) bytes of data.
64 bytes from 39.156.66.10 (39.156.66.10): icmp_seq=1 ttl=128 time=28.5 ms
^C
--- baidu.com ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 28.587/28.587/28.587/0.000 ms
[root@master ~]#
2.slave1
[root@slave1 ~]# ping baidu.com
PING baidu.com (110.242.68.66) 56(84) bytes of data.
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=1 ttl=128 time=34.5 ms
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=2 ttl=128 time=34.9 ms
^C
--- baidu.com ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1002ms
rtt min/avg/max/mdev = 34.512/34.708/34.904/0.196 ms
[root@slave1 ~]#
3.slave2
[root@slave2 ~]# ping baidu.com
PING baidu.com (110.242.68.66) 56(84) bytes of data.
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=1 ttl=128 time=33.0 ms
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=2 ttl=128 time=35.2 ms
^C
--- baidu.com ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1002ms
rtt min/avg/max/mdev = 33.035/34.138/35.241/1.103 ms
[root@slave2 ~]#
Disable the firewall and SELinux
1.master
[root@master ~]# systemctl stop firewalld
[root@master ~]# systemctl disable firewalld
[root@master ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
Active: inactive (dead)
Docs: man:firewalld(1)
[root@master ~]#
2.slave1
[root@slave1 ~]# systemctl stop firewalld
[root@slave1 ~]# systemctl disable firewalld
[root@slave1 ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
Active: inactive (dead)
Docs: man:firewalld(1)
[root@slave1 ~]#
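The heading above also mentions SELinux, and the transcript omits slave2; the same firewalld commands apply there. A minimal sketch of disabling SELinux on each of the three nodes (standard CentOS 7 commands):
setenforce 0                                                         # stop enforcing for the current boot
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config  # make the change permanent across reboots
getenforce                                                           # should now report Permissive (Disabled after a reboot)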
Create the hadoop user
1.master
[root@master ~]# id hadoop
uid=1000(hadoop) gid=1000(hadoop) groups=1000(hadoop),10(wheel)
[root@master ~]#
2.slave1
[root@slave1 ~]# id hadoop
uid=1000(hadoop) gid=1000(hadoop) groups=1000(hadoop),10(wheel)
[root@slave1 ~]#
3.slave2
[root@slave2 ~]# id hadoop
uid=1000(hadoop) gid=1000(hadoop) groups=1000(hadoop),10(wheel)
[root@slave2 ~]#
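The `id hadoop` output above assumes the user already exists on every node. If it does not, a hedged sketch of creating it so the uid/gid and wheel membership match what is shown:
useradd -m -G wheel hadoop    # -m creates /home/hadoop; -G wheel gives the supplementary group shown above
id hadoop                     # verify: uid=1000(hadoop) ... groups=...,10(wheel)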
Set the hadoop user's password
1.master
[root@master ~]# echo password|passwd --stdin hadoop
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.
[root@master ~]#
2.slave1
[root@slave1 ~]# echo 'password' |passwd --stdin hadoop
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.
[root@slave1 ~]#
3.slave2
[root@slave2 ~]# echo 'password' |passwd --stdin hadoop
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.
[root@slave2 ~]#
Install the JDK
Remove the pre-installed JDK packages
1.master
[root@master ~]# rpm -qa |grep java
java-1.8.0-openjdk-headless-1.8.0.131-11.b12.el7.x86_64
javapackages-tools-3.4.1-11.el7.noarch
tzdata-java-2017b-1.el7.noarch
java-1.7.0-openjdk-headless-1.7.0.141-2.6.10.5.el7.x86_64
java-1.7.0-openjdk-1.7.0.141-2.6.10.5.el7.x86_64
java-1.8.0-openjdk-1.8.0.131-11.b12.el7.x86_64
python-javapackages-3.4.1-11.el7.noarch
[root@master ~]# rpm -e --nodeps $(rpm -qa|grep java)
[root@master ~]# rpm -qa |grep java
[root@master ~]#
2.slave1
[root@slave1 ~]# rpm -qa |grep java
javapackages-tools-3.4.1-11.el7.noarch
tzdata-java-2017b-1.el7.noarch
python-javapackages-3.4.1-11.el7.noarch
[root@slave1 ~]# rpm -e --nodeps $(rpm -qa|grep java)
[root@slave1 ~]# rpm -qa |grep java
[root@slave1 ~]#
3.slave2
[root@slave2 ~]# rpm -qa |grep java
javapackages-tools-3.4.1-11.el7.noarch
tzdata-java-2017b-1.el7.noarch
python-javapackages-3.4.1-11.el7.noarch
[root@slave2 ~]# rpm -e --nodeps $(rpm -qa|grep java)
[root@slave2 ~]# rpm -qa |grep java
[root@slave2 ~]#
Install the new JDK
Step 1: extract the installation package
1.master
[root@master software]# tar -zxvf jdk-8u152-linux-x64.tar.gz -C /usr/local/src/
2.slave1
Nothing to do here (the JDK directory is copied over from master later; see the note in the DataNode startup section).
3.slave2
Nothing to do here (same as slave1).
Step 2: configure the JDK environment variables
1.master
[root@master src]# ls
jdk1.8.0_152
[root@master src]# mv jdk1.8.0_152/ jdk    // rename it (the versioned directory name is unwieldy)
[root@master jdk]# vim /etc/profile
// press G (uppercase) to jump to the end of the file, then append the two lines below
export JAVA_HOME=/usr/local/src/jdk
export PATH=$PATH:$JAVA_HOME/bin
[root@master jdk]# source /etc/profile
// apply the new environment variables
[root@master jdk]# java -version
java version "1.8.0_152"
Java(TM) SE Runtime Environment (build 1.8.0_152-b16)
Java HotSpot(TM) 64-Bit Server VM (build 25.152-b16, mixed mode)
[root@master jdk]#
Hadoop
Install the Hadoop package
1.master
[root@master software]# tar -xzf hadoop-2.7.1.tar.gz -C /usr/local/src/
[root@master software]# cd /usr/local/src/
[root@master src]# mv hadoop-2.7.1/ hadoop
Edit the environment variables
[root@master hadoop]# tail -n 3 /etc/profile
# hadoop
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
[root@master hadoop]# source /etc/profile    // apply the environment variables
[root@master hadoop]# hadoop
Usage: hadoop [--config confdir] [COMMAND | CLASSNAME]
CLASSNAME run the class named CLASSNAME
or
where COMMAND is one of:
fs run a generic filesystem user client
version print the version
jar <jar> run a jar file
note: please use "yarn jar" to launch
YARN applications, not this command.
checknative [-a|-h] check native hadoop and compression libraries availability
distcp <srcurl> <desturl> copy file or directories recursively
archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive
classpath prints the class path needed to get the
Hadoop jar and the required libraries
credential interact with credential providers
daemonlog get/set the log level for each daemon
trace view and modify Hadoop tracing settings
Most commands print help when invoked w/o parameters.
[root@master hadoop]#
Give the hadoop user ownership
[root@master hadoop]# chown -R hadoop:hadoop /usr/local/src/
[root@master hadoop]# ll /usr/local/src/
total 0
drwxr-xr-x 9 hadoop hadoop 149 Jun 29 2015 hadoop
drwxr-xr-x 8 hadoop hadoop 255 Sep 14 2017 jdk
Configure hadoop-env.sh
[root@master hadoop]# vi etc/hadoop/hadoop-env.sh
[root@master hadoop]# cat etc/hadoop/hadoop-env.sh |grep JAVA
# The only required environment variable is JAVA_HOME. All others are
# set JAVA_HOME in this file, so that it is correctly defined on
export JAVA_HOME=/usr/local/src/jdk
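If you prefer not to open hadoop-env.sh interactively, the same JAVA_HOME change can be applied with sed (a sketch; paths assume the layout created earlier in this guide):
cd /usr/local/src/hadoop
sed -i 's|^export JAVA_HOME=.*|export JAVA_HOME=/usr/local/src/jdk|' etc/hadoop/hadoop-env.sh
grep '^export JAVA_HOME' etc/hadoop/hadoop-env.sh    # confirm the value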
Configure the cluster environment
Hostname resolution
1.master
[root@master hadoop]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.130.101 master
192.168.130.102 slave1
192.168.130.103 slave2
[root@master hadoop]#
2.slave1
[root@slave1 ~]# vim /etc/hosts
[root@slave1 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.130.101 master
192.168.130.102 slave1
192.168.130.103 slave2
[root@slave1 ~]#
3.slave2
[root@slave2 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.130.101 master
192.168.130.102 slave1
192.168.130.103 slave2
[root@slave2 ~]#
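Since the three entries are identical on every node, a here-document keeps /etc/hosts consistent (a sketch; run as root on any node that does not yet have the entries):
cat >> /etc/hosts <<'EOF'
192.168.130.101 master
192.168.130.102 slave1
192.168.130.103 slave2
EOF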
Passwordless SSH login
Generate a key pair on each node and add it to that node's own authorized_keys
- master
[root@master ~]# su - hadoop
Last login: Thu Apr 25 17:45:05 CST 2024 on pts/0
[hadoop@master ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Created directory '/home/hadoop/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:2b+pnJuChp6RkHkx9XYqUgEpyhCf8WHBz3dtE67E9lA hadoop@master
The key's randomart image is:
+---[RSA 2048]----+
|....=+o |
|...=oo o E |
|o.o.+o. o..+ . |
|.. o +o..=* = |
| + o ..S+.= . |
| o o . ... |
| o. . . |
| .oo .. o o |
| .o. .*oo |
+----[SHA256]-----+
[hadoop@master ~]$
[hadoop@master ~]$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys
[hadoop@master ~]$ ls ~/.ssh/
authorized_keys id_rsa id_rsa.pub
[hadoop@master ~]$ chmod 600 ~/.ssh/authorized_keys
[hadoop@master ~]$ ll ~/.ssh/
total 12
-rw------- 1 hadoop hadoop 3358 Apr 25 18:30 authorized_keys
-rw------- 1 hadoop hadoop 1679 Apr 25 17:45 id_rsa
-rw-r--r-- 1 hadoop hadoop 395 Apr 25 17:45 id_rsa.pub
[root@master .ssh]# cat /etc/ssh/sshd_config |grep Pub
PubkeyAuthentication yes    // (open the file with vim and) uncomment this line
[root@master .ssh]# systemctl restart sshd
[root@master .ssh]#
- slave1
[root@slave1 ~]# su - hadoop
Last login: Tue Apr 7 15:37:22 CST 2020 on :0
[hadoop@slave1 ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Created directory '/home/hadoop/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:H5ouJBZdnawOEbyx1xFU3tKt8NEm5XlmnpzghOtQMcY hadoop@slave1
The key's randomart image is:
+---[RSA 2048]----+
| ... ++B. .|
| + . *E+o =.|
| . * o ++o=.O|
| . = o o ++oO+|
| . +S... .o+.|
| o . .+o. |
| . o o .. |
| .. |
| .. |
+----[SHA256]-----+
[hadoop@slave1 ~]$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys
[hadoop@slave1 ~]$ ls ~/.ssh/
authorized_keys id_rsa id_rsa.pub
[hadoop@slave1 ~]$ chmod 600 ~/.ssh/authorized_keys
[root@slave1 ~]# systemctl restart sshd
- slave2
[root@slave2 ~]# su - hadoop
Last login: Tue Apr 7 15:37:22 CST 2020 on :0
[hadoop@slave2 ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Created directory '/home/hadoop/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:Brs8qBwc6izUBbj10eS/AWBZ6Dtxs1EZ8mVc9fM97Yg hadoop@slave2
The key's randomart image is:
+---[RSA 2048]----+
| . o*+ .+o.... |
| . o.+o.ooo. .|
| o + oo.. ..|
| . = *o =|
| o . = So .+|
| + o = + o . o.|
|o o . = . E . .|
|+. o . |
|.oo |
+----[SHA256]-----+
[root@slave2 ~]# su - hadoop
Last login: Thu Apr 25 17:45:29 CST 2024 on pts/0
[hadoop@slave2 ~]$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys
[hadoop@slave2 ~]$ ls ~/.ssh/
authorized_keys id_rsa id_rsa.pub
[hadoop@slave2 ~]$ chmod 600 ~/.ssh/authorized_keys
[root@slave2 ~]# vim /etc/ssh/sshd_config
[root@slave2 ~]# cat /etc/ssh/sshd_config |grep Pub
PubkeyAuthentication yes
[root@slave2 ~]# systemctl restart sshd
Exchange SSH keys between the nodes
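The original section stops at this heading, so the exchange itself is left implicit. A common way to do it is ssh-copy-id between every pair of nodes as the hadoop user (a sketch; ssh-copy-id prompts for the hadoop password set earlier):
# on master, as hadoop (repeat the equivalent commands on slave1 and slave2):
ssh-copy-id hadoop@slave1
ssh-copy-id hadoop@slave2
# verify passwordless login:
ssh hadoop@slave1 hostname    # should print "slave1" without asking for a password
After this, scripts such as start-dfs.sh and start-yarn.sh on master can reach the slaves without password prompts.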
Hadoop configuration (all edits are made on master)
hdfs-site.xml
[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# vim hdfs-site.xml
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/usr/local/src/hadoop/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/usr/local/src/hadoop/dfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
</configuration>
core-site.xml
[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# vim core-site.xml
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://192.168.130.101:9000</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>file:/usr/local/src/hadoop/tmp</value>
</property>
</configuration>
mapred-site.xml
[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# cp mapred-site.xml.template mapred-site.xml
[root@master hadoop]# vim mapred-site.xml
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>master:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>master:19888</value>
</property>
</configuration>
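The two jobhistory addresses above only matter if the JobHistory server is actually running; it is started separately with the script bundled in Hadoop 2.7.1's sbin directory (a sketch, to be run as hadoop on master once the cluster is up):
mr-jobhistory-daemon.sh start historyserver
jps    # should now also list JobHistoryServer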
yarn-site.xml
[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# vim yarn-site.xml
<configuration>
<!-- Site specific YARN configuration properties -->
<property>
<name>yarn.resourcemanager.address</name>
<value>master:8032</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>master:8030</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>master:8031</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>master:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>master:8088</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
</configuration>
Other files
[root@master hadoop]# vim masters
192.168.130.101
[root@master hadoop]# vim slaves
slave1
slave2
[root@master hadoop]# mkdir /usr/local/src/hadoop/tmp
[root@master hadoop]# mkdir /usr/local/src/hadoop/dfs/name -p
[root@master hadoop]# mkdir /usr/local/src/hadoop/dfs/data -p
[root@master hadoop]# chown -R hadoop:hadoop /usr/local/src/hadoop/
Copy to the other nodes
1.slave1
[root@master ~]# scp -r /usr/local/src/hadoop/ root@slave1:/usr/local/src/
The authenticity of host 'slave1 (192.168.130.102)' can't be established.
ECDSA key fingerprint is SHA256:vnHclJTJVtDbeULN8jdOLhTCmqxJNqUQshH9g9LfJ3k.
ECDSA key fingerprint is MD5:31:03:3d:83:46:aa:c4:d0:c9:fc:5f:f1:cf:2d:fd:e2.
Are you sure you want to continue connecting (yes/no)? yes
* * * * * * *
2.slave2
[root@master ~]# scp -r /usr/local/src/hadoop/ root@slave2:/usr/local/src/
The authenticity of host 'slave2 (192.168.130.103)' can't be established.
ECDSA key fingerprint is SHA256:vnHclJTJVtDbeULN8jdOLhTCmqxJNqUQshH9g9LfJ3k.
ECDSA key fingerprint is MD5:31:03:3d:83:46:aa:c4:d0:c9:fc:5f:f1:cf:2d:fd:e2.
Are you sure you want to continue connecting (yes/no)? yes
* * * * * * *
Configure environment variables on the other nodes (slave1, slave2)
1.slave1
[root@slave1 .ssh]# tail -n 8 /etc/profile
unset -f pathmunge
# jdk
export JAVA_HOME=/usr/local/src/jdk
export PATH=$PATH:$JAVA_HOME/bin
# hadoop
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
[root@slave1 .ssh]#
2.slave2
[root@slave2 ~]# tail -n 8 /etc/profile
# jdk
export JAVA_HOME=/usr/local/src/jdk
export PATH=$PATH:$JAVA_HOME/bin
# hadoop
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
[root@slave2 ~]#
Start the Hadoop cluster
Format the NameNode (on master)
[hadoop@master hadoop]$ bin/hdfs namenode -format
*****
24/04/25 19:41:28 INFO util.ExitUtil: Exiting with status 0
24/04/25 19:41:28 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at master/192.168.130.101
************************************************************/
Start the NameNode (on master)
[hadoop@master hadoop]$ hadoop-daemon.sh start namenode
starting namenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-namenode-master.out
[hadoop@master hadoop]$ jps    // check the running processes
4746 NameNode
4782 Jps
[hadoop@master hadoop]$
Start the DataNode on slave1
[root@slave1 hadoop]# chown -R hadoop:hadoop /usr/local/src/
[root@slave1 hadoop]# su - hadoop
[hadoop@slave1 ~]$ source /etc/profile
[hadoop@slave1 src]$ hadoop-daemon.sh start datanode
starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave1.out
[hadoop@slave1 src]$ jps
4990 Jps
4943 DataNode
[hadoop@slave1 src]$
Note:
If you see the following error instead:
[hadoop@slave1 hadoop]$ hadoop-daemon.sh start datanode
starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave1.out
/usr/local/src/hadoop/bin/hdfs: line 304: /usr/local/src/jdk/bin/java: No such file or directory
// then, on master, run `scp -r jdk/ hadoop@slave1:/usr/local/src/jdk` to copy the JDK over to slave1
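A slightly fuller version of that recovery step (a sketch; it assumes the /etc/profile entries from the earlier section already exist on slave1):
# on master, as hadoop:
scp -r /usr/local/src/jdk hadoop@slave1:/usr/local/src/
# on slave1, as hadoop:
source /etc/profile
java -version                      # should report 1.8.0_152
hadoop-daemon.sh start datanode    # retry starting the DataNode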
Start the DataNode on slave2
[hadoop@slave2 hadoop]$ source /etc/profile
[hadoop@slave2 hadoop]$ hadoop-daemon.sh start datanode
starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave2.out
[hadoop@slave2 hadoop]$ jps
3598 Jps
3551 DataNode
[hadoop@slave2 hadoop]$
Start the SecondaryNameNode (on master)
[hadoop@master src]$ hadoop-daemon.sh start secondarynamenode
starting secondarynamenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-secondarynamenode-master.out
[hadoop@master src]$ jps
5009 Jps
4746 NameNode
4974 SecondaryNameNode
[hadoop@master src]$
View the HDFS report
[hadoop@master src]$ hdfs dfsadmin -report
Configured Capacity: 94434762752 (87.95 GB)
Present Capacity: 82971066368 (77.27 GB)
DFS Remaining: 82971058176 (77.27 GB)
DFS Used: 8192 (8 KB)
DFS Used%: 0.00%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0
Missing blocks (with replication factor 1): 0
-------------------------------------------------
Live datanodes (2):
Name: 192.168.130.103:50010 (slave2)
Hostname: slave2
Decommission Status : Normal
Configured Capacity: 47217381376 (43.97 GB)
DFS Used: 4096 (4 KB)
Non DFS Used: 5731614720 (5.34 GB)
DFS Remaining: 41485762560 (38.64 GB)
DFS Used%: 0.00%
DFS Remaining%: 87.86%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Apr 25 19:57:20 CST 2024
Name: 192.168.130.102:50010 (slave1)
Hostname: slave1
Decommission Status : Normal
Configured Capacity: 47217381376 (43.97 GB)
DFS Used: 4096 (4 KB)
Non DFS Used: 5732081664 (5.34 GB)
DFS Remaining: 41485295616 (38.64 GB)
DFS Used%: 0.00%
DFS Remaining%: 87.86%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Apr 25 19:57:20 CST 2024
[hadoop@master src]$
Check node status in a browser
- http://master:50070/ (NameNode and DataNode status)
- http://master:50090 (SecondaryNameNode status)
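On a machine without a browser, the same ports can be probed from the shell (a quick sketch; the hostnames rely on the /etc/hosts entries configured above):
curl -s -o /dev/null -w '%{http_code}\n' http://master:50070/    # NameNode web UI, expect 200
curl -s -o /dev/null -w '%{http_code}\n' http://master:50090/    # SecondaryNameNode web UI, expect 200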
Start all services
[hadoop@master hadoop]$ start-yarn.sh
[hadoop@master hadoop]$ start-dfs.sh
[hadoop@master hadoop]$ jps
34257 NameNode
34449 SecondaryNameNode
34494 Jps
32847 ResourceManager
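With HDFS and YARN both running, a quick smoke test can be done with the example jar that ships inside the Hadoop 2.7.1 distribution (a sketch; the jar path is the standard location under the unpacked tree):
# as hadoop on master:
hdfs dfs -mkdir -p /user/hadoop
hadoop jar /usr/local/src/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar pi 2 10
# the job should end with an "Estimated value of Pi is ..." line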
###################################################
[Day 3 of Learning Hadoop — Hive Setup (CSDN blog)](https://blog.csdn.net/m0_74752717/article/details/137449938?spm=1001.2014.3001.5501)