Big Data Platform Setup Manual: Hadoop
A detailed, step-by-step manual for building a Hadoop platform from scratch.
Create three virtual machines running CentOS 7.
Basic environment configuration
Note: DHCP is not recommended, because the IP addresses would change over time.
Configure IP addresses
1.master
[root@master ~]# nmcli connection add ifname ens32 con-name ens32 autoconnect yes type ethernet ipv4.method manual ipv4.addresses 192.168.130.101/24 ipv4.dns 114.114.114.114 ipv4.gateway 192.168.130.2
[root@master ~]# nmcli con up ens32
2.slave1
[root@slave1 ~]# nmcli connection add ifname ens33 con-name ens33 autoconnect yes type ethernet ipv4.method manual ipv4.addresses 192.168.130.102/24 ipv4.dns 114.114.114.114 ipv4.gateway 192.168.130.2
[root@slave1 ~]# nmcli con up ens33
3.slave2
[root@slave2 ~]# nmcli connection add ifname ens33 con-name ens33 autoconnect yes type ethernet ipv4.method manual ipv4.addresses 192.168.130.103/24 ipv4.dns 114.114.114.114 ipv4.gateway 192.168.130.2
[root@slave2 ~]# nmcli con up ens33
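Before testing external connectivity, it can help to confirm the static addresses actually took effect. These verification commands are not in the original transcript, only a suggested check:
ip addr show ens33      # the configured address (for example 192.168.130.102/24 on slave1) should be listed
ip route                # the default route should point at 192.168.130.2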
Ping baidu.com to verify external connectivity
1.master
[root@master ~]# ping baidu.com
PING baidu.com (39.156.66.10) 56(84) bytes of data.
64 bytes from 39.156.66.10 (39.156.66.10): icmp_seq=1 ttl=128 time=28.5 ms
^C
--- baidu.com ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 28.587/28.587/28.587/0.000 ms
[root@master ~]#
2.slave1
[root@slave1 ~]# ping baidu.com
PING baidu.com (110.242.68.66) 56(84) bytes of data.
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=1 ttl=128 time=34.5 ms
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=2 ttl=128 time=34.9 ms
^C
--- baidu.com ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1002ms
rtt min/avg/max/mdev = 34.512/34.708/34.904/0.196 ms
[root@slave1 ~]#
3.slave2
[root@slave2 ~]# ping baidu.com
PING baidu.com (110.242.68.66) 56(84) bytes of data.
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=1 ttl=128 time=33.0 ms
64 bytes from 110.242.68.66 (110.242.68.66): icmp_seq=2 ttl=128 time=35.2 ms
^C
--- baidu.com ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1002ms
rtt min/avg/max/mdev = 33.035/34.138/35.241/1.103 ms
[root@slave2 ~]#
Disable the firewall and SELinux
1.master
[root@master ~]# systemctl stop firewalld
[root@master ~]# systemctl disable firewalld
[root@master ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
Active: inactive (dead)
Docs: man:firewalld(1)
[root@master ~]#
2.slave1
[root@slave1 ~]# systemctl stop firewalld
[root@slave1 ~]# systemctl disable firewalld
[root@slave1 ~]# systemctl status firewalld
● firewalld.service - firewalld - dynamic firewall daemon
Loaded: loaded (/usr/lib/systemd/system/firewalld.service; disabled; vendor preset: enabled)
Active: inactive (dead)
Docs: man:firewalld(1)
[root@slave1 ~]#
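The transcript stops at slave1; the same two firewalld commands are presumably also run on slave2. The heading mentions SELinux as well, but the transcript never shows it being disabled; a minimal sketch, run as root on all three nodes, would be:
setenforce 0                                                          # turn SELinux off for the current boot
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config   # keep it off after a reboot
getenforce                                                            # should now report Permissive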
Create the hadoop user
1.master
[root@master ~]# id hadoop
uid=1000(hadoop) gid=1000(hadoop) groups=1000(hadoop),10(wheel)
[root@master ~]#
2.slave1
[root@slave1 ~]# id hadoop
uid=1000(hadoop) gid=1000(hadoop) groups=1000(hadoop),10(wheel)
[root@slave1 ~]#
3.slave2
[root@slave2 ~]# id hadoop
uid=1000(hadoop) gid=1000(hadoop) groups=1000(hadoop),10(wheel)
[root@slave2 ~]#
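The transcript only verifies the account with id; the account itself must have been created beforehand. A typical way to do that (hypothetical, run as root on all three nodes) is:
useradd hadoop              # create the hadoop user and its primary group
usermod -aG wheel hadoop    # add it to wheel, matching the groups shown by id above
id hadoop                   # confirm the uid/gid and the wheel membership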
Set the hadoop user's password
1.master
[root@master ~]# echo password|passwd --stdin hadoop
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.
[root@master ~]#
2.slave1
[root@slave1 ~]# echo 'password' |passwd --stdin hadoop
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.
[root@slave1 ~]#
3.slave2
[root@slave2 ~]# echo 'password' |passwd --stdin hadoop
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.
[root@slave2 ~]#
Install the JDK
Remove the pre-installed JDK packages
1.master
[root@master ~]# rpm -qa |grep java
java-1.8.0-openjdk-headless-1.8.0.131-11.b12.el7.x86_64
javapackages-tools-3.4.1-11.el7.noarch
tzdata-java-2017b-1.el7.noarch
java-1.7.0-openjdk-headless-1.7.0.141-2.6.10.5.el7.x86_64
java-1.7.0-openjdk-1.7.0.141-2.6.10.5.el7.x86_64
java-1.8.0-openjdk-1.8.0.131-11.b12.el7.x86_64
python-javapackages-3.4.1-11.el7.noarch
[root@master ~]# rpm -e --nodeps $(rpm -qa|grep java)
[root@master ~]# rpm -qa |grep java
[root@master ~]#
2.slave1
[root@slave1 ~]# rpm -qa |grep java
javapackages-tools-3.4.1-11.el7.noarch
tzdata-java-2017b-1.el7.noarch
python-javapackages-3.4.1-11.el7.noarch
[root@slave1 ~]# rpm -e --nodeps $(rpm -qa|grep java)
[root@slave1 ~]# rpm -qa |grep java
[root@slave1 ~]#
3.slave2
[root@slave2 ~]# rpm -qa |grep java
javapackages-tools-3.4.1-11.el7.noarch
tzdata-java-2017b-1.el7.noarch
python-javapackages-3.4.1-11.el7.noarch
[root@slave2 ~]# rpm -e --nodeps $(rpm -qa|grep java)
[root@slave2 ~]# rpm -qa |grep java
[root@slave2 ~]#
Install the new JDK
1. Extract the installation package
1.master
[root@master software]# tar -zxvf jdk-8u152-linux-x64.tar.gz -C /usr/local/src/
2.slave1
none (done on master only)
3.slave2
none (done on master only)
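The tarball is extracted on master only, so the JDK still has to reach the slaves before their DataNodes can start. The manual does this later (see the note in the DataNode section); one illustrative way, run from master, is:
scp -r /usr/local/src/jdk root@slave1:/usr/local/src/
scp -r /usr/local/src/jdk root@slave2:/usr/local/src/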
2. Configure the JDK environment variables
1.master
[root@master src]# ls
jdk1.8.0_152
[root@master src]# mv jdk1.8.0_152/ jdk    // rename the directory (the versioned name is long)
[root@master jdk]# vim /etc/profile
// press G (capital) to jump to the last line, then append the two lines below
export JAVA_HOME=/usr/local/src/jdk
export PATH=$PATH:$JAVA_HOME/bin
[root@master jdk]# source /etc/profile
// reload the environment variables
[root@master jdk]# java -version
java version "1.8.0_152"
Java(TM) SE Runtime Environment (build 1.8.0_152-b16)
Java HotSpot(TM) 64-Bit Server VM (build 25.152-b16, mixed mode)
[root@master jdk]#
Hadoop
Install the Hadoop package
1.master
[root@master software]# tar -xzf hadoop-2.7.1.tar.gz -C /usr/local/src/
[root@master software]# cd /usr/local/src/
[root@master src]# mv hadoop-2.7.1/ hadoop
Update the environment variables
[root@master hadoop]# tail -n 3 /etc/profile
# hadoop
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
[root@master hadoop]# source /etc/profile    // reload the environment variables
[root@master hadoop]# hadoop
Usage: hadoop [--config confdir] [COMMAND | CLASSNAME]
CLASSNAME run the class named CLASSNAME
or
where COMMAND is one of:
fs run a generic filesystem user client
version print the version
jar <jar> run a jar file
note: please use "yarn jar" to launch
YARN applications, not this command.
checknative [-a|-h] check native hadoop and compression libraries availability
distcp <srcurl> <desturl> copy file or directories recursively
archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive
classpath prints the class path needed to get the
Hadoop jar and the required libraries
credential interact with credential providers
daemonlog get/set the log level for each daemon
trace view and modify Hadoop tracing settings
Most commands print help when invoked w/o parameters.
[root@master hadoop]#
Give the hadoop user ownership
[root@master hadoop]# chown -R hadoop:hadoop /usr/local/src/
[root@master hadoop]# ll /usr/local/src/
total 0
drwxr-xr-x 9 hadoop hadoop 149 Jun 29 2015 hadoop
drwxr-xr-x 8 hadoop hadoop 255 Sep 14 2017 jdk
Configure hadoop-env.sh
[root@master hadoop]# vi etc/hadoop/hadoop-env.sh
[root@master hadoop]# cat etc/hadoop/hadoop-env.sh |grep JAVA
# The only required environment variable is JAVA_HOME. All others are
# set JAVA_HOME in this file, so that it is correctly defined on
export JAVA_HOME=/usr/local/src/jdk
Configure the cluster environment
Hostname resolution
1.master
[root@master hadoop]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.130.101 master
192.168.130.102 slave1
192.168.130.103 slave2
[root@master hadoop]#
2.slave1
[root@slave1 ~]# vim /etc/hosts
[root@slave1 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.130.101 master
192.168.130.102 slave1
192.168.130.103 slave2
[root@slave1 ~]#
3.slave2
[root@slave2 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.130.101 master
192.168.130.102 slave1
192.168.130.103 slave2
[root@slave2 ~]#
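With the three hosts files in place, a quick check (not in the original transcript) that name resolution works from any node:
ping -c 1 master
ping -c 1 slave1
ping -c 1 slave2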
Passwordless SSH login
Generate a key pair on each node and add it to that node's own authorized_keys
- master
[root@master ~]# su - hadoop
Last login: Thu Apr 25 17:45:05 CST 2024 on pts/0
[hadoop@master ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Created directory '/home/hadoop/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:2b+pnJuChp6RkHkx9XYqUgEpyhCf8WHBz3dtE67E9lA hadoop@master
The key's randomart image is:
+---[RSA 2048]----+
|....=+o |
|...=oo o E |
|o.o.+o. o..+ . |
|.. o +o..=* = |
| + o ..S+.= . |
| o o . ... |
| o. . . |
| .oo .. o o |
| .o. .*oo |
+----[SHA256]-----+
[hadoop@master ~]$
[hadoop@master ~]$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys
[hadoop@master ~]$ ls ~/.ssh/
authorized_keys id_rsa id_rsa.pub
[hadoop@master ~]$ chmod 600 ~/.ssh/authorized_keys
[hadoop@master ~]$ ll ~/.ssh/
total 12
-rw------- 1 hadoop hadoop 3358 Apr 25 18:30 authorized_keys
-rw------- 1 hadoop hadoop 1679 Apr 25 17:45 id_rsa
-rw-r--r-- 1 hadoop hadoop 395 Apr 25 17:45 id_rsa.pub
[root@master .ssh]# cat /etc/ssh/sshd_config |grep Pub
PubkeyAuthentication yes    // (open this file with vim and) uncomment this line
[root@master .ssh]# systemctl restart sshd
[root@master .ssh]#
- slave1
[root@slave1 ~]# su - hadoop
Last login: Tue Apr 7 15:37:22 CST 2020 on :0
[hadoop@slave1 ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Created directory '/home/hadoop/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:H5ouJBZdnawOEbyx1xFU3tKt8NEm5XlmnpzghOtQMcY hadoop@slave1
The key's randomart image is:
+---[RSA 2048]----+
| ... ++B. .|
| + . *E+o =.|
| . * o ++o=.O|
| . = o o ++oO+|
| . +S... .o+.|
| o . .+o. |
| . o o .. |
| .. |
| .. |
+----[SHA256]-----+
[hadoop@slave1 ~]$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys
[hadoop@slave1 ~]$ ls ~/.ssh/
authorized_keys id_rsa id_rsa.pub
[hadoop@slave1 ~]$ chmod 600 ~/.ssh/authorized_keys
[root@slave1 ~]# systemctl restart sshd
- slave2
[root@slave2 ~]# su - hadoop
Last login: Tue Apr 7 15:37:22 CST 2020 on :0
[hadoop@slave2 ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Created directory '/home/hadoop/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:Brs8qBwc6izUBbj10eS/AWBZ6Dtxs1EZ8mVc9fM97Yg hadoop@slave2
The key's randomart image is:
+---[RSA 2048]----+
| . o*+ .+o.... |
| . o.+o.ooo. .|
| o + oo.. ..|
| . = *o =|
| o . = So .+|
| + o = + o . o.|
|o o . = . E . .|
|+. o . |
|.oo |
+----[SHA256]-----+
[root@slave2 ~]# su - hadoop
Last login: Thu Apr 25 17:45:29 CST 2024 on pts/0
[hadoop@slave2 ~]$ cat .ssh/id_rsa.pub >> .ssh/authorized_keys
[hadoop@slave2 ~]$ ls ~/.ssh/
authorized_keys id_rsa id_rsa.pub
[hadoop@slave2 ~]$ chmod 600 ~/.ssh/authorized_keys
[root@slave2 ~]# vim /etc/ssh/sshd_config
[root@slave2 ~]# cat /etc/ssh/sshd_config |grep Pub
PubkeyAuthentication yes
[root@slave2 ~]# systemctl restart sshd
Exchange SSH keys between the nodes
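The original section is only a heading; a minimal sketch of the usual key exchange, run as the hadoop user and assuming ssh-copy-id is available, would be:
# on master: push master's public key to both slaves
ssh-copy-id hadoop@slave1
ssh-copy-id hadoop@slave2
# on slave1 and on slave2: push each slave's public key to master
ssh-copy-id hadoop@master
# back on master: verify passwordless login
ssh hadoop@slave1 hostname
ssh hadoop@slave2 hostname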
Hadoop configuration (all files edited on master)
hdfs-site.xml
[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# vim hdfs-site.xml
<!-- (Apache license header omitted) -->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/usr/local/src/hadoop/dfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/usr/local/src/hadoop/dfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
</configuration>
core-site.xml
[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# vim core-site.xml
<!-- (Apache license header omitted) -->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://192.168.130.101:9000</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>file:/usr/local/src/hadoop/tmp</value>
</property>
</configuration>
mapred-site.xml
[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# cp mapred-site.xml.template mapred-site.xml
[root@master hadoop]# vim mapred-site.xml
<!-- (Apache license header omitted) -->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>master:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>master:19888</value>
</property>
</configuration>
yarn-site.xml
[root@master hadoop]# pwd
/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# vim yarn-site.xml
<configuration>
<!-- Site specific YARN configuration properties -->
<property>
<name>yarn.resourcemanager.address</name>
<value>master:8032</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>master:8030</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>master:8031</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>master:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>master:8088</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
</configuration>
Other files
[root@master hadoop]# vim masters
192.168.130.101
[root@master hadoop]# vim slaves
slave1
slave2
[root@master hadoop]# mkdir /usr/local/src/hadoop/tmp
[root@master hadoop]# mkdir /usr/local/src/hadoop/dfs/name -p
[root@master hadoop]# mkdir /usr/local/src/hadoop/dfs/data -p
[root@master hadoop]# chown -R hadoop:hadoop /usr/local/src/hadoop/
Copy Hadoop to the other nodes
1.slave1
[root@master ~]# scp -r /usr/local/src/hadoop/ root@slave1:/usr/local/src/
The authenticity of host 'slave1 (192.168.130.102)' can't be established.
ECDSA key fingerprint is SHA256:vnHclJTJVtDbeULN8jdOLhTCmqxJNqUQshH9g9LfJ3k.
ECDSA key fingerprint is MD5:31:03:3d:83:46:aa:c4:d0:c9:fc:5f:f1:cf:2d:fd:e2.
Are you sure you want to continue connecting (yes/no)? yes
* * * * * * *
2.slave2
[root@master ~]# scp -r /usr/local/src/hadoop/ root@slave2:/usr/local/src/
The authenticity of host 'slave2 (192.168.130.103)' can't be established.
ECDSA key fingerprint is SHA256:vnHclJTJVtDbeULN8jdOLhTCmqxJNqUQshH9g9LfJ3k.
ECDSA key fingerprint is MD5:31:03:3d:83:46:aa:c4:d0:c9:fc:5f:f1:cf:2d:fd:e2.
Are you sure you want to continue connecting (yes/no)? yes
* * * * * * *
Configure environment variables on the other nodes (slave1, slave2)
1.slave1
[root@slave1 .ssh]# tail -n 8 /etc/profile
unset -f pathmunge
# jdk
export JAVA_HOME=/usr/local/src/jdk
export PATH=$PATH:$JAVA_HOME/bin
# hadoop
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
[root@slave1 .ssh]#
2.slave2
[root@slave2 ~]# tail -n 8 /etc/profile
# jdk
export JAVA_HOME=/usr/local/src/jdk
export PATH=$PATH:$JAVA_HOME/bin
# hadoop
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
[root@slave2 ~]#
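After sourcing the updated profile on each slave, a quick sanity check (hypothetical commands, mirroring what was done on master, and assuming the jdk directory has already been copied to the slave) is:
source /etc/profile
java -version       # should report 1.8.0_152
hadoop version      # should report Hadoop 2.7.1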
Start the Hadoop cluster
Format the NameNode (master)
[hadoop@master hadoop]$ bin/hdfs namenode -format
*****
24/04/25 19:41:28 INFO util.ExitUtil: Exiting with status 0
24/04/25 19:41:28 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at master/192.168.130.101
************************************************************/
Start the NameNode (master)
[hadoop@master hadoop]$ hadoop-daemon.sh start namenode
starting namenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-namenode-master.out
[hadoop@master hadoop]$ jps    // check running processes
4746 NameNode
4782 Jps
[hadoop@master hadoop]$
Start the DataNode on slave1
[root@slave1 hadoop]# chown -R hadoop:hadoop /usr/local/src/
[root@slave1 hadoop]# su - hadoop
[hadoop@slave1 ~]$ source /etc/profile
[hadoop@slave1 src]$ hadoop-daemon.sh start datanode
starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave1.out
[hadoop@slave1 src]$ jps
4990 Jps
4943 DataNode
[hadoop@slave1 src]$
Note:
If you see the following error
[hadoop@slave1 hadoop]$ hadoop-daemon.sh start datanode
starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave1.out
/usr/local/src/hadoop/bin/hdfs: line 304: /usr/local/src/jdk/bin/java: No such file or directory
// then, on master, copy the JDK over with `scp -r jdk/ hadoop@slave1:/usr/local/src/jdk` (run from /usr/local/src)
Start the DataNode on slave2
[hadoop@slave2 hadoop]$ source /etc/profile
[hadoop@slave2 hadoop]$ hadoop-daemon.sh start datanode
starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave2.out
[hadoop@slave2 hadoop]$ jps
3598 Jps
3551 DataNode
[hadoop@slave2 hadoop]$
Start the SecondaryNameNode (master)
[hadoop@master src]$ hadoop-daemon.sh start secondarynamenode
starting secondarynamenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-secondarynamenode-master.out
[hadoop@master src]$ jps
5009 Jps
4746 NameNode
4974 SecondaryNameNode
[hadoop@master src]$
View the HDFS report
[hadoop@master src]$ hdfs dfsadmin -report
Configured Capacity: 94434762752 (87.95 GB)
Present Capacity: 82971066368 (77.27 GB)
DFS Remaining: 82971058176 (77.27 GB)
DFS Used: 8192 (8 KB)
DFS Used%: 0.00%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0
Missing blocks (with replication factor 1): 0
-------------------------------------------------
Live datanodes (2):
Name: 192.168.130.103:50010 (slave2)
Hostname: slave2
Decommission Status : Normal
Configured Capacity: 47217381376 (43.97 GB)
DFS Used: 4096 (4 KB)
Non DFS Used: 5731614720 (5.34 GB)
DFS Remaining: 41485762560 (38.64 GB)
DFS Used%: 0.00%
DFS Remaining%: 87.86%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Apr 25 19:57:20 CST 2024
Name: 192.168.130.102:50010 (slave1)
Hostname: slave1
Decommission Status : Normal
Configured Capacity: 47217381376 (43.97 GB)
DFS Used: 4096 (4 KB)
Non DFS Used: 5732081664 (5.34 GB)
DFS Remaining: 41485295616 (38.64 GB)
DFS Used%: 0.00%
DFS Remaining%: 87.86%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Thu Apr 25 19:57:20 CST 2024
[hadoop@master src]$
Check node status in a browser
- http://master:50070/ (NameNode and DataNode status)
- http://master:50090 (SecondaryNameNode status)
Start all services
[hadoop@master hadoop]$ start-yarn.sh
[hadoop@master hadoop]$ start-dfs.sh
[hadoop@master hadoop]$ jps
34257 NameNode
34449 SecondaryNameNode
34494 Jps
32847 ResourceManager
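With everything running, a few extra checks can confirm the whole cluster is healthy. These commands are standard Hadoop 2.7 usage rather than part of the original transcript:
jps                                            # on slave1/slave2: DataNode and NodeManager should both appear
yarn node -list                                # on master: both NodeManagers should be listed as RUNNING
hdfs dfsadmin -report                          # on master: re-check HDFS capacity and live datanodes
mr-jobhistory-daemon.sh start historyserver    # optional: starts the JobHistory server configured in mapred-site.xml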
###################################################
[Day 3 of learning Hadoop: Hive setup (CSDN blog)](https://blog.csdn.net/m0_74752717/article/details/137449938?spm=1001.2014.3001.5501)
几年前翻译过CDS的相关文章,部分内容已经过时,比如当时的DDIC CDS在7.55以后已经沦为obsolete,在新版本中,开发者应使用DEFINE VIEW ENTITY而非DEFINE VIEW ...