1. DataNode fails to start
2016-11-25 09:46:43,685 WARN org.apache.hadoop.hdfs.server.datanode.DataNode: Invalid dfs.datanode.data.dir /home/hadoop3/hadoop_data/data :
org.apache.hadoop.util.DiskChecker$DiskErrorException: Directory is not writable: /home/hadoop3/hadoop_data/data
        at org.apache.hadoop.util.DiskChecker.checkAccessByFileMethods(DiskChecker.java:193)
        at org.apache.hadoop.util.DiskChecker.checkDirAccess(DiskChecker.java:174)
        at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:157)
        at org.apache.hadoop.hdfs.server.datanode.DataNode$DataNodeDiskChecker.checkDir(DataNode.java:2272)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.checkStorageLocations(DataNode.java:2314)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:2296)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:2188)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.createDataNode(DataNode.java:2235)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.secureMain(DataNode.java:2411)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.main(DataNode.java:2435)
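The first warning above is usually the simpler case: the user the DataNode runs as cannot write to the configured data directory, either because of wrong ownership/permissions or because the disk has remounted itself read-only. A quick check, assuming the DataNode runs as hdmaster (adjust the user and path to your setup):

ls -ld /home/hadoop3/hadoop_data/data
mount | grep /home/hadoop3        # an "ro" flag means the filesystem went read-only
su - hdmaster -c 'touch /home/hadoop3/hadoop_data/data/.wtest && rm /home/hadoop3/hadoop_data/data/.wtest'

If the touch fails even though ownership and permissions look right, suspect the disk itself, which is what happened here.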

java.io.IOException: BlockPoolSliceStorage.recoverTransitionRead: attempt to load an used block storage: /home/hdmaster/hadoop_data/data/current/BP-994368505-192.168.30.223-1441944900262
        at org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage.loadBpStorageDirectories(BlockPoolSliceStorage.java:210)
        at org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage.recoverTransitionRead(BlockPoolSliceStorage.java:242)
        at org.apache.hadoop.hdfs.server.datanode.DataStorage.addStorageLocations(DataStorage.java:391)
        at org.apache.hadoop.hdfs.server.datanode.DataStorage.recoverTransitionRead(DataStorage.java:472)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.initStorage(DataNode.java:1322)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.initBlockPool(DataNode.java:1292)
        at org.apache.hadoop.hdfs.server.datanode.BPOfferService.verifyAndSetNamespaceInfo(BPOfferService.java:321)
        at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.connectToNNAndHandshake(BPServiceActor.java:225)
        at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:862)
        at java.lang.Thread.run(Thread.java:745)
2016-11-25 09:27:18,795 WARN org.apache.hadoop.hdfs.server.common.Storage: Failed to add storage for block pool: BP-994368505-192.168.30.223-1441944900262 : BlockPoolSliceStorage.recoverTransitionRead: attempt to load an used block storage: /home/hdmaster/hadoop_data/data/current/BP-994368505-192.168.30.223-1441944900262
2016-11-25 09:27:18,795 INFO org.apache.hadoop.hdfs.server.common.Storage: Storage directory [DISK]file:/home/hadoop1/hadoop_data/data/ has already been used.
2016-11-25 09:27:18,818 INFO org.apache.hadoop.hdfs.server.common.Storage: Analyzing storage directories for bpid BP-994368505-192.168.30.223-1441944900262
2016-11-25 09:27:18,818 WARN org.apache.hadoop.hdfs.server.common.Storage: Failed to analyze storage directories for block pool BP-994368505-192.168.30.223-1441944900262
java.io.IOException: BlockPoolSliceStorage.recoverTransitionRead: attempt to load an used block storage: /home/hadoop1/hadoop_data/data/current/BP-994368505-192.168.30.223-1441944900262
        at org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage.loadBpStorageDirectories(BlockPoolSliceStorage.java:210)
        at org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage.recoverTransitionRead(BlockPoolSliceStorage.java:242)
        at org.apache.hadoop.hdfs.server.datanode.DataStorage.addStorageLocations(DataStorage.java:391)
        at org.apache.hadoop.hdfs.server.datanode.DataStorage.recoverTransitionRead(DataStorage.java:472)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.initStorage(DataNode.java:1322)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.initBlockPool(DataNode.java:1292)
        at org.apache.hadoop.hdfs.server.datanode.BPOfferService.verifyAndSetNamespaceInfo(BPOfferService.java:321)
        at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.connectToNNAndHandshake(BPServiceActor.java:225)
        at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:862)
        at java.lang.Thread.run(Thread.java:745)
2016-11-25 09:27:18,818 WARN org.apache.hadoop.hdfs.server.common.Storage: Failed to add storage for block pool: BP-994368505-192.168.30.223-1441944900262 : BlockPoolSliceStorage.recoverTransitionRead: attempt to load an used block storage: /home/hadoop1/hadoop_data/data/current/BP-994368505-192.168.30.223-1441944900262
2016-11-25 09:27:18,818 INFO org.apache.hadoop.hdfs.server.common.Storage: Storage directory [DISK]file:/home/hadoop2/hadoop_data/data/ has already been used.
2016-11-25 09:27:18,839 INFO org.apache.hadoop.hdfs.server.common.Storage: Analyzing storage directories for bpid BP-994368505-192.168.30.223-1441944900262
2016-11-25 09:27:18,839 WARN org.apache.hadoop.hdfs.server.common.Storage: Failed to analyze storage directories for block pool BP-994368505-192.168.30.223-1441944900262
java.io.IOException: BlockPoolSliceStorage.recoverTransitionRead: attempt to load an used block storage: /home/hadoop2/hadoop_data/data/current/BP-994368505-192.168.30.223-1441944900262
        at org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage.loadBpStorageDirectories(BlockPoolSliceStorage.java:210)
        at org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage.recoverTransitionRead(BlockPoolSliceStorage.java:242)
        at org.apache.hadoop.hdfs.server.datanode.DataStorage.addStorageLocations(DataStorage.java:391)
        at org.apache.hadoop.hdfs.server.datanode.DataStorage.recoverTransitionRead(DataStorage.java:472)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.initStorage(DataNode.java:1322)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.initBlockPool(DataNode.java:1292)
        at org.apache.hadoop.hdfs.server.datanode.BPOfferService.verifyAndSetNamespaceInfo(BPOfferService.java:321)
        at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.connectToNNAndHandshake(BPServiceActor.java:225)
        at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:862)
        at java.lang.Thread.run(Thread.java:745)
2016-11-25 09:27:18,840 WARN org.apache.hadoop.hdfs.server.common.Storage: Failed to add storage for block pool: BP-994368505-192.168.30.223-1441944900262 : BlockPoolSliceStorage.recoverTransitionRead: attempt to load an used block storage: /home/hadoop2/hadoop_data/data/current/BP-994368505-192.168.30.223-1441944900262
2016-11-25 09:27:18,840 FATAL org.apache.hadoop.hdfs.server.datanode.DataNode: Initialization failed for Block pool <registering> (Datanode Uuid unassigned) service to namenode01/192.168.30.223:9000. Exiting.
java.io.IOException: All specified directories are failed to load.
        at org.apache.hadoop.hdfs.server.datanode.DataStorage.recoverTransitionRead(DataStorage.java:473)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.initStorage(DataNode.java:1322)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.initBlockPool(DataNode.java:1292)
        at org.apache.hadoop.hdfs.server.datanode.BPOfferService.verifyAndSetNamespaceInfo(BPOfferService.java:321)
        at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.connectToNNAndHandshake(BPServiceActor.java:225)
        at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:862)
        at java.lang.Thread.run(Thread.java:745)
2016-11-25 09:27:18,840 FATAL org.apache.hadoop.hdfs.server.datanode.DataNode: Initialization failed for Block pool <registering> (Datanode Uuid unassigned) service to namenode02/192.168.32.124:9000. Exiting.
org.apache.hadoop.util.DiskChecker$DiskErrorException: Too many failed volumes - current valid volumes: 3, volumes configured: 4, volumes failed: 1, volume failures tolerated: 0
        at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.<init>(FsDatasetImpl.java:247)
        at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory.newInstance(FsDatasetFactory.java:34)
        at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory.newInstance(FsDatasetFactory.java:30)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.initStorage(DataNode.java:1335)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.initBlockPool(DataNode.java:1292)
        at org.apache.hadoop.hdfs.server.datanode.BPOfferService.verifyAndSetNamespaceInfo(BPOfferService.java:321)
        at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.connectToNNAndHandshake(BPServiceActor.java:225)
        at org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:862)
        at java.lang.Thread.run(Thread.java:745)

--- Cause: the third data disk (/dev/sdd1, mounted at /home/hadoop3) has failed.
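Note that the FATAL above also reports "volume failures tolerated: 0", so a single bad volume aborts the whole DataNode. If the remaining disks are healthy, a stopgap (not a fix) is to raise dfs.datanode.failed.volumes.tolerated in hdfs-site.xml so the DataNode keeps serving from the surviving volumes while the disk is replaced. The current value can be checked with:

hdfs getconf -confKey dfs.datanode.failed.volumes.tolerated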
Fix, step 1: try to repair the disk
[root@hdslave04 ~]# df -h
Filesystem            Size  Used Avail Use% Mounted on
/dev/mapper/vg_server03-lv_root
                       50G   27G   20G  58% /
tmpfs                  16G   68K   16G   1% /dev/shm
/dev/sda1             477M   59M  393M  13% /boot
/dev/mapper/vg_server03-lv_home
                      1.8T  1.5T  207G  88% /home
/dev/sdb1             1.8T  1.5T  303G  83% /home/hadoop1
/dev/sdc1             1.8T  1.5T  286G  84% /home/hadoop2
/dev/sdd1             1.8T  1.5T  250G  86% /home/hadoop3

umount /dev/sdd1
If umount reports "umount: /dev/sdd1: device is busy",
list the processes still holding the mount point, kill them, then retry the umount:
fuser -m /home/hadoop3
kill <pid>
umount /dev/sdd1
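If nothing on that mount needs a graceful shutdown, fuser can also kill the holders itself (a shortcut, not a requirement):

fuser -km /home/hadoop3      # SIGKILL every process still using the mount point
umount /dev/sdd1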

[root@hdslave04 ~]# fsck -y /dev/sdd1
fsck from util-linux-ng 2.17.2
e2fsck 1.41.12 (17-May-2010)
fsck.ext4: No such device or address while trying to open /dev/sdd1
Possibly non-existent or swap device?

-- The output above means the disk itself has failed: the device can no longer even be opened.
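Before swapping the disk it is worth confirming the failure at the hardware level; this assumes the smartmontools package is installed and the kernel still exposes /dev/sdd:

smartctl -H /dev/sdd                                          # overall SMART health verdict
smartctl -A /dev/sdd | grep -iE 'realloc|pending|uncorrect'   # key failure counters
dmesg | grep -i sdd                                           # kernel I/O / SATA link errors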

Fix, step 2: replace the disk and partition it as a GPT disk with parted
  Reference: http://soft.chinabyte.com/os/447/12439447.shtml
  parted /dev/sdd
  (parted) mklabel                      ---- create a new disk label
   New disk label type? gpt
   (parted) p                           ---- print the partition table
   Model: VMware, VMware Virtual S (scsi)
   Disk /dev/sdd: 2000GB
   Sector size (logical/physical): 512B/512B
   Partition Table: gpt
   Number  Start  End  Size  File system  Name  Flags

   (parted) mkpart
   Partition name?  []? sdd1            ---- partition name
   File system type?  [ext2]? ext4      ---- file system type
   Start? 1                             ---- start position
   End? 2000GB                          ---- end position
   (parted) p                           ---- print the new partition table
   Model: VMware, VMware Virtual S (scsi)
   Disk /dev/sdd: 2000GB
   Sector size (logical/physical): 512B/512B
   Partition Table: gpt
   Number  Start   End     Size    File system  Name  Flags
    1      17.4kB  2000GB  2000GB               sdd1

   (parted) q                           ---- quit
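The same partitioning can also be scripted non-interactively, which helps when several nodes need the same replacement (device and partition name assumed as above):

parted -s /dev/sdd mklabel gpt
parted -s /dev/sdd mkpart sdd1 ext4 1MiB 100%
parted -s /dev/sdd print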

Fix, step 3: format and mount the new disk
mkfs.ext4 /dev/sdd1
Edit /etc/fstab so the new partition is mounted at /home/hadoop3 again (the old df line kept below as a reminder of the mount point):
/dev/sdd1             1.8T  1.5T  250G  86% /home/hadoop3
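A minimal fstab entry for the new partition might look like the sketch below (mounting by UUID is more robust if device letters can change):

/dev/sdd1   /home/hadoop3   ext4   defaults   0 2

After saving, mount -a applies /etc/fstab without waiting for a reboot, and df -h /home/hadoop3 should show the new disk; the reboot is then mainly a sanity check.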
Reboot:
shutdown -r now

Fix, step 4: restart the DataNode and NodeManager
sh hadoop-daemon.sh start datanode
sh yarn-daemon.sh start nodemanager
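To confirm the node has rejoined the cluster (hdslave04 as in the prompts above; log path assumed, adjust to your install):

jps                                                      # DataNode and NodeManager should both be listed
hdfs dfsadmin -report | grep -A 6 hdslave04              # grep for the hostname or IP; the node should be live
tail -n 100 $HADOOP_HOME/logs/hadoop-*-datanode-*.log    # no new FATAL/ERROR entries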

-- Addendum to step 1: before replacing anything, a long-shot attempt is to reboot the server and see whether the disk comes back.
Reboot the server: shutdown -r now

The machine then hung during boot because it could not mount the failed disk.
1. Boot into Linux single-user mode and remount the root filesystem read-write:
root# mount -o remount,rw /
vim /etc/fstab
Comment out the entry for the failed disk, e.g.:
#/dev/sdd1             1.8T  1.5T  250G  86% /home/hadoop3
2. Reboot; the server now comes up normally.
shutdown -r now
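The boot hang is also a good argument for the nofail mount option: with nofail on the data-disk entries in /etc/fstab (sketch below, same caveats as above), the server still boots when one of the disks is dead and only the missing mount is skipped:

/dev/sdd1   /home/hadoop3   ext4   defaults,nofail   0 2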
