This one is basically a utility class.


package org.apache.hadoop.fs;

import org.apache.hadoop.io.*;
// Haven't covered the classes in the io package yet; I'll analyze each one as it comes up.
import java.io.*;

/*
 * A BlockLocation lists hosts, offset and length
 * of block.
 */
// Records a block's metadata: the hosts it lives on, its length, and its offset within the file.
public class BlockLocation implements Writable {
// A class describing where a block lives in the cluster.

  static { // register a ctor
    WritableFactories.setFactory
      (BlockLocation.class,
       new WritableFactory() {
         public Writable newInstance() { return new BlockLocation(); }
       });
  }
  // Registers a factory for the Writable subclass BlockLocation, implemented as an
  // anonymous inner class. For details see http://book.2cto.com/201305/21915.html
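  // A usage sketch of my own (not part of the source): once the factory is
  // registered, the framework can create instances from the class object alone,
  // which is how framework code such as ObjectWritable materializes objects
  // during deserialization:
  //
  //   Writable w = WritableFactories.newInstance(BlockLocation.class);
  //   // w is a fresh, empty BlockLocation, ready for readFields()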
  private String[] hosts;          // hostnames of datanodes
  // Array of the datanodes' hostnames.
  private String[] names;          // hostname:portNumber of datanodes
  // Array of the datanodes' names, in hostname:port format.
  private String[] topologyPaths;  // full path name in network topology
  // Each node's location within the network topology.
  private long offset;             // offset of the block in the file
  // The block's offset within the file.
  private long length;
  // The block's length.

  /**
   * Default Constructor
   */
  public BlockLocation() {
    this(new String[0], new String[0], 0L, 0L);
  }
  // Default constructor: delegates to the four-argument constructor with empty arrays.
  /**
   * Constructor with host, name, offset and length
   */
  public BlockLocation(String[] names, String[] hosts, long offset,
                       long length) {
    if (names == null) {
      this.names = new String[0];
    } else {
      this.names = names;
    }
    if (hosts == null) {
      this.hosts = new String[0];
    } else {
      this.hosts = hosts;
    }
    this.offset = offset;
    this.length = length;
    this.topologyPaths = new String[0];
  }
  // Initializes a block-location object from names, hosts, offset, and length;
  // null arrays are normalized to empty arrays.
  /**
   * Constructor with host, name, network topology, offset and length
   */
  public BlockLocation(String[] names, String[] hosts, String[] topologyPaths,
                       long offset, long length) {
    this(names, hosts, offset, length);
    if (topologyPaths == null) {
      this.topologyPaths = new String[0];
    } else {
      this.topologyPaths = topologyPaths;
    }
  }
  // Same as above, but additionally records the network topology paths.
  /**
   * Get the list of hosts (hostname) hosting this block
   */
  public String[] getHosts() throws IOException {
    if ((hosts == null) || (hosts.length == 0)) {
      return new String[0];
    } else {
      return hosts;
    }
  }
  // Returns the hosts storing this block, never null.

  /**
   * Get the list of names (hostname:port) hosting this block
   */
  public String[] getNames() throws IOException {
    if ((names == null) || (names.length == 0)) {
      return new String[0];
    } else {
      return this.names;
    }
  }
  // Returns the hostname:port names, never null.
  /**
   * Get the list of network topology paths for each of the hosts.
   * The last component of the path is the host.
   */
  public String[] getTopologyPaths() throws IOException {
    if ((topologyPaths == null) || (topologyPaths.length == 0)) {
      return new String[0];
    } else {
      return this.topologyPaths;
    }
  }
  // Returns the topology paths, never null.
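  // An illustrative value (mine, not from the source): with Hadoop's default
  // rack awareness a topology path looks like "/default-rack/datanode1",
  // racks first, with the host as the last path component.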
  /**
   * Get the start offset of file associated with this block
   */
  public long getOffset() {
    return offset;
  }
  // Returns the block's start offset within the file.

  /**
   * Get the length of the block
   */
  public long getLength() {
    return length;
  }
  // Returns the block's length.

  /**
   * Set the start offset of file associated with this block
   */
  public void setOffset(long offset) {
    this.offset = offset;
  }
  // Sets the block's start offset.

  /**
   * Set the length of block
   */
  public void setLength(long length) {
    this.length = length;
  }
  // Sets the block's length.
  /**
   * Set the hosts hosting this block
   */
  public void setHosts(String[] hosts) throws IOException {
    if (hosts == null) {
      this.hosts = new String[0];
    } else {
      this.hosts = hosts;
    }
  }
  // Sets the hosts; null is normalized to an empty array.

  /**
   * Set the names (host:port) hosting this block
   */
  public void setNames(String[] names) throws IOException {
    if (names == null) {
      this.names = new String[0];
    } else {
      this.names = names;
    }
  }
  // Sets the names; null is normalized to an empty array.

  /**
   * Set the network topology paths of the hosts
   */
  public void setTopologyPaths(String[] topologyPaths) throws IOException {
    if (topologyPaths == null) {
      this.topologyPaths = new String[0];
    } else {
      this.topologyPaths = topologyPaths;
    }
  }
  // Sets the topology paths; null is normalized to an empty array.
  /**
   * Implement write of Writable
   */
  public void write(DataOutput out) throws IOException {
    out.writeLong(offset);
    out.writeLong(length);
    out.writeInt(names.length);
    for (int i = 0; i < names.length; i++) {
      Text name = new Text(names[i]);
      name.write(out);
    }
    out.writeInt(hosts.length);
    for (int i = 0; i < hosts.length; i++) {
      Text host = new Text(hosts[i]);
      host.write(out);
    }
    out.writeInt(topologyPaths.length);
    for (int i = 0; i < topologyPaths.length; i++) {
      Text path = new Text(topologyPaths[i]);
      path.write(out);
    }
  }
  // Writes the block's fields to the output stream, using the write() methods of
  // the Writable types (long, int, Text). This is the serialization half.
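  // The resulting wire format, summarizing the code above:
  //   offset (long) | length (long)
  //   | numNames (int) | numNames Text entries (names)
  //   | numHosts (int) | numHosts Text entries (hosts)
  //   | numTops  (int) | numTops  Text entries (topologyPaths)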
  /**
   * Implement readFields of Writable
   */
  public void readFields(DataInput in) throws IOException {
    this.offset = in.readLong();
    this.length = in.readLong();
    int numNames = in.readInt();
    this.names = new String[numNames];
    for (int i = 0; i < numNames; i++) {
      Text name = new Text();
      name.readFields(in);
      names[i] = name.toString();
    }
    int numHosts = in.readInt();
    this.hosts = new String[numHosts]; // allocate before filling; otherwise the loop can overrun the old array
    for (int i = 0; i < numHosts; i++) {
      Text host = new Text();
      host.readFields(in);
      hosts[i] = host.toString();
    }
    int numTops = in.readInt();
    this.topologyPaths = new String[numTops]; // likewise, size the array to the incoming count
    Text path = new Text();
    for (int i = 0; i < numTops; i++) {
      path.readFields(in);
      topologyPaths[i] = path.toString();
    }
  }
  // Reads the block's fields back from the input stream. This is the deserialization half.
  public String toString() {
    StringBuilder result = new StringBuilder();
    result.append(offset);
    result.append(',');
    result.append(length);
    for (String h : hosts) {
      result.append(',');
      result.append(h);
    }
    return result.toString();
  }
  // Renders the block as "offset,length,host1,host2,...".
}
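To tie the two halves together, here is a minimal round-trip sketch of my own; the hostname "datanode1" and the 64 MB length are made-up values for illustration. It uses Hadoop's DataOutputBuffer and DataInputBuffer to serialize with write() and read back with readFields():

 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;

 public class BlockLocationRoundTrip {
   public static void main(String[] args) throws Exception {
     BlockLocation original = new BlockLocation(
         new String[] {"datanode1:50010"}, // names (hostname:port)
         new String[] {"datanode1"},       // hosts
         0L,                               // offset within the file
         67108864L);                       // length: 64 MB, a classic block size

     // Serialize with write().
     DataOutputBuffer out = new DataOutputBuffer();
     original.write(out);

     // Deserialize with readFields() into a fresh instance.
     DataInputBuffer in = new DataInputBuffer();
     in.reset(out.getData(), out.getLength());
     BlockLocation copy = new BlockLocation();
     copy.readFields(in);

     System.out.println(copy); // prints: 0,67108864,datanode1
   }
 }

In practice you rarely construct these by hand: FileSystem.getFileBlockLocations(...) returns them, and MapReduce uses the host lists to schedule tasks close to the data.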
