A utility class, more or less.


package org.apache.hadoop.fs;

import org.apache.hadoop.io.*;
// Classes from the io package have not come up yet; we will analyze each one as we encounter it.
import java.io.*;

/*
 * A BlockLocation lists hosts, offset and length
 * of block.
 */
// Records a block's metadata: the hosts it lives on, its length, and its offset.
public class BlockLocation implements Writable {
// A class describing where blocks are located in the cluster.

  static { // register a ctor
    WritableFactories.setFactory
      (BlockLocation.class,
       new WritableFactory() {
         public Writable newInstance() { return new BlockLocation(); }
       });
  }
  // Registers a factory for the Writable subclass BlockLocation, as an anonymous inner class.
  // For details see http://book.2cto.com/201305/21915.html
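  // A note on how this registration is used (my reading, stated cautiously): when Hadoop
  // deserializes an object whose concrete type is only known at runtime (e.g. through
  // ObjectWritable), it calls WritableFactories.newInstance(BlockLocation.class); that call
  // looks up the factory registered above, gets a fresh empty BlockLocation, and then
  // populates it via readFields(in).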
  private String[] hosts; // hostnames of datanodes
  // Array of the datanodes' hostnames.
  private String[] names; // hostname:portNumber of datanodes
  // Array of the datanodes' names, in hostname:port format.
  private String[] topologyPaths; // full path name in network topology
  // Each node's path in the network topology.
  private long offset; // offset of the block in the file
  // The block's offset within the file.
  private long length;
  // The block's length.

  /**
   * Default Constructor
   */
  public BlockLocation() {
    this(new String[0], new String[0], 0L, 0L);
  }
  // Default constructor: delegates to the four-argument constructor with empty arrays.
  /**
   * Constructor with host, name, offset and length
   */
  public BlockLocation(String[] names, String[] hosts, long offset,
                       long length) {
    if (names == null) {
      this.names = new String[0];
    } else {
      this.names = names;
    }
    if (hosts == null) {
      this.hosts = new String[0];
    } else {
      this.hosts = hosts;
    }
    this.offset = offset;
    this.length = length;
    this.topologyPaths = new String[0];
  }
  // Initializes a block location from names, hosts, offset and length; null arrays become empty ones.
  /**
   * Constructor with host, name, network topology, offset and length
   */
  public BlockLocation(String[] names, String[] hosts, String[] topologyPaths,
                       long offset, long length) {
    this(names, hosts, offset, length);
    if (topologyPaths == null) {
      this.topologyPaths = new String[0];
    } else {
      this.topologyPaths = topologyPaths;
    }
  }
  // Initializes a block location from names, hosts, topology paths, offset and length.
  /**
   * Get the list of hosts (hostname) hosting this block
   */
  public String[] getHosts() throws IOException {
    if ((hosts == null) || (hosts.length == 0)) {
      return new String[0];
    } else {
      return hosts;
    }
  }
  // Returns the hosts storing this block, never null.

  /**
   * Get the list of names (hostname:port) hosting this block
   */
  public String[] getNames() throws IOException {
    if ((names == null) || (names.length == 0)) {
      return new String[0];
    } else {
      return this.names;
    }
  }
  // Returns the datanode names (hostname:port) storing this block, never null.
  /**
   * Get the list of network topology paths for each of the hosts.
   * The last component of the path is the host.
   */
  public String[] getTopologyPaths() throws IOException {
    if ((topologyPaths == null) || (topologyPaths.length == 0)) {
      return new String[0];
    } else {
      return this.topologyPaths;
    }
  }
  // Returns the network topology path of each host, never null.

  /**
   * Get the start offset of file associated with this block
   */
  public long getOffset() {
    return offset;
  }

  /**
   * Get the length of the block
   */
  public long getLength() {
    return length;
  }

  /**
   * Set the start offset of file associated with this block
   */
  public void setOffset(long offset) {
    this.offset = offset;
  }

  /**
   * Set the length of block
   */
  public void setLength(long length) {
    this.length = length;
  }
  /**
   * Set the hosts hosting this block
   */
  public void setHosts(String[] hosts) throws IOException {
    if (hosts == null) {
      this.hosts = new String[0];
    } else {
      this.hosts = hosts;
    }
  }

  /**
   * Set the names (host:port) hosting this block
   */
  public void setNames(String[] names) throws IOException {
    if (names == null) {
      this.names = new String[0];
    } else {
      this.names = names;
    }
  }

  /**
   * Set the network topology paths of the hosts
   */
  public void setTopologyPaths(String[] topologyPaths) throws IOException {
    if (topologyPaths == null) {
      this.topologyPaths = new String[0];
    } else {
      this.topologyPaths = topologyPaths;
    }
  }
  // The setters mirror the constructors: null arrays are normalized to empty arrays.
  /**
   * Implement write of Writable
   */
  public void write(DataOutput out) throws IOException {
    out.writeLong(offset);
    out.writeLong(length);
    out.writeInt(names.length);
    for (int i = 0; i < names.length; i++) {
      Text name = new Text(names[i]);
      name.write(out);
    }
    out.writeInt(hosts.length);
    for (int i = 0; i < hosts.length; i++) {
      Text host = new Text(hosts[i]);
      host.write(out);
    }
    out.writeInt(topologyPaths.length);
    for (int i = 0; i < topologyPaths.length; i++) {
      Text path = new Text(topologyPaths[i]);
      path.write(out);
    }
  }
  // Serialization: writes the block's metadata to the output stream, using writeLong/writeInt
  // for the primitives and Text.write for each string.
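  // Resulting wire format (consumed in the same order by readFields below):
  //   offset (long) | length (long) | names.length (int) | each name as Text |
  //   hosts.length (int) | each host as Text | topologyPaths.length (int) | each path as Text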
  /**
   * Implement readFields of Writable
   */
  public void readFields(DataInput in) throws IOException {
    this.offset = in.readLong();
    this.length = in.readLong();
    int numNames = in.readInt();
    this.names = new String[numNames];
    for (int i = 0; i < numNames; i++) {
      Text name = new Text();
      name.readFields(in);
      names[i] = name.toString();
    }
    int numHosts = in.readInt();
    this.hosts = new String[numHosts]; // allocate before filling
    for (int i = 0; i < numHosts; i++) {
      Text host = new Text();
      host.readFields(in);
      hosts[i] = host.toString();
    }
    int numTops = in.readInt();
    this.topologyPaths = new String[numTops]; // allocate before filling
    Text path = new Text();
    for (int i = 0; i < numTops; i++) {
      path.readFields(in);
      topologyPaths[i] = path.toString();
    }
  }
  // Deserialization: reads the block's metadata back from the input stream, mirroring write().
  public String toString() {
    StringBuilder result = new StringBuilder();
    result.append(offset);
    result.append(',');
    result.append(length);
    for (String h : hosts) {
      result.append(',');
      result.append(h);
    }
    return result.toString();
  }
  // Renders the block as "offset,length,host1,host2,...".
}
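
To see the Writable contract in action, here is a minimal round-trip sketch of my own (the class name BlockLocationDemo and the sample values are made up, not part of Hadoop): it serializes a BlockLocation to a byte array with write() and restores it with readFields(). In normal client code you would not call these methods yourself; you would obtain a BlockLocation[] from FileSystem#getFileBlockLocations.

import java.io.*;
import org.apache.hadoop.fs.BlockLocation;

public class BlockLocationDemo {
  public static void main(String[] args) throws IOException {
    BlockLocation original = new BlockLocation(
        new String[] { "dn1:50010", "dn2:50010" },   // names (hostname:port)
        new String[] { "dn1", "dn2" },               // hosts
        new String[] { "/rack1/dn1", "/rack2/dn2" }, // topology paths
        0L, 64 * 1024 * 1024L);                      // offset, length (64 MB block)

    // Serialize via write(DataOutput).
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytes));

    // Deserialize into an empty instance via readFields(DataInput).
    BlockLocation copy = new BlockLocation();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

    System.out.println(copy); // prints "0,67108864,dn1,dn2"
  }
}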
