org.apache.hadoop.fs - BlockLocation
A utility class, more or less.
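Before digging into the source, it helps to see where client code usually meets this class: FileSystem.getFileBlockLocations returns an array of BlockLocation objects describing where each block of a file lives. A minimal sketch of that (the path /user/demo/data.txt is a made-up example; any existing HDFS file works):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;

public class BlockLocationDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical path; substitute a file that exists in your cluster.
    FileStatus status = fs.getFileStatus(new Path("/user/demo/data.txt"));
    // Ask for the locations of every block covering [0, file length).
    BlockLocation[] blocks = fs.getFileBlockLocations(status, 0, status.getLen());
    for (BlockLocation b : blocks) {
      // toString() prints "offset,length,host1,host2,..." (see below).
      System.out.println(b);
    }
  }
}

With that picture in mind, on to the source itself.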
package org.apache.hadoop.fs;

import org.apache.hadoop.io.*;
// The io package's classes haven't come up yet; we'll analyze each one as we encounter it.
import java.io.*;

/*
 * A BlockLocation lists the hosts, offset and length
 * of a block.
 */
// Records a block's metadata: the hosts that serve it, its offset, and its length.
public class BlockLocation implements Writable {
// A class describing a block's location within the cluster.
  static { // register a ctor
    WritableFactories.setFactory
      (BlockLocation.class,
       new WritableFactory() {
         public Writable newInstance() { return new BlockLocation(); }
       });
  }
  // Registers a factory for the Writable subclass BlockLocation, implemented as an
  // anonymous inner class. See http://book.2cto.com/201305/21915.html for details.
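  // A quick illustration of what this registration buys (my own sketch, not part of
  // the Hadoop source): once the factory is in place, framework code such as
  // ObjectWritable can construct instances reflectively without knowing the class:
  //
  //   Writable w = WritableFactories.newInstance(BlockLocation.class);
  //   // w is a fresh, empty BlockLocation, ready for readFields(in)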
  private String[] hosts; // hostnames of datanodes
  // Array of datanode hostnames.
  private String[] names; // hostname:portNumber of datanodes
  // Array of datanode names, each in hostname:port form.
  private String[] topologyPaths; // full path name in network topology
  // Each datanode's full path in the network topology.
  private long offset; // offset of the block in the file
  // The block's offset within the file.
  private long length;
  // The block's length.

  /**
   * Default constructor.
   */
  public BlockLocation() {
    this(new String[0], new String[0], 0L, 0L);
  }
  // The no-arg constructor; the factory registered above relies on it.
  /**
   * Constructor with host, name, offset and length
   */
  public BlockLocation(String[] names, String[] hosts, long offset,
                       long length) {
    if (names == null) {
      this.names = new String[0];
    } else {
      this.names = names;
    }
    if (hosts == null) {
      this.hosts = new String[0];
    } else {
      this.hosts = hosts;
    }
    this.offset = offset;
    this.length = length;
    this.topologyPaths = new String[0];
  }
  // Builds a block location from names, hosts, offset, and length;
  // null arrays are normalized to empty ones.
  /**
   * Constructor with host, name, network topology, offset and length
   */
  public BlockLocation(String[] names, String[] hosts, String[] topologyPaths,
                       long offset, long length) {
    this(names, hosts, offset, length);
    if (topologyPaths == null) {
      this.topologyPaths = new String[0];
    } else {
      this.topologyPaths = topologyPaths;
    }
  }
  // Same as above, but also takes the network topology paths.
  /**
   * Get the list of hosts (hostname) hosting this block
   */
  public String[] getHosts() throws IOException {
    if ((hosts == null) || (hosts.length == 0)) {
      return new String[0];
    } else {
      return hosts;
    }
  }
  // Returns the hosts serving this block; never null.
  /**
   * Get the list of names (hostname:port) hosting this block
   */
  public String[] getNames() throws IOException {
    if ((names == null) || (names.length == 0)) {
      return new String[0];
    } else {
      return this.names;
    }
  }
  // Same pattern for the hostname:port names.
  /**
   * Get the list of network topology paths for each of the hosts.
   * The last component of the path is the host.
   */
  public String[] getTopologyPaths() throws IOException {
    if ((topologyPaths == null) || (topologyPaths.length == 0)) {
      return new String[0];
    } else {
      return this.topologyPaths;
    }
  }
  // Same pattern for the topology paths.
  /**
   * Get the start offset of file associated with this block
   */
  public long getOffset() {
    return offset;
  }
  /**
   * Get the length of the block
   */
  public long getLength() {
    return length;
  }
  /**
   * Set the start offset of file associated with this block
   */
  public void setOffset(long offset) {
    this.offset = offset;
  }
  /**
   * Set the length of block
   */
  public void setLength(long length) {
    this.length = length;
  }
  /**
   * Set the hosts hosting this block
   */
  public void setHosts(String[] hosts) throws IOException {
    if (hosts == null) {
      this.hosts = new String[0];
    } else {
      this.hosts = hosts;
    }
  }
  /**
   * Set the names (host:port) hosting this block
   */
  public void setNames(String[] names) throws IOException {
    if (names == null) {
      this.names = new String[0];
    } else {
      this.names = names;
    }
  }
  /**
   * Set the network topology paths of the hosts
   */
  public void setTopologyPaths(String[] topologyPaths) throws IOException {
    if (topologyPaths == null) {
      this.topologyPaths = new String[0];
    } else {
      this.topologyPaths = topologyPaths;
    }
  }
  /**
   * Implement write of Writable
   */
  public void write(DataOutput out) throws IOException {
    out.writeLong(offset);
    out.writeLong(length);
    out.writeInt(names.length);
    for (int i = 0; i < names.length; i++) {
      Text name = new Text(names[i]);
      name.write(out);
    }
    out.writeInt(hosts.length);
    for (int i = 0; i < hosts.length; i++) {
      Text host = new Text(hosts[i]);
      host.write(out);
    }
    out.writeInt(topologyPaths.length);
    for (int i = 0; i < topologyPaths.length; i++) {
      Text path = new Text(topologyPaths[i]);
      path.write(out);
    }
  }
  // Serialization: writes the block info to the output stream. Each String[] is
  // written as a count followed by the elements, with Text (itself a Writable)
  // handling the string encoding.
  /**
   * Implement readFields of Writable
   */
  public void readFields(DataInput in) throws IOException {
    this.offset = in.readLong();
    this.length = in.readLong();
    int numNames = in.readInt();
    this.names = new String[numNames];
    for (int i = 0; i < numNames; i++) {
      Text name = new Text();
      name.readFields(in);
      names[i] = name.toString();
    }
    int numHosts = in.readInt();
    this.hosts = new String[numHosts]; // allocate before filling, as with names
    for (int i = 0; i < numHosts; i++) {
      Text host = new Text();
      host.readFields(in);
      hosts[i] = host.toString();
    }
    int numTops = in.readInt();
    this.topologyPaths = new String[numTops]; // likewise
    Text path = new Text();
    for (int i = 0; i < numTops; i++) {
      path.readFields(in);
      topologyPaths[i] = path.toString();
    }
  }
  // Deserialization: reads the block info back from the input stream in the same
  // order it was written. Note that each array must be re-allocated to the count
  // read from the stream before it is filled.
  public String toString() {
    StringBuilder result = new StringBuilder();
    result.append(offset);
    result.append(',');
    result.append(length);
    for (String h : hosts) {
      result.append(',');
      result.append(h);
    }
    return result.toString();
  }
  // Renders the block as "offset,length,host1,host2,...".
}
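To tie write and readFields together, here is a self-contained round trip (my own test harness, not from the Hadoop tree): serialize a BlockLocation into a byte array, then read it back through the Writable interface. The datanode names and sizes are made-up values.

import java.io.*;
import org.apache.hadoop.fs.BlockLocation;

public class BlockLocationRoundTrip {
  public static void main(String[] args) throws IOException {
    BlockLocation original = new BlockLocation(
        new String[] { "dn1:50010" },   // names (hostname:port)
        new String[] { "dn1" },         // hosts
        0L,                             // offset
        64 * 1024 * 1024L);             // length: a 64 MB block

    // write() into an in-memory buffer
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytes));

    // readFields() from that buffer into a fresh instance
    BlockLocation copy = new BlockLocation();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

    System.out.println(copy); // expected: 0,67108864,dn1
  }
}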