Basic HDFS File Operations

The class below exercises the Hadoop FileSystem API: uploading, downloading, appending to, reading, creating, deleting, listing, and moving files and directories on HDFS. It assumes a running HDFS instance (the existence check points at hdfs://localhost:9000, i.e. a pseudo-distributed setup).
package wjn;

import java.io.BufferedInputStream;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.URI;
import java.text.SimpleDateFormat;
import java.util.Scanner;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.io.IOUtils;

public class ww {

    public static Scanner sc = new Scanner(System.in);
    public static void main(String[] args) throws IOException {
        main9(); // each mainN() below exercises one operation; change the call to test another
    }

    // Move /user/hadoop/text/text1 into /user/hadoop.
    public static void main10() throws IOException {
        String p1 = "/user/hadoop/text/text1";
        String p2 = "/user/hadoop";
        if (mv(p1, p2)) {
            System.out.println("File moved successfully");
        } else {
            System.out.println("Failed to move file");
        }
    }

    // Delete a file and report the result.
    public static void main9() throws IOException {
        String hdfspath = "/user/hadoop/text2";
        if (deleteFileFromHDFS(hdfspath)) {
            System.out.println("Deleted file " + hdfspath);
        } else {
            System.out.println("Failed to delete file");
        }
    }

    // Create the directory if it is missing; otherwise delete it, but only
    // when it is empty or forcedelete is set.
    public static void main7() throws IOException {
        String hdfspath1 = "/user/hadoop";
        boolean forcedelete = false;
        if (!ifex(hdfspath1)) {
            mkdir(hdfspath1);
            System.out.println("Created directory " + hdfspath1);
        } else {
            if (isempty(hdfspath1) || forcedelete) {
                rmDir(hdfspath1);
                System.out.println("Deleted directory " + hdfspath1);
            } else {
                System.out.println("Directory is not empty; not deleting");
            }
        }
    }

    // Delete the file if it exists; otherwise create it, along with its
    // parent directory if needed.
    public static void main6() throws IOException {
        String hdfspath = "/user/hadoop/text2";
        String hdfspath1 = "/user/hadoop";
        if (ifex(hdfspath)) {
            deleteFileFromHDFS(hdfspath);
            System.out.println("Path exists; deleted " + hdfspath);
        } else {
            if (!ifex(hdfspath1)) {
                mkdir(hdfspath1);
                System.out.println("Created directory " + hdfspath1);
            }
            touchz(hdfspath);
            System.out.println("Created " + hdfspath);
        }
    }

    // List every file under a directory, recursively.
    public static void main5() throws IOException {
        String hdfspath = "/user/hadoop";
        System.out.println("Information for all files:");
        lsDir(hdfspath);
    }

    // Show the status of a single file.
    public static void main4() throws IOException {
        String hdfspath = "/user/hadoop/text2";
        System.out.println("File information:");
        ls(hdfspath);
    }

    // Print a file's contents to the terminal.
    public static void main3() throws IOException {
        String hdfspath = "/user/hadoop/text2";
        cat(hdfspath);
        System.out.println("Read complete");
    }

    // Download an HDFS file to the local file system.
    public static void main2() throws IOException {
        String localpath = "/home/hadoop/1234.txt";
        String hdfspath = "/user/hadoop/text2";
        download(hdfspath, localpath);
        System.out.println("File downloaded successfully");
    }

    // Upload a local file; if the target already exists, let the user choose
    // between appending and overwriting.
    public static void main1() throws IOException {
        String localpath = "/home/hadoop/123.txt";
        String hdfspath = "/user/hadoop/text2";
        if (ifex(hdfspath)) {
            System.out.println("File exists; append (1) or overwrite (2)?");
            int i = sc.nextInt();
            if (i == 1) {
                appendFileToHDFS(hdfspath, localpath);
                System.out.println("File appended successfully");
            } else if (i == 2) {
                deleteFileFromHDFS(hdfspath);
                update(localpath, hdfspath);
                System.out.println("File overwritten successfully");
            } else {
                System.out.println("Invalid input");
            }
        } else {
            update(localpath, hdfspath);
            System.out.println("File did not exist; uploaded successfully");
        }
    }
    // Upload a local file to HDFS, creating or overwriting the target.
    public static void update(String localpath, String hdfspath) throws IOException {
        InputStream in = new BufferedInputStream(new FileInputStream(localpath));
        FileSystem fileSystem = FileSystem.get(URI.create(hdfspath), new Configuration());
        OutputStream out = fileSystem.create(new Path(hdfspath));
        IOUtils.copyBytes(in, out, 4096, true); // true: copyBytes closes both streams when done
        fileSystem.close();
    }
    // Check whether a path exists on HDFS.
    public static boolean ifex(String hdfspath) throws IOException {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000");
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        FileSystem fs = FileSystem.get(conf);
        boolean exists = fs.exists(new Path(hdfspath));
        fs.close();
        return exists;
    }
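    // Note (added, not in the original post): ifex() pins fs.defaultFS to
    // hdfs://localhost:9000, while every other helper builds its FileSystem from a
    // bare path plus whatever core-site.xml is on the classpath. If the two disagree,
    // ifex() and the other helpers talk to different file systems. A shared factory
    // such as this hypothetical sketch would keep them consistent:
    public static Configuration hdfsConf() {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000"); // assumes a pseudo-distributed cluster on localhost
        return conf;
    }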
    // Create a directory.
    public static void mkdir(String hdfspath) throws IOException {
        FileSystem fileSystem = FileSystem.get(URI.create(hdfspath), new Configuration());
        Path dirPath = new Path(hdfspath);
        fileSystem.mkdirs(dirPath);
        fileSystem.close();
    }
    // Create an empty file.
    public static void touchz(String hdfspath) throws IOException {
        FileSystem fileSystem = FileSystem.get(URI.create(hdfspath), new Configuration());
        Path dirPath = new Path(hdfspath);
        FSDataOutputStream outputStream = fileSystem.create(dirPath);
        outputStream.close();
        fileSystem.close();
    }

    // Append a local file to an existing HDFS file.
    public static void appendFileToHDFS(String hdfsPath, String localFilePath) throws IOException {
        Configuration config = new Configuration();
        // These two settings keep append from failing on small clusters
        // (see the note after this method).
        config.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        config.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");
        FileSystem fileSystem = FileSystem.get(URI.create(hdfsPath), config);
        InputStream in = new BufferedInputStream(new FileInputStream(localFilePath));
        FSDataOutputStream out = fileSystem.append(new Path(hdfsPath));
        IOUtils.copyBytes(in, out, 4096, true);
        fileSystem.close();
    }
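    // Note (added, not in the original post): when a datanode in the write pipeline
    // fails during an append, the HDFS client normally tries to swap in a replacement
    // datanode. On a pseudo-distributed or very small cluster there is no spare
    // datanode to substitute, so the default policy makes append abort with a
    // "failed to replace a bad datanode" error; setting the policy to NEVER tells
    // the client not to attempt the replacement at all.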
    // Delete a file. delete() removes the path immediately, unlike deleteOnExit(),
    // which only schedules the removal for when the FileSystem handle is closed.
    public static boolean deleteFileFromHDFS(String hdfsPath) throws IOException {
        FileSystem fileSystem = FileSystem.get(URI.create(hdfsPath), new Configuration());
        boolean result = fileSystem.delete(new Path(hdfsPath), false);
        fileSystem.close();
        return result;
    }
    // Delete a directory and everything under it.
    public static void rmDir(String hdfspath) throws IOException {
        FileSystem fileSystem = FileSystem.get(URI.create(hdfspath), new Configuration());
        fileSystem.delete(new Path(hdfspath), true); // true: recursive
        fileSystem.close();
    }

    // Check whether a directory is empty.
    public static boolean isempty(String hdfspath) throws IOException {
        FileSystem fileSystem = FileSystem.get(URI.create(hdfspath), new Configuration());
        RemoteIterator<LocatedFileStatus> remoteIterator = fileSystem.listFiles(new Path(hdfspath), true);
        boolean empty = !remoteIterator.hasNext();
        fileSystem.close(); // close the handle before returning
        return empty;
    }
    // Download an HDFS file to the local file system.
    public static void download(String hdfsPath, String localPath) throws IOException {
        FileSystem fileSystem = FileSystem.get(URI.create(hdfsPath), new Configuration());
        FSDataInputStream in = fileSystem.open(new Path(hdfsPath));
        OutputStream out = new FileOutputStream(localPath);
        IOUtils.copyBytes(in, out, 4096, true);
        fileSystem.close();
    }
    // Print the contents of an HDFS file to the terminal.
    public static void cat(String hdfspath) throws IOException {
        FileSystem fileSystem = FileSystem.get(URI.create(hdfspath), new Configuration());
        FSDataInputStream in = fileSystem.open(new Path(hdfspath));
        BufferedReader d = new BufferedReader(new InputStreamReader(in));
        String line = null;
        while ((line = d.readLine()) != null) {
            System.out.println(line);
        }
        d.close();
        in.close();
        fileSystem.close();
    }
    // Show the status of the file at the given HDFS path.
    public static void ls(String hdfspath) throws IOException {
        FileSystem fileSystem = FileSystem.get(URI.create(hdfspath), new Configuration());
        Path path = new Path(hdfspath);
        FileStatus[] fileStatus = fileSystem.listStatus(path);
        for (FileStatus s : fileStatus) {
            System.out.println("Path: " + s.getPath().toString());
            System.out.println("Permissions: " + s.getPermission().toString());
            System.out.println("Size: " + s.getLen());
            long time = s.getModificationTime();
            SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
            String date = format.format(time);
            System.out.println("Modified: " + date);
        }
        fileSystem.close();
    }
    // Show the status of every file under a directory, recursively.
    public static void lsDir(String hdfspath) throws IOException {
        FileSystem fileSystem = FileSystem.get(URI.create(hdfspath), new Configuration());
        Path dirPath = new Path(hdfspath);
        RemoteIterator<LocatedFileStatus> remoteIterator = fileSystem.listFiles(dirPath, true);
        while (remoteIterator.hasNext()) {
            FileStatus s = remoteIterator.next();
            System.out.println("Path: " + s.getPath().toString());
            System.out.println("Permissions: " + s.getPermission().toString());
            System.out.println("Size: " + s.getLen());
            long time = s.getModificationTime();
            SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
            String date = format.format(time);
            System.out.println("Modified: " + date);
            System.out.println();
        }
        fileSystem.close();
    }
    // Move (rename) a file.
    public static boolean mv(String path1, String path2) throws IOException {
        FileSystem fileSystem = FileSystem.get(URI.create(path1), new Configuration());
        Path p1 = new Path(path1);
        Path p2 = new Path(path2);
        boolean result = fileSystem.rename(p1, p2);
        fileSystem.close();
        return result;
    }
}
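One pattern worth flagging: every helper calls FileSystem.get(...) and then close(). FileSystem.get() returns a cached instance shared per URI, configuration, and user, so closing it in one helper can invalidate a handle still held elsewhere, and a thrown exception skips the close() entirely. As a minimal sketch of an alternative (the method name upload and the choice of FileSystem.newInstance are illustrative, not part of the original code), the upload helper could be written with try-with-resources:

public static void upload(String localPath, String hdfsPath) throws IOException {
    // newInstance() bypasses the shared cache, so closing this handle
    // cannot break FileSystem objects held by other helpers.
    try (FileSystem fs = FileSystem.newInstance(URI.create(hdfsPath), new Configuration());
         InputStream in = new BufferedInputStream(new FileInputStream(localPath));
         OutputStream out = fs.create(new Path(hdfsPath))) {
        // Pass false so copyBytes does not close the streams itself;
        // try-with-resources already guarantees that, even on error.
        IOUtils.copyBytes(in, out, 4096, false);
    }
}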