package com.hy.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class HDFSCommand {

    public static final Logger log = LoggerFactory.getLogger(HDFSCommand.class);

    public static void main(String[] args) throws Exception {
        String hdfsURI = "hdfs://10.1.23.240:9000";
        String srcPath = "D:" + File.separator + "readme.txt";
        String descPath = "/xhy";
        String data = "haohaohaohaohao\r\n善字\r\n善生\r\n善行\r\n守善\r\n愿善";
        Configuration conf = new Configuration();

        copyFromLocalFile(hdfsURI, srcPath, descPath, conf);
        uploadFile(hdfsURI, data, descPath, conf);

        RemoteIterator<LocatedFileStatus> locatedFileStatusRemoteIterator = listFile(hdfsURI, descPath, conf, true);
        while (locatedFileStatusRemoteIterator.hasNext()) {
            LocatedFileStatus next = locatedFileStatusRemoteIterator.next();
            System.out.println("listFile:" + next.toString());
        }

        FileStatus[] fileStatuses = listFileAndFolder(hdfsURI, descPath, conf);
        for (FileStatus f : fileStatuses) {
            System.out.println("listFileAndFolder:" + f.toString());
        }
    }

    /**
     * Upload a file from a given local path to HDFS
     *
     * @param hdfsURI
     * @param srcPath
     * @param descPath
     * @param conf
     */
    public static void copyFromLocalFile(String hdfsURI, String srcPath, String descPath, Configuration conf) throws URISyntaxException, IOException {
        log.info(">> copyFromLocalFile, srcPath is {}, descPath is {}", srcPath, descPath);
        FileSystem fs = FileSystem.get(new URI(hdfsURI), conf);
        fs.copyFromLocalFile(new Path(srcPath), new Path(descPath));
        log.info("<< copyFromLocalFile success");
        fs.close();
        /*
         * Under the hood this boils down to:
         * fs.open(new Path(srcPath), 4096);
         * fs.create(new Path(descPath));
         * IOUtils.copyBytes(in, out, conf, true);
         */
    }

    /**
     * Write a string of data to HDFS
     *
     * @param hdfsURI
     * @param data
     * @param descPath
     * @param conf
     */
    public static void uploadFile(String hdfsURI, String data, String descPath, Configuration conf) throws Exception {
        log.info(">> uploadFile, descPath is {}, data is {}", descPath, data);
        FileSystem fs = FileSystem.get(new URI(hdfsURI), conf);
        /*FSDataOutputStream fsOutputStream = fs.create(new Path(descPath), new Progressable() {
            @Override
            public void progress() {
                log.info("<< write to hdfs finished, file path: {}", descPath);
            }
        });*/
        FSDataOutputStream fsOutputStream = fs.create(new Path(descPath),
                () -> log.info("<< write to hdfs finished, file path: {}", descPath));
        // encode explicitly as UTF-8 so the Chinese characters do not depend on the platform default charset
        byte[] bytes = data.getBytes(StandardCharsets.UTF_8);
        fsOutputStream.write(bytes, 0, bytes.length);
        /*
         * The following calls garble the Chinese characters:
         * fsOutputStream.writeBytes(data);
         * fsOutputStream.writeUTF(data);
         * fsOutputStream.writeChars(data);
         */
        fsOutputStream.close();
        fs.close();
    }

    /**
     * List the files under a given HDFS path
     *
     * @param hdfsURI
     * @param path
     * @param conf
     * @param recursive whether to recurse into subdirectories
     * @throws Exception
     */
    public static RemoteIterator<LocatedFileStatus> listFile(String hdfsURI, String path, Configuration conf, boolean recursive) throws Exception {
        log.info(">> listFile, path is {}, recursive is {}", path, recursive);
        FileSystem fs = FileSystem.get(new URI(hdfsURI), conf);
        RemoteIterator<LocatedFileStatus> result = fs.listFiles(new Path(path), recursive);
        log.info("<< listFile, result is {}", result);
        return result;
    }

    /**
     * List the files and directories under a given HDFS path
     *
     * @param hdfsURI
     * @param path
     * @param conf
     */
    public static FileStatus[] listFileAndFolder(String hdfsURI, String path, Configuration conf) throws Exception {
        log.info(">> listFileAndFolder, path is {}", path);
        FileSystem fs = FileSystem.get(new URI(hdfsURI), conf);
        FileStatus[] result = fs.listStatus(new Path(path));
        // log the array contents rather than the array's default toString()
        log.info("<< listFileAndFolder, result is {}", Arrays.toString(result));
        return result;
    }

    /**
     * Create a directory
     *
     * @param hdfsURI
     * @param path
     * @param conf
     * @throws Exception
     */
    public static void mkDir(String hdfsURI, String path, Configuration conf) throws Exception {
        log.info(">> mkDir, path is {}", path);
        FileSystem fs = FileSystem.get(new URI(hdfsURI), conf);
        boolean result = fs.mkdirs(new Path(path));
        if (result) {
            log.info("<< mkDir {} success", path);
        } else {
            log.error("<< mkDir {} error", path);
        }
    }

    /**
     * Delete the given path (recursively)
     *
     * @param hdfsURI
     * @param path
     * @param conf
     * @throws IOException
     */
    public static void delete(String hdfsURI, String path, Configuration conf) throws IOException {
        log.info(">> delete, path is {}", path);
        conf.set("fs.defaultFS", hdfsURI);
        FileSystem fs = FileSystem.get(conf);
        if (!fs.exists(new Path(path))) {
            log.info("<< delete {} error, path does not exist", path);
            return;
        }
        boolean result = fs.delete(new Path(path), true);
        if (result) {
            log.info("<< delete {} success", path);
        } else {
            log.error("<< delete {} error", path);
        }
    }

    /**
     * Download a file from HDFS to the local file system
     *
     * @param hdfsURI
     * @param srcPath
     * @param descPath
     * @param conf
     * @throws Exception
     */
    public static void downloadFile(String hdfsURI, String srcPath, String descPath, Configuration conf) throws Exception {
        log.info(">> downloadFile, srcPath is {}, descPath is {}", srcPath, descPath);
        FileSystem fs = FileSystem.get(new URI(hdfsURI), conf);
        FSDataInputStream in = fs.open(new Path(srcPath));
        OutputStream out = new FileOutputStream(new File(descPath));
        // the trailing 'true' closes both streams once the copy completes
        IOUtils.copyBytes(in, out, conf, true);
        fs.close();
    }

    public static void catFile(String hdfsURI, String path, Configuration conf) throws Exception {
        log.info(">> catFile, path is {}", path);
        FileSystem fs = FileSystem.get(URI.create(hdfsURI), conf);
        FSDataInputStream in = fs.open(new Path(path));
        try {
            IOUtils.copyBytes(in, System.out, 4096, false);
        } finally {
            IOUtils.closeStream(in);
            fs.close();
        }
    }
}
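
The block comment at the end of copyFromLocalFile notes that the copy boils down to opening the source, creating the destination file, and calling IOUtils.copyBytes. As a rough illustration (not part of the original listing), a hand-rolled streamUpload helper along those lines might look like the sketch below; it reuses the imports already in HDFSCommand plus java.io.BufferedInputStream, java.io.FileInputStream and java.io.InputStream.

    // Sketch only: a manual, stream-based equivalent of copyFromLocalFile.
    // Needs java.io.BufferedInputStream / FileInputStream / InputStream in addition
    // to the imports already present in HDFSCommand.
    public static void streamUpload(String hdfsURI, String srcPath, String descPath, Configuration conf) throws Exception {
        try (FileSystem fs = FileSystem.get(new URI(hdfsURI), conf);
             InputStream in = new BufferedInputStream(new FileInputStream(srcPath));
             FSDataOutputStream out = fs.create(new Path(descPath))) {
            // copy 4 KB at a time; 'false' leaves closing to try-with-resources
            IOUtils.copyBytes(in, out, 4096, false);
        }
    }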

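main above only exercises copyFromLocalFile, uploadFile and the two listing helpers. One possible way to drive the remaining methods is sketched below; only the HDFS URI comes from the original code, the local and remote paths are made-up placeholders.

        // Example driver for the other helpers (could be appended to main);
        // all paths below are placeholders, not from the original post.
        Configuration conf = new Configuration();
        String hdfsURI = "hdfs://10.1.23.240:9000";

        HDFSCommand.mkDir(hdfsURI, "/xhy/sub", conf);                                       // create a directory
        HDFSCommand.downloadFile(hdfsURI, "/xhy/readme.txt", "D:\\readme-copy.txt", conf);  // HDFS -> local
        HDFSCommand.catFile(hdfsURI, "/xhy/readme.txt", conf);                              // print file contents to stdout
        HDFSCommand.delete(hdfsURI, "/xhy/sub", conf);                                      // recursive delete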