Our company is about to integrate Ceph storage. After some research, I decided to implement it through Amazon's S3 interface. The implementation class is as follows:

/**
 * Title: S3Manager
 * Description: S3 interface implementation for Ceph storage. Reference docs:
 * https://docs.aws.amazon.com/zh_cn/AmazonS3/latest/dev/RetrievingObjectUsingJava.html
 * http://docs.ceph.org.cn/radosgw/s3/
 * author: xu jun
 * date: 2018/10/22
 */
@Slf4j
@Service
public class S3Manager extends StorageManagerBase implements StorageManager {

    private final UKID ukid;
    private final S3ClientConfig s3ClientConfig;
    private final RedisManage redisManage;
    private AmazonS3 amazonClient;

    @Autowired
    public S3Manager(UKID ukid, S3ClientConfig s3ClientConfig, RedisManage redisManage) {
        this.ukid = ukid;
        this.s3ClientConfig = s3ClientConfig;
        this.redisManage = redisManage;
    }

    private AmazonS3 getAmazonClient() {
        if (amazonClient == null) {
            String accessKey = s3ClientConfig.getAccessKey();
            String secretKey = s3ClientConfig.getSecretKey();
            String endpoint = s3ClientConfig.getEndPoint();
            AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
            ClientConfiguration clientConfig = new ClientConfiguration();
            clientConfig.setProtocol(Protocol.HTTP);
            AmazonS3 conn = AmazonS3ClientBuilder.standard()
                    .withClientConfiguration(clientConfig)
                    .withCredentials(new AWSStaticCredentialsProvider(credentials))
                    .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, ""))
                    // Address the bucket as a path (http://endpoint/bucket) instead of a subdomain
                    .withPathStyleAccessEnabled(true)
                    .build();
            // Make sure the bucket exists before handing out the client
            checkBucket(conn);
            amazonClient = conn;
        }
        return amazonClient;
    }

    @Override
    public String uploadFile(byte[] fileData, String extension) {
        log.info("Storage s3 api, upload file start");
        // Generate a random sequence number as the file name
        long fileId = ukid.getGeneratorID();
        String fileName = Long.toString(fileId);
        // Bucket name
        String bucketName = s3ClientConfig.getBucketName();
        AmazonS3 conn = getAmazonClient();
        PutObjectResult result = conn.putObject(bucketName, fileName, new ByteArrayInputStream(fileData), null);
        log.info("Storage s3 api, put object result :{}", result);
        log.info("Storage s3 api, upload file end, file name:" + fileName);
        return fileName;
    }

    @Override
    public String uploadAppenderFile(byte[] fileData, String extension) {
        log.info("Storage s3 api, upload appender file start");
        // Generate a random sequence number as the file name
        long ukId = ukid.getGeneratorID();
        String fileName = Long.toString(ukId);
        // Bucket name
        String bucketName = s3ClientConfig.getBucketName();
        AmazonS3 conn = getAmazonClient();
        List<PartETag> partETags = new ArrayList<>();
        // Initiate the multipart upload
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, fileName);
        InitiateMultipartUploadResult initResponse = conn.initiateMultipartUpload(initRequest);
        String uploadId = initResponse.getUploadId();
        ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(fileData);
        Integer contentLength = fileData.length;
        // Upload the data as the first part
        UploadPartRequest uploadPartRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withKey(fileName)
                .withUploadId(uploadId)
                .withPartNumber(1)
                .withPartSize(contentLength)
                .withInputStream(byteArrayInputStream);
        UploadPartResult uploadPartResult = conn.uploadPart(uploadPartRequest);
        try {
            byteArrayInputStream.close();
        } catch (IOException e) {
            throw FileCenterExceptionConstants.INTERNAL_IO_EXCEPTION;
        }
        partETags.add(uploadPartResult.getPartETag());
        Integer partNumber = uploadPartResult.getPartNumber();
        // Cache the upload state in Redis so later chunks can resume it
        S3CacheMode cacheMode = new S3CacheMode();
        cacheMode.setPartETags(partETags);
        cacheMode.setPartNumber(partNumber);
        cacheMode.setUploadId(uploadId);
        redisManage.set(fileName, cacheMode);
        log.info("Storage s3 api, upload appender file end, fileName: {}", fileName);
        return fileName;
    }

    @Override
    public void uploadChunkFile(ChunkFileSaveParams chunkFileSaveParams) {
        log.info("Storage s3 api, upload chunk file start");
        String fileName = chunkFileSaveParams.getFileAddress();
        Result result = redisManage.get(fileName);
        JSONObject jsonObject = (JSONObject) result.getData();
        if (jsonObject == null) {
            throw FileCenterExceptionConstants.CACHE_DATA_NOT_EXIST;
        }
        S3CacheMode cacheMode = jsonObject.toJavaObject(S3CacheMode.class);
        Integer partNumber = cacheMode.partNumber;
        String uploadId = cacheMode.getUploadId();
        List<PartETag> partETags = cacheMode.partETags;
        // Bucket name
        String bucketName = s3ClientConfig.getBucketName();
        AmazonS3 conn = getAmazonClient();
        ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(chunkFileSaveParams.getBytes());
        Integer contentLength = chunkFileSaveParams.getBytes().length;
        UploadPartRequest uploadPartRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withKey(fileName)
                .withUploadId(uploadId)
                .withPartNumber(partNumber + 1)
                .withPartSize(contentLength)
                .withInputStream(byteArrayInputStream);
        UploadPartResult uploadPartResult = conn.uploadPart(uploadPartRequest);
        partETags.add(uploadPartResult.getPartETag());
        partNumber = uploadPartResult.getPartNumber();
        try {
            byteArrayInputStream.close();
        } catch (IOException e) {
            throw FileCenterExceptionConstants.INTERNAL_IO_EXCEPTION;
        }
        // Write the updated upload state back to Redis
        S3CacheMode cacheModeUpdate = new S3CacheMode();
        cacheModeUpdate.setPartETags(partETags);
        cacheModeUpdate.setPartNumber(partNumber);
        cacheModeUpdate.setUploadId(uploadId);
        redisManage.set(fileName, cacheModeUpdate);
        if (chunkFileSaveParams.getChunk().equals(chunkFileSaveParams.getChunks() - 1)) {
            // Last chunk: complete the multipart upload and assemble the stored object
            CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName, fileName,
                    uploadId, partETags);
            conn.completeMultipartUpload(compRequest);
        }
        log.info("Storage s3 api, upload chunk file end");
    }

    @Override
    public byte[] downloadFile(String fileName) {
        log.info("Storage s3 api, download file start");
        // Bucket name
        String bucketName = s3ClientConfig.getBucketName();
        AmazonS3 conn = getAmazonClient();
        S3Object object;
        if (conn.doesObjectExist(bucketName, fileName)) {
            object = conn.getObject(bucketName, fileName);
        } else {
            throw FileCenterExceptionConstants.OBJECT_NOT_EXIST;
        }
        log.debug("Storage s3 api, get object result :{}", object);
        byte[] fileByte;
        InputStream inputStream = object.getObjectContent();
        try {
            fileByte = IOUtils.toByteArray(inputStream);
        } catch (IOException e) {
            throw FileCenterExceptionConstants.INTERNAL_IO_EXCEPTION;
        } finally {
            // Close the stream exactly once, whether or not reading succeeded
            try {
                inputStream.close();
            } catch (IOException e) {
                log.error(e.getMessage());
            }
        }
        log.info("Storage s3 api, download file end");
        return fileByte;
    }

    @Override
    public byte[] downloadFile(String fileName, long fileOffset, long fileSize) {
        log.info("Storage s3 api, download file by block start");
        // Bucket name
        String bucketName = s3ClientConfig.getBucketName();
        AmazonS3 conn = getAmazonClient();
        S3Object object;
        if (conn.doesObjectExist(bucketName, fileName)) {
            // Ranged download; the range is inclusive at both ends,
            // so fileSize bytes end at fileOffset + fileSize - 1
            GetObjectRequest getObjectRequest = new GetObjectRequest(bucketName, fileName)
                    .withRange(fileOffset, fileOffset + fileSize - 1);
            object = conn.getObject(getObjectRequest);
        } else {
            throw FileCenterExceptionConstants.OBJECT_NOT_EXIST;
        }
        log.info("Storage s3 api, get object result :{}", object);
        // Read the data
        byte[] buf;
        InputStream in = object.getObjectContent();
        try {
            buf = inputToByte(in, (int) fileSize);
        } catch (IOException e) {
            throw FileCenterExceptionConstants.INTERNAL_IO_EXCEPTION;
        } finally {
            try {
                in.close();
            } catch (IOException e) {
                log.error(e.getMessage());
            }
        }
        log.info("Storage s3 api, download file by block end");
        return buf;
    }

    @Override
    public String fileSecret(String filePath) {
        return null;
    }

    @Override
    public String fileDecrypt(String filePath) {
        return null;
    }

    @Override
    public String getDomain() {
        return null;
    }

    /**
     * Check whether the bucket has been created, and create it if not
     *
     * @param conn client connection
     */
    private void checkBucket(AmazonS3 conn) {
        // Bucket name
        String bucketName = s3ClientConfig.getBucketName();
        if (conn.doesBucketExist(bucketName)) {
            log.debug("Storage s3 api, bucket found: " + bucketName);
        } else {
            log.warn("Storage s3 api, bucket does not exist, creating it: " + bucketName);
            conn.createBucket(bucketName);
        }
    }

    /**
     * Convert an InputStream to byte[]
     *
     * @param inStream input stream
     * @param fileSize file size
     * @return the bytes read
     * @throws IOException on read failure
     */
    private static byte[] inputToByte(InputStream inStream, int fileSize) throws IOException {
        ByteArrayOutputStream swapStream = new ByteArrayOutputStream();
        byte[] buff = new byte[fileSize];
        int rc;
        while ((rc = inStream.read(buff, 0, fileSize)) > 0) {
            swapStream.write(buff, 0, rc);
        }
        return swapStream.toByteArray();
    }

    /**
     * Debug helper: prints the stream's data to the console
     *
     * @param input input stream
     * @throws IOException on read failure
    private static void displayTextInputStream(InputStream input) throws IOException {
        // Read the text input stream one line at a time and display each line.
        BufferedReader reader = new BufferedReader(new InputStreamReader(input));
        String line;
        while ((line = reader.readLine()) != null) {
            log.info(line);
        }
    }
    */
}

The business-facing interfaces need to support multipart upload (with resume after interruption), chunked download, and similar features; the class above is the low-level layer and contains no business logic.
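For illustration, here is a minimal sketch of what such a business-layer caller might look like. It is not part of the project: the ChunkFileSaveParams no-arg constructor and the setFileAddress/setBytes/setChunk/setChunks setters are assumptions that mirror the getters used above, so adjust them to the real class.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Arrays;

public class ResumableUploadSketch {

    // Every part except the last must be at least 5 MB (see note 3 below)
    private static final int PART_SIZE = 5 * 1024 * 1024;

    private final StorageManager storageManager; // e.g. the S3Manager bean

    public ResumableUploadSketch(StorageManager storageManager) {
        this.storageManager = storageManager;
    }

    public String upload(String localPath, String extension) throws IOException {
        byte[] data = Files.readAllBytes(Paths.get(localPath));
        int chunks = (data.length + PART_SIZE - 1) / PART_SIZE;

        // Chunk 0 initiates the multipart upload and returns the object key
        byte[] first = Arrays.copyOfRange(data, 0, Math.min(PART_SIZE, data.length));
        String fileName = storageManager.uploadAppenderFile(first, extension);

        // Remaining chunks; because the upload state lives in Redis under
        // fileName, an interrupted loop can be resumed from the last chunk
        for (int chunk = 1; chunk < chunks; chunk++) {
            int from = chunk * PART_SIZE;
            int to = Math.min(from + PART_SIZE, data.length);
            ChunkFileSaveParams params = new ChunkFileSaveParams(); // assumed no-arg constructor
            params.setFileAddress(fileName); // assumed setter for getFileAddress()
            params.setBytes(Arrays.copyOfRange(data, from, to));   // assumed setter
            params.setChunk(chunk);   // zero-based index, matching getChunk()
            params.setChunks(chunks); // total count, matching getChunks()
            storageManager.uploadChunkFile(params);
        }
        // The last chunk triggers completeMultipartUpload inside uploadChunkFile
        return fileName;
    }
}

Note that a file that fits in a single chunk should go through uploadFile instead, since uploadChunkFile only completes the multipart upload when a chunk after the first one arrives.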

Maven dependency:

        <!-- Interface for Ceph storage -->
        <dependency>
            <groupId>com.amazonaws</groupId>
            <artifactId>aws-java-sdk</artifactId>
            <version>1.11.</version>
        </dependency>
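The umbrella aws-java-sdk artifact pulls in the clients for every AWS service. If only S3 is used, the S3-only module is a lighter alternative (the version below is a placeholder; match it to the concrete 1.11.x release used above):

        <!-- S3 module only -->
        <dependency>
            <groupId>com.amazonaws</groupId>
            <artifactId>aws-java-sdk-s3</artifactId>
            <version>1.11.x</version>
        </dependency>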

Development notes:

  1. The S3 interface documentation (Java) on the Ceph site is sparse and outdated, and is barely usable as a reference anymore. Look up the API and code samples on the Amazon site instead (it even offers a Chinese translation, which is appreciated).

  2. The S3 interface itself provides no append operation, so implementing multipart upload is fairly awkward (unlike FastDFS and OSS, which make this convenient).

  3. The minimum part size for multipart upload defaults to 5 MB; it can be changed in the server configuration (see the sketch after this list).

  4. If a domain name is used as the endpoint, the bucket name is by default resolved as a subdomain (which requires extra DNS records, so it is not recommended). To address the bucket as a path instead, set the corresponding option in the client configuration (withPathStyleAccessEnabled(true) above).
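On note 3: the 5 MB floor is enforced by the RADOS gateway, not by the SDK. A minimal sketch of the server-side change, assuming a Ceph release that exposes the rgw_multipart_min_part_size option (the section name below is a placeholder for your own RGW instance; restart the gateway after changing it):

[client.rgw.gateway1]
# Placeholder section name; use your RGW instance id.
# Default is 5242880 (5 MB); lower it to accept smaller parts.
rgw_multipart_min_part_size = 1048576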
