SpringBoot集成亚马逊的S3对象存储
依赖导入:aws-java-sdk-s3
<dependencyManagement>
<dependencies>
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-bom</artifactId>
<version>1.12.642</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>
<dependencies>
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-s3</artifactId>
</dependency>
</dependencies>
组装好客户端-Service |EdsOssFileUploadService
public interface EdsOssFileUploadService {
/**
 * High-level API multipart upload for large files (backed by TransferManager).
 * @param keyName unique file key
 * @param file file to upload
 * @return the object key actually stored (implementation prepends a UUID prefix)
 * @throws InterruptedException if interrupted while waiting for the transfer to finish
 */
String highLevelMultipartUpload(String keyName, File file) throws InterruptedException;
/**
 * Low-level multipart upload of a file into a specific bucket.
 * @param keyName unique file key
 * @param bucketName bucket name
 * @param file file to upload
 * @return CompleteMultipartUploadResult
 */
CompleteMultipartUploadResult bigFileListShardingUpload(String keyName, String bucketName, File file);
/**
 * Low-level multipart upload of a file into the default bucket.
 * @param keyName unique file key
 * @param file file to upload
 * @return CompleteMultipartUploadResult
 */
CompleteMultipartUploadResult bigFileListShardingUpload(String keyName, File file);
/**
 * Multipart upload from a stream into a specific bucket.
 * @param bucketName bucket name
 * @param fileInputStream source stream
 * @param fileContentLength total length of the original file, in bytes
 * @param keyName unique file key
 * @return CompleteMultipartUploadResult
 */
CompleteMultipartUploadResult uploadLargeFileToS3(String bucketName, FileInputStream fileInputStream, long fileContentLength, String keyName);
/**
 * Multipart upload from a stream into the default bucket.
 * @param fileInputStream source stream
 * @param fileLength total length of the original file, in bytes
 * @param key unique file key
 * @return CompleteMultipartUploadResult
 */
CompleteMultipartUploadResult uploadLargeFileToS3(FileInputStream fileInputStream, long fileLength, String key);
/**
 * Download an object from a specific bucket.
 * @param key unique file key
 * @param bucketName bucket name
 * @return InputStream of the object content (caller is responsible for closing it)
 */
InputStream downloadToEDS(String key, String bucketName);
/**
 * Download an object from the default bucket.
 * @param key unique file key
 * @return InputStream of the object content (caller is responsible for closing it)
 */
InputStream downloadToEDS(String key);
/**
 * Simple upload from a stream into a specific bucket.
 * @param keyName unique file key
 * @param inputStream source stream
 * @param bucketName bucket name
 * @return PutObjectResult
 */
PutObjectResult streamUploadToEDS(String keyName, InputStream inputStream, String bucketName);
/**
 * Simple upload from a stream into the default bucket.
 * @param keyName unique file key
 * @param inputStream source stream
 * @return PutObjectResult
 */
PutObjectResult streamUploadToEDS(String keyName, InputStream inputStream);
/**
 * Simple upload of a file into a specific bucket.
 * @param keyName unique file key
 * @param file file to upload
 * @param bucketName bucket name
 * @return PutObjectResult
 */
PutObjectResult simpleUploadToEDS(String keyName, File file, String bucketName);
/**
 * Simple upload of a file into the default bucket.
 * @param keyName unique file key
 * @param file file to upload
 * @return PutObjectResult
 */
PutObjectResult simpleUploadToEDS(String keyName, File file);
/**
 * Create a pre-signed URL for uploading an object (HTTP PUT).
 * @param bucketName bucket name
 * @param keyName unique file key
 * @return URL
 */
URL createSignedUrlForStringPut(String bucketName, String keyName);
/**
 * Create a pre-signed URL for downloading an object (HTTP GET).
 * @param bucketName bucket name
 * @param keyName unique file key
 * @return URL
 */
URL createSignedUrlForStringGet(String bucketName, String keyName);
}
组装好客户端-ServiceImpl |EdsOssFileUploadServiceImpl
@Service
public class EdsOssFileUploadServiceImpl implements EdsOssFileUploadService {

    private static final Logger logger = LoggerFactory.getLogger(EdsOssFileUploadServiceImpl.class);

    /** Part size (25 MB) used by the file-based multipart upload. */
    private static final long FILE_PART_SIZE = 25L * 1024 * 1024;

    /** Part size (5 MB) used by the stream-based multipart upload. */
    private static final long STREAM_PART_SIZE = 5L * 1024 * 1024;

    /** Lifetime of generated pre-signed URLs, in milliseconds (1 hour). */
    private static final long PRESIGNED_URL_TTL_MILLIS = 3600L * 1000;

    /**
     * EDS object-store access key, e.g. L8O3KRQZTXGVDIBQ0WON.
     * NOTE(security): a real-looking credential is embedded here as the @Value default;
     * it should be removed and supplied only via external configuration / secret management.
     */
    @Value("${eds.cloud.oss.accessKey:MOTNMUS8FJ8P3YTXQSB5}")
    private String accessKey;
    /**
     * EDS object-store secret key.
     * NOTE(security): same concern as accessKey — do not ship secrets as defaults.
     */
    @Value("${eds.cloud.oss.secretKey:sOMNfGbrCP9vv3OwGyHrc3ET4OQGIt5QqHCVAMW5}")
    private String secretKey;
    /** EDS object-store endpoint (host:port), e.g. http://10.212.27.56:12001 */
    @Value("${eds.cloud.oss.endPoint:10.1.2.16:12001}")
    private String endPoint;
    /** Default bucket name, e.g. eds-cloud-oss */
    @Value("${eds.cloud.oss.bucketName:digtal_resources}")
    private String bucketName;
    /** EDS object-store region, e.g. cn-north-1 (may be blank for EDS) */
    @Value("${eds.cloud.oss.region:}")
    private String region;

    private AmazonS3 amazonS3Client;
    private TransferManager transferManager;

    /**
     * Builds the S3 client and the TransferManager once after property injection.
     * Plain HTTP with generous (5 min) timeouts, matching the private EDS endpoint.
     */
    @PostConstruct
    public void init() {
        AWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
        ClientConfiguration clientConfig = new ClientConfiguration();
        clientConfig.setProtocol(Protocol.HTTP);
        clientConfig.setConnectionTimeout(300000);
        clientConfig.setClientExecutionTimeout(300000);
        clientConfig.setConnectionMaxIdleMillis(300000);
        this.amazonS3Client = AmazonS3ClientBuilder.standard()
                .withClientConfiguration(clientConfig)
                .withCredentials(new AWSStaticCredentialsProvider(credentials))
                .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endPoint, region))
                .build();
        this.transferManager = TransferManagerBuilder.standard().withS3Client(amazonS3Client).build();
    }

    /**
     * High-level multipart upload via TransferManager, which handles part sizing and
     * concurrency internally.
     *
     * @param keyName unique file key; a random UUID prefix is prepended to avoid collisions
     * @param file    file to upload
     * @return the object key actually stored ({@code uuid + "/" + keyName})
     * @throws InterruptedException if interrupted while waiting for the transfer to finish
     */
    @Override
    public String highLevelMultipartUpload(String keyName, File file) throws InterruptedException {
        String upPrefix = UUID.randomUUID().toString();
        String key = upPrefix + "/" + keyName;
        Upload upload = transferManager.upload(bucketName, key, file);
        // Blocks the calling thread until the whole transfer completes (or throws).
        upload.waitForCompletion();
        logger.info("Object upload complete");
        return key;
    }

    /**
     * Low-level multipart upload: the file is split into {@link #FILE_PART_SIZE} parts,
     * each uploaded individually, then assembled with completeMultipartUpload.
     * On failure the multipart upload is aborted (best effort) so orphaned parts do not
     * accumulate on the server, and the SdkClientException is rethrown wrapped.
     *
     * @param keyName    unique file key
     * @param bucketName target bucket
     * @param file       file to upload
     * @return CompleteMultipartUploadResult
     */
    @Override
    public CompleteMultipartUploadResult bigFileListShardingUpload(String keyName, String bucketName, File file) {
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
        InitiateMultipartUploadResult initResponse = amazonS3Client.initiateMultipartUpload(initRequest);
        String uploadId = initResponse.getUploadId();
        List<PartETag> partETags = new ArrayList<>();
        long contentLength = file.length();
        long filePosition = 0;
        int partNumber = 1;
        CompleteMultipartUploadResult result;
        try {
            logger.info("-------开始只能进入分片上传阶段-------");
            while (filePosition < contentLength) {
                // The last part may be shorter than the fixed part size.
                long partSize = Math.min(FILE_PART_SIZE, contentLength - filePosition);
                UploadPartRequest uploadRequest = new UploadPartRequest()
                        .withBucketName(bucketName)
                        .withKey(keyName)
                        .withUploadId(uploadId)
                        .withPartNumber(partNumber)
                        .withFileOffset(filePosition)
                        .withFile(file)
                        .withPartSize(partSize);
                UploadPartResult uploadResult = amazonS3Client.uploadPart(uploadRequest);
                // The ETag of each part is required by completeMultipartUpload to verify assembly.
                partETags.add(uploadResult.getPartETag());
                filePosition += partSize;
                partNumber++;
                logger.info("文件分片上传--->" + filePosition);
            }
            logger.info("-------所有分片上传完整------->进入分片合并阶段-------");
            CompleteMultipartUploadRequest compRequest =
                    new CompleteMultipartUploadRequest(bucketName, keyName, uploadId, partETags);
            result = amazonS3Client.completeMultipartUpload(compRequest);
        } catch (SdkClientException e) {
            // Fixed: log the failing part number (the old code logged the byte offset as "part").
            logger.error("分片上传错误,第{}片发生意外", partNumber, e);
            abortQuietly(bucketName, keyName, uploadId);
            throw new RuntimeException(e);
        }
        logger.info("-------大文件分片上传完成--->" + filePosition);
        return result;
    }

    /** Same as the three-argument overload, targeting the configured default bucket. */
    @Override
    public CompleteMultipartUploadResult bigFileListShardingUpload(String keyName, File file) {
        return bigFileListShardingUpload(keyName, bucketName, file);
    }

    /**
     * Stream-based multipart upload in {@link #STREAM_PART_SIZE} parts.
     * The input stream is always closed before returning.
     *
     * @param bucketName        target bucket
     * @param fileInputStream   source stream (closed by this method)
     * @param fileContentLength total length of the original file, in bytes
     * @param keyName           unique file key
     * @return CompleteMultipartUploadResult, or {@code null} if the upload failed
     *         (the failure is logged and the multipart upload aborted)
     */
    @Override
    public CompleteMultipartUploadResult uploadLargeFileToS3(String bucketName, FileInputStream fileInputStream, long fileContentLength, String keyName) {
        String uploadId = null;
        CompleteMultipartUploadResult completeMultipartUploadResult = null;
        try {
            InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
            InitiateMultipartUploadResult initResponse = amazonS3Client.initiateMultipartUpload(initRequest);
            uploadId = initResponse.getUploadId();
            List<PartETag> partETags = new ArrayList<>();
            long bytePosition = 0;
            int partNumber = 1;
            while (bytePosition < fileContentLength) {
                long partSize = Math.min(STREAM_PART_SIZE, fileContentLength - bytePosition);
                UploadPartRequest uploadRequest = new UploadPartRequest()
                        .withBucketName(bucketName)
                        .withKey(keyName)
                        .withUploadId(uploadId)
                        .withPartNumber(partNumber)
                        .withFileOffset(bytePosition)
                        .withInputStream(fileInputStream)
                        .withPartSize(partSize);
                partETags.add(amazonS3Client.uploadPart(uploadRequest).getPartETag());
                bytePosition += partSize;
                partNumber++;
            }
            CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                    bucketName, keyName, uploadId, partETags);
            completeMultipartUploadResult = amazonS3Client.completeMultipartUpload(compRequest);
        } catch (SdkClientException e) {
            logger.error("Error uploading file to S3", e);
            // Fixed: the old code dereferenced initResponse here, which NPEs when
            // initiateMultipartUpload itself was what threw. abortQuietly guards a null uploadId.
            abortQuietly(bucketName, keyName, uploadId);
        } finally {
            try {
                fileInputStream.close();
            } catch (IOException e) {
                // Fixed: was e.printStackTrace() — route through the logger instead.
                logger.error("Failed to close upload stream", e);
            }
        }
        return completeMultipartUploadResult;
    }

    /** Same as the four-argument overload, targeting the configured default bucket. */
    @Override
    public CompleteMultipartUploadResult uploadLargeFileToS3(FileInputStream fileInputStream, long fileLength, String key) {
        return uploadLargeFileToS3(bucketName, fileInputStream, fileLength, key);
    }

    /**
     * Simple object download as a stream.
     *
     * @param key        unique file key
     * @param bucketName source bucket
     * @return the object content stream (caller is responsible for closing it)
     */
    @Override
    public InputStream downloadToEDS(String key, String bucketName) {
        logger.info("Downloading {} from S3 bucket {}...\n", key, bucketName);
        S3Object object = amazonS3Client.getObject(new GetObjectRequest(bucketName, key));
        return object.getObjectContent();
    }

    /** Same as the two-argument overload, targeting the configured default bucket. */
    @Override
    public InputStream downloadToEDS(String key) {
        return downloadToEDS(key, bucketName);
    }

    /**
     * Simple stream upload with no-cache headers and application/octet-stream content type.
     *
     * @param keyName     unique file key
     * @param inputStream source stream
     * @param bucketName  target bucket
     * @return PutObjectResult
     */
    @Override
    public PutObjectResult streamUploadToEDS(String keyName, InputStream inputStream, String bucketName) {
        try {
            ObjectMetadata metadata = new ObjectMetadata();
            // NOTE(review): available() is only the number of bytes readable without blocking,
            // not necessarily the total stream length — correct for a fully buffered stream,
            // wrong for e.g. network streams. Callers should prefer the multipart API with an
            // explicit length for anything but small in-memory streams. TODO confirm callers.
            metadata.setContentLength(inputStream.available());
            // Browser/proxy caching behavior when the object is later downloaded.
            metadata.setCacheControl("no-cache");
            metadata.setHeader("Pragma", "no-cache");
            metadata.setContentEncoding("utf-8");
            // Default MIME type when the key carries no usable extension.
            metadata.setContentType("application/octet-stream");
            return amazonS3Client.putObject(bucketName, keyName, inputStream, metadata);
        } catch (IOException e) {
            logger.error("文件上传失败。。。。" + e.getMessage());
            throw new RuntimeException(e);
        } catch (SdkClientException e) {
            throw new RuntimeException(e);
        }
    }

    /** Same as the three-argument overload, targeting the configured default bucket. */
    @Override
    public PutObjectResult streamUploadToEDS(String keyName, InputStream inputStream) {
        return streamUploadToEDS(keyName, inputStream, bucketName);
    }

    /**
     * Simple file upload into a specific bucket.
     *
     * @param keyName    unique file key
     * @param file       file to upload
     * @param bucketName target bucket
     * @return PutObjectResult
     */
    @Override
    public PutObjectResult simpleUploadToEDS(String keyName, File file, String bucketName) {
        return amazonS3Client.putObject(bucketName, keyName, file);
    }

    /** Same as the three-argument overload, targeting the configured default bucket. */
    @Override
    public PutObjectResult simpleUploadToEDS(String keyName, File file) {
        // Consistency: delegate instead of duplicating the putObject call.
        return simpleUploadToEDS(keyName, file, bucketName);
    }

    /**
     * Create a pre-signed upload URL (HTTP PUT), valid for one hour.
     *
     * @param bucketName target bucket
     * @param keyName    unique file key
     * @return pre-signed URL
     */
    @Override
    public URL createSignedUrlForStringPut(String bucketName, String keyName) {
        return createSignedUrl(bucketName, keyName, HttpMethod.PUT);
    }

    /**
     * Create a pre-signed download URL (HTTP GET), valid for one hour.
     *
     * @param bucketName source bucket
     * @param keyName    unique file key
     * @return pre-signed URL
     */
    @Override
    public URL createSignedUrlForStringGet(String bucketName, String keyName) {
        return createSignedUrl(bucketName, keyName, HttpMethod.GET);
    }

    /** Shared pre-signed-URL generation for both the GET and PUT variants. */
    private URL createSignedUrl(String bucketName, String keyName, HttpMethod method) {
        Date expiration = new Date(System.currentTimeMillis() + PRESIGNED_URL_TTL_MILLIS);
        GeneratePresignedUrlRequest generatePresignedUrlRequest =
                new GeneratePresignedUrlRequest(bucketName, keyName, method);
        generatePresignedUrlRequest.setExpiration(expiration);
        return amazonS3Client.generatePresignedUrl(generatePresignedUrlRequest);
    }

    /** Best-effort abort of a multipart upload; a null uploadId (init itself failed) is a no-op. */
    private void abortQuietly(String bucketName, String keyName, String uploadId) {
        if (uploadId == null) {
            return;
        }
        try {
            amazonS3Client.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, keyName, uploadId));
        } catch (SdkClientException abortEx) {
            logger.error("abortMultipartUpload failed, uploadId={}", uploadId, abortEx);
        }
    }
}
SpringBoot测试:@SpringBootTest
@SpringBootTest(classes = FilestoreApplication.class)
@Import(EdsOssFileUploadServiceImpl.class)
public class FilestoreApplicationTest {
private Logger logger = LoggerFactory.getLogger(FilestoreApplicationTest.class);
@Autowired
private EdsOssFileUploadService edsOssFileUploadService;
/**
 * Multipart upload to EDS cloud object storage: test passed.
 * <p>
 * Verbatim output from a successful run:
 * 2024-03-06 14:07:03.079 INFO 15144 --- [ main] c.c.c.f.FilestoreApplicationTest : Started FilestoreApplicationTest in 2.059 seconds (JVM running for 2.728)
 * 2024-03-06 14:07:03.270 INFO 15144 --- [ main] c.c.c.f.FilestoreApplicationTest : 上传生成的=26438e0c-e379-4343-b645-03e8056baaf6/硬件检测.jpg
 * 2024-03-06 14:07:03.628 INFO 15144 --- [ main] c.c.c.f.a.i.EdsOssFileUploadServiceImpl : -------开始只能进入分片上传阶段-------
 * 2024-03-06 14:07:03.918 INFO 15144 --- [ main] c.c.c.f.a.i.EdsOssFileUploadServiceImpl : 文件分片上传--->196369
 * 2024-03-06 14:07:03.919 INFO 15144 --- [ main] c.c.c.f.a.i.EdsOssFileUploadServiceImpl : -------所有分片上传完整------->进入分片合并阶段-------
 * 2024-03-06 14:07:03.969 INFO 15144 --- [ main] c.c.c.f.a.i.EdsOssFileUploadServiceImpl : -------大文件分片上传完成--->196369
 * 2024-03-06 14:07:03.969 INFO 15144 --- [ main] c.c.c.f.FilestoreApplicationTest : 上传执行时间(ms):699
 * 2024-03-06 14:07:03.969 INFO 15144 --- [ main] c.c.c.f.FilestoreApplicationTest : 上传成功:1afe99dec706c24e174f5d22e08b48b7-1
 */
@Test
void contextLoads() {
try {
File fileLocal = new File("D:\\硬件检测.jpg");
String originalFilename = fileLocal.getName();
String key = UUID.randomUUID() + "/" + originalFilename;
logger.info("上传生成的=" + key);
StopWatch stopWatch = new StopWatch();
stopWatch.start();
CompleteMultipartUploadResult result = edsOssFileUploadService.bigFileListShardingUpload(key, fileLocal);
stopWatch.stop();
logger.info("上传执行时间(ms):" + stopWatch.getTotalTimeMillis());
// Process the upload result: the ETag identifies the assembled object.
String eTag = result.getETag();
// ...
logger.info("上传成功:" + eTag);
} catch (Exception e) {
logger.error("上传失败", e);
}
}
/**
 * High-level API large-file upload to EDS cloud object storage.
 */
@Test
void contextLoadsBigFile() {
try {
File fileLocal = new File("D:\\fileTemp\\深信服OSS\\aws-java-sdk-1.12.643.zip");
String originalFilename = fileLocal.getName();
String key = UUID.randomUUID() + "/" + originalFilename;
logger.info("上传生成的=" + key);
StopWatch stopWatch = new StopWatch();
stopWatch.start();
String eTag = edsOssFileUploadService.highLevelMultipartUpload(key, fileLocal);
stopWatch.stop();
logger.info("上传执行时间(ms):" + stopWatch.getTotalTimeMillis());
// ...
logger.info("上传成功:" + eTag);
} catch (Exception e) {
logger.error("上传失败", e);
}
}
/**
 * Multipart (sharding) API large-file upload to EDS cloud object storage.
 */
@Test
void contextLoadsBigFileShare() {
try {
File fileLocal = new File("D:\\fileTemp\\深信服OSS\\aws-java-sdk-1.12.643.zip");
String originalFilename = fileLocal.getName();
String key = UUID.randomUUID() + "/" + originalFilename;
logger.info("上传生成的=" + key);
StopWatch stopWatch = new StopWatch();
stopWatch.start();
CompleteMultipartUploadResult result = edsOssFileUploadService.uploadLargeFileToS3(new FileInputStream(fileLocal), fileLocal.length(), key);
stopWatch.stop();
logger.info("上传执行时间(ms):" + stopWatch.getTotalTimeMillis());
// Process the upload result. NOTE(review): uploadLargeFileToS3 returns null on failure,
// so this getETag() call would NPE in that case.
String eTag = result.getETag();
// ...
logger.info("上传成功:" + eTag);
} catch (Exception e) {
logger.error("上传失败", e);
}
}
/**
 * File download.
 */
@Test
void contextLoadsDownFile() {
try {
String key = "26438e0c-e379-4343-b645-03e8056baaf6/硬件检测.jpg";
logger.info("文件下载的key=" + key);
StopWatch stopWatch = new StopWatch();
stopWatch.start();
InputStream inputStream = edsOssFileUploadService.downloadToEDS(key);
stopWatch.stop();
logger.info("文件下载执行时间(ms):" + stopWatch.getTotalTimeMillis());
// Process the result stream (not consumed here).
if (inputStream != null) {
// ...
logger.info("文件下载:");
}
} catch (Exception e) {
logger.error("文件下载", e);
}
}
/**
 * Pre-signed URL + file download: test passed.
 * Verbatim output from a successful run:
 * 2024-03-06 14:10:26.721 INFO 29356 --- [ main] c.c.c.f.FilestoreApplicationTest : 文件下载的key=26438e0c-e379-4343-b645-03e8056baaf6/硬件检测.jpg
 * 2024-03-06 14:10:26.763 INFO 29356 --- [ main] c.c.c.f.FilestoreApplicationTest : 文件下载执行时间(ms):41
 * 2024-03-06 14:10:26.763 INFO 29356 --- [ main] c.c.c.f.FilestoreApplicationTest : 文件下载:http://10.1.2.16:12001/digtal_resources/26438e0c-e379-4343-b645-03e8056baaf6/%E7%A1%AC%E4%BB%B6%E6%A3%80%E6%B5%8B.jpg?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20240306T061026Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3599&X-Amz-Credential=MOTNMUS8FJ8P3YTXQSB5%2F20240306%2F%2Fs3%2Faws4_request&X-Amz-Signature=9536999bbc0f1b6de5bb3cf910af0def5ee1f04389446a1688724f27d5655e65
 */
@Test
void createSignedUrlForStringGet() {
try {
String key = "26438e0c-e379-4343-b645-03e8056baaf6/硬件检测.jpg";
logger.info("文件下载的key=" + key);
StopWatch stopWatch = new StopWatch();
stopWatch.start();
URL url = edsOssFileUploadService.createSignedUrlForStringGet("digtal_resources", key);
stopWatch.stop();
logger.info("文件下载执行时间(ms):" + stopWatch.getTotalTimeMillis());
// Process the result URL.
if (url != null) {
// ...
logger.info("文件下载:" + url);
}
} catch (Exception e) {
logger.error("文件下载", e);
}
}
/**
 * Pre-signed URL for file upload.
 */
@Test
void createSignedUrlForStringPut() {
try {
String key = "26438e0c-e379-4343-b645-03e8056baaf6/硬件检测.jpg";
logger.info("文件上传的key=" + key);
StopWatch stopWatch = new StopWatch();
stopWatch.start();
URL url = edsOssFileUploadService.createSignedUrlForStringPut("digtal_resources", key);
stopWatch.stop();
logger.info("文件上传执行时间(ms):" + stopWatch.getTotalTimeMillis());
// Process the result URL.
if (url != null) {
// ...
logger.info("文件上传:" + url);
}
} catch (Exception e) {
logger.error("文件上传", e);
}
}
}
SpringBoot集成亚马逊的S3对象存储的更多相关文章
- Node开发文件上传系统及向七牛云存储和亚马逊AWS S3的文件上传
背景起,有奏乐: 有伟人曰:学习技能的最好途径莫过于理论与实践相结合. 初学Node这货时,每每读教程必会Fall asleep. 当真要开发系统时,顿觉精神百倍,即便踩坑无数也不失斗志. 因为同团队 ...
- laravel 上传文件到亚马逊 aws s3
参考: https://github.com/aws/aws-sdk-php-laravel https://www.jianshu.com/p/e48d82bff20b
- 亚马逊云存储器S3 BUCKET安全性学习笔记
亚马逊云存储器S3 BUCKET安全性学习笔记 Bugs_Bunny CTF – Walk walk CTF 昨天玩了会这个比赛,碰到这题是知识盲点,来记录一下. 先从题目看起吧. http://ww ...
- 借助亚马逊S3和RapidMiner将机器学习应用到文本挖掘
文本挖掘典型地运用了机器学习技术,例如聚类,分类,关联规则,和预测建模.这些技术揭示潜在内容中的意义和关系.文本挖掘应用于诸如竞争情报,生命科学,客户呼声,媒体和出版,法律和税收,法律实施,情感分析和趋 ...
- 亚马逊S3下载上传文件
引用网址: http://www.jxtobo.com/27697.html 下载 CloudBerry Explorer http://www.cloudberrylab.com/download- ...
- AWS系列之一 亚马逊云服务概述
云计算经过这几年的发展,已经不再是是一个高大上的名词,而是已经应用到寻常百姓家的技术.每天如果你和互联网打交道,那么或多或少都会和云扯上关系.gmail.github.各种网盘.GAE.heroku等 ...
- Amazon EMR(Elastic MapReduce):亚马逊Hadoop托管服务运行架构&Hadoop云服务之战:微软vs.亚马逊
http://s3tools.org/s3cmd Amazon Elastic MapReduce (Amazon EMR)简介 Amazon Elastic MapReduce (Amazon EM ...
- 完全基于 Java 的开源深度学习平台,亚马逊的大佬带你上手
本文适合有 Java 基础的人群 作者:DJL-Lanking HelloGitHub 推出的<讲解开源项目>系列.有幸邀请到了亚马逊 + Apache 的工程师:Lanking( htt ...
- HTML5游戏开发进阶指南(亚马逊5星畅销书,教你用HTML5和JavaScript构建游戏!)
HTML5游戏开发进阶指南(亚马逊星畅销书,教你用HTML5和JavaScript构建游戏!) [印]香卡(Shankar,A.R.)著 谢光磊译 ISBN 978-7-121-21226-0 201 ...
- A亚马逊WS网上系列讲座——怎么样AWS云平台上千万用户的应用建设
用户选择云计算平台构建应用程序的一个重要原因是高弹性的云平台和可扩展性. 面向Internet应用程序通常需要支持用户使用大量,但要建立一个高度可扩展.具有一定的挑战,高度可用的应用程序,只有立足AW ...
随机推荐
- [转]v-mode 提示错误 v-model directives require the attribute value which is valid as LHS.
v-mode总是提示错误 v-model directives require the attribute value which is valid as LHS. 为什么调用过滤总是提示这个错呢? ...
- SuperMap Objects .NET知识库:SQL查询以及通配符
1 SQL 语句的构建 在SuperMap组件产品中,有许多接口都用到了过滤条件,也就是标准 SQL 语句中的 WHERE 子句部分,比如各种涉及属性查询的接口.网络分析中弧段的过滤条件.拓扑 ...
- (三).NET6.0使用Autofac实现依赖注入
1.添加依赖注入的两个关键包 Autofac.Extensions.DependencyInjection 和 Autofac.Extras.DynamicProxy 2.在Program中添加Aut ...
- Java工具类HttpClientUtil
1. 依赖包 <dependency> <groupId>org.apache.httpcomponents</groupId> <artifactId> ...
- ABAP配置:OY01 定义国家/地区
配置:OY01 定义国家/地区 事务代码:OY01 配置路径: SPRO-ABAP平台-常规设置-设置国家-定义国家/地区 配置路径截图: 配置描述: 国家是SAP里面一个非常重要的概念,SAP国家概 ...
- CDS标准视图:安排维护计划的调用 I_MAINTENANCEPLANSCHEDULE
视图名称:安排维护计划的调用 I_MAINTENANCEPLANSCHEDULE 视图类型: 视图代码: 点击查看代码 @AbapCatalog.compiler.compareFilter: tru ...
- CDS标准视图:维护项目数据 C_MaintenanceItemDEX
视图名称:维护项目数据 C_MaintenanceItemDEX 视图类型:基础 视图代码: 点击查看代码 @AbapCatalog.sqlViewName: 'CMAINTITEMDEX' @Aba ...
- Nibbles PG walkthrough Intermediate
nmap nmap -p- -A 192.168.239.47 Starting Nmap 7.95 ( https://nmap.org ) at 2025-01-15 02:26 UTC Nmap ...
- NOIp 2024 游记
要是 T3 T4 挂分就寄了. Day-11 运动会上 vp 了 NOIp2023 和 NOIp2022,NOIp2023 被 T2 硬控了一会,最后口胡的做法感觉可以拿 \(100+100+35+1 ...
- Luogu P4933 大师 题解 [ 绿 ] [ 线性 dp ] [ dp 细节处理 ] [ 限制转移条件优化 ]
依据值域的 \(O(n^2)\) 做法 这种做法只适用于这种值域小的题,下一种做法才是求等差数列的通解. 我们定义 \(f[i][j]\) 表示以 \(h_i\) 为最后一个数,公差为 \(j\) 的 ...