Spark MLlib - LFW
// Load every LFW-a image as a (filename, content) pair from the local filesystem.
val path = "/usr/data/lfw-a/*"
val rdd = sc.wholeTextFiles(path)
val first = rdd.first
println(first)
// Strip the "file:" URI scheme so the paths can be handed to java.io.File later.
val files = rdd.map { case (fileName, content) =>
  fileName.replace("file:", "")
}
println(files.first)
println(files.count)
%pyspark import matplotlib.pyplot as plt path = "/usr/data/lfw-a/Aaron_Eckhart/Aaron_Eckhart_0001.jpg" ae = plt.imread(path) plt.imshow(ae) plt.show()
import java.awt.image.BufferedImage
/** Read an image file from the local filesystem into a BufferedImage. */
def loadImageFromFile(path: String): BufferedImage =
  javax.imageio.ImageIO.read(new java.io.File(path))
// Load a single test image through the helper defined above.
val aePath = "/usr/data/lfw-a/Aaron_Eckhart/Aaron_Eckhart_0001.jpg"
val aeImage = loadImageFromFile(aePath)
import java.awt.image
/**
 * Convert an image to grayscale and resize it to width x height.
 *
 * Drawing the source into a TYPE_BYTE_GRAY target at (0, 0) with the
 * target dimensions performs both the scaling and the grayscale
 * conversion in one step. (The scrape dropped the "0, 0" literals.)
 */
def processImage(image: BufferedImage, width: Int, height: Int):
BufferedImage = {
  val bwImage = new BufferedImage(width, height, BufferedImage.TYPE_BYTE_GRAY)
  val g = bwImage.getGraphics()
  g.drawImage(image, 0, 0, width, height, null)
  g.dispose()
  bwImage
}
// Resize/grayscale the test image (100x100 as in the book's example)
// and write it out so the Python cell below can display it.
val grayImage = processImage(aeImage, 100, 100)
import javax.imageio.ImageIO
import java.io.File
ImageIO.write(grayImage, "jpg", new File("/tmp/aeGray.jpg"))
%pyspark import matplotlib.pyplot as plt tmpPath = "/tmp/aeGray.jpg" aeGary = plt.imread(tmpPath) plt.imshow(aeGary, cmap=plt.cm.gray) plt.show()
/**
 * Flatten the image raster into an Array[Double] of grayscale values.
 *
 * Raster.getPixels fills the supplied array starting at (0, 0) and
 * returns it, so the call is also the method's result.
 * (The scrape dropped the "0, 0" literals.)
 */
def getPixelsFromImage(image: BufferedImage): Array[Double] = {
  val width = image.getWidth
  val height = image.getHeight
  val pixels = Array.ofDim[Double](width * height)
  image.getData.getPixels(0, 0, width, height, pixels)
}
/** Full feature pipeline: load the file, normalise it to a width x height
  * grayscale image, and return the flattened pixel values. */
def extractPixels(path: String, width: Int, height: Int): Array[Double] =
  getPixelsFromImage(processImage(loadImageFromFile(path), width, height))
// Extract a 50x50 grayscale feature vector for every image
// (50x50 = 2500 dimensions, as in the book's LFW example).
val pixels = files.map(f => extractPixels(f, 50, 50))
println(pixels.take(10).map(_.take(10).mkString
("", ",", ", ...")).mkString("\n"))
import org.apache.spark.mllib.linalg.Vectors
val vectors = pixels.map(p => Vectors.dense(p))
// Name and cache the RDD: it is reused by the scaler fit and transform below.
vectors.setName("image-vectors")
vectors.cache
import org.apache.spark.mllib.linalg.Matrix
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import org.apache.spark.mllib.feature.StandardScaler
// Center each pixel dimension (mean subtraction only — no variance scaling,
// since all dimensions are already on the same 0..255 scale).
val scaler = new StandardScaler(withMean = true, withStd = false).fit(vectors)
val scaledVectors = vectors.map(v => scaler.transform(v))
val matrix = new RowMatrix(scaledVectors)
// Number of principal components to keep (10 in the book's example).
val K = 10
val pc = matrix.computePrincipalComponents(K)
val rows = pc.numRows
val cols = pc.numCols
println(rows, cols)
// Copy the principal components into a Breeze matrix so breeze.linalg.csvwrite
// can serialise them; the CSV is read back by the Python plotting cells below.
import breeze.linalg.DenseMatrix
val pcBreeze = new DenseMatrix(rows, cols, pc.toArray)
import breeze.linalg.csvwrite
csvwrite(new File("/tmp/pc.csv"), pcBreeze)
%pyspark
import numpy as np
import matplotlib.pyplot as plt
# Load the principal components written out by the Scala side (/tmp/pc.csv).
pcs = np.loadtxt("/tmp/pc.csv", delimiter=",")
print(pcs.shape)
%pyspark
import numpy as np
import matplotlib.pyplot as plt
def plot_gallery(images, h, w, n_row=, n_col=):
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=, left=., right=., top=.,hspace=.)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + )
plt.imshow(images[:, i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(), size=)
plt.xticks(())
plt.yticks(())
%pyspark plot_gallery(pcs, , ) plt.show()
// Project the centred face vectors into the K-dimensional PCA space.
val projected = matrix.multiply(pc)
println(projected.numRows, projected.numCols)
println(projected.rows.take(5).mkString("\n"))
// SVD with the same k = 10; U * S should equal the PCA projection (checked below).
val svd = matrix.computeSVD(10, computeU = true)
println(s"U dimension: (${svd.U.numRows}, ${svd.U.numCols})")
println(s"S dimension: (${svd.s.size}, )")
println(s"V dimension: (${svd.V.numRows}, ${svd.V.numCols})")
/**
 * Element-wise comparison of two arrays, ignoring sign (PCA/SVD components
 * are only defined up to sign), within `tolerance`.
 *
 * Fixes two defects: the scrape dropped the "1e-6" exponents, and the
 * original body hard-coded the threshold instead of using the `tolerance`
 * parameter. Default behaviour (tolerance = 1e-6) is unchanged.
 * Note: zip truncates to the shorter array, as the original did.
 */
def approxEqual(array1: Array[Double], array2: Array[Double],
  tolerance: Double = 1e-6): Boolean = {
  array1.zip(array2).forall { case (v1, v2) =>
    math.abs(math.abs(v1) - math.abs(v2)) <= tolerance
  }
}
// Reconstruct the projection from the SVD: each row of U scaled
// element-wise (:*) by the singular values equals matrix * V.
val breezeS = breeze.linalg.DenseVector(svd.s.toArray)
val projectedSVD = svd.U.rows.map { v =>
  val breezeV = breeze.linalg.DenseVector(v.toArray)
  val multV = breezeV :* breezeS
  Vectors.dense(multV.data)
}
// Count rows where the PCA projection and U*S agree. The filter must be on
// the comparison result itself — `filter(b => true)` would keep every row
// and count nothing meaningful.
projected.rows.zip(projectedSVD).map { case (v1, v2) =>
  approxEqual(v1.toArray, v2.toArray) }.filter(b => b).count
// The top singular values are the same regardless of k — demonstrate by
// computing the SVD for k = 1..5 and printing each spectrum.
val sValues = (1 to 5).map { i => matrix.computeSVD(i, computeU =
  false).s }
sValues.foreach(println)
// Compute the top 300 singular values and write them out as a 1x300 CSV row
// for the Python plotting cells below.
val svd300 = matrix.computeSVD(300, computeU = false)
val sMatrix = new DenseMatrix(1, 300, svd300.s.toArray)
csvwrite(new File("/tmp/s.csv"), sMatrix)
%pyspark
import numpy as np
import matplotlib.pyplot as plt
# Plot the singular-value spectrum written out by the Scala side.
s = np.loadtxt("/tmp/s.csv", delimiter=",")
print(s.shape)
plt.plot(s)
plt.show()
%pyspark
import numpy as np
import matplotlib.pyplot as plt
s = np.loadtxt("/tmp/s.csv", delimiter=",")
# Cumulative sum on a log scale shows where the spectrum flattens, i.e. how
# many components capture most of the variation in the face data.
plt.plot(np.cumsum(s))
plt.yscale('log')
plt.show()
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.recommendation.{ALS, Rating}
import java.awt.image.BufferedImage
import java.awt.image
import javax.imageio.ImageIO
import java.io.File
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.linalg.Matrix
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import org.apache.spark.mllib.feature.StandardScaler
import org.apache.spark.mllib.linalg.Matrix
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import breeze.linalg.DenseMatrix
import org.jblas.DoubleMatrix
import breeze.linalg.csvwrite
import org.apache.spark.mllib.recommendation.Rating
/**
 * LFW eigenfaces pipeline (first pasted version): extract 50x50 grayscale
 * pixel vectors, run PCA, project each face, and rank faces by cosine
 * similarity against the first face.
 *
 * Created by ysp on 16-10-30.
 * Reconstruction notes: the scrape dropped numeric literals (restored from
 * the book's LFW example); the mutable `var i` counters inside RDD maps —
 * broken in distributed execution and referencing an undeclared variable —
 * are replaced with zipWithIndex.
 */
object LFW {
  def main(args: Array[String]) {
    val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("lfw"))
    val path = "/usr/data/lfw-a/*"
    val rdd = sc.wholeTextFiles(path)
    // Strip the "file:" scheme to get plain filesystem paths.
    val files = rdd.map { case (fileName, content) => fileName.replace("file:", "") }

    // Read an image from the local filesystem.
    def loadImageFromFile(path: String): BufferedImage =
      ImageIO.read(new File(path))

    // Convert to grayscale and resize to width x height in one draw call.
    def processImage(image: BufferedImage, width: Int, height: Int): BufferedImage = {
      val bwImage = new BufferedImage(width, height, BufferedImage.TYPE_BYTE_GRAY)
      val g = bwImage.getGraphics()
      g.drawImage(image, 0, 0, width, height, null)
      g.dispose()
      bwImage
    }

    // Flatten the raster into a Double array (getPixels fills and returns it).
    def getPixelsFromImage(image: BufferedImage): Array[Double] = {
      val width = image.getWidth
      val height = image.getHeight
      val pixels = Array.ofDim[Double](width * height)
      image.getData.getPixels(0, 0, width, height, pixels)
    }

    // Full pipeline: load -> grayscale/resize -> pixel vector.
    def extractPixels(path: String, width: Int, height: Int): Array[Double] = {
      val raw = loadImageFromFile(path)
      val processed = processImage(raw, width, height)
      getPixelsFromImage(processed)
    }

    val pixels = files.map(f => extractPixels(f, 50, 50))
    val vectors = pixels.map(p => Vectors.dense(p))
    vectors.setName("image-vectors")
    vectors.cache
    // Mean-center each pixel dimension before PCA.
    val scaler = new StandardScaler(withMean = true, withStd = false).fit(vectors)
    val scaledVectors = vectors.map(v => scaler.transform(v))
    val matrix = new RowMatrix(scaledVectors)
    val K = 10
    val pc = matrix.computePrincipalComponents(K)
    // Project every face into the K-dimensional PCA space.
    val projected = matrix.multiply(pc)
    println(projected.numRows, projected.numCols)
    println(projected.rows.take(5).mkString("\n"))

    // Attach a 1-based id to each of the first projected rows.
    val ps = projected.rows.take(10).zipWithIndex.map { case (x, idx) =>
      (idx + 1, x.toArray)
    }

    // Use the first projected face as the query item.
    val itemFactor = projected.rows.first.toArray
    val itemVector = new DoubleMatrix(itemFactor)

    def cosineSimilarity(vec1: DoubleMatrix, vec2: DoubleMatrix): Double =
      vec1.dot(vec2) / (vec1.norm2() * vec2.norm2())

    // Cosine similarity of every face against the query face.
    val sims = ps.map { case (id, factor) =>
      val factorVector = new DoubleMatrix(factor)
      val sim = cosineSimilarity(factorVector, itemVector)
      (id, sim)
    }
    println(sims.take(10).mkString("\n"))
    val sortedSims = sims.sortBy(-_._2)
    println(sortedSims.take(10).mkString("\n"))

    // Build an id -> file-path lookup table ("id|path" lines).
    val fs = files.zipWithIndex.map { case (add, idx) => s"${idx + 1}|$add" }
    val titles = fs.map(line => line.split("\\|").take(2)).map(array
      => (array(0).toInt,
      array(1))).collectAsMap()
  }
}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.recommendation.{ALS, Rating}
import java.awt.image.BufferedImage
import java.awt.image
import javax.imageio.ImageIO
import java.io.File
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.linalg.Matrix
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import org.apache.spark.mllib.feature.StandardScaler
import org.apache.spark.mllib.linalg.Matrix
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import breeze.linalg.DenseMatrix
import org.jblas.DoubleMatrix
import breeze.linalg.csvwrite
import org.apache.spark.mllib.recommendation.Rating
/**
 * LFW eigenfaces pipeline (second pasted version): same PCA projection as
 * the first version, but the similarity results also carry the image path
 * looked up from an id -> path map.
 *
 * Created by ysp on 2016-12-30.
 * Reconstruction notes: dropped numeric literals restored from the book's
 * LFW example; the broken `var i`/`var j` counters inside RDD maps are
 * replaced with zipWithIndex.
 */
object LFW {
  def main(args: Array[String]) {
    val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("lfw"))
    val path = "/usr/data/lfw-a/*"
    val rdd = sc.wholeTextFiles(path)
    val files = rdd.map { case (fileName, content) => fileName.replace("file:", "") }

    // Read an image from the local filesystem.
    def loadImageFromFile(path: String): BufferedImage =
      ImageIO.read(new File(path))

    // Convert to grayscale and resize to width x height in one draw call.
    def processImage(image: BufferedImage, width: Int, height: Int): BufferedImage = {
      val bwImage = new BufferedImage(width, height, BufferedImage.TYPE_BYTE_GRAY)
      val g = bwImage.getGraphics()
      g.drawImage(image, 0, 0, width, height, null)
      g.dispose()
      bwImage
    }

    // Flatten the raster into a Double array (getPixels fills and returns it).
    def getPixelsFromImage(image: BufferedImage): Array[Double] = {
      val width = image.getWidth
      val height = image.getHeight
      val pixels = Array.ofDim[Double](width * height)
      image.getData.getPixels(0, 0, width, height, pixels)
    }

    // Full pipeline: load -> grayscale/resize -> pixel vector.
    def extractPixels(path: String, width: Int, height: Int): Array[Double] = {
      val raw = loadImageFromFile(path)
      val processed = processImage(raw, width, height)
      getPixelsFromImage(processed)
    }

    val pixels = files.map(f => extractPixels(f, 50, 50))
    val vectors = pixels.map(p => Vectors.dense(p))
    vectors.setName("image-vectors")
    vectors.cache
    val scaler = new StandardScaler(withMean = true, withStd = false).fit(vectors)
    val scaledVectors = vectors.map(v => scaler.transform(v))
    val matrix = new RowMatrix(scaledVectors)
    val K = 10
    val pc = matrix.computePrincipalComponents(K)
    val projected = matrix.multiply(pc)
    println(projected.numRows, projected.numCols)
    println(projected.rows.take(5).mkString("\n"))

    // 1-based id paired with each of the first projected rows.
    val ps = projected.rows.take(10).zipWithIndex.map { case (x, idx) =>
      (idx + 1, x.toArray)
    }

    // Build "id|path" lines, then an id -> path lookup map.
    val fs = files.zipWithIndex.map { case (add, idx) => s"${idx + 1}|$add" }
    val t = fs.map(line => line.split("\\|")).map(array =>
      (array(0), array(1))).collectAsMap()

    // Query item: the first projected face.
    val itemFactor = projected.rows.first.toArray
    println(t(1.toString))
    val itemVector = new DoubleMatrix(itemFactor)

    def cosineSimilarity(vec1: DoubleMatrix, vec2: DoubleMatrix): Double =
      vec1.dot(vec2) / (vec1.norm2() * vec2.norm2())

    // (id, similarity, path) for every candidate face.
    val sims = ps.map { case (id, factor) =>
      val factorVector = new DoubleMatrix(factor)
      val sim = cosineSimilarity(factorVector, itemVector)
      val add = t(id.toString)
      (id, sim, add)
    }
    println(sims.take(10).mkString("\n"))
    val sortedSims = sims.sortBy(-_._2)
    println(sortedSims.take(10).mkString("\n"))
  }
}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.recommendation.{ALS, Rating}
import java.awt.image.BufferedImage
import java.awt.image
import javax.imageio.ImageIO
import java.io.File
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.linalg.Matrix
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import org.apache.spark.mllib.feature.StandardScaler
import org.apache.spark.mllib.linalg.Matrix
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import breeze.linalg.DenseMatrix
import org.jblas.DoubleMatrix
import breeze.linalg.csvwrite
import org.apache.spark.mllib.recommendation.Rating
/**
 * LFW eigenfaces pipeline (third pasted version): identical to the second
 * version except the final report skips the query face itself by slicing
 * results 1..10 of the ranking.
 *
 * Created by ysp on 2016-12-30.
 * Reconstruction notes: dropped numeric literals restored from the book's
 * LFW example; broken mutable counters in RDD maps replaced with
 * zipWithIndex; the missing `.` before mkString on the final line fixed.
 */
object LFW {
  def main(args: Array[String]) {
    val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("lfw"))
    val path = "/usr/data/lfw-a/*"
    val rdd = sc.wholeTextFiles(path)
    val files = rdd.map { case (fileName, content) => fileName.replace("file:", "") }

    // Read an image from the local filesystem.
    def loadImageFromFile(path: String): BufferedImage =
      ImageIO.read(new File(path))

    // Convert to grayscale and resize to width x height in one draw call.
    def processImage(image: BufferedImage, width: Int, height: Int): BufferedImage = {
      val bwImage = new BufferedImage(width, height, BufferedImage.TYPE_BYTE_GRAY)
      val g = bwImage.getGraphics()
      g.drawImage(image, 0, 0, width, height, null)
      g.dispose()
      bwImage
    }

    // Flatten the raster into a Double array (getPixels fills and returns it).
    def getPixelsFromImage(image: BufferedImage): Array[Double] = {
      val width = image.getWidth
      val height = image.getHeight
      val pixels = Array.ofDim[Double](width * height)
      image.getData.getPixels(0, 0, width, height, pixels)
    }

    // Full pipeline: load -> grayscale/resize -> pixel vector.
    def extractPixels(path: String, width: Int, height: Int): Array[Double] = {
      val raw = loadImageFromFile(path)
      val processed = processImage(raw, width, height)
      getPixelsFromImage(processed)
    }

    val pixels = files.map(f => extractPixels(f, 50, 50))
    val vectors = pixels.map(p => Vectors.dense(p))
    vectors.setName("image-vectors")
    vectors.cache
    val scaler = new StandardScaler(withMean = true, withStd = false).fit(vectors)
    val scaledVectors = vectors.map(v => scaler.transform(v))
    val matrix = new RowMatrix(scaledVectors)
    val K = 10
    val pc = matrix.computePrincipalComponents(K)
    val projected = matrix.multiply(pc)
    println(projected.numRows, projected.numCols)
    println(projected.rows.take(5).mkString("\n"))

    // 1-based id paired with each of the first projected rows.
    val ps = projected.rows.take(10).zipWithIndex.map { case (x, idx) =>
      (idx + 1, x.toArray)
    }

    // Build "id|path" lines, then an id -> path lookup map.
    val fs = files.zipWithIndex.map { case (add, idx) => s"${idx + 1}|$add" }
    val t = fs.map(line => line.split("\\|")).map(array =>
      (array(0), array(1))).collectAsMap()

    // Query item: the first projected face.
    val itemFactor = projected.rows.first.toArray
    val itemVector = new DoubleMatrix(itemFactor)

    def cosineSimilarity(vec1: DoubleMatrix, vec2: DoubleMatrix): Double =
      vec1.dot(vec2) / (vec1.norm2() * vec2.norm2())

    // (id, similarity, path) for every candidate face.
    val sims = ps.map { case (id, factor) =>
      val factorVector = new DoubleMatrix(factor)
      val sim = cosineSimilarity(factorVector, itemVector)
      val add = t(id.toString)
      (id, sim, add)
    }
    println(sims.take(10).mkString("\n"))
    val sortedSims = sims.sortBy(-_._2)
    // Skip index 0 (the query itself) and show the next 10 matches.
    println(sortedSims.slice(1, 11).mkString("\n"))
  }
}
// Parse the "id|path" item file into an id -> path map.
// (The scrape dropped the split pattern, the array indices, and the
// closing paren/collectAsMap — restored to match the script below.)
val item = sc.textFile("/usr/data/Art.item")
val t = item.map(line => line.split("\\|")).map(array =>
  (array(0).toInt, array(1))).collectAsMap()
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.recommendation.{ALS, Rating}
import java.awt.image.BufferedImage
import java.awt.image
import javax.imageio.ImageIO
import java.io.File
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.linalg.Matrix
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import org.apache.spark.mllib.feature.StandardScaler
import org.apache.spark.mllib.linalg.Matrix
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import breeze.linalg.DenseMatrix
import org.jblas.DoubleMatrix
import breeze.linalg.csvwrite
import org.apache.spark.mllib.recommendation.Rating
// Art-image similarity script: read "id|path" records, extract 50x50
// grayscale pixel vectors, run PCA, and rank images by cosine similarity
// against the first image. Dropped literals restored; broken mutable
// counters replaced with zipWithIndex-style indexing.
val item = sc.textFile("/home/ysp/Art.item")
// id -> path lookup map.
val t = item.map(line => line.split("\\|")).map(array =>
  (array(0).toInt, array(1))).collectAsMap()
val fs = item.map(line => line.split("\\|"))
val file = fs.map(array => array(1))
println(file.count)
file.first()
t(1)

// Read an image from the local filesystem.
def loadImageFromFile(path: String): BufferedImage =
  ImageIO.read(new File(path))

// Convert to grayscale and resize to width x height in one draw call.
def processImage(image: BufferedImage, width: Int, height: Int): BufferedImage = {
  val bwImage = new BufferedImage(width, height, BufferedImage.TYPE_BYTE_GRAY)
  val g = bwImage.getGraphics()
  g.drawImage(image, 0, 0, width, height, null)
  g.dispose()
  bwImage
}

// Flatten the raster into a Double array (getPixels fills and returns it).
def getPixelsFromImage(image: BufferedImage): Array[Double] = {
  val width = image.getWidth
  val height = image.getHeight
  val pixels = Array.ofDim[Double](width * height)
  image.getData.getPixels(0, 0, width, height, pixels)
}

// Full pipeline: load -> grayscale/resize -> pixel vector.
def extractPixels(path: String, width: Int, height: Int): Array[Double] = {
  val raw = loadImageFromFile(path)
  val processed = processImage(raw, width, height)
  getPixelsFromImage(processed)
}

file.first()
val pixels = file.map(f => extractPixels(f, 50, 50))
val vectors = pixels.map(p => Vectors.dense(p))
vectors.setName("image-vectors")
vectors.cache
val scaler = new StandardScaler(withMean = true, withStd = false).fit(vectors)
val scaledVectors = vectors.map(v => scaler.transform(v))
val matrix = new RowMatrix(scaledVectors)
val K = 10
val pc = matrix.computePrincipalComponents(K)
val projected = matrix.multiply(pc)
println(projected.numRows, projected.numCols)
println(projected.rows.take(5).mkString("\n"))

// Pair each projected row with its 1-based id and its source path.
val ps = projected.rows.take(10).zipWithIndex.map { case (x, idx) =>
  val i = idx + 1
  (i, x.toArray, t(i))
}

// Query item: the first projected image.
val itemFactor = projected.rows.first.toArray
val itemVector = new DoubleMatrix(itemFactor)

def cosineSimilarity(vec1: DoubleMatrix, vec2: DoubleMatrix): Double =
  vec1.dot(vec2) / (vec1.norm2() * vec2.norm2())

// (id, similarity, path) for every candidate image.
val sims = ps.map { case (id, factor, add) =>
  val factorVector = new DoubleMatrix(factor)
  val sim = cosineSimilarity(factorVector, itemVector)
  (id, sim, add)
}
println(sims.take(10).mkString("\n"))
val sortedSims = sims.sortBy(-_._2)
println()
println("推荐:")
// Skip index 0 (the query itself) and print the next 10 recommendations.
println(sortedSims.slice(1, 11).mkString("\n"))
Spark MLlib - LFW的更多相关文章
- 《Spark MLlib机器学习实践》内容简介、目录
http://product.dangdang.com/23829918.html Spark作为新兴的.应用范围最为广泛的大数据处理开源框架引起了广泛的关注,它吸引了大量程序设计和开发人员进行相 ...
- Spark MLlib 之 Basic Statistics
Spark MLlib提供了一些基本的统计学的算法,下面主要说明一下: 1.Summary statistics 对于RDD[Vector]类型,Spark MLlib提供了colStats的统计方法 ...
- Spark MLlib Data Type
MLlib 支持存放在单机上的本地向量和矩阵,也支持通过多个RDD实现的分布式矩阵.因此MLlib的数据类型主要分为两大类:一个是本地单机向量:另一个是分布式矩阵.下面分别介绍一下这两大类都有哪些类型 ...
- Spark MLlib - Decision Tree源码分析
http://spark.apache.org/docs/latest/mllib-decision-tree.html 以决策树作为开始,因为简单,而且也比较容易用到,当前的boosting或ran ...
- Spark入门实战系列--8.Spark MLlib(上)--机器学习及SparkMLlib简介
[注]该系列文章以及使用到安装包/测试数据 可以在<倾情大奉送--Spark入门实战系列>获取 .机器学习概念 1.1 机器学习的定义 在维基百科上对机器学习提出以下几种定义: l“机器学 ...
- Spark入门实战系列--8.Spark MLlib(下)--机器学习库SparkMLlib实战
[注]该系列文章以及使用到安装包/测试数据 可以在<倾情大奉送--Spark入门实战系列>获取 .MLlib实例 1.1 聚类实例 1.1.1 算法说明 聚类(Cluster analys ...
- spark mllib配置pom.xml错误 Multiple markers at this line Could not transfer artifact net.sf.opencsv:opencsv:jar:2.3 from/to central (https://repo.maven.apache.org/maven2): repo.maven.apache.org
刚刚spark mllib,在maven repository网站http://mvnrepository.com/中查询mllib后得到相关库的最新dependence为: <dependen ...
- Apache Spark源码走读之23 -- Spark MLLib中拟牛顿法L-BFGS的源码实现
欢迎转载,转载请注明出处,徽沪一郎. 概要 本文就拟牛顿法L-BFGS的由来做一个简要的回顾,然后就其在spark mllib中的实现进行源码走读. 拟牛顿法 数学原理 代码实现 L-BFGS算法中使 ...
- Spark MLlib基本功系列编程入门之 SVM实现分类
话不多说.直接上代码咯.欢迎交流. /** * Created by whuscalaman on 1/7/16. */import org.apache.spark.{SparkConf, Spar ...
随机推荐
- Windows Phone Toolkit 的 DatePicker 控件本地化的问题
用到 The Windows Phone Toolkit 里的 DatePicker 控件,但是多语言的时候出现了问题: 手机设置为中文,虽然月份跟星期有效,但是 Title 却还是默认的语言:CHO ...
- 通过pycharm使用git[图文详解]
前言 使用git+pycharm有一段时间了,算是稍有点心得,这边整理一下,可能有的方法不是最优,欢迎交流,可能还是习惯敲命令去使用git,不过其实pycharm已经帮忙做了很多了,我们可以不用记住那 ...
- 不到一百行实现一个小siri
想要容易理解核心的特征计算的话建议先去看看我之前的听歌识曲的文章,传送门:http://www.cnblogs.com/chuxiuhong/p/6063602.html 本文主要是实现了一个简单的命 ...
- Razor语法中绑定一个值给checkbox
在ASP.NET MVC开发中,需要绑定一个值给checkbox标签,如下面写法,它们运行时是没有问题,照样能跑. 看看上面的语法,在绑定时,它却出现绿浪线.提不绑定的值is not a valid ...
- C++进程间通信
# C++进程间通信 # 进程间通讯的四种方式:剪贴板.匿名管道.命名管道和邮槽 ## 剪切板 ## //设置剪切板内容 CString str; this->GetDlgItemText(ID ...
- 小试ASP.NET MVC——一个邀请页面的实现
上篇博客我们大体介绍了ASP.NET MVC以及如何去新建项目,这篇博客我们讲点干货.小试ASP.NET MVC,我们来写一个简单的邀请WEB. 先来建立一个Models,叫GuestResponse ...
- Java语言中的面向对象特性总结
Java语言中的面向对象特性 (总结得不错) [课前思考] 1. 什么是对象?什么是类?什么是包?什么是接口?什么是内部类? 2. 面向对象编程的特性有哪三个?它们各自又有哪些特性? 3. 你知 ...
- 企业级应用架构模式N-Tier多层架构
先来看经典的3层架构,看下图: 涉及到平台可以是: Ruby on Rails, Java EE, ASP.NET, PHP, ColdFusion, Perl, Python 层 ...
- JDBC关于时间的存取
Oracle数据库默认时间存储是java.sql.date,而java程序中的时间默认是java.util.date,所以通过JDBC存取的 时候会涉及到时间的转换问题. 1.日期存取 存入数据库的时 ...
- atitit.日期,星期,时候的显示方法ISO 8601标准
atitit.日期,星期,时候的显示方法ISO 8601标准 1. ISO 86011 2. DAte日期的显示1 2.1. Normal1 2.2. 顺序日期表示法(可以将一年内的天数直接表示)1 ...