KMeans聚类算法Hadoop实现
Assistance.java 辅助类,功能详见注释
package KMeans;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;

import java.io.IOException;
import java.util.*;

/**
 * Helper routines shared by the KMeans MapReduce job: reading cluster
 * centers from HDFS, deleting a previous iteration's output, and testing
 * the convergence criterion between two consecutive iterations.
 */
public class Assistance {
    /**
     * Reads the cluster centers from the given HDFS path.
     * Each line is expected to hold: centerID dim1 dim2 ... dimN.
     * Hadoop writes a tab between key and value, so tabs are normalized
     * to spaces before splitting.
     *
     * @param inputpath HDFS path of the centers file
     * @return one row per center; index 0 of each row is the center ID
     */
    public static List<ArrayList<Float>> getCenters(String inputpath){
        List<ArrayList<Float>> result = new ArrayList<ArrayList<Float>>();
        Configuration conf = new Configuration();
        FSDataInputStream fsIn = null;
        try {
            FileSystem hdfs = FileSystem.get(conf);
            Path in = new Path(inputpath);
            fsIn = hdfs.open(in);
            LineReader lineIn = new LineReader(fsIn, conf);
            Text line = new Text();
            while (lineIn.readLine(line) > 0){
                String record = line.toString();
                // Replace the key/value tab separator with a space before splitting.
                String[] fields = record.replace("\t", " ").split(" ");
                ArrayList<Float> tmplist = new ArrayList<Float>();
                for (int i = 0; i < fields.length; ++i){
                    if (fields[i].isEmpty()){
                        continue; // skip empty tokens from consecutive separators / blank lines
                    }
                    tmplist.add(Float.parseFloat(fields[i]));
                }
                if (!tmplist.isEmpty()){
                    result.add(tmplist);
                }
            }
        } catch (IOException e){
            e.printStackTrace();
        } finally {
            // Close in finally: the original leaked the stream when reading failed.
            if (fsIn != null){
                try {
                    fsIn.close();
                } catch (IOException ignored){
                    // best effort; nothing sensible to do if close itself fails
                }
            }
        }
        return result;
    }

    /** Recursively deletes the output of the previous MapReduce iteration. */
    public static void deleteLastResult(String path){
        Configuration conf = new Configuration();
        try {
            FileSystem hdfs = FileSystem.get(conf);
            hdfs.delete(new Path(path), true);
        } catch (IOException e){
            e.printStackTrace();
        }
    }

    /**
     * Compares the cluster centers of two consecutive iterations.
     * Returns true when the summed squared per-dimension difference is
     * below {@code threshold}. Otherwise promotes the new centers to be
     * the "old" centers (via a local temp copy) and returns false.
     *
     * @param oldpath   HDFS path of the previous iteration's centers
     * @param newpath   HDFS path of this iteration's centers
     * @param k         number of clusters
     * @param threshold convergence threshold on the summed squared distance
     */
    public static boolean isFinished(String oldpath, String newpath, int k, float threshold)
            throws IOException{
        List<ArrayList<Float>> oldcenters = Assistance.getCenters(oldpath);
        List<ArrayList<Float>> newcenters = Assistance.getCenters(newpath);
        float distance = 0;
        for (int i = 0; i < k; ++i){
            // j starts at 1: index 0 holds the center ID, not a coordinate.
            for (int j = 1; j < oldcenters.get(i).size(); ++j){
                float tmp = Math.abs(oldcenters.get(i).get(j) - newcenters.get(i).get(j));
                distance += Math.pow(tmp, 2);
            }
        }
        System.out.println("Distance = " + distance + " Threshold = " + threshold);
        if (distance < threshold){
            return true;
        }
        // Not converged: replace the old centers with this iteration's centers.
        Assistance.deleteLastResult(oldpath);
        Configuration conf = new Configuration();
        FileSystem hdfs = FileSystem.get(conf);
        hdfs.copyToLocalFile(new Path(newpath), new Path("/home/hadoop/class/oldcenter.data"));
        hdfs.delete(new Path(oldpath), true);
        hdfs.moveFromLocalFile(new Path("/home/hadoop/class/oldcenter.data"), new Path(oldpath));
        return false;
    }
}
KMeansDriver.java 作业驱动类
package KMeans;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import java.io.IOException;

/**
 * Driver for the iterative KMeans job: repeatedly submits a map+reduce
 * iteration until the centers converge (or an iteration cap is hit),
 * then runs one final map-only job to label every sample.
 */
public class KMeansDriver{
    /** Hard cap on iterations so a non-converging run still terminates. */
    private static final int MAX_ITERATIONS = 10;

    public static void main(String[] args) throws Exception{
        int repeated = 0;
        // Keep submitting iteration jobs until the distance between the
        // centers of two consecutive iterations drops below the threshold
        // or the iteration cap is reached.
        do {
            Job job = buildJob(args, true);
            if (!job.waitForCompletion(true)){
                // Abort instead of silently iterating on stale centers.
                System.err.println("KMeans iteration job failed; aborting.");
                System.exit(1);
            }
            ++repeated;
            System.out.println("We have repeated " + repeated + " times.");
        } while (repeated < MAX_ITERATIONS
                && !Assistance.isFinished(args[2], args[3],
                        Integer.parseInt(args[4]), Float.parseFloat(args[5])));
        // Final pass: assign each sample to its nearest converged center.
        Cluster(args);
    }

    /**
     * Runs the final clustering pass. Samples only need to be labeled with
     * their nearest center, so no reducer is configured (map-only job).
     */
    public static void Cluster(String[] args)
            throws IOException, InterruptedException, ClassNotFoundException{
        Job job = buildJob(args, false);
        job.waitForCompletion(true);
    }

    /**
     * Builds a fully configured KMeans job from the command-line arguments.
     * Expected args: in out oldcenters newcenters k threshold.
     *
     * @param withReducer true for an iteration job (KMeansReducer recomputes
     *                    the centers); false for the final map-only labeling job
     */
    private static Job buildJob(String[] args, boolean withReducer)
            throws IOException{
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 6){
            System.err.println("Usage: <in> <out> <oldcenters> <newcenters> <k> <threshold>");
            System.exit(2);
        }
        // Pass the centers path and k to the mapper via the job configuration.
        conf.set("centerpath", otherArgs[2]);
        conf.set("kpath", otherArgs[4]);
        Job job = new Job(conf, "KMeansCluster");
        job.setJarByClass(KMeansDriver.class);
        Path in = new Path(otherArgs[0]);
        Path out = new Path(otherArgs[1]);
        FileInputFormat.addInputPath(job, in);
        FileSystem fs = FileSystem.get(conf);
        if (fs.exists(out)){
            // Hadoop refuses to write into an existing output dir; remove it first.
            fs.delete(out, true);
        }
        FileOutputFormat.setOutputPath(job, out);
        job.setMapperClass(KMeansMapper.class);
        if (withReducer){
            job.setReducerClass(KMeansReducer.class);
        }
        job.setOutputKeyClass(IntWritable.class);
        job.setOutputValueClass(Text.class);
        return job;
    }
}
KMeansMapper.java
package KMeans; import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper; import java.io.IOException;
import java.util.ArrayList;
import java.util.List; public class KMeansMapper extends Mapper<Object, Text, IntWritable, Text> {
public void map(Object key, Text value, Context context)
throws IOException, InterruptedException{
String line = value.toString();
String[] fields = line.split(" ");
List<ArrayList<Float>> centers = Assistance.getCenters(context.getConfiguration().get("centerpath"));
int k = Integer.parseInt(context.getConfiguration().get("kpath"));
float minDist = Float.MAX_VALUE;
int centerIndex = k;
//计算样本点到各个中心的距离,并把样本聚类到距离最近的中心点所属的类
for (int i = 0; i < k; ++i){
float currentDist = 0;
for (int j = 0; j < fields.length; ++j){
float tmp = Math.abs(centers.get(i).get(j + 1) - Float.parseFloat(fields[j]));
currentDist += Math.pow(tmp, 2);
}
if (minDist > currentDist){
minDist = currentDist;
centerIndex = i;
}
}
context.write(new IntWritable(centerIndex), new Text(value));
}
}
KMeansReducer.java
package KMeans; import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer; import java.io.IOException;
import java.util.ArrayList;
import java.util.List; public class KMeansReducer extends Reducer<IntWritable, Text, IntWritable, Text> {
public void reduce(IntWritable key, Iterable<Text> value, Context context)
throws IOException, InterruptedException{
List<ArrayList<Float>> assistList = new ArrayList<ArrayList<Float>>();
String tmpResult = "";
for (Text val : value){
String line = val.toString();
String[] fields = line.split(" ");
List<Float> tmpList = new ArrayList<Float>();
for (int i = 0; i < fields.length; ++i){
tmpList.add(Float.parseFloat(fields[i]));
}
assistList.add((ArrayList<Float>) tmpList);
}
//计算新的聚类中心
for (int i = 0; i < assistList.get(0).size(); ++i){
float sum = 0;
for (int j = 0; j < assistList.size(); ++j){
sum += assistList.get(j).get(i);
}
float tmp = sum / assistList.size();
if (i == 0){
tmpResult += tmp;
}
else{
tmpResult += " " + tmp;
}
}
Text result = new Text(tmpResult);
context.write(key, result);
}
}
作业运行情况:
hadoop@shaobo-ThinkPad-E420:~/class$ hadoop jar KMeans.jar KMeans.KMeansDriver input/iris.data output input/oldcenter.data output/part-r-00000 3 0.0001
14/04/17 16:15:50 INFO input.FileInputFormat: Total input paths to process : 1
14/04/17 16:15:51 INFO mapred.JobClient: Running job: job_201404171511_0012
14/04/17 16:15:52 INFO mapred.JobClient: map 0% reduce 0%
14/04/17 16:16:07 INFO mapred.JobClient: map 100% reduce 0%
14/04/17 16:16:19 INFO mapred.JobClient: map 100% reduce 100%
14/04/17 16:16:24 INFO mapred.JobClient: Job complete: job_201404171511_0012
14/04/17 16:16:24 INFO mapred.JobClient: Counters: 25
14/04/17 16:16:24 INFO mapred.JobClient: Job Counters
14/04/17 16:16:24 INFO mapred.JobClient: Launched reduce tasks=1
14/04/17 16:16:24 INFO mapred.JobClient: SLOTS_MILLIS_MAPS=12041
14/04/17 16:16:24 INFO mapred.JobClient: Total time spent by all reduces waiting after reserving slots (ms)=0
14/04/17 16:16:24 INFO mapred.JobClient: Total time spent by all maps waiting after reserving slots (ms)=0
14/04/17 16:16:24 INFO mapred.JobClient: Launched map tasks=1
14/04/17 16:16:24 INFO mapred.JobClient: Data-local map tasks=1
14/04/17 16:16:24 INFO mapred.JobClient: SLOTS_MILLIS_REDUCES=10030
14/04/17 16:16:24 INFO mapred.JobClient: File Output Format Counters
14/04/17 16:16:24 INFO mapred.JobClient: Bytes Written=125
14/04/17 16:16:24 INFO mapred.JobClient: FileSystemCounters
14/04/17 16:16:24 INFO mapred.JobClient: FILE_BYTES_READ=3306
14/04/17 16:16:24 INFO mapred.JobClient: HDFS_BYTES_READ=11214
14/04/17 16:16:24 INFO mapred.JobClient: FILE_BYTES_WRITTEN=48901
14/04/17 16:16:24 INFO mapred.JobClient: HDFS_BYTES_WRITTEN=125
14/04/17 16:16:24 INFO mapred.JobClient: File Input Format Counters
14/04/17 16:16:24 INFO mapred.JobClient: Bytes Read=2550
14/04/17 16:16:24 INFO mapred.JobClient: Map-Reduce Framework
14/04/17 16:16:24 INFO mapred.JobClient: Reduce input groups=3
14/04/17 16:16:24 INFO mapred.JobClient: Map output materialized bytes=3306
14/04/17 16:16:24 INFO mapred.JobClient: Combine output records=0
14/04/17 16:16:24 INFO mapred.JobClient: Map input records=150
14/04/17 16:16:24 INFO mapred.JobClient: Reduce shuffle bytes=0
14/04/17 16:16:24 INFO mapred.JobClient: Reduce output records=3
14/04/17 16:16:24 INFO mapred.JobClient: Spilled Records=300
14/04/17 16:16:24 INFO mapred.JobClient: Map output bytes=3000
14/04/17 16:16:24 INFO mapred.JobClient: Combine input records=0
14/04/17 16:16:24 INFO mapred.JobClient: Map output records=150
14/04/17 16:16:24 INFO mapred.JobClient: SPLIT_RAW_BYTES=114
14/04/17 16:16:24 INFO mapred.JobClient: Reduce input records=150
We have repeated 1 times.
Distance = 0.35025704 Threshold = 1.0E-4
14/04/17 16:16:24 INFO input.FileInputFormat: Total input paths to process : 1
14/04/17 16:16:25 INFO mapred.JobClient: Running job: job_201404171511_0013
14/04/17 16:16:26 INFO mapred.JobClient: map 0% reduce 0%
14/04/17 16:16:40 INFO mapred.JobClient: map 100% reduce 0%
14/04/17 16:16:52 INFO mapred.JobClient: map 100% reduce 100%
14/04/17 16:16:57 INFO mapred.JobClient: Job complete: job_201404171511_0013
14/04/17 16:16:57 INFO mapred.JobClient: Counters: 25
14/04/17 16:16:57 INFO mapred.JobClient: Job Counters
14/04/17 16:16:57 INFO mapred.JobClient: Launched reduce tasks=1
14/04/17 16:16:57 INFO mapred.JobClient: SLOTS_MILLIS_MAPS=12077
14/04/17 16:16:57 INFO mapred.JobClient: Total time spent by all reduces waiting after reserving slots (ms)=0
14/04/17 16:16:57 INFO mapred.JobClient: Total time spent by all maps waiting after reserving slots (ms)=0
14/04/17 16:16:57 INFO mapred.JobClient: Launched map tasks=1
14/04/17 16:16:57 INFO mapred.JobClient: Data-local map tasks=1
14/04/17 16:16:57 INFO mapred.JobClient: SLOTS_MILLIS_REDUCES=10048
14/04/17 16:16:57 INFO mapred.JobClient: File Output Format Counters
14/04/17 16:16:57 INFO mapred.JobClient: Bytes Written=116
14/04/17 16:16:57 INFO mapred.JobClient: FileSystemCounters
14/04/17 16:16:57 INFO mapred.JobClient: FILE_BYTES_READ=3306
14/04/17 16:16:57 INFO mapred.JobClient: HDFS_BYTES_READ=21414
14/04/17 16:16:57 INFO mapred.JobClient: FILE_BYTES_WRITTEN=48901
14/04/17 16:16:57 INFO mapred.JobClient: HDFS_BYTES_WRITTEN=116
14/04/17 16:16:57 INFO mapred.JobClient: File Input Format Counters
14/04/17 16:16:57 INFO mapred.JobClient: Bytes Read=2550
14/04/17 16:16:57 INFO mapred.JobClient: Map-Reduce Framework
14/04/17 16:16:57 INFO mapred.JobClient: Reduce input groups=3
14/04/17 16:16:57 INFO mapred.JobClient: Map output materialized bytes=3306
14/04/17 16:16:57 INFO mapred.JobClient: Combine output records=0
14/04/17 16:16:57 INFO mapred.JobClient: Map input records=150
14/04/17 16:16:57 INFO mapred.JobClient: Reduce shuffle bytes=3306
14/04/17 16:16:57 INFO mapred.JobClient: Reduce output records=3
14/04/17 16:16:57 INFO mapred.JobClient: Spilled Records=300
14/04/17 16:16:57 INFO mapred.JobClient: Map output bytes=3000
14/04/17 16:16:57 INFO mapred.JobClient: Combine input records=0
14/04/17 16:16:57 INFO mapred.JobClient: Map output records=150
14/04/17 16:16:57 INFO mapred.JobClient: SPLIT_RAW_BYTES=114
14/04/17 16:16:57 INFO mapred.JobClient: Reduce input records=150
We have repeated 2 times.
Distance = 0.006297064 Threshold = 1.0E-4
14/04/17 16:16:57 INFO input.FileInputFormat: Total input paths to process : 1
14/04/17 16:16:58 INFO mapred.JobClient: Running job: job_201404171511_0014
14/04/17 16:16:59 INFO mapred.JobClient: map 0% reduce 0%
14/04/17 16:17:14 INFO mapred.JobClient: map 100% reduce 0%
14/04/17 16:17:25 INFO mapred.JobClient: map 100% reduce 100%
14/04/17 16:17:30 INFO mapred.JobClient: Job complete: job_201404171511_0014
14/04/17 16:17:30 INFO mapred.JobClient: Counters: 25
14/04/17 16:17:30 INFO mapred.JobClient: Job Counters
14/04/17 16:17:30 INFO mapred.JobClient: Launched reduce tasks=1
14/04/17 16:17:30 INFO mapred.JobClient: SLOTS_MILLIS_MAPS=12046
14/04/17 16:17:30 INFO mapred.JobClient: Total time spent by all reduces waiting after reserving slots (ms)=0
14/04/17 16:17:30 INFO mapred.JobClient: Total time spent by all maps waiting after reserving slots (ms)=0
14/04/17 16:17:30 INFO mapred.JobClient: Launched map tasks=1
14/04/17 16:17:30 INFO mapred.JobClient: Data-local map tasks=1
14/04/17 16:17:30 INFO mapred.JobClient: SLOTS_MILLIS_REDUCES=10051
14/04/17 16:17:30 INFO mapred.JobClient: File Output Format Counters
14/04/17 16:17:30 INFO mapred.JobClient: Bytes Written=116
14/04/17 16:17:30 INFO mapred.JobClient: FileSystemCounters
14/04/17 16:17:30 INFO mapred.JobClient: FILE_BYTES_READ=3306
14/04/17 16:17:30 INFO mapred.JobClient: HDFS_BYTES_READ=20064
14/04/17 16:17:30 INFO mapred.JobClient: FILE_BYTES_WRITTEN=48901
14/04/17 16:17:30 INFO mapred.JobClient: HDFS_BYTES_WRITTEN=116
14/04/17 16:17:30 INFO mapred.JobClient: File Input Format Counters
14/04/17 16:17:30 INFO mapred.JobClient: Bytes Read=2550
14/04/17 16:17:30 INFO mapred.JobClient: Map-Reduce Framework
14/04/17 16:17:30 INFO mapred.JobClient: Reduce input groups=3
14/04/17 16:17:30 INFO mapred.JobClient: Map output materialized bytes=3306
14/04/17 16:17:30 INFO mapred.JobClient: Combine output records=0
14/04/17 16:17:30 INFO mapred.JobClient: Map input records=150
14/04/17 16:17:30 INFO mapred.JobClient: Reduce shuffle bytes=0
14/04/17 16:17:30 INFO mapred.JobClient: Reduce output records=3
14/04/17 16:17:30 INFO mapred.JobClient: Spilled Records=300
14/04/17 16:17:30 INFO mapred.JobClient: Map output bytes=3000
14/04/17 16:17:30 INFO mapred.JobClient: Combine input records=0
14/04/17 16:17:30 INFO mapred.JobClient: Map output records=150
14/04/17 16:17:30 INFO mapred.JobClient: SPLIT_RAW_BYTES=114
14/04/17 16:17:30 INFO mapred.JobClient: Reduce input records=150
We have repeated 3 times.
Distance = 0.0 Threshold = 1.0E-4
14/04/17 16:17:30 INFO input.FileInputFormat: Total input paths to process : 1
14/04/17 16:17:30 INFO mapred.JobClient: Running job: job_201404171511_0015
14/04/17 16:17:31 INFO mapred.JobClient: map 0% reduce 0%
14/04/17 16:17:47 INFO mapred.JobClient: map 100% reduce 0%
14/04/17 16:17:59 INFO mapred.JobClient: map 100% reduce 100%
14/04/17 16:18:04 INFO mapred.JobClient: Job complete: job_201404171511_0015
14/04/17 16:18:04 INFO mapred.JobClient: Counters: 25
14/04/17 16:18:04 INFO mapred.JobClient: Job Counters
14/04/17 16:18:04 INFO mapred.JobClient: Launched reduce tasks=1
14/04/17 16:18:04 INFO mapred.JobClient: SLOTS_MILLIS_MAPS=12036
14/04/17 16:18:04 INFO mapred.JobClient: Total time spent by all reduces waiting after reserving slots (ms)=0
14/04/17 16:18:04 INFO mapred.JobClient: Total time spent by all maps waiting after reserving slots (ms)=0
14/04/17 16:18:04 INFO mapred.JobClient: Launched map tasks=1
14/04/17 16:18:04 INFO mapred.JobClient: Data-local map tasks=1
14/04/17 16:18:04 INFO mapred.JobClient: SLOTS_MILLIS_REDUCES=10050
14/04/17 16:18:04 INFO mapred.JobClient: File Output Format Counters
14/04/17 16:18:04 INFO mapred.JobClient: Bytes Written=2700
14/04/17 16:18:04 INFO mapred.JobClient: FileSystemCounters
14/04/17 16:18:04 INFO mapred.JobClient: FILE_BYTES_READ=3306
14/04/17 16:18:04 INFO mapred.JobClient: HDFS_BYTES_READ=20064
14/04/17 16:18:04 INFO mapred.JobClient: FILE_BYTES_WRITTEN=48717
14/04/17 16:18:04 INFO mapred.JobClient: HDFS_BYTES_WRITTEN=2700
14/04/17 16:18:04 INFO mapred.JobClient: File Input Format Counters
14/04/17 16:18:04 INFO mapred.JobClient: Bytes Read=2550
14/04/17 16:18:04 INFO mapred.JobClient: Map-Reduce Framework
14/04/17 16:18:04 INFO mapred.JobClient: Reduce input groups=3
14/04/17 16:18:04 INFO mapred.JobClient: Map output materialized bytes=3306
14/04/17 16:18:04 INFO mapred.JobClient: Combine output records=0
14/04/17 16:18:04 INFO mapred.JobClient: Map input records=150
14/04/17 16:18:04 INFO mapred.JobClient: Reduce shuffle bytes=0
14/04/17 16:18:04 INFO mapred.JobClient: Reduce output records=150
14/04/17 16:18:04 INFO mapred.JobClient: Spilled Records=300
14/04/17 16:18:04 INFO mapred.JobClient: Map output bytes=3000
14/04/17 16:18:04 INFO mapred.JobClient: Combine input records=0
14/04/17 16:18:04 INFO mapred.JobClient: Map output records=150
14/04/17 16:18:04 INFO mapred.JobClient: SPLIT_RAW_BYTES=114
14/04/17 16:18:04 INFO mapred.JobClient: Reduce input records=150
KMeans聚类算法Hadoop实现的更多相关文章
- Hadoop平台K-Means聚类算法分布式实现+MapReduce通俗讲解
Hadoop平台K-Means聚类算法分布式实现+MapReduce通俗讲解 在Hadoop分布式环境下实现K-Means聚类算法的伪代码如下: 输入:参数0--存储样本数据的文本文件inpu ...
- 数据分析与挖掘 - R语言:K-means聚类算法
一个简单的例子!环境:CentOS6.5Hadoop集群.Hive.R.RHive,具体安装及调试方法见博客内文档. 1.分析题目--有一个用户点击数据样本(husercollect)--按用户访问的 ...
- 改用MyAnalyzer的KMeans聚类算法
<strong><span style="font-size:18px;">/*** * @author YangXin * @info 改用MyAnaly ...
- K-Means 聚类算法
K-Means 概念定义: K-Means 是一种基于距离的排他的聚类划分方法. 上面的 K-Means 描述中包含了几个概念: 聚类(Clustering):K-Means 是一种聚类分析(Clus ...
- k-means聚类算法python实现
K-means聚类算法 算法优缺点: 优点:容易实现缺点:可能收敛到局部最小值,在大规模数据集上收敛较慢使用数据类型:数值型数据 算法思想 k-means算法实际上就是通过计算不同样本间的距离来判断他 ...
- K-Means 聚类算法原理分析与代码实现
前言 在前面的文章中,涉及到的机器学习算法均为监督学习算法. 所谓监督学习,就是有训练过程的学习.再确切点,就是有 "分类标签集" 的学习. 现在开始,将进入到非监督学习领域.从经 ...
- Kmeans聚类算法原理与实现
Kmeans聚类算法 1 Kmeans聚类算法的基本原理 K-means算法是最为经典的基于划分的聚类方法,是十大经典数据挖掘算法之一.K-means算法的基本思想是:以空间中k个点为中心进行聚类,对 ...
- 机器学习六--K-means聚类算法
机器学习六--K-means聚类算法 想想常见的分类算法有决策树.Logistic回归.SVM.贝叶斯等.分类作为一种监督学习方法,要求必须事先明确知道各个类别的信息,并且断言所有待分类项都有一个类别 ...
- 转载: scikit-learn学习之K-means聚类算法与 Mini Batch K-Means算法
版权声明:<—— 本文为作者呕心沥血打造,若要转载,请注明出处@http://blog.csdn.net/gamer_gyt <—— 目录(?)[+] ================== ...
随机推荐
- delphi 获取 TreeView选中的文件路径
//获取 TreeView选中的文件路径 unit Unit1; interface uses Windows, Messages, SysUtils, Variants, Classes, G ...
- yii 隐藏index.php的步骤
Apache 1.开启apache的mod_rewrite模块 去掉LoadModule rewrite_module modules/mod_rewrite.so前的“#”符号 ...
- 自问自答-hadoop自带哪些案例(0.20.2)
1)aggregatewordcount 计算输入文件中文字个数的基于聚合的MapReduce程序 2)aggregatewordhist 生成输入文件中文字个数的统计图的基于聚合的MapReduce ...
- UrlPathEncode与UrlEncode的区别
UrlEncode与UrlPathEncode 的基本作用都是对 URL 字符串进行编码 不同点总结如下: 不同点 UrlEncode UrlPathEncode 处理空格的方式 替换成“+” 替换成 ...
- ArcGIS动态文本
处理动态文本 来自:http://resources.arcgis.com/zh-cn/help/main/10.2/index.html#/na/00s900000013000000/ Deskto ...
- Apache服务器中配置虚拟机的方法
新浪微博虚拟机开发配置步骤及介绍.1.由于后面虚拟机中需要用到Rewrite所以先编辑Apache的conf目录下的httpd.conf文件.(可根据实际需要操作)添加mod_rewrite.so模块 ...
- 解决Deprecated: mysql_connect(): The mysql extension is deprecated and will be removed in the future: use mysqli or PDO instead in
php 5个版本,5.2.5.3.5.4.5.5,怕跟不上时代,新的服务器直接上5.5,但是程序出现如下错误:Deprecated: mysql_connect(): The mysql extens ...
- javaweb学习总结十五(web开发的相关概念以及常用服务器介绍)
一:java web开发的相关概念 1:web分为静态web和动态web 2:模拟web服务器 web页面如果想让外部网络访问,必须通过网络程序读取资源,流程: a:用户通过浏览器访问网络程序 b:网 ...
- vb.net Linq 筛选(像 select distinct) DateTable 日期数据中的年份
Private Sub initDDLByYear(ByVal dt As DataTable) ddlByYear.Items.Clear() ddlByYear.Items.Add(") ...
- 【转】六年软件测试感悟-从博彦到VMware
不知不觉已经从事软件测试六年了,2006毕业到进入外包公司外包给微软做软件测试, 到现在加入著名的外企.六年的时间过得真快. 长期的测试工作也让我对软件测试有了比较深入的认识.但是我至今还是一个底层的 ...