Implementing Naive Bayes on MapReduce

The implementation is split into four MapReduce jobs: PriorProbability counts the number of words in each class (used later for the prior), Count computes the vocabulary size, CondiPro computes the Laplace-smoothed conditional probability of each word given each class, and Predict combines these outputs to classify new documents.
// File: PriorProbability.java — driver for the per-class word count job
package hadoop.MachineLearning.Bayes.Pro;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class PriorProbability {//counts the number of words in each class, used later for the prior probability
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String input="hdfs://10.107.8.110:9000/Bayes/Bayes_input/";
        String output="hdfs://10.107.8.110:9000/Bayes/Bayes_output/Pro/";
        Job job = Job.getInstance(conf, "PriorProbability");
        job.setJarByClass(hadoop.MachineLearning.Bayes.Pro.PriorProbability.class);
        job.setMapperClass(MyMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setReducerClass(MyReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.setInputPaths(job, new Path(input));
        FileOutputFormat.setOutputPath(job, new Path(output));
        if (!job.waitForCompletion(true))
            return;
    }
}

// File: MyMapper.java — emits (class label, number of words in the document)
package hadoop.MachineLearning.Bayes.Pro;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class MyMapper extends Mapper<LongWritable, Text, Text, Text> {
    public void map(LongWritable ikey, Text ivalue, Context context)
            throws IOException, InterruptedException {
        String[] line=ivalue.toString().split(":| ");//each training line looks like "label:word1 word2 ..."
        int size=line.length-1;//number of words in this document
        context.write(new Text(line[0]),new Text(String.valueOf(size)));
    }
}

// File: MyReducer.java — sums the word counts per class
package hadoop.MachineLearning.Bayes.Pro;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class MyReducer extends Reducer<Text, Text, Text, IntWritable> {
    public void reduce(Text _key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        int sum=0;//total number of words observed for this class
        for (Text val : values) {
            sum+=Integer.parseInt(val.toString());
        }
        context.write(_key,new IntWritable(sum));
    }
}
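To make the data flow concrete, here is a minimal local sketch of what this first job computes, assuming the "label:word1 word2 ..." training format the mapper splits on. The class name, the sample documents, and the labels are made up for illustration; they are not part of the original code.

import java.util.HashMap;
import java.util.Map;

public class PriorCountSketch {
    public static void main(String[] args) {
        // Hypothetical training lines in the same "label:word word ..." format.
        String[] trainingLines = {
                "1:cheap pills buy now",
                "1:buy cheap watches",
                "2:meeting agenda for monday"
        };
        // Equivalent of mapper + reducer: sum the word counts per class label.
        Map<String, Integer> wordsPerClass = new HashMap<>();
        for (String line : trainingLines) {
            String[] parts = line.split(":| ");
            int words = parts.length - 1;                        // mapper: words in this document
            wordsPerClass.merge(parts[0], words, Integer::sum);  // reducer: sum per label
        }
        System.out.println(wordsPerClass);                       // e.g. {1=7, 2=4}
    }
}

On this sample, class 1 contains 7 words and class 2 contains 4, which is exactly the shape of output the Pro job writes (one "label TAB count" line per class).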
// File: Count.java — driver for the vocabulary size job
package hadoop.MachineLearning.Bayes.Count;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class Count {//counts the number of distinct words (vocabulary size) in the training set
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "Count");
        String input="hdfs://10.107.8.110:9000/Bayes/Bayes_input";
        String output="hdfs://10.107.8.110:9000/Bayes/Bayes_output/Count";
        job.setJarByClass(hadoop.MachineLearning.Bayes.Count.Count.class);
        job.setMapperClass(MyMapper.class);
        job.setCombinerClass(MyCombiner.class);
        job.setReducerClass(MyReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.setInputPaths(job, new Path(input));
        FileOutputFormat.setOutputPath(job, new Path(output));
        if (!job.waitForCompletion(true))
            return;
    }
}

// File: MyMapper.java — emits every word under one constant key
package hadoop.MachineLearning.Bayes.Count;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class MyMapper extends Mapper<LongWritable, Text, Text, Text> {
    public void map(LongWritable ikey, Text ivalue, Context context)
            throws IOException, InterruptedException {
        String[] line=ivalue.toString().split(":| ");
        String key="1";
        for(int i=1;i<line.length;i++){
            context.write(new Text(key),new Text(line[i]));//same key for every word, so all words reach a single reducer
        }
    }
}

// File: MyCombiner.java — removes duplicate words on the local node
package hadoop.MachineLearning.Bayes.Count;

import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class MyCombiner extends Reducer<Text, Text, Text, Text> {//deduplicate words locally with a set before they are sent to the reducer
    public void reduce(Text _key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        Set<String> set=new HashSet<String>();
        for (Text val : values) {
            set.add(val.toString());
        }
        for(Iterator<String> it=set.iterator();it.hasNext();){
            context.write(new Text("1"),new Text(it.next()));
        }
    }
}

// File: MyReducer.java — final deduplication; the set size is the vocabulary size
package hadoop.MachineLearning.Bayes.Count;

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class MyReducer extends Reducer<Text, Text, Text, Text> {//after the combiner, deduplicate again with a set to obtain the number of distinct words
    public void reduce(Text _key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        Set<String> set=new HashSet<String>();
        for (Text val : values) {
            set.add(val.toString());
        }
        context.write(new Text("num is "),new Text(String.valueOf(set.size())));
    }
}
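A corresponding local sketch of the vocabulary count, reusing the hypothetical sample documents from the previous example (again, the class name and data are illustrative only):

import java.util.HashSet;
import java.util.Set;

public class VocabularyCountSketch {
    public static void main(String[] args) {
        String[] trainingLines = {
                "1:cheap pills buy now",
                "1:buy cheap watches",
                "2:meeting agenda for monday"
        };
        // Equivalent of combiner + reducer: collect every word into one set and take its size.
        Set<String> vocabulary = new HashSet<>();
        for (String line : trainingLines) {
            String[] parts = line.split(":| ");
            for (int i = 1; i < parts.length; i++) {
                vocabulary.add(parts[i]);
            }
        }
        System.out.println("num is " + vocabulary.size()); // 9 distinct words for this sample
    }
}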
// File: CondiPro.java — driver for the conditional probability job
package hadoop.MachineLearning.Bayes.Cond;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class CondiPro {//computes the conditional probability of each word given each class
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String input="hdfs://10.107.8.110:9000/Bayes/Bayes_input";
        String output="hdfs://10.107.8.110:9000/Bayes/Bayes_output/Con";
        String proPath="hdfs://10.107.8.110:9000/Bayes/Bayes_output/Pro";//output of the earlier per-class word count job
        String countPath="hdfs://10.107.8.110:9000/Bayes/Bayes_output/Count";//output of the earlier vocabulary size job
        conf.set("propath",proPath);
        conf.set("countPath",countPath);
        Job job = Job.getInstance(conf, "ConditionPro");
        job.setJarByClass(hadoop.MachineLearning.Bayes.Cond.CondiPro.class);
        job.setMapperClass(MyMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setReducerClass(MyReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(DoubleWritable.class);//matches the reducer's DoubleWritable output
        FileInputFormat.setInputPaths(job, new Path(input));
        FileOutputFormat.setOutputPath(job, new Path(output));
        if (!job.waitForCompletion(true))
            return;
    }
}

// File: MyMapper.java — emits ("label:word", 1) for every word occurrence
package hadoop.MachineLearning.Bayes.Cond;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class MyMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
    public void map(LongWritable ikey, Text ivalue, Context context)
            throws IOException, InterruptedException {
        String[] line=ivalue.toString().split(":| ");
        for(int i=1;i<line.length;i++){
            String key=line[0]+":"+line[i];//key is "label:word"
            context.write(new Text(key),new IntWritable(1));
        }
    }
}

// File: MyReducer.java — turns per-(label, word) counts into smoothed conditional probabilities
package hadoop.MachineLearning.Bayes.Cond;

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class MyReducer extends Reducer<Text, IntWritable, Text, DoubleWritable> {
    public Map<String,Integer> map;
    public int count=0;

    public void setup(Context context) throws IOException{
        Configuration conf=context.getConfiguration();
        String proPath=conf.get("propath");
        String countPath=conf.get("countPath");
        map=Utils.getMapFormHDFS(proPath);//number of words in each class
        count=Utils.getCountFromHDFS(countPath);//vocabulary size
    }

    public void reduce(Text _key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        int sum=0;//number of times this word appears in this class
        for (IntWritable val : values) {
            sum+=val.get();
        }
        int type=Integer.parseInt(_key.toString().split(":")[0]);
        double probability=0.0;
        for(Map.Entry<String,Integer> entry:map.entrySet()){
            if(type==Integer.parseInt(entry.getKey())){
                probability=(sum+1)*1.0/(entry.getValue()+count);//Laplace-smoothed conditional probability
            }
        }
        context.write(_key,new DoubleWritable(probability));
    }
}
// File: Utils.java — helpers that read the previous jobs' output back from HDFS
package hadoop.MachineLearning.Bayes.Cond;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;

public class Utils {
    //reads integer-valued output (e.g. the per-class word counts) into a map
    public static Map<String,Integer> getMapFormHDFS(String input) throws IOException{
        Configuration conf=new Configuration();
        Path path=new Path(input);
        FileSystem fs=path.getFileSystem(conf);
        FileStatus[] stats=fs.listStatus(path);
        Map<String,Integer> map=new HashMap<String,Integer>();
        for(int i=0;i<stats.length;i++){
            if(stats[i].isFile()){
                FSDataInputStream infs=fs.open(stats[i].getPath());
                LineReader reader=new LineReader(infs,conf);
                Text line=new Text();
                while(reader.readLine(line)>0){
                    String[] temp=line.toString().split("\t");//TextOutputFormat separates key and value with a tab
                    map.put(temp[0],Integer.parseInt(temp[1]));
                }
                reader.close();
            }
        }
        return map;
    }

    //reads double-valued output (e.g. the conditional probabilities) into a map
    public static Map<String,Double> getMapFormHDFS(String input,boolean j) throws IOException{
        Configuration conf=new Configuration();
        Path path=new Path(input);
        FileSystem fs=path.getFileSystem(conf);
        FileStatus[] stats=fs.listStatus(path);
        Map<String,Double> map=new HashMap<String,Double>();
        for(int i=0;i<stats.length;i++){
            if(stats[i].isFile()){
                FSDataInputStream infs=fs.open(stats[i].getPath());
                LineReader reader=new LineReader(infs,conf);
                Text line=new Text();
                while(reader.readLine(line)>0){
                    String[] temp=line.toString().split("\t");
                    map.put(temp[0],Double.parseDouble(temp[1]));
                }
                reader.close();
            }
        }
        return map;
    }

    //reads the vocabulary size written by the Count job
    public static int getCountFromHDFS(String input) throws IOException{
        Configuration conf=new Configuration();
        Path path=new Path(input);
        FileSystem fs=path.getFileSystem(conf);
        FileStatus[] stats=fs.listStatus(path);
        int count=0;
        for(int i=0;i<stats.length;i++){
            if(stats[i].isFile()){
                FSDataInputStream infs=fs.open(stats[i].getPath());
                LineReader reader=new LineReader(infs,conf);
                Text line=new Text();
                while(reader.readLine(line)>0){
                    String[] temp=line.toString().split("\t");
                    count=Integer.parseInt(temp[1]);
                }
                reader.close();
            }
        }
        return count;
    }

    //small command-line check of the two readers
    public static void main(String[] args) throws IOException {
        String proPath="hdfs://10.107.8.110:9000/Bayes/Bayes_output/Pro";
        String countPath="hdfs://10.107.8.110:9000/Bayes/Bayes_output/Count/";
        Map<String,Integer> map=Utils.getMapFormHDFS(proPath);
        for(Map.Entry<String,Integer> entry:map.entrySet()){
            System.out.println(entry.getKey()+"->"+entry.getValue());
        }
        int count=Utils.getCountFromHDFS(countPath);
        System.out.println("count is "+count);
    }
}
// File: Predict.java — driver for the prediction job
package hadoop.MachineLearning.Bayes.Predict;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class Predict {//classifies new documents using the outputs of the three previous jobs
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String input="hdfs://10.107.8.110:9000/Bayes/Predict_input";
        String output="hdfs://10.107.8.110:9000/Bayes/Bayes_output/Predict";
        String condiProPath="hdfs://10.107.8.110:9000/Bayes/Bayes_output/Con";
        String proPath="hdfs://10.107.8.110:9000/Bayes/Bayes_output/Pro";
        String countPath="hdfs://10.107.8.110:9000/Bayes/Bayes_output/Count";
        conf.set("condiProPath",condiProPath);
        conf.set("proPath",proPath);
        conf.set("countPath",countPath);
        Job job = Job.getInstance(conf, "Predict");
        job.setJarByClass(hadoop.MachineLearning.Bayes.Predict.Predict.class);
        job.setMapperClass(MyMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setReducerClass(MyReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(DoubleWritable.class);
        FileInputFormat.setInputPaths(job, new Path(input));
        FileOutputFormat.setOutputPath(job, new Path(output));
        if (!job.waitForCompletion(true))
            return;
    }
}

// File: MyMapper.java — tags each input line with every class label
package hadoop.MachineLearning.Bayes.Predict;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class MyMapper extends Mapper<LongWritable, Text, Text, Text> {
    public Map<String,Integer> map=new HashMap<String,Integer>();

    public void setup(Context context) throws IOException{
        Configuration conf=context.getConfiguration();
        String proPath=conf.get("proPath");
        map=Utils.getMapFormHDFS(proPath);//class labels (with their word counts) from the PriorProbability job
    }

    public void map(LongWritable ikey, Text ivalue, Context context)
            throws IOException, InterruptedException {
        for(Map.Entry<String,Integer> entry:map.entrySet()){
            context.write(new Text(entry.getKey()),ivalue);//emit the line once per class label so the reducer can score it against each class
        }
    }
}

// File: MyReducer.java — scores each class as prior × product of conditional probabilities
package hadoop.MachineLearning.Bayes.Predict;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class MyReducer extends Reducer<Text, Text, Text, DoubleWritable> {
    public Map<String,Double> mapDouble=new HashMap<String,Double>();//conditional probabilities, keyed by "label:word"
    public Map<String,Integer> mapInteger=new HashMap<String,Integer>();//number of words in each class
    public Map<String,Double> noFind=new HashMap<String,Double>();//default probability for words never seen in a class
    public Map<String,Double> prePro=new HashMap<String,Double>();//prior probability of each class

    public void setup(Context context) throws IOException{
        Configuration conf=context.getConfiguration();
        String condiProPath=conf.get("condiProPath");
        String proPath=conf.get("proPath");
        String countPath=conf.get("countPath");
        mapDouble=Utils.getMapFormHDFS(condiProPath,true);
        mapInteger=Utils.getMapFormHDFS(proPath);
        int count=Utils.getCountFromHDFS(countPath);
        for(Map.Entry<String,Integer> entry:mapInteger.entrySet()){
            noFind.put(entry.getKey(),(1.0/(count+entry.getValue())));//same Laplace smoothing with a zero count
        }
        int sum=0;
        for(Map.Entry<String,Integer> entry:mapInteger.entrySet()){
            sum+=entry.getValue();
        }
        for(Map.Entry<String,Integer> entry:mapInteger.entrySet()){
            prePro.put(entry.getKey(),(entry.getValue()*1.0/sum));//prior: this class's share of all words
        }
    }

    public void reduce(Text _key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        String type=_key.toString();
        double pro=1.0;
        for (Text val : values) {
            String[] words=val.toString().split(" ");
            for(int i=0;i<words.length;i++){
                String condi=type+":"+words[i];
                if(mapDouble.get(condi)!=null){//the word was seen in this class, so a conditional probability exists
                    pro=pro*mapDouble.get(condi);
                }else{//otherwise fall back to the default smoothed probability
                    pro=pro*noFind.get(type);
                }
            }
        }
        pro=pro*prePro.get(type);
        context.write(new Text(type),new DoubleWritable(pro));
    }
}
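The reducer scores each class as prior × ∏ P(word | class), and the predicted label is the class with the largest output value. Below is a minimal local sketch of that scoring for one class; the class name, the sample document, and the probability values (taken from the hypothetical examples above) are illustrative only.

import java.util.HashMap;
import java.util.Map;

public class PredictSketch {
    public static void main(String[] args) {
        // Hypothetical values in the shape produced by the earlier jobs.
        Map<String, Double> condiPro = new HashMap<>();   // "label:word" -> P(word|label)
        condiPro.put("1:cheap", 0.1875);
        condiPro.put("1:buy", 0.1875);
        Map<String, Double> noFind = new HashMap<>();     // default for words unseen in the class
        noFind.put("1", 1.0 / 16);
        Map<String, Double> prior = new HashMap<>();      // P(label)
        prior.put("1", 7.0 / 11);

        String document = "buy cheap stuff";
        String label = "1";
        double score = 1.0;
        for (String word : document.split(" ")) {
            Double p = condiPro.get(label + ":" + word);
            score *= (p != null) ? p : noFind.get(label); // fall back for unseen words
        }
        score *= prior.get(label);
        System.out.println(label + "\t" + score);         // repeat per label and pick the largest
    }
}

Note two design consequences of the reducer as written: all values for a class are folded into one product, so the prediction input is assumed to hold a single document, and for long documents the product of many small probabilities can underflow (summing log-probabilities is a common alternative).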
// File: Utils.java — same HDFS readers, duplicated in the Predict package
package hadoop.MachineLearning.Bayes.Predict;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;

public class Utils {
    //reads integer-valued output (e.g. the per-class word counts) into a map
    public static Map<String,Integer> getMapFormHDFS(String input) throws IOException{
        Configuration conf=new Configuration();
        Path path=new Path(input);
        FileSystem fs=path.getFileSystem(conf);
        FileStatus[] stats=fs.listStatus(path);
        Map<String,Integer> map=new HashMap<String,Integer>();
        for(int i=0;i<stats.length;i++){
            if(stats[i].isFile()){
                FSDataInputStream infs=fs.open(stats[i].getPath());
                LineReader reader=new LineReader(infs,conf);
                Text line=new Text();
                while(reader.readLine(line)>0){
                    String[] temp=line.toString().split("\t");//TextOutputFormat separates key and value with a tab
                    map.put(temp[0],Integer.parseInt(temp[1]));
                }
                reader.close();
            }
        }
        return map;
    }

    //reads double-valued output (e.g. the conditional probabilities) into a map
    public static Map<String,Double> getMapFormHDFS(String input,boolean j) throws IOException{
        Configuration conf=new Configuration();
        Path path=new Path(input);
        FileSystem fs=path.getFileSystem(conf);
        FileStatus[] stats=fs.listStatus(path);
        Map<String,Double> map=new HashMap<String,Double>();
        for(int i=0;i<stats.length;i++){
            if(stats[i].isFile()){
                FSDataInputStream infs=fs.open(stats[i].getPath());
                LineReader reader=new LineReader(infs,conf);
                Text line=new Text();
                while(reader.readLine(line)>0){
                    String[] temp=line.toString().split("\t");
                    map.put(temp[0],Double.parseDouble(temp[1]));
                }
                reader.close();
            }
        }
        return map;
    }

    //reads the vocabulary size written by the Count job
    public static int getCountFromHDFS(String input) throws IOException{
        Configuration conf=new Configuration();
        Path path=new Path(input);
        FileSystem fs=path.getFileSystem(conf);
        FileStatus[] stats=fs.listStatus(path);
        int count=0;
        for(int i=0;i<stats.length;i++){
            if(stats[i].isFile()){
                FSDataInputStream infs=fs.open(stats[i].getPath());
                LineReader reader=new LineReader(infs,conf);
                Text line=new Text();
                while(reader.readLine(line)>0){
                    String[] temp=line.toString().split("\t");
                    count=Integer.parseInt(temp[1]);
                }
                reader.close();
            }
        }
        return count;
    }

    //small command-line check of the two readers
    public static void main(String[] args) throws IOException {
        String proPath="hdfs://10.107.8.110:9000/Bayes/Bayes_output/Pro";
        String countPath="hdfs://10.107.8.110:9000/Bayes/Bayes_output/Count/";
        Map<String,Integer> map=Utils.getMapFormHDFS(proPath);
        for(Map.Entry<String,Integer> entry:map.entrySet()){
            System.out.println(entry.getKey()+"->"+entry.getValue());
        }
        int count=Utils.getCountFromHDFS(countPath);
        System.out.println("count is "+count);
    }
}