Counting word occurrences across multiple text files with Hadoop
Program source
import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class WordCount {

    public static class WordCountMap extends
            Mapper<LongWritable, Text, Text, IntWritable> {

        private final IntWritable one = new IntWritable(1); // output value: always 1
        private Text word = new Text();                     // output key: the word

        // Consumes the <k1,v1> pairs produced by TextInputFormat and emits <k2,v2>.
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();                    // the current line of text
            StringTokenizer token = new StringTokenizer(line); // split the line on whitespace
            while (token.hasMoreTokens()) {
                word.set(token.nextToken()); // the word becomes the key
                context.write(word, one);    // hand <word, 1> pairs to the reduce phase
            }
        }
    }

    public static class WordCountReduce extends
            Reducer<Text, IntWritable, Text, IntWritable> {

        public void reduce(Text key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf);
        job.setJarByClass(WordCount.class);
        job.setJobName("wordcount");

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(WordCountMap.class);
        job.setReducerClass(WordCountReduce.class);

        job.setInputFormatClass(TextInputFormat.class);   // turns the input into key/value pairs the map phase can consume
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.waitForCompletion(true);
    }
}
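To make the data flow concrete, here is a small trace on a made-up input line (illustrative only, not from the original run): for the line "Hello Hadoop Hello", TextInputFormat hands the mapper the pair <0, "Hello Hadoop Hello"> (byte offset of the line, line contents). The mapper emits <"Hello", 1>, <"Hadoop", 1>, <"Hello", 1>; the framework groups values by key, so the reducer receives <"Hadoop", [1]> and <"Hello", [1, 1]> and writes "Hadoop 1" and "Hello 2".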
1 Compile the source
javac -classpath /opt/hadoop-1.2.1/hadoop-core-1.2.1.jar:/opt/hadoop-1.2.1/lib/commons-cli-1.2.jar -d ./word_count_class/ WordCount.java
This compiles WordCount.java into class files placed in the word_count_class directory under the current folder; of course, the directory must be created first.
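For example, from the directory containing WordCount.java:
mkdir word_count_class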
2 Package the classes into a jar
Change into the word_count_class directory (where the compiled classes now live):
jar -cvf wordcount.jar *
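If you want to confirm the class files actually made it into the jar, the same jar tool can list its contents (an optional check):
jar -tf wordcount.jar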
3 Upload the input files
First create a directory in HDFS to hold this job's input:
hadoop fs -mkdir input_wordcount
Then upload all the text files under the local input directory into input_wordcount:
hadoop fs -put input/* input_wordcount/
Note: do not create the output directory before running the job; Hadoop creates it itself and aborts if it already exists.
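If a previous run left the output directory behind, the job fails with an "output directory already exists" error. In Hadoop 1.x you can delete it first; this permanently removes the directory, so double-check the path:
hadoop fs -rmr output_wordcount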
4 Run the jar
hadoop jar word_count_class/wordcount.jar input_wordcount output_wordcount
5 View the results
List the job's output directory:
hadoop fs -ls output_wordcount
Print the contents of the output file:
hadoop fs -cat output_wordcount/part-r-00000
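Each line of part-r-00000 is a word and its count separated by a tab; for example (the words here are illustrative, the real output depends on your input files):
Hadoop	1
Hello	2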
Version 2: the program from my own hands-on run
The Map program
package com.zln.chapter03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

import java.io.IOException;
import java.util.StringTokenizer;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCountMap extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
    private final static IntWritable one = new IntWritable(1); // each occurrence of a word counts +1
    private Text word = new Text();

    @Override
    public void map(LongWritable longWritable, Text text, OutputCollector<Text, IntWritable> outputCollector, Reporter reporter) throws IOException {
        String line = text.toString();
        StringTokenizer tokenizer = new StringTokenizer(line); // split the line into words
        while (tokenizer.hasMoreTokens()) {
            word.set(tokenizer.nextToken());
            outputCollector.collect(word, one);
        }
    }
}
The Reduce program
package com.zln.chapter03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

import java.io.IOException;
import java.util.Iterator;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCountReduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    public void reduce(Text text, Iterator<IntWritable> iterator, OutputCollector<Text, IntWritable> outputCollector, Reporter reporter) throws IOException {
        int sum = 0;
        while (iterator.hasNext()) {
            sum += iterator.next().get();
        }
        outputCollector.collect(text, new IntWritable(sum));
    }
}
The main function
package com.zln.chapter03;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;

import java.io.IOException;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCount {
    public static void main(String[] args) throws IOException {
        JobConf conf = new JobConf(WordCount.class);
        conf.setJobName("wordCount");

        // output key/value types
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        // map and reduce classes
        conf.setMapperClass(WordCountMap.class);
        conf.setReducerClass(WordCountReduce.class);

        // input format
        conf.setInputFormat(TextInputFormat.class);
        // output format
        conf.setOutputFormat(TextOutputFormat.class);

        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        JobClient.runJob(conf);
    }
}
Prepare the input files
file1
Hello Word By Word
Hello Word By zln
file2
Hello Hadoop
Hello GoodBye
Both files sit in the same local directory: /home/sherry/IdeaProjects/Hadoop/WordCount/输入文件准备
Compile the classes and package them into a jar
I build with IDEA; note, don't forget to specify the main class.
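If you build the jar by hand rather than through IDEA, "specifying the main class" means adding a Main-Class entry to the jar manifest; a minimal MANIFEST.MF for this project would be (class name taken from the package declarations above):
Main-Class: com.zln.chapter03.WordCount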
Upload the input files
root@sherry:/opt/hadoop-1.2.# hadoop fs -mkdir /user/root/zln/WordCount/InputFiles
root@sherry:/opt/hadoop-1.2.# hadoop fs -put /home/sherry/IdeaProjects/Hadoop/WordCount/输入文件准备/* /user/root/zln/WordCount/InputFiles
Upload the jar and run it
root@sherry:/opt/hadoop-1.2.# hadoop jar /home/sherry/IdeaProjects/Hadoop/out/artifacts/WordCount_jar/WordCount.jar /user/root/zln/WordCount/InputFiles /user/root/zln/WordCount/OutputFiles
View the results
root@sherry:/opt/hadoop-1.2.# hadoop fs -ls /user/root/zln/WordCount/OutputFiles
root@sherry:/opt/hadoop-1.2.# hadoop fs -text /user/root/zln/WordCount/OutputFiles/part-
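With the default single reducer, keys arrive sorted in byte order (uppercase before lowercase), so for file1 and file2 above the output should read:
By	2
GoodBye	1
Hadoop	1
Hello	4
Word	3
zln	1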
Version 3: Map, Reduce, and main rewritten against the new API
Map
package com.zln.chapter03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.util.StringTokenizer;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCountMap extends Mapper<LongWritable, Text, Text, IntWritable> {
    private final static IntWritable one = new IntWritable(1); // each occurrence of a word counts +1
    private Text word = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String line = value.toString();
        StringTokenizer tokenizer = new StringTokenizer(line); // split the line into words
        while (tokenizer.hasMoreTokens()) {
            word.set(tokenizer.nextToken());
            context.write(word, one);
        }
    }
}
Reduce
package com.zln.chapter03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCountReduce extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable intWritable : values) {
            sum += intWritable.get();
        }
        context.write(key, new IntWritable(sum));
    }
}
Main
package com.zln.chapter03;

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCount extends Configured implements Tool {

    public int run(String[] args) throws Exception {
        Job job = new Job(getConf());
        job.setJarByClass(WordCount.class);
        job.setJobName("WordCount");

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(WordCountMap.class);
        job.setReducerClass(WordCountReduce.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        boolean success = job.waitForCompletion(true);
        return success ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int ret = ToolRunner.run(new WordCount(), args);
        System.exit(ret);
    }
}
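Because version 3 goes through ToolRunner, generic Hadoop options (e.g. -D key=value) are stripped off before args reaches run(). If the jar's manifest names no main class, pass the driver class as the first argument; the jar and HDFS paths below are the same ones used in version 2:
hadoop jar /home/sherry/IdeaProjects/Hadoop/out/artifacts/WordCount_jar/WordCount.jar com.zln.chapter03.WordCount /user/root/zln/WordCount/InputFiles /user/root/zln/WordCount/OutputFiles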