Counting the occurrences of each word across multiple text files with Hadoop
Program source
import java.io.IOException;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class WordCount {

    public static class WordCountMap extends
            Mapper<LongWritable, Text, Text, IntWritable> {

        private final IntWritable one = new IntWritable(1); // output value: the constant 1
        private Text word = new Text();                     // output key: the word

        // Consumes the <k1,v1> pairs produced by TextInputFormat and emits <k2,v2>
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();                    // one line of the input text
            StringTokenizer token = new StringTokenizer(line); // split the line on whitespace
            while (token.hasMoreTokens()) {
                word.set(token.nextToken());  // the word itself becomes the key
                context.write(word, one);     // hand intermediate (word, 1) pairs to reduce
            }
        }
    }

    public static class WordCountReduce extends
            Reducer<Text, IntWritable, Text, IntWritable> {

        public void reduce(Text key, Iterable<IntWritable> values,
                Context context) throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = new Job(conf);
        job.setJarByClass(WordCount.class);
        job.setJobName("wordcount");

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(WordCountMap.class);
        job.setReducerClass(WordCountReduce.class);

        job.setInputFormatClass(TextInputFormat.class);   // turns the raw input into key/value pairs for map
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.waitForCompletion(true);
    }
}
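To make the data flow concrete, here is a small hand trace (my own illustration, not actual program output) of one hypothetical input line "Hello Word Hello" through the stages above:

TextInputFormat -> <0, "Hello Word Hello">              (key = byte offset, value = line text)
map()           -> ("Hello",1) ("Word",1) ("Hello",1)
shuffle/sort    -> ("Hello",[1,1]) ("Word",[1])
reduce()        -> ("Hello",2) ("Word",1)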
1 Compile the source
javac -classpath /opt/hadoop-1.2.1/hadoop-core-1.2.1.jar:/opt/hadoop-1.2.1/lib/commons-cli-1.2.jar -d ./word_count_class/ WordCount.java
This compiles the source into .class files under the word_count_class directory in the current folder; note that you have to create that directory yourself first.
2 Pack the classes into a jar
Change into the directory holding the compiled class files (word_count_class) and pack everything:
jar -cvf wordcount.jar *
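If in doubt, you can list the jar's contents to confirm the class files made it in (jar -tf is standard JDK tooling):

jar -tf wordcount.jar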
3 Upload the input files
First create a directory in HDFS to hold this job's input files:
hadoop fs -mkdir input_wordcount
Upload every text file under the local input directory to the input_wordcount directory in HDFS:
hadoop fs -put input/* input_wordcount/
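To double-check, list what landed in HDFS before running:

hadoop fs -ls input_wordcount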
Note: do not create the output directory before the run; Hadoop creates it itself and the job fails if it already exists.
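So if the directory is left over from an earlier run, remove it first (Hadoop 1.x shell syntax):

hadoop fs -rmr output_wordcount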
4 Submit the jar and run
hadoop jar word_count_class/wordcount.jar input_wordcount output_wordcount
5 Check the results
List the job's output directory:
hadoop fs -ls output_wordcount
Print the job's output:
hadoop fs -cat output_wordcount/part-r-00000
Version 2: the program from my own hands-on run (old org.apache.hadoop.mapred API)
The Map class
package com.zln.chapter03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

import java.io.IOException;
import java.util.StringTokenizer;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCountMap extends MapReduceBase implements Mapper<LongWritable, Text, Text, IntWritable> {
    private final static IntWritable one = new IntWritable(1); // +1 for every word
    private Text word = new Text();

    @Override
    public void map(LongWritable longWritable, Text text, OutputCollector<Text, IntWritable> outputCollector, Reporter reporter) throws IOException {
        String line = text.toString();
        StringTokenizer tokenizer = new StringTokenizer(line); // split the line into words
        while (tokenizer.hasMoreTokens()) {
            word.set(tokenizer.nextToken());
            outputCollector.collect(word, one);
        }
    }
}
The Reduce class
package com.zln.chapter03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

import java.io.IOException;
import java.util.Iterator;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCountReduce extends MapReduceBase implements Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    public void reduce(Text text, Iterator<IntWritable> iterator, OutputCollector<Text, IntWritable> outputCollector, Reporter reporter) throws IOException {
        int sum = 0;
        while (iterator.hasNext()) {
            sum += iterator.next().get();
        }
        outputCollector.collect(text, new IntWritable(sum));
    }
}
The main driver
package com.zln.chapter03;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;

import java.io.IOException;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCount {
    public static void main(String[] args) throws IOException {
        JobConf conf = new JobConf(WordCount.class);
        conf.setJobName("wordCount");

        // output key/value types
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        // map and reduce classes
        conf.setMapperClass(WordCountMap.class);
        conf.setReducerClass(WordCountReduce.class);

        // input format
        conf.setInputFormat(TextInputFormat.class);
        // output format
        conf.setOutputFormat(TextOutputFormat.class);

        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        JobClient.runJob(conf);
    }
}
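One optional tweak, not part of my run above: because this reduce logic just sums, the same class can also serve as a combiner that pre-aggregates counts on the map side and shrinks shuffle traffic. With the old API that is a single extra line on the JobConf:

conf.setCombinerClass(WordCountReduce.class); // pre-aggregate (word,1) pairs locally before the shuffle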
Prepare the input files
file1
Hello Word By Word
Hello Word By zln
file2
Hello Hadoop
Hello GoodBye
Both files sit in one local directory: /home/sherry/IdeaProjects/Hadoop/WordCount/输入文件准备
Compile the classes and pack them into a jar
I build with IDEA. Take care not to forget to specify the main class when defining the jar artifact.
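If the artifact is set up correctly, its META-INF/MANIFEST.MF should contain a Main-Class entry along these lines (class name taken from the version-2 driver above):

Manifest-Version: 1.0
Main-Class: com.zln.chapter03.WordCount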
Upload the input files
root@sherry:/opt/hadoop-1.2.# hadoop fs -mkdir /user/root/zln/WordCount/InputFiles
root@sherry:/opt/hadoop-1.2.# hadoop fs -put /home/sherry/IdeaProjects/Hadoop/WordCount/输入文件准备/* /user/root/zln/WordCount/InputFiles
Submit the jar and run
root@sherry:/opt/hadoop-1.2.# hadoop jar /home/sherry/IdeaProjects/Hadoop/out/artifacts/WordCount_jar/WordCount.jar /user/root/zln/WordCount/InputFiles /user/root/zln/WordCount/OutputFiles
Check the results
root@sherry:/opt/hadoop-1.2.# hadoop fs -ls /user/root/zln/WordCount/OutputFiles
root@sherry:/opt/hadoop-1.2.# hadoop fs -text /user/root/zln/WordCount/OutputFiles/part-
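Given the two input files above, the counts should come out as follows (TextOutputFormat separates key and value with a tab; Text keys sort in byte order, so lowercase "zln" lands last):

By      2
GoodBye 1
Hadoop  1
Hello   4
Word    3
zln     1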
Version 3: rewriting Map, Reduce, and the main driver with the new API
Map
package com.zln.chapter03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;
import java.util.StringTokenizer;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCountMap extends Mapper<LongWritable, Text, Text, IntWritable> {
    private final static IntWritable one = new IntWritable(1); // +1 for every word
    private Text word = new Text();

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        String line = value.toString();
        StringTokenizer tokenizer = new StringTokenizer(line); // split the line into words
        while (tokenizer.hasMoreTokens()) {
            word.set(tokenizer.nextToken());
            context.write(word, one);
        }
    }
}
Reduce
package com.zln.chapter03;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCountReduce extends Reducer<Text, IntWritable, Text, IntWritable> {
    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
        int sum = 0;
        for (IntWritable intWritable : values) {
            sum += intWritable.get();
        }
        context.write(key, new IntWritable(sum));
    }
}
Main
package com.zln.chapter03;

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/**
 * Created by sherry on 15-7-12.
 */
public class WordCount extends Configured implements Tool {

    public int run(String[] args) throws Exception {
        Job job = new Job(getConf());
        job.setJarByClass(WordCount.class);
        job.setJobName("WordCount");

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        job.setMapperClass(WordCountMap.class);
        job.setReducerClass(WordCountReduce.class);

        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TextOutputFormat.class);

        FileInputFormat.setInputPaths(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        boolean success = job.waitForCompletion(true);
        return success ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int ret = ToolRunner.run(new WordCount(), args);
        System.exit(ret);
    }
}
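The payoff of the Configured/Tool/ToolRunner wrapper is that ToolRunner runs the arguments through GenericOptionsParser before run() sees them, so generic Hadoop options can be set from the command line without recompiling. For example (a sketch; the paths are the ones from version 2, and the output directory must not yet exist), the reducer count could be overridden on Hadoop 1.x like this:

hadoop jar WordCount.jar -D mapred.reduce.tasks=2 /user/root/zln/WordCount/InputFiles /user/root/zln/WordCount/OutputFiles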