Implementing word co-occurrence in Hadoop
Word co-occurrence here means two words appearing next to each other in a sentence; every pair of adjacent words forms one co-occurrence pair.
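For example, the line "a b cc, c d d c" is stripped of punctuation, lowercased, and split into tokens, yielding the adjacent pairs a:b, b:cc, cc:c, c:d, d:d, and d:c. Because each pair is stored with its two words in alphabetical order, cc:c and d:c are recorded as c:cc and c:d, which is why c:d appears in the output below with a count of 2.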
Sample Input:
a b cc, c d d c
I Love U.
dd ee f g s sa dew ad da
So shaken as we are, so wan with care.
Find we a time for frighted peace to pant.
And breathe short-winded accents of new broil.
To be commenced in strands afar remote.
I Love U U love i.
i i i i
Sample Output:
a:b 1
a:time 1
a:we 1
accents:of 1
accents:short-winded 1
ad:da 1
ad:dew 1
afar:remote 1
afar:strands 1
and:breathe 1
are:so 1
are:we 1
as:shaken 1
as:we 1
b:cc 1
be:commenced 1
be:to 1
breathe:short-winded 1
broil:new 1
c:cc 1
c:d 2
care:with 1
commenced:in 1
d:d 1
dd:ee 1
dew:sa 1
ee:f 1
f:g 1
find:we 1
for:frighted 1
for:time 1
frighted:peace 1
g:s 1
i:i 3
i:love 3
in:strands 1
love:u 3
new:of 1
pant:to 1
peace:to 1
s:sa 1
shaken:so 1
so:wan 1
u:u 1
wan:with 1
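The code builds the count around a composite key. TextPair is a custom WritableComparable that stores the two words of a pair in alphabetical order, so both orders of the same word pair map to one key. A nested raw Comparator, registered in a static block, compares serialized TextPairs byte by byte without deserializing them. FirstComparator and FirstPartitioner, which compare and partition on the first word only, are included but left disabled in main. The mapper lowercases each line, splits it on non-word characters, and emits every adjacent pair with a count of 1; the reducer, which also serves as the combiner, sums those counts.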
Code:
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class CoOccurrence {

    // Composite key holding one pair of words. set() stores the two words in
    // alphabetical order, so (u, love) and (love, u) become the same key.
    public static class TextPair implements WritableComparable<TextPair> {
        private Text first;
        private Text second;

        public TextPair() {
            set(new Text(), new Text());
        }

        public TextPair(String left, String right) {
            set(new Text(left), new Text(right));
        }

        public TextPair(Text left, Text right) {
            set(left, right);
        }

        public void set(Text left, Text right) {
            String l = left.toString();
            String r = right.toString();
            int cmp = l.compareTo(r);
            if (cmp <= 0) {
                this.first = left;
                this.second = right;
            } else {
                this.first = right;
                this.second = left;
            }
        }

        public Text getFirst() {
            return first;
        }

        public Text getSecond() {
            return second;
        }

        @Override
        public void readFields(DataInput in) throws IOException {
            first.readFields(in);
            second.readFields(in);
        }

        @Override
        public void write(DataOutput out) throws IOException {
            first.write(out);
            second.write(out);
        }

        @Override
        public int hashCode() {
            // 163 is an arbitrary prime multiplier; any small prime
            // (157, 31, ...) would spread the hash values just as well.
            return first.hashCode() * 163 + second.hashCode();
        }

        @Override
        public boolean equals(Object o) {
            if (o instanceof TextPair) {
                TextPair tp = (TextPair) o;
                return first.equals(tp.first) && second.equals(tp.second);
            }
            return false;
        }

        @Override
        public String toString() {
            return first + ":" + second;
        }

        @Override
        public int compareTo(TextPair tp) {
            int cmp = first.compareTo(tp.first);
            if (cmp != 0)
                return cmp;
            return second.compareTo(tp.second);
        }

        // A raw comparator that compares serialized TextPairs without
        // deserializing them. Each Text is written as a VInt length followed
        // by the UTF-8 bytes, so the span of the first word can be computed
        // from the leading VInt.
        public static class Comparator extends WritableComparator {
            private static final Text.Comparator TEXT_COMPARATOR = new Text.Comparator();

            public Comparator() {
                super(TextPair.class);
            }

            @Override
            public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
                try {
                    int firstl1 = WritableUtils.decodeVIntSize(b1[s1]) + readVInt(b1, s1);
                    int firstl2 = WritableUtils.decodeVIntSize(b2[s2]) + readVInt(b2, s2);
                    int cmp = TEXT_COMPARATOR.compare(b1, s1, firstl1, b2, s2, firstl2);
                    if (cmp != 0)
                        return cmp;
                    return TEXT_COMPARATOR.compare(b1, s1 + firstl1, l1 - firstl1,
                                                   b2, s2 + firstl2, l2 - firstl2);
                } catch (IOException e) {
                    throw new IllegalArgumentException(e);
                }
            }
        } // End of Comparator

        static { // register this comparator as the default for TextPair
            WritableComparator.define(TextPair.class, new Comparator());
        }

        // Compares only the first part of the pair, so that reduce is called
        // once for each value of the first part.
        public static class FirstComparator extends WritableComparator {
            private static final Text.Comparator TEXT_COMPARATOR = new Text.Comparator();

            public FirstComparator() {
                super(TextPair.class);
            }

            @Override
            public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
                try {
                    int firstl1 = WritableUtils.decodeVIntSize(b1[s1]) + readVInt(b1, s1);
                    int firstl2 = WritableUtils.decodeVIntSize(b2[s2]) + readVInt(b2, s2);
                    return TEXT_COMPARATOR.compare(b1, s1, firstl1, b2, s2, firstl2);
                } catch (IOException e) {
                    throw new IllegalArgumentException(e);
                }
            }

            /*
            @Override
            public int compare(WritableComparable a, WritableComparable b) {
                if (a instanceof TextPair && b instanceof TextPair)
                    return ((TextPair) a).first.compareTo(((TextPair) b).first);
                return super.compare(a, b);
            }
            */
        } // End of FirstComparator
    } // End of TextPair

    // Partition based on the first word of the pair, so that all pairs
    // sharing a first word go to the same reducer.
    public static class FirstPartitioner extends Partitioner<TextPair, IntWritable> {
        @Override
        public int getPartition(TextPair key, IntWritable value, int numPartitions) {
            // Hash the first word and mask off the sign bit to keep the
            // partition number non-negative.
            return (key.getFirst().hashCode() & Integer.MAX_VALUE) % numPartitions;
        }
    } // End of FirstPartitioner

    public static class MyMapper extends Mapper<LongWritable, Text, TextPair, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        // Anything other than letters, digits, hyphens, and apostrophes is a separator.
        private String pattern = "[^a-zA-Z0-9-']";

        @Override
        public void map(LongWritable inKey, Text inValue, Context context)
                throws IOException, InterruptedException {
            String line = inValue.toString();
            line = line.replaceAll(pattern, " ");
            line = line.toLowerCase();
            String[] str = line.split(" +");
            // Emit every pair of adjacent words with a count of 1.
            for (int i = 0; i < str.length - 1; i++) {
                if (str[i].isEmpty() || str[i + 1].isEmpty())
                    continue; // a line starting with a separator leaves an empty first token
                TextPair pair = new TextPair(str[i], str[i + 1]);
                context.write(pair, one);
            }
        }
    } // End of MyMapper

    public static class MyReducer extends Reducer<TextPair, IntWritable, TextPair, IntWritable> {
        private IntWritable result = new IntWritable();

        @Override
        public void reduce(TextPair inKey, Iterable<IntWritable> inValues, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable val : inValues) {
                sum += val.get();
            }
            result.set(sum);
            context.write(inKey, result);
        }
    } // End of MyReducer

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        //conf.set("Hadoop.job.ugi", "sunguoli,cs402");
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: CoOccurrence <in> <out>");
            System.exit(2);
        }

        Job job = new Job(conf, "Co-Occurrence");
        job.setJarByClass(CoOccurrence.class);
        job.setMapperClass(MyMapper.class);
        job.setMapOutputKeyClass(TextPair.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setCombinerClass(MyReducer.class);

        // To group and partition by the first word of the pair only, enable:
        //job.setPartitionerClass(FirstPartitioner.class);
        //job.setGroupingComparatorClass(TextPair.FirstComparator.class);

        // The job output is TextPair, IntWritable.
        job.setReducerClass(MyReducer.class);
        job.setOutputKeyClass(TextPair.class);
        job.setOutputValueClass(IntWritable.class);

        //FileInputFormat.addInputPath(job, new Path("../shakespeareinput"));
        //FileOutputFormat.setOutputPath(job, new Path("output"));
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    } // End of main
} // End of CoOccurrence
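One way to build and run the job against the sample input, as a sketch: the jar name, the input/output directory names, and the single-reducer setup are assumptions, and part-r-00000 is the default output file name for one reducer.

javac -classpath $(hadoop classpath) -d classes CoOccurrence.java
jar cf co-occurrence.jar -C classes .
hadoop fs -mkdir input
hadoop fs -put input.txt input
hadoop jar co-occurrence.jar CoOccurrence input output
hadoop fs -cat output/part-r-00000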