Hadoop 2.2.0 MapReduce: sum and then sort
The JavaBean must implement the WritableComparable interface, i.e. provide the interface's serialization (write), deserialization (readFields), and comparison (compareTo) methods.
package com.my.hadoop.mapreduce.sort;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

public class InfoBean implements WritableComparable<InfoBean> {

    private String account;
    private double income;
    private double expences;
    private double surplus;

    public void set(String account, double income, double expences) {
        this.account = account;
        this.income = income;
        this.expences = expences;
        this.surplus = income - expences;
    }

    @Override
    public String toString() {
        return income + "\t" + expences + "\t" + surplus;
    }

    // Deserialization: fields must be read in the same order they were written in write().
    @Override
    public void readFields(DataInput in) throws IOException {
        this.account = in.readUTF();
        this.income = in.readDouble();
        this.expences = in.readDouble();
        this.surplus = in.readDouble();
    }

    // Serialization: write the fields to the output stream.
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(this.account);
        out.writeDouble(this.income);
        out.writeDouble(this.expences);
        out.writeDouble(this.surplus);
    }

    // Sort order: descending by income; on equal income, ascending by expences.
    // Note that this never returns 0, so beans with identical amounts are still kept as separate keys.
    @Override
    public int compareTo(InfoBean o) {
        if (this.income == o.getIncome()) {
            return this.expences > o.getExpences() ? 1 : -1;
        } else {
            return this.income > o.getIncome() ? -1 : 1;
        }
    }

    public String getAccount() {
        return account;
    }

    public void setAccount(String account) {
        this.account = account;
    }

    public double getIncome() {
        return income;
    }

    public void setIncome(double income) {
        this.income = income;
    }

    public double getExpences() {
        return expences;
    }

    public void setExpences(double expences) {
        this.expences = expences;
    }

    public double getSurplus() {
        return surplus;
    }

    public void setSurplus(double surplus) {
        this.surplus = surplus;
    }
}
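Before wiring InfoBean into a job, the write/readFields pair can be checked locally. The following is a minimal sketch that is not part of the original post; the account name and amounts are made up, and the class simply mirrors the round trip Hadoop performs when it serializes the bean during the shuffle.

package com.my.hadoop.mapreduce.sort;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

public class InfoBeanRoundTrip {
    public static void main(String[] args) throws Exception {
        InfoBean original = new InfoBean();
        original.set("zhangsan", 6000.0, 3500.0); // hypothetical record

        // Serialize with write(), as Hadoop does when spilling map output.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        original.write(new DataOutputStream(bytes));

        // Deserialize into a fresh bean with readFields() and print the recovered fields.
        InfoBean copy = new InfoBean();
        copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.getAccount() + "\t" + copy); // zhangsan  6000.0  3500.0  2500.0
    }
}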
Step 1: the sum job
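The sum job assumes tab-separated input lines of the form account \t income \t expences. A made-up example (the accounts and amounts below are invented purely for illustration):

zhangsan	6000	3500
lisi	2000	600
zhangsan	3000	1200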
package com.my.hadoop.mapreduce.sort;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SumStep {

    public static class SumMap extends Mapper<LongWritable, Text, Text, InfoBean> {
        private Text k = new Text();
        private InfoBean v = new InfoBean();

        @Override
        public void map(LongWritable key, Text value, Context context)
                throws java.io.IOException, InterruptedException {
            // Each input line: account \t income \t expences
            String[] fields = value.toString().split("\t");
            String account = fields[0];
            double in = Double.parseDouble(fields[1]);
            double out = Double.parseDouble(fields[2]);
            k.set(account);
            v.set(account, in, out);
            context.write(k, v);
        }
    }

    public static class SumReduce extends Reducer<Text, InfoBean, Text, InfoBean> {
        private InfoBean v = new InfoBean();

        @Override
        public void reduce(Text key, Iterable<InfoBean> value, Context context)
                throws java.io.IOException, InterruptedException {
            // Accumulate income and expences for one account.
            double in_sum = 0;
            double out_sum = 0;
            for (InfoBean bean : value) {
                in_sum += bean.getIncome();
                out_sum += bean.getExpences();
            }
            v.set("", in_sum, out_sum);
            context.write(key, v);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, SumStep.class.getSimpleName());
        job.setJarByClass(SumStep.class);
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        job.setMapperClass(SumMap.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(InfoBean.class);
        job.setReducerClass(SumReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(InfoBean.class);
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
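With the default TextOutputFormat, each output line of SumStep is the key, a tab, and InfoBean.toString(), i.e. account \t income \t expences \t surplus. For the made-up input above, the summed result would be:

lisi	2000.0	600.0	1400.0
zhangsan	9000.0	4700.0	4300.0

This file is exactly the input that SortStep parses. Since the per-account summation is associative, SumReduce could in principle also be registered as a combiner via job.setCombinerClass(SumReduce.class); the original code does not do this, so it is mentioned here only as a possible optimization.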
Step 2: the sort job
package com.my.hadoop.mapreduce.sort;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SortStep {

    public static class SortMap extends Mapper<LongWritable, Text, InfoBean, NullWritable> {
        private InfoBean k = new InfoBean();

        @Override
        public void map(LongWritable key, Text value, Context context)
                throws java.io.IOException, InterruptedException {
            // Debug output of the raw input line.
            System.out.println("====" + value.toString() + "====");
            // Each input line is a line of SumStep's output: account \t income \t expences \t surplus
            String[] fields = value.toString().split("\t");
            String account = fields[0];
            double in = Double.parseDouble(fields[1]);
            double out = Double.parseDouble(fields[2]);
            // The bean itself is the map output key, so the shuffle sorts records by InfoBean.compareTo.
            k.set(account, in, out);
            context.write(k, NullWritable.get());
        }
    }

    public static class SortReduce extends Reducer<InfoBean, NullWritable, Text, InfoBean> {
        private Text k = new Text();

        @Override
        public void reduce(InfoBean bean, Iterable<NullWritable> value, Context context)
                throws java.io.IOException, InterruptedException {
            k.set(bean.getAccount());
            context.write(k, bean);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, SortStep.class.getSimpleName());
        job.setJarByClass(SortStep.class);
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        job.setMapperClass(SortMap.class);
        job.setMapOutputKeyClass(InfoBean.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setReducerClass(SortReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(InfoBean.class);
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
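To run the two steps end to end, the output directory of SumStep becomes the input of SortStep. A sketch with a hypothetical jar name and HDFS paths:

hadoop jar sum-sort.jar com.my.hadoop.mapreduce.sort.SumStep /data/trade /data/trade_sum
hadoop jar sum-sort.jar com.my.hadoop.mapreduce.sort.SortStep /data/trade_sum /data/trade_sorted

For the made-up data above, the final output is ordered by income in descending order:

zhangsan	9000.0	4700.0	4300.0
lisi	2000.0	600.0	1400.0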