Hadoop 2.2.0 MapReduce: sum, then sort
The JavaBean must implement the WritableComparable interface, providing that interface's serialization (write), deserialization (readFields), and comparison (compareTo) methods.
package com.my.hadoop.mapreduce.sort;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

public class InfoBean implements WritableComparable<InfoBean> {

    private String account;
    private double income;
    private double expences;
    private double surplus;

    public void set(String account, double income, double expences) {
        this.account = account;
        this.income = income;
        this.expences = expences;
        this.surplus = income - expences;
    }

    @Override
    public String toString() {
        return income + "\t" + expences + "\t" + surplus;
    }

    // Deserialization: fields must be read in exactly the order write() wrote them.
    @Override
    public void readFields(DataInput in) throws IOException {
        this.account = in.readUTF();
        this.income = in.readDouble();
        this.expences = in.readDouble();
        this.surplus = in.readDouble();
    }

    // Serialization: defines the wire format used between map and reduce.
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(this.account);
        out.writeDouble(this.income);
        out.writeDouble(this.expences);
        out.writeDouble(this.surplus);
    }

    // Sort by income descending; break ties by expences ascending.
    // Note this never returns 0, so two distinct accounts with identical
    // figures are still kept apart when the sort job groups keys.
    @Override
    public int compareTo(InfoBean o) {
        if (this.income == o.getIncome()) {
            return this.expences > o.getExpences() ? 1 : -1;
        } else {
            return this.income > o.getIncome() ? -1 : 1;
        }
    }

    public String getAccount() {
        return account;
    }

    public void setAccount(String account) {
        this.account = account;
    }

    public double getIncome() {
        return income;
    }

    public void setIncome(double income) {
        this.income = income;
    }

    public double getExpences() {
        return expences;
    }

    public void setExpences(double expences) {
        this.expences = expences;
    }

    public double getSurplus() {
        return surplus;
    }

    public void setSurplus(double surplus) {
        this.surplus = surplus;
    }
}
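Because readFields() must mirror write() field-for-field, a quick round-trip check catches ordering mistakes before a job ever runs. Here is a minimal sketch (the account name and figures are made-up test data) using Hadoop's in-memory DataOutputBuffer and DataInputBuffer:

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public class InfoBeanRoundTrip {
    public static void main(String[] args) throws Exception {
        InfoBean original = new InfoBean();
        original.set("zhangsan", 5000.0, 3200.0);

        // Serialize into an in-memory buffer.
        DataOutputBuffer out = new DataOutputBuffer();
        original.write(out);

        // Deserialize from the same bytes.
        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        InfoBean copy = new InfoBean();
        copy.readFields(in);

        // Expect: zhangsan	5000.0	3200.0	1800.0
        System.out.println(copy.getAccount() + "\t" + copy);
    }
}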
First, the sum job: aggregate income and expences per account.
package com.my.hadoop.mapreduce.sort;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SumStep {

    public static class SumMap extends Mapper<LongWritable, Text, Text, InfoBean> {

        private Text k = new Text();
        private InfoBean v = new InfoBean();

        // Input line format: account \t income \t expences
        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String[] fields = value.toString().split("\t");
            String account = fields[0];
            double in = Double.parseDouble(fields[1]);
            double out = Double.parseDouble(fields[2]);
            k.set(account);
            v.set(account, in, out);
            context.write(k, v);
        }
    }

    public static class SumReduce extends Reducer<Text, InfoBean, Text, InfoBean> {

        private InfoBean v = new InfoBean();

        @Override
        public void reduce(Text key, Iterable<InfoBean> value, Context context)
                throws IOException, InterruptedException {
            double in_sum = 0;
            double out_sum = 0;
            for (InfoBean bean : value) {
                in_sum += bean.getIncome();
                out_sum += bean.getExpences();
            }
            // The account is already carried by the output key, so leave it empty here.
            v.set("", in_sum, out_sum);
            context.write(key, v);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, SumStep.class.getSimpleName());
        job.setJarByClass(SumStep.class);
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        job.setMapperClass(SumMap.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(InfoBean.class);
        job.setReducerClass(SumReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(InfoBean.class);
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
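To make the parsing above concrete: assuming a tab-separated input file with one transaction per line (hypothetical figures),

lisi	2000	0
lisi	0	1000
zhangsan	6000	0
zhangsan	3000	1500

the sum job should emit one line per account in the form account \t income \t expences \t surplus, e.g. lisi	2000.0	1000.0	1000.0. A run might look like this (the jar name and HDFS paths are placeholders):

hadoop jar sumsort.jar com.my.hadoop.mapreduce.sort.SumStep /data/trade /data/sum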
Then, the sort job: re-read the summed output and sort by the bean itself, so InfoBean.compareTo() drives the ordering.
package com.my.hadoop.mapreduce.sort;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SortStep {

    public static class SortMap extends Mapper<LongWritable, Text, InfoBean, NullWritable> {

        private InfoBean k = new InfoBean();

        // Input is the sum job's output: account \t income \t expences \t surplus.
        // Only the first three fields are parsed; surplus is recomputed by set().
        @Override
        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String[] fields = value.toString().split("\t");
            String account = fields[0];
            double in = Double.parseDouble(fields[1]);
            double out = Double.parseDouble(fields[2]);
            k.set(account, in, out);
            // The bean is the map output key, so the shuffle sorts records
            // with InfoBean.compareTo(); no value payload is needed.
            context.write(k, NullWritable.get());
        }
    }

    public static class SortReduce extends Reducer<InfoBean, NullWritable, Text, InfoBean> {

        private Text k = new Text();

        @Override
        public void reduce(InfoBean bean, Iterable<NullWritable> value, Context context)
                throws IOException, InterruptedException {
            k.set(bean.getAccount());
            context.write(k, bean);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, SortStep.class.getSimpleName());
        job.setJarByClass(SortStep.class);
        FileInputFormat.setInputPaths(job, new Path(args[0]));
        job.setMapperClass(SortMap.class);
        job.setMapOutputKeyClass(InfoBean.class);
        job.setMapOutputValueClass(NullWritable.class);
        job.setReducerClass(SortReduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(InfoBean.class);
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
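The two jobs chain by pointing the sort job at the sum job's output directory; continuing the placeholder paths from above:

hadoop jar sumsort.jar com.my.hadoop.mapreduce.sort.SortStep /data/sum /data/sorted

Since SortStep runs with the default single reducer, its one output file is globally sorted by income (descending). With multiple reducers, each output file would only be sorted internally unless a total-order partitioner were added.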