1. Concept

2. Hadoop's default grouping: all keys fall into a single group, handled by one reduce task
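This default behavior comes from the combination of a single reduce task and the built-in HashPartitioner. Paraphrasing org.apache.hadoop.mapreduce.lib.partition.HashPartitioner:

import org.apache.hadoop.mapreduce.Partitioner;

// Hadoop's default partitioner: the partition index is the key's hash
// modulo the number of reduce tasks. With the default numReduceTasks = 1,
// this is always 0 -- every key lands in the same partition, i.e. one
// group handled by one reducer.
public class HashPartitioner<K, V> extends Partitioner<K, V> {
    @Override
    public int getPartition(K key, V value, int numReduceTasks) {
        // The bitmask keeps a negative hashCode from producing a
        // negative partition index.
        return (key.hashCode() & Integer.MAX_VALUE) % numReduceTasks;
    }
}

To route different keys to different reducers, we plug in our own Partitioner (FlowGroupPartition below) and raise the reduce-task count.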

3. Code example
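The listings below assume the classic phone-traffic sample data: each input line carries 11 tab-separated fields, the phone number sits in fields[1], and the upstream/downstream byte counts sit in the third- and second-from-last fields. The concrete literals that follow (the field count, the field indexes, the reduce-task count, and the prefix table in the partitioner) are assumptions along those lines; adjust them to your own data.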

FlowBean

package com.ares.hadoop.mr.flowgroup;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

public class FlowBean implements WritableComparable<FlowBean> {
    private String phoneNum;
    private long upFlow;
    private long downFlow;
    private long sumFlow;

    // The framework needs a no-arg constructor to instantiate the bean
    // via reflection during deserialization.
    public FlowBean() {
    }

    // public FlowBean(String phoneNum, long upFlow, long downFlow, long sumFlow) {
    //     super();
    //     this.phoneNum = phoneNum;
    //     this.upFlow = upFlow;
    //     this.downFlow = downFlow;
    //     this.sumFlow = sumFlow;
    // }

    public String getPhoneNum() {
        return phoneNum;
    }

    public void setPhoneNum(String phoneNum) {
        this.phoneNum = phoneNum;
    }

    public long getUpFlow() {
        return upFlow;
    }

    public void setUpFlow(long upFlow) {
        this.upFlow = upFlow;
    }

    public long getDownFlow() {
        return downFlow;
    }

    public void setDownFlow(long downFlow) {
        this.downFlow = downFlow;
    }

    public long getSumFlow() {
        return sumFlow;
    }

    public void setSumFlow(long sumFlow) {
        this.sumFlow = sumFlow;
    }

    // Deserialization: read the fields in exactly the order write() emits them.
    @Override
    public void readFields(DataInput in) throws IOException {
        phoneNum = in.readUTF();
        upFlow = in.readLong();
        downFlow = in.readLong();
        sumFlow = in.readLong();
    }

    // Serialization: defines the bean's wire format between map and reduce.
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(phoneNum);
        out.writeLong(upFlow);
        out.writeLong(downFlow);
        out.writeLong(sumFlow);
    }

    @Override
    public String toString() {
        return "" + phoneNum + "\t" + upFlow + "\t" + downFlow + "\t" + sumFlow;
    }

    // Sort beans by total flow in descending order.
    @Override
    public int compareTo(FlowBean o) {
        return sumFlow > o.getSumFlow() ? -1 : 1;
    }
}
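A quick way to convince yourself that write() and readFields() agree is a local round-trip. This is a sketch, not part of the original code: the class name and sample values are made up, and it assumes it sits in the same package as FlowBean.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Minimal round-trip check of FlowBean's Writable contract: fields must be
// read back in exactly the order they were written, or the bean is
// silently corrupted.
public class FlowBeanRoundTrip {
    public static void main(String[] args) throws IOException {
        FlowBean in = new FlowBean();
        in.setPhoneNum("13512345678");
        in.setUpFlow(100);
        in.setDownFlow(200);
        in.setSumFlow(300);

        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        in.write(new DataOutputStream(bos));

        FlowBean out = new FlowBean();
        out.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));
        System.out.println(out);   // 13512345678  100  200  300
    }
}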

FlowGroup

package com.ares.hadoop.mr.flowgroup;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;

import com.ares.hadoop.mr.exception.LineException;

public class FlowGroup extends Configured implements Tool {
    private static final Logger LOGGER = Logger.getLogger(FlowGroup.class);

    enum Counter {
        LINESKIP
    }

    public static class FlowGroupMapper extends Mapper<LongWritable, Text, Text, FlowBean> {
        private String line;
        private int length;
        private final static char separator = '\t';
        private String phoneNum;
        private long upFlow;
        private long downFlow;
        //private long sumFlow;

        private Text text = new Text();
        private FlowBean flowBean = new FlowBean();

        @Override
        protected void map(LongWritable key, Text value,
                Mapper<LongWritable, Text, Text, FlowBean>.Context context)
                throws IOException, InterruptedException {
            String errMsg;
            try {
                line = value.toString();
                String[] fields = StringUtils.split(line, separator);
                length = fields.length;
                // Each line of the sample traffic log is expected to carry
                // 11 tab-separated fields; anything else is skipped.
                if (length != 11) {
                    throw new LineException(key.get() + ", " + line + " LENGTH INVALID, IGNORE...");
                }
                phoneNum = fields[1];
                upFlow = Long.parseLong(fields[length - 3]);
                downFlow = Long.parseLong(fields[length - 2]);
                //sumFlow = upFlow + downFlow;

                text.set(phoneNum);
                flowBean.setPhoneNum(phoneNum);
                flowBean.setUpFlow(upFlow);
                flowBean.setDownFlow(downFlow);
                //flowBean.setSumFlow(sumFlow);
                context.write(text, flowBean);
            } catch (LineException e) {
                LOGGER.error(e);
                System.out.println(e);
                context.getCounter(Counter.LINESKIP).increment(1);
            } catch (NumberFormatException e) {
                errMsg = key.get() + ", " + line + " FLOW DATA INVALID, IGNORE...";
                LOGGER.error(errMsg);
                System.out.println(errMsg);
                context.getCounter(Counter.LINESKIP).increment(1);
            } catch (Exception e) {
                LOGGER.error(e);
                System.out.println(e);
                context.getCounter(Counter.LINESKIP).increment(1);
            }
        }
    }

    public static class FlowGroupReducer extends Reducer<Text, FlowBean, FlowBean, NullWritable> {
        private FlowBean flowBean = new FlowBean();

        @Override
        protected void reduce(Text key, Iterable<FlowBean> values,
                Reducer<Text, FlowBean, FlowBean, NullWritable>.Context context)
                throws IOException, InterruptedException {
            long upFlowCounter = 0;
            long downFlowCounter = 0;
            for (FlowBean bean : values) {
                upFlowCounter += bean.getUpFlow();
                downFlowCounter += bean.getDownFlow();
            }
            flowBean.setPhoneNum(key.toString());
            flowBean.setUpFlow(upFlowCounter);
            flowBean.setDownFlow(downFlowCounter);
            flowBean.setSumFlow(upFlowCounter + downFlowCounter);
            context.write(flowBean, NullWritable.get());
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        String errMsg = "FlowGroup: TEST STARTED...";
        LOGGER.debug(errMsg);
        System.out.println(errMsg);

        Configuration conf = new Configuration();
        //FOR Eclipse JVM Debug
        //conf.set("mapreduce.job.jar", "flowsum.jar");
        Job job = Job.getInstance(conf);

        // JOB NAME
        job.setJobName("FlowGroup");

        // JOB MAPPER & REDUCER
        job.setJarByClass(FlowGroup.class);
        job.setMapperClass(FlowGroupMapper.class);
        job.setReducerClass(FlowGroupReducer.class);

        // JOB PARTITION
        job.setPartitionerClass(FlowGroupPartition.class);

        // JOB REDUCE TASK NUMBER: one task per partition
        // (four prefix groups plus the default partition).
        job.setNumReduceTasks(5);

        // REDUCE OUTPUT TYPES
        job.setOutputKeyClass(FlowBean.class);
        job.setOutputValueClass(NullWritable.class);
        // MAP OUTPUT TYPES
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(FlowBean.class);

        // JOB INPUT & OUTPUT PATH
        //FileInputFormat.addInputPath(job, new Path(args[0]));
        FileInputFormat.setInputPaths(job, args[0]);
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // VERBOSE OUTPUT
        if (job.waitForCompletion(true)) {
            errMsg = "FlowGroup: TEST SUCCESSFULLY...";
            LOGGER.debug(errMsg);
            System.out.println(errMsg);
            return 0;
        } else {
            errMsg = "FlowGroup: TEST FAILED...";
            LOGGER.debug(errMsg);
            System.out.println(errMsg);
            return 1;
        }
    }

    public static void main(String[] args) throws Exception {
        if (args.length != 2) {
            String errMsg = "FlowGroup: ARGUMENTS ERROR";
            LOGGER.error(errMsg);
            System.out.println(errMsg);
            System.exit(-1);
        }
        int result = ToolRunner.run(new Configuration(), new FlowGroup(), args);
        System.exit(result);
    }
}
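Once built into a jar, the driver runs through ToolRunner in the usual way, e.g. hadoop jar flowgroup.jar com.ares.hadoop.mr.flowgroup.FlowGroup <input> <output> (the jar name here is hypothetical). Because the job requests five reduce tasks, the output directory ends up with five files, part-r-00000 through part-r-00004, one per partition, so each prefix group lands in its own file.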

FlowGroupPartition

package com.ares.hadoop.mr.flowgroup;

import java.util.HashMap;

import org.apache.hadoop.mapreduce.Partitioner;

public class FlowGroupPartition<KEY, VALUE> extends Partitioner<KEY, VALUE> {
    // Map each phone-number prefix to a partition index. The concrete
    // prefixes and indices below are assumptions (see the note before the
    // listings); adjust them to your own grouping rules.
    private static HashMap<String, Integer> groupMap = new HashMap<String, Integer>();
    static {
        groupMap.put("135", 0);
        groupMap.put("136", 1);
        groupMap.put("137", 2);
        groupMap.put("138", 3);
    }

    @Override
    public int getPartition(KEY key, VALUE value, int numPartitions) {
        // Keys whose 3-digit prefix is not in the map fall through to the
        // default partition 4.
        Integer group = groupMap.get(key.toString().substring(0, 3));
        return (group == null) ? 4 : group;
    }
}
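To see the mapping without a cluster, a minimal local check of the partitioner looks like this (a sketch; the class name and phone numbers are made up, and the prefix table is the assumed one from above):

import org.apache.hadoop.io.Text;

// Local sanity check: recognized prefixes map to their own partition,
// everything else falls through to the default partition 4.
public class FlowGroupPartitionCheck {
    public static void main(String[] args) {
        FlowGroupPartition<Text, FlowBean> p = new FlowGroupPartition<Text, FlowBean>();
        System.out.println(p.getPartition(new Text("13512345678"), null, 5)); // 0
        System.out.println(p.getPartition(new Text("13898765432"), null, 5)); // 3
        System.out.println(p.getPartition(new Text("15900000000"), null, 5)); // 4 (default)
    }
}

One thing to keep in mind: the reduce-task count set in the driver must be at least the largest partition index plus one, otherwise the job fails at runtime with an illegal-partition error.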
