【Hadoop离线基础总结】MapReduce案例之自定义groupingComparator
MapReduce案例之自定义groupingComparator
求取Top 1的数据
- 需求
求出每一个订单中成交金额最大的一笔交易
订单id 商品id 成交金额
Order_0000005 Pdt_01 222.8
Order_0000005 Pdt_05 25.8
Order_0000002 Pdt_03 322.8
Order_0000002 Pdt_04 522.4
Order_0000002 Pdt_05 822.4
Order_0000003 Pdt_01 222.8
- 代码实现
自定义一个javaBean,命名为OrderBean
package cn.itcast.demo5;
import org.apache.hadoop.io.WritableComparable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
public class OrderBean implements WritableComparable<OrderBean> {
    // Composite key: orderId groups the records, price orders them within a group.
    private String orderId;
    private Double price;

    /**
     * Sorts primarily by orderId (ascending) so all records of one order are
     * adjacent, then by price so the reducer sees them in price order.
     *
     * @param o the bean to compare against
     * @return negative/zero/positive per the {@link Comparable} contract
     */
    @Override
    public int compareTo(OrderBean o) {
        // Compare orderId first; only compare price when the ids are equal.
        int result = this.orderId.compareTo(o.orderId);
        if (result == 0) {
            int i = this.price.compareTo(o.price);
            // Negate to sort price DESCENDING: the largest price comes first,
            // which is exactly the "Top 1" record the reducer should emit.
            return -i;
        }
        return result;
    }

    /** Serializes this key for the shuffle phase. */
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(orderId);
        out.writeDouble(price);
    }

    /** Deserializes this key; must mirror {@link #write(DataOutput)} exactly. */
    @Override
    public void readFields(DataInput in) throws IOException {
        this.orderId = in.readUTF();
        this.price = in.readDouble();
    }

    public String getOrderId() {
        return orderId;
    }

    public void setOrderId(String orderId) {
        this.orderId = orderId;
    }

    public double getPrice() {
        return price;
    }

    public void setPrice(Double price) {
        this.price = price;
    }

    /**
     * Equality on both fields, kept consistent with {@link #compareTo(OrderBean)}:
     * compareTo returns 0 exactly when equals returns true.
     */
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof OrderBean)) {
            return false;
        }
        OrderBean other = (OrderBean) obj;
        boolean sameId = orderId == null ? other.orderId == null : orderId.equals(other.orderId);
        boolean samePrice = price == null ? other.price == null : price.equals(other.price);
        return sameId && samePrice;
    }

    /** Hash over the same fields used by {@link #equals(Object)}. */
    @Override
    public int hashCode() {
        int result = orderId != null ? orderId.hashCode() : 0;
        result = 31 * result + (price != null ? price.hashCode() : 0);
        return result;
    }

    /** Tab-separated form used by TextOutputFormat for the output file. */
    @Override
    public String toString() {
        return orderId + "\t" + price;
    }
}
定义一个Mapper类
package cn.itcast.demo5;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
public class GroupMapper extends Mapper<LongWritable, Text, OrderBean, NullWritable> {

    /**
     * Parses one tab-separated input line ("orderId\tproductId\tprice") into an
     * OrderBean key; the value carries no information (NullWritable).
     * Blank or short lines are skipped instead of crashing the map task.
     */
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Split the line into its tab-separated fields.
        String[] split = value.toString().split("\t");
        // Guard: a trailing newline or malformed line would otherwise throw
        // ArrayIndexOutOfBoundsException on split[2] and fail the whole job.
        if (split.length < 3) {
            return;
        }
        OrderBean orderBean = new OrderBean();
        // Field 0 is the order id, field 2 the price (field 1, the product id, is unused).
        orderBean.setOrderId(split[0]);
        orderBean.setPrice(Double.valueOf(split[2]));
        context.write(orderBean, NullWritable.get());
    }
}
自定义分区(Partition)规则
package cn.itcast.demo5;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Partitioner;
public class GroupPartitioner extends Partitioner<OrderBean, NullWritable> {

    /**
     * Routes every record of the same order to the same reduce task by hashing
     * only the orderId (same masking scheme as Hadoop's HashPartitioner).
     *
     * @param orderBean     map output key
     * @param nullWritable  map output value (unused)
     * @param numPartitions number of reduce tasks
     * @return a partition index in [0, numPartitions)
     */
    @Override
    public int getPartition(OrderBean orderBean, NullWritable nullWritable, int numPartitions) {
        // Mask off the sign bit so a negative hashCode cannot produce a negative index.
        int nonNegativeHash = orderBean.getOrderId().hashCode() & Integer.MAX_VALUE;
        return nonNegativeHash % numPartitions;
    }
}
自定义分组(groupingComparator)规则
package cn.itcast.demo5;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
public class GroupComparator extends WritableComparator {

    /** Registers OrderBean so the framework can instantiate keys for comparison. */
    public GroupComparator() {
        super(OrderBean.class, true);
    }

    /**
     * Groups reducer input by orderId only: two beans with the same orderId
     * compare equal here, so they land in a single reduce() call even though
     * the sort comparator also considers price.
     */
    @Override
    public int compare(WritableComparable a, WritableComparable b) {
        String firstId = ((OrderBean) a).getOrderId();
        String secondId = ((OrderBean) b).getOrderId();
        return firstId.compareTo(secondId);
    }
}
定义一个Reducer类
package cn.itcast.demo5;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
public class GroupReducer extends Reducer<OrderBean, NullWritable, OrderBean, NullWritable> {

    /**
     * Emits only the first record of each order group. The sort comparator
     * orders records within an order by descending price, so that first
     * record is the order's highest-priced item (Top 1).
     *
     * @param key     first OrderBean of the group (highest price)
     * @param values  NullWritable placeholders, one per record in the group
     * @param context output collector
     */
    @Override
    protected void reduce(OrderBean key, Iterable<NullWritable> values, Context context) throws IOException, InterruptedException {
        // NullWritable is a singleton, so this is identical to values.iterator().next().
        context.write(key, NullWritable.get());
    }
}
程序main函数入口
package cn.itcast.demo5;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class GroupMain extends Configured implements Tool {

    // Default paths, used when no command-line arguments are supplied
    // (preserves the original hard-coded behavior).
    private static final String DEFAULT_INPUT =
            "file:////Volumes/赵壮备份/大数据离线课程资料/5.大数据离线第五天/自定义groupingComparator/input/orders.txt";
    private static final String DEFAULT_OUTPUT =
            "file:////Volumes/赵壮备份/大数据离线课程资料/5.大数据离线第五天/自定义groupingComparator/output_top1";

    /**
     * Configures and submits the Top-1 job: secondary sort on OrderBean,
     * partition and group by orderId, reducer emits one record per order.
     *
     * @param args optional: args[0] = input path, args[1] = output path
     * @return 0 on success, 1 on failure
     */
    @Override
    public int run(String[] args) throws Exception {
        Job job = Job.getInstance(super.getConf(), "myGroupComparator");

        // Input: first CLI argument if given, otherwise the default path.
        job.setInputFormatClass(TextInputFormat.class);
        String input = args.length > 0 ? args[0] : DEFAULT_INPUT;
        TextInputFormat.setInputPaths(job, new Path(input));

        // Map phase: parse lines into OrderBean keys.
        job.setMapperClass(GroupMapper.class);
        job.setMapOutputKeyClass(OrderBean.class);
        job.setMapOutputValueClass(NullWritable.class);

        // Partition by orderId so each order lands on one reducer.
        job.setPartitionerClass(GroupPartitioner.class);
        // Group by orderId so each order forms one reduce() call.
        job.setGroupingComparatorClass(GroupComparator.class);

        // Reduce phase: emit the first (highest-priced) record of each group.
        job.setReducerClass(GroupReducer.class);
        job.setOutputKeyClass(OrderBean.class);
        job.setOutputValueClass(NullWritable.class);

        // Output: second CLI argument if given, otherwise the default path.
        job.setOutputFormatClass(TextOutputFormat.class);
        String output = args.length > 1 ? args[1] : DEFAULT_OUTPUT;
        TextOutputFormat.setOutputPath(job, new Path(output));

        // Submit and wait for completion.
        boolean succeeded = job.waitForCompletion(true);
        return succeeded ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new Configuration(), new GroupMain(), args);
        System.exit(exitCode);
    }
}
- 运行结果
Order_0000002 822.4
Order_0000003 222.8
Order_0000005 222.8
求取TopN的数据
需求
求取Top 1时运用了groupingComparator的分组规则:排序之后不需要再做其他操作,每组会自动输出首个(也就是金额最大的)数据。
如果要获取Top N的数据,就需要在Reduce逻辑中添加循环遍历,并把所有的NullWritable换成DoubleWritable,其他都不变。
- 代码实现
自定义一个javaBean,命名为OrderBean
package cn.itcast.demo6;
import org.apache.hadoop.io.WritableComparable;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
public class OrderBean implements WritableComparable<OrderBean> {
    // Composite key: orderId groups the records, price orders them within a group.
    private String orderId;
    private Double price;

    /**
     * Sorts primarily by orderId (ascending) so all records of one order are
     * adjacent, then by price so the reducer sees them in price order.
     *
     * @param o the bean to compare against
     * @return negative/zero/positive per the {@link Comparable} contract
     */
    @Override
    public int compareTo(OrderBean o) {
        // Compare orderId first; only compare price when the ids are equal.
        int result = this.orderId.compareTo(o.orderId);
        if (result == 0) {
            int i = this.price.compareTo(o.price);
            // Negate to sort price DESCENDING: the largest prices come first,
            // which is exactly the "Top N" records the reducer should emit.
            return -i;
        }
        return result;
    }

    /** Serializes this key for the shuffle phase. */
    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(orderId);
        out.writeDouble(price);
    }

    /** Deserializes this key; must mirror {@link #write(DataOutput)} exactly. */
    @Override
    public void readFields(DataInput in) throws IOException {
        this.orderId = in.readUTF();
        this.price = in.readDouble();
    }

    public String getOrderId() {
        return orderId;
    }

    public void setOrderId(String orderId) {
        this.orderId = orderId;
    }

    public double getPrice() {
        return price;
    }

    public void setPrice(Double price) {
        this.price = price;
    }

    /**
     * Equality on both fields, kept consistent with {@link #compareTo(OrderBean)}:
     * compareTo returns 0 exactly when equals returns true.
     */
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        if (!(obj instanceof OrderBean)) {
            return false;
        }
        OrderBean other = (OrderBean) obj;
        boolean sameId = orderId == null ? other.orderId == null : orderId.equals(other.orderId);
        boolean samePrice = price == null ? other.price == null : price.equals(other.price);
        return sameId && samePrice;
    }

    /** Hash over the same fields used by {@link #equals(Object)}. */
    @Override
    public int hashCode() {
        int result = orderId != null ? orderId.hashCode() : 0;
        result = 31 * result + (price != null ? price.hashCode() : 0);
        return result;
    }

    /** Tab-separated form used by TextOutputFormat for the output file. */
    @Override
    public String toString() {
        return orderId + "\t" + price;
    }
}
定义一个Mapper类
package cn.itcast.demo6;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import java.io.IOException;
public class GroupMapper extends Mapper<LongWritable, Text, OrderBean, DoubleWritable> {

    /**
     * Parses one tab-separated input line ("orderId\tproductId\tprice") into an
     * OrderBean key and a DoubleWritable price value (the value is what the
     * Top-N reducer writes out). Blank or short lines are skipped instead of
     * crashing the map task.
     */
    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // Split the line into its tab-separated fields.
        String[] split = value.toString().split("\t");
        // Guard: a trailing newline or malformed line would otherwise throw
        // ArrayIndexOutOfBoundsException on split[2] and fail the whole job.
        if (split.length < 3) {
            return;
        }
        // Parse the price once and reuse it for both the key and the value.
        double price = Double.parseDouble(split[2]);
        OrderBean orderBean = new OrderBean();
        orderBean.setOrderId(split[0]);
        orderBean.setPrice(price);
        context.write(orderBean, new DoubleWritable(price));
    }
}
自定义分区(Partition)规则
package cn.itcast.demo6;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Partitioner;
public class GroupPartitioner extends Partitioner<OrderBean, DoubleWritable> {

    /**
     * Routes every record of the same order to the same reduce task by hashing
     * only the orderId (same masking scheme as Hadoop's HashPartitioner).
     *
     * @param orderBean      map output key
     * @param doubleWritable map output value (unused)
     * @param numPartitions  number of reduce tasks
     * @return a partition index in [0, numPartitions)
     */
    @Override
    public int getPartition(OrderBean orderBean, DoubleWritable doubleWritable, int numPartitions) {
        // Mask off the sign bit so a negative hashCode cannot produce a negative index.
        int nonNegativeHash = orderBean.getOrderId().hashCode() & Integer.MAX_VALUE;
        return nonNegativeHash % numPartitions;
    }
}
自定义分组(groupingComparator)规则
package cn.itcast.demo6;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
public class GroupComparator extends WritableComparator {

    /** Registers OrderBean so the framework can instantiate keys for comparison. */
    public GroupComparator() {
        super(OrderBean.class, true);
    }

    /**
     * Groups reducer input by orderId only: two beans with the same orderId
     * compare equal here, so they land in a single reduce() call even though
     * the sort comparator also considers price.
     */
    @Override
    public int compare(WritableComparable a, WritableComparable b) {
        String firstId = ((OrderBean) a).getOrderId();
        String secondId = ((OrderBean) b).getOrderId();
        return firstId.compareTo(secondId);
    }
}
定义一个Reducer类
package cn.itcast.demo6;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Reducer;
import java.io.IOException;
public class GroupReducer extends Reducer<OrderBean, DoubleWritable, OrderBean, DoubleWritable> {

    // How many top-priced records to emit per order (named instead of a magic 2).
    private static final int TOP_N = 2;

    /**
     * Emits the first TOP_N values of each order group. The sort comparator
     * delivers the values in descending price order (secondary sort), so these
     * are the N highest-priced items of the order.
     *
     * @param key     OrderBean of the group; Hadoop mutates it in step with the
     *                value iterator, so the printed key tracks the current value
     * @param values  prices of the group, largest first
     * @param context output collector
     */
    @Override
    protected void reduce(OrderBean key, Iterable<DoubleWritable> values, Context context) throws IOException, InterruptedException {
        int emitted = 0;
        for (DoubleWritable value : values) {
            // Stop once the top N records of this order have been written.
            if (++emitted > TOP_N) {
                break;
            }
            context.write(key, value);
        }
    }
}
程序main函数入口
package cn.itcast.demo6;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
public class GroupMain extends Configured implements Tool {

    // Default paths, used when no command-line arguments are supplied
    // (preserves the original hard-coded behavior).
    private static final String DEFAULT_INPUT =
            "file:////Volumes/赵壮备份/大数据离线课程资料/5.大数据离线第五天/自定义groupingComparator/input/orders.txt";
    private static final String DEFAULT_OUTPUT =
            "file:////Volumes/赵壮备份/大数据离线课程资料/5.大数据离线第五天/自定义groupingComparator/output_top2";

    /**
     * Configures and submits the Top-N job: secondary sort on OrderBean,
     * partition and group by orderId, reducer emits the N highest-priced
     * records per order.
     *
     * @param args optional: args[0] = input path, args[1] = output path
     * @return 0 on success, 1 on failure
     */
    @Override
    public int run(String[] args) throws Exception {
        Job job = Job.getInstance(super.getConf(), "myGroupComparator");

        // Input: first CLI argument if given, otherwise the default path.
        job.setInputFormatClass(TextInputFormat.class);
        String input = args.length > 0 ? args[0] : DEFAULT_INPUT;
        TextInputFormat.setInputPaths(job, new Path(input));

        // Map phase: parse lines into OrderBean keys and DoubleWritable prices.
        job.setMapperClass(GroupMapper.class);
        job.setMapOutputKeyClass(OrderBean.class);
        job.setMapOutputValueClass(DoubleWritable.class);

        // Partition by orderId so each order lands on one reducer.
        job.setPartitionerClass(GroupPartitioner.class);
        // Group by orderId so each order forms one reduce() call.
        job.setGroupingComparatorClass(GroupComparator.class);

        // Reduce phase: emit the top N highest-priced records of each group.
        job.setReducerClass(GroupReducer.class);
        job.setOutputKeyClass(OrderBean.class);
        job.setOutputValueClass(DoubleWritable.class);

        // Output: second CLI argument if given, otherwise the default path.
        job.setOutputFormatClass(TextOutputFormat.class);
        String output = args.length > 1 ? args[1] : DEFAULT_OUTPUT;
        TextOutputFormat.setOutputPath(job, new Path(output));

        // Submit and wait for completion.
        boolean succeeded = job.waitForCompletion(true);
        return succeeded ? 0 : 1;
    }

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new Configuration(), new GroupMain(), args);
        System.exit(exitCode);
    }
}
- 运行结果
Order_0000002 822.4 822.4
Order_0000002 522.4 522.4
Order_0000003 222.8 222.8
Order_0000005 222.8 222.8
Order_0000005 25.8 25.8
【Hadoop离线基础总结】MapReduce案例之自定义groupingComparator的更多相关文章
- 【Hadoop离线基础总结】Hue的简单介绍和安装部署
目录 Hue的简单介绍 概述 核心功能 安装部署 下载Hue的压缩包并上传到linux解压 编译安装启动 启动Hue进程 hue与其他框架的集成 Hue与Hadoop集成 Hue与Hive集成 Hue ...
- 【Hadoop离线基础总结】oozie的安装部署与使用
目录 简单介绍 概述 架构 安装部署 1.修改core-site.xml 2.上传oozie的安装包并解压 3.解压hadooplibs到与oozie平行的目录 4.创建libext目录,并拷贝依赖包 ...
- 【Hadoop离线基础总结】impala简单介绍及安装部署
目录 impala的简单介绍 概述 优点 缺点 impala和Hive的关系 impala如何和CDH一起工作 impala的架构及查询计划 impala/hive/spark 对比 impala的安 ...
- 【Hadoop离线基础总结】Hive调优手段
Hive调优手段 最常用的调优手段 Fetch抓取 MapJoin 分区裁剪 列裁剪 控制map个数以及reduce个数 JVM重用 数据压缩 Fetch的抓取 出现原因 Hive中对某些情况的查询不 ...
- 【Hadoop离线基础总结】流量日志分析网站整体架构模块开发
目录 数据仓库设计 维度建模概述 维度建模的三种模式 本项目中数据仓库的设计 ETL开发 创建ODS层数据表 导入ODS层数据 生成ODS层明细宽表 统计分析开发 流量分析 受访分析 访客visit分 ...
- 【Hadoop离线基础总结】Sqoop常用命令及参数
目录 常用命令 常用公用参数 公用参数:数据库连接 公用参数:import 公用参数:export 公用参数:hive 常用命令&参数 从关系表导入--import 导出到关系表--expor ...
- 【Hadoop离线基础总结】MapReduce增强(上)
MapReduce增强 MapReduce的分区与reduceTask的数量 概述 MapReduce当中的分区:物以类聚,人以群分.相同key的数据,去往同一个reduce. ReduceTask的 ...
- 【Hadoop离线基础总结】MapReduce倒排索引建立
MapReduce倒排索引建立 求某些单词在文章中出现多少次 有三个文档的内容,求hello,tom,jerry三个单词在其中各出现多少次 hello tom hello jerry hello to ...
- 【Hadoop离线基础总结】工作流调度器azkaban
目录 Azkaban概述 工作流调度系统的作用 工作流调度系统的实现 常见工作流调度工具对比 Azkaban简单介绍 安装部署 Azkaban的编译 azkaban单服务模式安装与使用 azkaban ...
随机推荐
- H - Bear and Three Balls
Limak is a little polar bear. He has n balls, the i-th ball has size ti. Limak wants to give one bal ...
- jmeter if控制器使用
if控制器有两种用法 1.不勾选“interpret condition as variable expression” 直接输入我们需要判断的表达式即可,判断表达式为真时,执行if控制器下的请求 2 ...
- python-Django收集主机信息json格式
Control: from django.conf.urls import patterns, include, url from django.contrib import admin admin. ...
- Kettle7.1创建资源库,资源库颜色灰色,没有Connect按钮解决办法
我们在官网下载的Ketlle7.1工具,在本地运行时会发现标题中提到的问题:工具-资源库里面的按钮都是灰色的,无法点击.查找Connect整个页面找了个遍,也没有找到. 于是乎开始百度.谷歌的搜索啊. ...
- 数据库 MySQL 练习
一.sql语句基础 1.顯示德國 Germany 的人口 SELECT population FROM world WHERE name = 'Germany' 2.查詢面積為 5,000,000 ...
- win10好用的桌面工具分享+网盘下载链接
1.Everything Everything是voidtools开发的一款文件搜索工具,官网描述为“基于名称实时定位文件和目录(Locate files and folders by name in ...
- [http 1.1] M-POST w3
5. Mandatory HTTP Requests An HTTP request is called a mandatory request if it includes at least one ...
- java并发中ExecutorService的使用
文章目录 创建ExecutorService 为ExecutorService分配Tasks 关闭ExecutorService Future ScheduledExecutorService Exe ...
- 第三方库 正则表达式re模块
正则表通常被用来检索.替换那些符合某个模式(规则)的文本. 正则表达式通常缩写成“regex”,单数有regexp.regex,复数有regexps.regexes.regexen. 正则表达式是对字 ...
- 在CentOS 7中安装配置JDK8
为什么80%的码农都做不了架构师?>>> ###说明 参考博客:http://blog.csdn.net/czmchen/article/details/41047187 系统环 ...