Custom Grouping Comparator Classes in MapReduce (Part 3)
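This installment traces the grouping comparator through the Hadoop source: from its registration on Job, through JobConf and ReduceTask, down to ReduceContextImpl.nextKeyValue(), where it actually decides which key/value pairs share a single reduce() call. First stop, the Job class: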
/**
* Define the comparator that controls which keys are grouped together
* for a single call to
* {@link Reducer#reduce(Object, Iterable,
* org.apache.hadoop.mapreduce.Reducer.Context)}
* @param cls the raw comparator to use
* @throws IllegalStateException if the job is submitted
* @see #setCombinerKeyGroupingComparatorClass(Class)
*/
public void setGroupingComparatorClass(Class<? extends RawComparator> cls
                                       ) throws IllegalStateException {
  ensureState(JobState.DEFINE);
  conf.setOutputValueGroupingComparator(cls);
}
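Before moving on, a minimal usage sketch. The names CompositeKey and NaturalKeyGroupingComparator are hypothetical, not from the Hadoop source; the idea is a WritableComparator that groups on only the "natural" half of a composite key:

import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;

// Hypothetical example: group reduce input on the natural half of a
// composite key, so all (naturalKey, *) pairs share one reduce() call.
public class NaturalKeyGroupingComparator extends WritableComparator {
  protected NaturalKeyGroupingComparator() {
    super(CompositeKey.class, true); // true: create key instances for compare()
  }
  @Override
  public int compare(WritableComparable a, WritableComparable b) {
    return ((CompositeKey) a).getNaturalKey()
        .compareTo(((CompositeKey) b).getNaturalKey());
  }
}

It is registered with job.setGroupingComparatorClass(NaturalKeyGroupingComparator.class), and must be registered before submission — that is what ensureState(JobState.DEFINE) above enforces. Note that setGroupingComparatorClass delegates to the old-API JobConf method, whose fuller Javadoc explains the semantics: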
/**
* Set the user defined {@link RawComparator} comparator for
* grouping keys in the input to the reduce.
*
* <p>This comparator should be provided if the equivalence rules for keys
* for sorting the intermediates are different from those for grouping keys
* before each call to
* {@link Reducer#reduce(Object, java.util.Iterator, OutputCollector, Reporter)}.</p>
*
* <p>For key-value pairs (K1,V1) and (K2,V2), the values (V1, V2) are passed
* in a single call to the reduce function if K1 and K2 compare as equal.</p>
*
* <p>Since {@link #setOutputKeyComparatorClass(Class)} can be used to control
* how keys are sorted, this can be used in conjunction to simulate
* <i>secondary sort on values</i>.</p>
*
* <p><i>Note</i>: This is not a guarantee of the reduce sort being
* <i>stable</i> in any sense. (In any case, with the order of available
* map-outputs to the reduce being non-deterministic, it wouldn't make
* that much sense.)</p>
*
* @param theClass the comparator class to be used for grouping keys.
* It should implement <code>RawComparator</code>.
* @see #setOutputKeyComparatorClass(Class)
* @see #setCombinerKeyGroupingComparator(Class)
*/
public void setOutputValueGroupingComparator(
    Class<? extends RawComparator> theClass) {
  setClass(JobContext.GROUP_COMPARATOR_CLASS,
           theClass, RawComparator.class);
}
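The Javadoc's "secondary sort on values" hint is the classic reason to override grouping. As a hedged sketch, here is the kind of composite key such a setup assumes (again hypothetical, not part of the source being traced):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.WritableComparable;

// Hypothetical composite key: the map output key carries both the natural
// key (what we group on) and a secondary field (what we sort values by).
public class CompositeKey implements WritableComparable<CompositeKey> {
  private String naturalKey;
  private long secondary;

  public CompositeKey() {}                 // required for deserialization

  public CompositeKey(String naturalKey, long secondary) {
    this.naturalKey = naturalKey;
    this.secondary = secondary;
  }

  public String getNaturalKey() { return naturalKey; }

  @Override
  public void write(DataOutput out) throws IOException {
    out.writeUTF(naturalKey);
    out.writeLong(secondary);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    naturalKey = in.readUTF();
    secondary = in.readLong();
  }

  @Override
  public int compareTo(CompositeKey o) {   // full sort order
    int cmp = naturalKey.compareTo(o.naturalKey);
    return cmp != 0 ? cmp : Long.compare(secondary, o.secondary);
  }
}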
/**
* Get the user defined {@link WritableComparable} comparator for
* grouping keys of inputs to the reduce.
*
* @return comparator set by the user for grouping values.
* @see #setOutputValueGroupingComparator(Class) for details.
*/
public RawComparator getOutputValueGroupingComparator() {
  Class<? extends RawComparator> theClass = getClass(
      JobContext.GROUP_COMPARATOR_CLASS, null, RawComparator.class);
  if (theClass == null) {
    return getOutputKeyComparator();
  }
  return ReflectionUtils.newInstance(theClass, this);
}
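Note the fallback here: when nothing is registered under JobContext.GROUP_COMPARATOR_CLASS, the sort comparator from getOutputKeyComparator() is reused, so grouping equivalence defaults to sort equivalence. ReduceTask.run() then fetches whichever comparator wins and passes it to the reducer runner: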
RawComparator comparator = job.getOutputValueGroupingComparator();
if (useNewApi) {
  runNewReducer(job, umbilical, reporter, rIter, comparator,
                keyClass, valueClass);
} else {
  runOldReducer(job, umbilical, reporter, rIter, comparator,
                keyClass, valueClass);
}
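On the new-API path, runNewReducer wraps the shuffle iterator so that consuming it reports progress, instantiates the user's Reducer reflectively, and threads the comparator into the reduce context: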
private <INKEY,INVALUE,OUTKEY,OUTVALUE> void runNewReducer(JobConf job,
    final TaskUmbilicalProtocol umbilical,
    final TaskReporter reporter,
    RawKeyValueIterator rIter,
    RawComparator<INKEY> comparator,
    Class<INKEY> keyClass,
    Class<INVALUE> valueClass
    ) throws IOException, InterruptedException, ClassNotFoundException {
  // wrap value iterator to report progress.
  final RawKeyValueIterator rawIter = rIter;
  rIter = new RawKeyValueIterator() {
    public void close() throws IOException {
      rawIter.close();
    }
    public DataInputBuffer getKey() throws IOException {
      return rawIter.getKey();
    }
    public Progress getProgress() {
      return rawIter.getProgress();
    }
    public DataInputBuffer getValue() throws IOException {
      return rawIter.getValue();
    }
    public boolean next() throws IOException {
      boolean ret = rawIter.next();
      reporter.setProgress(rawIter.getProgress().getProgress());
      return ret;
    }
  };
  // make a task context so we can get the classes
  org.apache.hadoop.mapreduce.TaskAttemptContext taskContext =
      new org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl(job,
          getTaskID(), reporter);
  // make a reducer
  org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE> reducer =
      (org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>)
          ReflectionUtils.newInstance(taskContext.getReducerClass(), job);
  org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> trackedRW =
      new NewTrackingRecordWriter<OUTKEY,OUTVALUE>(this, taskContext);
  job.setBoolean("mapred.skip.on", isSkipping());
  job.setBoolean(JobContext.SKIP_RECORDS, isSkipping());
  org.apache.hadoop.mapreduce.Reducer.Context
      reducerContext = createReduceContext(reducer, job, getTaskID(),
                                           rIter, reduceInputKeyCounter,
                                           reduceInputValueCounter,
                                           trackedRW,
                                           committer,
                                           reporter, comparator, keyClass,
                                           valueClass);
  try {
    reducer.run(reducerContext);
  } finally {
    trackedRW.close(reducerContext);
  }
}
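reducer.run(reducerContext) is what drives the grouping loop. Paraphrased from Hadoop's Reducer class (details such as backup-store resetting vary by version), it looks roughly like this:

// Paraphrased sketch of Reducer.run(); not an exact copy of any one version.
public void run(Context context) throws IOException, InterruptedException {
  setup(context);
  try {
    while (context.nextKey()) {
      // getValues() keeps yielding values for as long as the grouping
      // comparator reports the next key as equal to the current one.
      reduce(context.getCurrentKey(), context.getValues(), context);
    }
  } finally {
    cleanup(context);
  }
}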
@SuppressWarnings("unchecked")
protected static <INKEY,INVALUE,OUTKEY,OUTVALUE>
org.apache.hadoop.mapreduce.Reducer<INKEY,INVALUE,OUTKEY,OUTVALUE>.Context
createReduceContext(org.apache.hadoop.mapreduce.Reducer
                      <INKEY,INVALUE,OUTKEY,OUTVALUE> reducer,
                    Configuration job,
                    org.apache.hadoop.mapreduce.TaskAttemptID taskId,
                    RawKeyValueIterator rIter,
                    org.apache.hadoop.mapreduce.Counter inputKeyCounter,
                    org.apache.hadoop.mapreduce.Counter inputValueCounter,
                    org.apache.hadoop.mapreduce.RecordWriter<OUTKEY,OUTVALUE> output,
                    org.apache.hadoop.mapreduce.OutputCommitter committer,
                    org.apache.hadoop.mapreduce.StatusReporter reporter,
                    RawComparator<INKEY> comparator,
                    Class<INKEY> keyClass, Class<INVALUE> valueClass
                    ) throws IOException, InterruptedException {
  org.apache.hadoop.mapreduce.ReduceContext<INKEY,INVALUE,OUTKEY,OUTVALUE>
      reduceContext =
          new ReduceContextImpl<INKEY,INVALUE,OUTKEY,OUTVALUE>(job, taskId,
                                                               rIter,
                                                               inputKeyCounter,
                                                               inputValueCounter,
                                                               output,
                                                               committer,
                                                               reporter,
                                                               comparator,
                                                               keyClass,
                                                               valueClass);
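The excerpt stops before the method's return; the full method goes on to wrap reduceContext (via WrappedReducer in recent versions) and return the wrapped context. Here is the ReduceContextImpl constructor it invokes: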
public ReduceContextImpl(Configuration conf, TaskAttemptID taskid,
                         RawKeyValueIterator input,
                         Counter inputKeyCounter,
                         Counter inputValueCounter,
                         RecordWriter<KEYOUT,VALUEOUT> output,
                         OutputCommitter committer,
                         StatusReporter reporter,
                         RawComparator<KEYIN> comparator,
                         Class<KEYIN> keyClass,
                         Class<VALUEIN> valueClass
                         ) throws InterruptedException, IOException {
  super(conf, taskid, output, committer, reporter);
  this.input = input;
  this.inputKeyCounter = inputKeyCounter;
  this.inputValueCounter = inputValueCounter;
  this.comparator = comparator;
  this.serializationFactory = new SerializationFactory(conf);
  this.keyDeserializer = serializationFactory.getDeserializer(keyClass);
  this.keyDeserializer.open(buffer);
  this.valueDeserializer = serializationFactory.getDeserializer(valueClass);
  this.valueDeserializer.open(buffer);
  hasMore = input.next();
  this.keyClass = keyClass;
  this.valueClass = valueClass;
  this.conf = conf;
  this.taskid = taskid;
}
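Two details here matter later: the constructor eagerly pulls the first record (hasMore = input.next()), and both deserializers are opened over the same shared DataInputBuffer (buffer), which nextKeyValue() below resets to the raw key or value bytes before each deserialize call.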
/**
 * Advance to the next key/value pair.
 */
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
  if (!hasMore) {
    key = null;
    value = null;
    return false;
  }
  firstValue = !nextKeyIsSame;
  DataInputBuffer nextKey = input.getKey();
  currentRawKey.set(nextKey.getData(), nextKey.getPosition(),
                    nextKey.getLength() - nextKey.getPosition());
  buffer.reset(currentRawKey.getBytes(), 0, currentRawKey.getLength());
  key = keyDeserializer.deserialize(key);
  DataInputBuffer nextVal = input.getValue();
  buffer.reset(nextVal.getData(), nextVal.getPosition(),
               nextVal.getLength() - nextVal.getPosition());
  value = valueDeserializer.deserialize(value);
  currentKeyLength = nextKey.getLength() - nextKey.getPosition();
  currentValueLength = nextVal.getLength() - nextVal.getPosition();
  if (isMarked) {
    backupStore.write(nextKey, nextVal);
  }
  hasMore = input.next();
  if (hasMore) {
    nextKey = input.getKey();
    nextKeyIsSame = comparator.compare(currentRawKey.getBytes(), 0,
                                       currentRawKey.getLength(),
                                       nextKey.getData(),
                                       nextKey.getPosition(),
                                       nextKey.getLength() - nextKey.getPosition()
                                       ) == 0;
  } else {
    nextKeyIsSame = false;
  }
  inputValueCounter.increment(1);
  return true;
}
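This is the payoff: nextKeyIsSame is computed by running the grouping comparator over the raw serialized bytes of the current and next keys. The value iterator handed to reduce() then keeps yielding values while that flag stays true. Paraphrased from ReduceContextImpl's inner ValueIterator (simplified; the real code also handles the backup store and mark/reset):

// Simplified paraphrase of ReduceContextImpl.ValueIterator.
public boolean hasNext() {
  // first value of a new group, or the grouping comparator said the
  // next key belongs to the same group
  return firstValue || nextKeyIsSame;
}

public VALUEIN next() {
  if (firstValue) {          // value already deserialized by nextKeyValue()
    firstValue = false;
    return value;
  }
  try {
    nextKeyValue();          // advances and re-evaluates nextKeyIsSame
  } catch (IOException | InterruptedException e) {
    throw new RuntimeException(e);
  }
  return value;
}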
Finally, the fallback path. When no grouping comparator is configured, getOutputValueGroupingComparator() (shown earlier) returns getOutputKeyComparator(), so the sort comparator also supplies the grouping semantics:
/**
 * Get the {@link RawComparator} comparator used to compare keys.
 *
 * @return the {@link RawComparator} comparator used to compare keys.
 */
public RawComparator getOutputKeyComparator() {
  Class<? extends RawComparator> theClass = getClass(
      JobContext.KEY_COMPARATOR, null, RawComparator.class);
  if (theClass != null)
    return ReflectionUtils.newInstance(theClass, this);
  return WritableComparator.get(
      getMapOutputKeyClass().asSubclass(WritableComparable.class), this);
}
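To close the loop, a hedged end-to-end wiring of the hypothetical classes from earlier: the sort comparator orders by the full (naturalKey, secondary) pair, the grouping comparator collapses keys that share naturalKey, and as a result the values inside each reduce() call arrive sorted by the secondary field.

import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;

// Hypothetical sort comparator: order by the full composite key.
public class FullKeySortComparator extends WritableComparator {
  protected FullKeySortComparator() {
    super(CompositeKey.class, true);
  }
  @Override
  public int compare(WritableComparable a, WritableComparable b) {
    return ((CompositeKey) a).compareTo((CompositeKey) b);
  }
}

// Job wiring (new API), assuming the classes sketched above:
//   job.setMapOutputKeyClass(CompositeKey.class);
//   job.setSortComparatorClass(FullKeySortComparator.class);
//   job.setGroupingComparatorClass(NaturalKeyGroupingComparator.class);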