1. Concepts and Approach
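
An inverted index maps each word to the files that contain it, together with the word's frequency in each file. Because the per-file counting and the per-word grouping need different keys, the index is built here with two chained MapReduce jobs:

- Job one (InverseIndexOne): the mapper splits each line into words, reads the source file name from the input split, and emits word-fileName -> 1; the reducer sums the 1s into the frequency of each word in each file.
- Job two (InverseIndexTwo): the mapper splits the word-fileName key apart again and emits word -> fileName-count; the reducer joins the values into one comma-separated posting list per word.

For two hypothetical input files, a.txt containing "hello world" and b.txt containing "hello", the data flow would be:

Job one output:
hello-a.txt	1
hello-b.txt	1
world-a.txt	1

Job two output:
hello	a.txt-1,b.txt-1
world	a.txt-1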

2. Code Examples

InverseIndexOne

package com.ares.hadoop.mr.inverseindex;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;

public class InverseIndexOne extends Configured implements Tool {
    private static final Logger LOGGER = Logger.getLogger(InverseIndexOne.class);

    enum Counter {
        LINESKIP
    }

    public static class InverseIndexOneMapper
            extends Mapper<LongWritable, Text, Text, LongWritable> {
        private String line;
        private final static char separatorA = ' ';
        private final static char separatorB = '-';
        private String fileName;
        private Text text = new Text();
        private final static LongWritable ONE = new LongWritable(1L);

        @Override
        protected void map(LongWritable key, Text value,
                Mapper<LongWritable, Text, Text, LongWritable>.Context context)
                throws IOException, InterruptedException {
            try {
                line = value.toString();
                String[] fields = StringUtils.split(line, separatorA);
                // Tag every word with the name of the file it came from,
                // taken from the current input split.
                FileSplit fileSplit = (FileSplit) context.getInputSplit();
                fileName = fileSplit.getPath().getName();
                for (int i = 0; i < fields.length; i++) {
                    text.set(fields[i] + separatorB + fileName);
                    context.write(text, ONE);
                }
            } catch (Exception e) {
                LOGGER.error(e);
                System.out.println(e);
                context.getCounter(Counter.LINESKIP).increment(1);
                return;
            }
        }
    }

    public static class InverseIndexOneReducer
            extends Reducer<Text, LongWritable, Text, LongWritable> {
        private LongWritable result = new LongWritable();

        @Override
        protected void reduce(Text key, Iterable<LongWritable> values,
                Reducer<Text, LongWritable, Text, LongWritable>.Context context)
                throws IOException, InterruptedException {
            // Sum the 1s emitted for each word-fileName key: the result is
            // the frequency of that word in that file.
            long count = 0;
            for (LongWritable value : values) {
                count += value.get();
            }
            result.set(count);
            context.write(key, result);
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        String errMsg = "InverseIndexOne: TEST STARTED...";
        LOGGER.debug(errMsg);
        System.out.println(errMsg);

        Configuration conf = new Configuration();
        //FOR Eclipse JVM Debug
        //conf.set("mapreduce.job.jar", "flowsum.jar");
        Job job = Job.getInstance(conf);

        // JOB NAME
        job.setJobName("InverseIndexOne");

        // JOB MAPPER & REDUCER
        job.setJarByClass(InverseIndexOne.class);
        job.setMapperClass(InverseIndexOneMapper.class);
        job.setReducerClass(InverseIndexOneReducer.class);

        // JOB PARTITION
        //job.setPartitionerClass(FlowGroupPartition.class);

        // JOB REDUCE TASK NUMBER
        //job.setNumReduceTasks(5);

        // REDUCE OUTPUT TYPES
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        // MAP OUTPUT TYPES
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(LongWritable.class);

        // JOB INPUT & OUTPUT PATH
        //FileInputFormat.addInputPath(job, new Path(args[0]));
        FileInputFormat.setInputPaths(job, args[0]);
        Path output = new Path(args[1]);
        // FileSystem fs = FileSystem.get(conf);
        // if (fs.exists(output)) {
        //     fs.delete(output, true);
        // }
        FileOutputFormat.setOutputPath(job, output);

        // VERBOSE OUTPUT
        if (job.waitForCompletion(true)) {
            errMsg = "InverseIndexOne: TEST SUCCEEDED...";
            LOGGER.debug(errMsg);
            System.out.println(errMsg);
            return 0;
        } else {
            errMsg = "InverseIndexOne: TEST FAILED...";
            LOGGER.debug(errMsg);
            System.out.println(errMsg);
            return 1;
        }
    }

    public static void main(String[] args) throws Exception {
        if (args.length != 2) {
            String errMsg = "InverseIndexOne: ARGUMENTS ERROR";
            LOGGER.error(errMsg);
            System.out.println(errMsg);
            System.exit(-1);
        }
        int result = ToolRunner.run(new Configuration(), new InverseIndexOne(), args);
        System.exit(result);
    }
}
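
A minimal way to run job one (the jar name and HDFS paths below are assumptions, not part of the original code); the job takes exactly two arguments, the input directory and a not-yet-existing output directory:

hadoop jar inverseindex.jar com.ares.hadoop.mr.inverseindex.InverseIndexOne /inverseindex/input /inverseindex/out1

Each output line has the form word-fileName, a tab, and the count, which is exactly the format job two's mapper parses with separatorA = '\t' and separatorB = '-'.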

InverseIndexTwo

package com.ares.hadoop.mr.inverseindex;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;

public class InverseIndexTwo extends Configured implements Tool {
    private static final Logger LOGGER = Logger.getLogger(InverseIndexTwo.class);

    enum Counter {
        LINESKIP
    }

    public static class InverseIndexTwoMapper extends
            Mapper<LongWritable, Text, Text, Text> {
        private String line;
        // Job one's TextOutputFormat writes "word-fileName<TAB>count",
        // so split on the tab first, then on the '-' inserted by job one.
        private final static char separatorA = '\t';
        private final static char separatorB = '-';
        private Text textKey = new Text();
        private Text textValue = new Text();

        @Override
        protected void map(LongWritable key, Text value,
                Mapper<LongWritable, Text, Text, Text>.Context context)
                throws IOException, InterruptedException {
            try {
                line = value.toString();
                String[] fields = StringUtils.split(line, separatorA);
                // Assumes neither words nor file names contain '-'.
                String[] wordAndfileName = StringUtils.split(fields[0], separatorB);
                long count = Long.parseLong(fields[1]);
                String word = wordAndfileName[0];
                String fileName = wordAndfileName[1];
                textKey.set(word);
                textValue.set(fileName + separatorB + count);
                context.write(textKey, textValue);
            } catch (Exception e) {
                LOGGER.error(e);
                System.out.println(e);
                context.getCounter(Counter.LINESKIP).increment(1);
                return;
            }
        }
    }

    public static class InverseIndexTwoReducer extends
            Reducer<Text, Text, Text, Text> {
        private Text textValue = new Text();

        @Override
        protected void reduce(Text key, Iterable<Text> values,
                Reducer<Text, Text, Text, Text>.Context context)
                throws IOException, InterruptedException {
            // Join the fileName-count pairs with commas. Starting with an
            // empty separator and switching to "," after the first element
            // avoids a trailing comma without having to detect the last
            // item of the iterator (see the reference below).
            StringBuilder index = new StringBuilder();
            String separatorA = "";
            for (Text text : values) {
                index.append(separatorA + text.toString());
                separatorA = ",";
            }
            textValue.set(index.toString());
            context.write(key, textValue);
        }
    }

    @Override
    public int run(String[] args) throws Exception {
        String errMsg = "InverseIndexTwo: TEST STARTED...";
        LOGGER.debug(errMsg);
        System.out.println(errMsg);

        Configuration conf = new Configuration();
        //FOR Eclipse JVM Debug
        //conf.set("mapreduce.job.jar", "flowsum.jar");
        Job job = Job.getInstance(conf);

        // JOB NAME
        job.setJobName("InverseIndexTwo");

        // JOB MAPPER & REDUCER
        job.setJarByClass(InverseIndexTwo.class);
        job.setMapperClass(InverseIndexTwoMapper.class);
        job.setReducerClass(InverseIndexTwoReducer.class);

        // JOB PARTITION
        //job.setPartitionerClass(FlowGroupPartition.class);

        // JOB REDUCE TASK NUMBER
        //job.setNumReduceTasks(5);

        // REDUCE OUTPUT TYPES
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        // MAP OUTPUT TYPES
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);

        // JOB INPUT & OUTPUT PATH
        //FileInputFormat.addInputPath(job, new Path(args[0]));
        FileInputFormat.setInputPaths(job, args[0]);
        Path output = new Path(args[1]);
        // FileSystem fs = FileSystem.get(conf);
        // if (fs.exists(output)) {
        //     fs.delete(output, true);
        // }
        FileOutputFormat.setOutputPath(job, output);

        // VERBOSE OUTPUT
        if (job.waitForCompletion(true)) {
            errMsg = "InverseIndexTwo: TEST SUCCEEDED...";
            LOGGER.debug(errMsg);
            System.out.println(errMsg);
            return 0;
        } else {
            errMsg = "InverseIndexTwo: TEST FAILED...";
            LOGGER.debug(errMsg);
            System.out.println(errMsg);
            return 1;
        }
    }

    public static void main(String[] args) throws Exception {
        if (args.length != 2) {
            String errMsg = "InverseIndexTwo: ARGUMENTS ERROR";
            LOGGER.error(errMsg);
            System.out.println(errMsg);
            System.exit(-1);
        }
        int result = ToolRunner.run(new Configuration(), new InverseIndexTwo(), args);
        System.exit(result);
    }
}
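
Job two takes job one's output directory as its input. A minimal driver that chains the two jobs in one process could look like the sketch below (the class name InverseIndexDriver and its three-argument convention are assumptions, not part of the original code):

package com.ares.hadoop.mr.inverseindex;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

public class InverseIndexDriver {
    // args: <input dir> <intermediate dir> <final output dir> (hypothetical convention)
    public static void main(String[] args) throws Exception {
        // Run job one: input -> intermediate ("word-fileName<TAB>count" lines).
        int rc = ToolRunner.run(new Configuration(), new InverseIndexOne(),
                new String[] { args[0], args[1] });
        if (rc == 0) {
            // Run job two only if job one succeeded:
            // intermediate -> final ("word<TAB>fileName-count,..." lines).
            rc = ToolRunner.run(new Configuration(), new InverseIndexTwo(),
                    new String[] { args[1], args[2] });
        }
        System.exit(rc);
    }
}

With the hypothetical a.txt/b.txt input from section 1, the final output would contain lines such as hello<TAB>a.txt-1,b.txt-1.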

References:

How to check if processing the last item in an Iterator?: http://stackoverflow.com/questions/9633991/how-to-check-if-processing-the-last-item-in-an-iterator
