MapReduce Programming Series — 6: Multi-Table Join
1. Project name:

Multi-table join (MTjoin). The job joins a factory table (factoryname, addressID) with an address table (addressID, addressname) on addressID, producing (factoryname, addressname) pairs. The mapper keys every record by addressID and tags each value with "1+" for a factory name or "2+" for an address name; the reducer then writes the cross product of the two tagged groups under each key.

2. Program code:
package com.mtjoin;

import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class MTjoin {
    // Used by the reducer to emit the output header exactly once.
    public static int time = 0;

    public static class Map extends Mapper<Object, Text, Text, Text> {
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            System.out.println("mapper........................");
            String line = value.toString();
            // Skip the header line of either input table.
            if (line.contains("factoryname") || line.contains("addressID")) {
                return;
            }
            // Find the index of the first digit in the line.
            int i = 0;
            while (line.charAt(i) > '9' || line.charAt(i) < '0') {
                i++;
            }
            if (line.charAt(0) > '9' || line.charAt(0) < '0') {
                // Left table (factoryname addressID): the factory name comes
                // first and the numeric address ID is at the end.
                int j = i - 1;
                while (line.charAt(j) != ' ') {
                    j--;
                }
                System.out.println("key:" + line.substring(i) + " value:" + line.substring(0, j));
                String[] values = { line.substring(0, j), line.substring(i) };
                // Key: addressID; "1+" tags the value as a factory name.
                context.write(new Text(values[1]), new Text("1+" + values[0]));
            } else {
                // Right table (addressID addressname): the numeric address ID
                // comes first, followed by the address name.
                int j = i + 1;
                while (line.charAt(j) != ' ') {
                    j++;
                }
                System.out.println("key:" + line.substring(0, i + 1) + " value:" + line.substring(j + 1));
                String[] values = { line.substring(0, i + 1), line.substring(j + 1) };
                // Key: addressID; "2+" tags the value as an address name.
                context.write(new Text(values[0]), new Text("2+" + values[1]));
            }
        }
    }
    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            System.out.println("reducer........................");
            // Write the output header before the first joined record.
            if (time == 0) {
                context.write(new Text("factoryname"), new Text("addressname"));
                time++;
            }
            int factorynum = 0;
            String[] factory = new String[10]; // assumes at most 10 factories per addressID
            int addressnum = 0;
            String[] address = new String[10]; // assumes at most 10 addresses per addressID
            Iterator<Text> ite = values.iterator();
            while (ite.hasNext()) {
                String record = ite.next().toString();
                char type = record.charAt(0);
                if (type == '1') {
                    factory[factorynum] = record.substring(2);
                    factorynum++;
                } else {
                    address[addressnum] = record.substring(2);
                    addressnum++;
                }
            }
            // Join: emit the cross product of the factories and addresses
            // that share this addressID.
            if (factorynum != 0 && addressnum != 0) {
                for (int m = 0; m < factorynum; m++) {
                    for (int n = 0; n < addressnum; n++) {
                        context.write(new Text(factory[m]), new Text(address[n]));
                        System.out.println("factoryname:" + factory[m] + " addressname:" + address[n]);
                    }
                }
            }
        }
    }
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: MTjoin <in> <out>");
            System.exit(2);
        }
        Job job = new Job(conf, "multiple table join");
        job.setJarByClass(MTjoin.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
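The index arithmetic in this mapper is the hardest part to read. The following standalone sketch (hypothetical, not part of the job) expresses the same classification with Character.isDigit and indexOf/lastIndexOf, and prints the key/value pair the mapper would emit for one line of each table:

// Hypothetical standalone demo, not part of the MapReduce job.
public class TagDemo {
    // Classify one input line and build the (key, tagged value) pair the
    // mapper would emit: factory names are tagged "1+", addresses "2+".
    static String[] tag(String line) {
        if (Character.isDigit(line.charAt(0))) {
            // Right table: "addressID addressname"
            int sp = line.indexOf(' ');
            return new String[] { line.substring(0, sp), "2+" + line.substring(sp + 1) };
        } else {
            // Left table: "factoryname addressID"
            int sp = line.lastIndexOf(' ');
            return new String[] { line.substring(sp + 1), "1+" + line.substring(0, sp) };
        }
    }

    public static void main(String[] args) {
        for (String line : new String[] { "Beijing Red Star 1", "1 Beijing" }) {
            String[] kv = tag(line);
            System.out.println(kv[0] + " -> " + kv[1]);
        }
    }
}

Running it prints "1 -> 1+Beijing Red Star" and "1 -> 2+Beijing", matching the key/value pairs in the mapper log further down.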
Version 2 (simplified): because every addressID in the sample data is a single digit, this version slices each line at fixed positions (the last or first character) instead of scanning for the first digit:
package com.mtjoin;

import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

public class MTjoin {
    // Used by the reducer to emit the output header exactly once.
    public static int time = 0;

    public static class Map extends Mapper<Object, Text, Text, Text> {
        public void map(Object key, Text value, Context context)
                throws IOException, InterruptedException {
            System.out.println("mapper........................");
            String line = value.toString();
            // Skip the header line of either input table.
            if (line.contains("factoryname") || line.contains("addressID")) {
                return;
            }
            int len = line.length();
            if (line.charAt(0) > '9' || line.charAt(0) < '0') {
                // Left table (factoryname addressID): the single-digit
                // address ID is the last character of the line.
                System.out.println("key:" + line.substring(len - 1) + " value:" + line.substring(0, len - 2));
                String[] values = { line.substring(0, len - 2), line.substring(len - 1) };
                context.write(new Text(values[1]), new Text("1+" + values[0]));
            } else {
                // Right table (addressID addressname): the single-digit
                // address ID is the first character of the line.
                System.out.println("key:" + line.substring(0, 1) + " value:" + line.substring(2));
                String[] values = { line.substring(0, 1), line.substring(2) };
                context.write(new Text(values[0]), new Text("2+" + values[1]));
            }
        }
    }
    public static class Reduce extends Reducer<Text, Text, Text, Text> {
        public void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            System.out.println("reducer........................");
            // Write the output header before the first joined record.
            if (time == 0) {
                context.write(new Text("factoryname"), new Text("addressname"));
                time++;
            }
            int factorynum = 0;
            String[] factory = new String[10]; // assumes at most 10 factories per addressID
            int addressnum = 0;
            String[] address = new String[10]; // assumes at most 10 addresses per addressID
            Iterator<Text> ite = values.iterator();
            while (ite.hasNext()) {
                String record = ite.next().toString();
                char type = record.charAt(0);
                if (type == '1') {
                    factory[factorynum] = record.substring(2);
                    factorynum++;
                } else {
                    address[addressnum] = record.substring(2);
                    addressnum++;
                }
            }
            // Join: emit the cross product of the factories and addresses
            // that share this addressID.
            if (factorynum != 0 && addressnum != 0) {
                for (int m = 0; m < factorynum; m++) {
                    for (int n = 0; n < addressnum; n++) {
                        context.write(new Text(factory[m]), new Text(address[n]));
                        System.out.println("factoryname:" + factory[m] + " addressname:" + address[n]);
                    }
                }
            }
        }
    }
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length != 2) {
            System.err.println("Usage: MTjoin <in> <out>");
            System.exit(2);
        }
        Job job = new Job(conf, "multiple table join");
        job.setJarByClass(MTjoin.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
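Both versions buffer the grouped values in fixed String[10] arrays, so an addressID shared by more than ten factories (or addresses) would overflow them. Below is a minimal sketch of the same reduce logic using growable lists instead; the class name ListReduce is hypothetical, the header-writing "time" logic is omitted for brevity, and the "1+"/"2+" tags are the ones produced by the mappers above.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

// Hypothetical drop-in replacement for the Reduce class above.
public class ListReduce extends Reducer<Text, Text, Text, Text> {
    public void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        List<String> factories = new ArrayList<String>();
        List<String> addresses = new ArrayList<String>();
        for (Text v : values) {
            String record = v.toString();
            if (record.charAt(0) == '1') {
                factories.add(record.substring(2)); // "1+factoryname"
            } else {
                addresses.add(record.substring(2)); // "2+addressname"
            }
        }
        // Emit the cross product of factories and addresses for this addressID.
        for (String f : factories) {
            for (String a : addresses) {
                context.write(new Text(f), new Text(a));
            }
        }
    }
}

Swapping it in would only require job.setReducerClass(ListReduce.class).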
3. Project data:

Address table (right table; the file begins with the header line "addressID addressname", which the mapper skips):

addressID addressname
1 Beijing
2 Guangzhou
3 Shenzhen
4 Xian

Factory table (left table; the file begins with the header line "factoryname addressID"):

factoryname addressID
Beijing Red Star 1
Shenzhen Thunder 3
Guangzhou Honda 2
Beijing Rising 1
Guangzhou Development Bank 2
Tencent 3
Bank of Beijing 1

The 11 data rows plus the 2 header rows account for the "Map input records=13" counter in the log below.
4. Run log (local-mode console output):

14/09/24 09:39:55 WARN mapred.JobClient: No job jar file set. User classes may not be found. See JobConf(Class) or JobConf#setJar(String).
14/09/24 09:39:55 INFO input.FileInputFormat: Total input paths to process : 2
14/09/24 09:39:55 WARN snappy.LoadSnappy: Snappy native library not loaded
14/09/24 09:39:55 INFO mapred.JobClient: Running job: job_local_0001
14/09/24 09:39:55 INFO util.ProcessTree: setsid exited with exit code 0
14/09/24 09:39:55 INFO mapred.Task: Using ResourceCalculatorPlugin : org.apache.hadoop.util.LinuxResourceCalculatorPlugin@e095722
14/09/24 09:39:55 INFO mapred.MapTask: io.sort.mb = 100
14/09/24 09:39:55 INFO mapred.MapTask: data buffer = 79691776/99614720
14/09/24 09:39:55 INFO mapred.MapTask: record buffer = 262144/327680
mapper........................
mapper........................
key:1 value:Beijing Red Star
mapper........................
key:3 value:Shenzhen Thunder
mapper........................
key:2 value:Guangzhou Honda
mapper........................
key:1 value:Beijing Rising
mapper........................
key:2 value:Guangzhou Development Bank
mapper........................
key:3 value:Tencent
mapper........................
key:1 value:Bank of Beijing
14/09/24 09:39:55 INFO mapred.MapTask: Starting flush of map output
14/09/24 09:39:55 INFO mapred.MapTask: Finished spill 0
14/09/24 09:39:55 INFO mapred.Task: Task:attempt_local_0001_m_000000_0 is done. And is in the process of commiting
14/09/24 09:39:56 INFO mapred.JobClient: map 0% reduce 0%
14/09/24 09:39:58 INFO mapred.LocalJobRunner:
14/09/24 09:39:58 INFO mapred.Task: Task 'attempt_local_0001_m_000000_0' done.
14/09/24 09:39:58 INFO mapred.Task: Using ResourceCalculatorPlugin : org.apache.hadoop.util.LinuxResourceCalculatorPlugin@7dabd20
14/09/24 09:39:58 INFO mapred.MapTask: io.sort.mb = 100
14/09/24 09:39:58 INFO mapred.MapTask: data buffer = 79691776/99614720
14/09/24 09:39:58 INFO mapred.MapTask: record buffer = 262144/327680
mapper........................
mapper........................
key:1 value:Beijing
mapper........................
key:2 value:Guangzhou
mapper........................
key:3 value:Shenzhen
mapper........................
key:4 value:Xian
14/09/24 09:39:58 INFO mapred.MapTask: Starting flush of map output
14/09/24 09:39:58 INFO mapred.MapTask: Finished spill 0
14/09/24 09:39:58 INFO mapred.Task: Task:attempt_local_0001_m_000001_0 is done. And is in the process of commiting
14/09/24 09:39:59 INFO mapred.JobClient: map 100% reduce 0%
14/09/24 09:40:01 INFO mapred.LocalJobRunner:
14/09/24 09:40:01 INFO mapred.Task: Task 'attempt_local_0001_m_000001_0' done.
14/09/24 09:40:01 INFO mapred.Task: Using ResourceCalculatorPlugin : org.apache.hadoop.util.LinuxResourceCalculatorPlugin@49fa6f3c
14/09/24 09:40:01 INFO mapred.LocalJobRunner:
14/09/24 09:40:01 INFO mapred.Merger: Merging 2 sorted segments
14/09/24 09:40:01 INFO mapred.Merger: Down to the last merge-pass, with 2 segments left of total size: 218 bytes
14/09/24 09:40:01 INFO mapred.LocalJobRunner:
reducer........................
factoryname:Beijing Red Star addressname:Beijing
factoryname:Beijing Rising addressname:Beijing
factoryname:Bank of Beijing addressname:Beijing
reducer........................
factoryname:Guangzhou Honda addressname:Guangzhou
factoryname:Guangzhou Development Bank addressname:Guangzhou
reducer........................
factoryname:Shenzhen Thunder addressname:Shenzhen
factoryname:Tencent addressname:Shenzhen
reducer........................
14/09/24 09:40:01 INFO mapred.Task: Task:attempt_local_0001_r_000000_0 is done. And is in the process of commiting
14/09/24 09:40:01 INFO mapred.LocalJobRunner:
14/09/24 09:40:01 INFO mapred.Task: Task attempt_local_0001_r_000000_0 is allowed to commit now
14/09/24 09:40:01 INFO output.FileOutputCommitter: Saved output of task 'attempt_local_0001_r_000000_0' to hdfs://localhost:9000/user/hadoop/mtjoin_output02
14/09/24 09:40:04 INFO mapred.LocalJobRunner: reduce > reduce
14/09/24 09:40:04 INFO mapred.Task: Task 'attempt_local_0001_r_000000_0' done.
14/09/24 09:40:05 INFO mapred.JobClient: map 100% reduce 100%
14/09/24 09:40:05 INFO mapred.JobClient: Job complete: job_local_0001
14/09/24 09:40:05 INFO mapred.JobClient: Counters: 22
14/09/24 09:40:05 INFO mapred.JobClient: Map-Reduce Framework
14/09/24 09:40:05 INFO mapred.JobClient: Spilled Records=22
14/09/24 09:40:05 INFO mapred.JobClient: Map output materialized bytes=226
14/09/24 09:40:05 INFO mapred.JobClient: Reduce input records=11
14/09/24 09:40:05 INFO mapred.JobClient: Virtual memory (bytes) snapshot=0
14/09/24 09:40:05 INFO mapred.JobClient: Map input records=13
14/09/24 09:40:05 INFO mapred.JobClient: SPLIT_RAW_BYTES=238
14/09/24 09:40:05 INFO mapred.JobClient: Map output bytes=192
14/09/24 09:40:05 INFO mapred.JobClient: Reduce shuffle bytes=0
14/09/24 09:40:05 INFO mapred.JobClient: Physical memory (bytes) snapshot=0
14/09/24 09:40:05 INFO mapred.JobClient: Reduce input groups=4
14/09/24 09:40:05 INFO mapred.JobClient: Combine output records=0
14/09/24 09:40:05 INFO mapred.JobClient: Reduce output records=8
14/09/24 09:40:05 INFO mapred.JobClient: Map output records=11
14/09/24 09:40:05 INFO mapred.JobClient: Combine input records=0
14/09/24 09:40:05 INFO mapred.JobClient: CPU time spent (ms)=0
14/09/24 09:40:05 INFO mapred.JobClient: Total committed heap usage (bytes)=813170688
14/09/24 09:40:05 INFO mapred.JobClient: File Input Format Counters
14/09/24 09:40:05 INFO mapred.JobClient: Bytes Read=216
14/09/24 09:40:05 INFO mapred.JobClient: FileSystemCounters
14/09/24 09:40:05 INFO mapred.JobClient: HDFS_BYTES_READ=586
14/09/24 09:40:05 INFO mapred.JobClient: FILE_BYTES_WRITTEN=122093
14/09/24 09:40:05 INFO mapred.JobClient: FILE_BYTES_READ=1658
14/09/24 09:40:05 INFO mapred.JobClient: HDFS_BYTES_WRITTEN=202
14/09/24 09:40:05 INFO mapred.JobClient: File Output Format Counters
14/09/24 09:40:05 INFO mapred.JobClient: Bytes Written=202
5. Result (the output file; fields are tab-separated, and the first line is the header row the reducer writes once, giving the 8 reduce output records counted above):

factoryname addressname
Beijing Red Star Beijing
Beijing Rising Beijing
Bank of Beijing Beijing
Guangzhou Honda Guangzhou
Guangzhou Development Bank Guangzhou
Shenzhen Thunder Shenzhen
Tencent Shenzhen
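If you want to dump the joined output without leaving Java, a small sketch along these lines works; the class name PrintResult is hypothetical, the path is the one reported by FileOutputCommitter in the log above, and part-r-00000 is the usual file name for a single reducer's output.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical utility for printing the job's result from HDFS.
public class PrintResult {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Output path taken from the FileOutputCommitter line in the run log.
        Path part = new Path("hdfs://localhost:9000/user/hadoop/mtjoin_output02/part-r-00000");
        FileSystem fs = FileSystem.get(part.toUri(), conf);
        BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(part)));
        String line;
        while ((line = in.readLine()) != null) {
            System.out.println(line); // factoryname <TAB> addressname
        }
        in.close();
    }
}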