1、Driver

package com.kangaroo.hadoop.drive;

import java.util.Map;
import java.util.Properties;

import com.kangaroo.hadoop.mapper.AggregateMapper;
import com.kangaroo.hadoop.reducer.AggregateReducer;
import com.kangaroo.hadoop.utils.PropertiesUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DriveMain extends Configured implements Tool {

    private static final Logger logger = LoggerFactory.getLogger(DriveMain.class);

    private Configuration conf;
    private PropertiesUtil propUtil;

    public DriveMain() {
        this.conf = new Configuration();
        this.propUtil = new PropertiesUtil("configure.properties");
    }

    @Override
    public int run(String[] args) throws Exception {
        try {
            logger.info("MapReduce Job Beginning.");
            String dbName = args[0];
            String tableName = args[1];
            String partition = args[2];
            String sumField = args[3];
            String outPath = args[4];
            String partFilter = partitionFormat(partition);
            logger.info("[Params] dbName:{}, tableName:{}, partition:{}, sumField:{}, outPath:{}, partFilter:{}",
                    dbName, tableName, partition, sumField, outPath, partFilter);
            // Pass the field to aggregate on down to the mapper via the job configuration.
            this.conf.set("sumField", sumField);
            this.setMapRedConfiguration();
            Job job = this.setJobConfiguration(this.conf);
            HCatInputFormat.setInput(job, dbName, tableName, partFilter);
            logger.info("setInput successfully.");
            FileOutputFormat.setOutputPath(job, new Path(outPath));
            logger.info("setOutput successfully.");
            return job.waitForCompletion(true) ? 0 : 1;
        } catch (Exception ex) {
            logger.error(ex.getMessage(), ex);
            throw ex;
        }
    }

    private Job setJobConfiguration(Configuration conf) throws Exception {
        try {
            logger.info("enter setJobConfiguration");
            Job job = Job.getInstance(conf);
            job.setJarByClass(DriveMain.class);
            job.setInputFormatClass(HCatInputFormat.class);
            job.setMapperClass(AggregateMapper.class);
            job.setReducerClass(AggregateReducer.class);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(Text.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);
            job.setNumReduceTasks(1);
            logger.info("setJobConfiguration successfully.");
            return job;
        } catch (Exception ex) {
            logger.error("setJobConfiguration: " + ex.getMessage(), ex);
            throw new Exception(ex);
        }
    }

    private void setMapRedConfiguration() {
        try {
            Properties properties = propUtil.getProperties();
            logger.info("Load MapReduce Configuration Successfully.");
            // Copy every mapred.* entry from configure.properties into the job configuration.
            for (Map.Entry<Object, Object> entry : properties.entrySet()) {
                if (entry.getKey().toString().startsWith("mapred")) {
                    conf.set(entry.getKey().toString(), entry.getValue().toString());
                    logger.info("[MR][Config] key:{}, value:{}", entry.getKey().toString(), entry.getValue().toString());
                }
            }
            logger.info("[MR][Config] Set MapReduce Configuration Successfully.");
        } catch (Exception e) {
            // Don't swallow configuration failures silently.
            logger.error("Failed to load MapReduce configuration: " + e.getMessage(), e);
        }
    }

    private String partitionFormat(String partition) {
        // Turn a "year/month/day/hour" style partition path into an HCatalog partition filter;
        // a filter that already uses the pt or dt partition keys is passed through unchanged.
        String format = "";
        if (!partition.contains("pt") && !partition.contains("dt")) {
            String[] items = partition.split("/");
            String[] keys = {"year", "month", "day", "hour"};
            for (int i = 0; i < items.length; i++) {
                if (i == items.length - 1) {
                    format += keys[i] + "='" + items[i] + "'";
                } else {
                    format += keys[i] + "='" + items[i] + "' and ";
                }
            }
        } else {
            format = partition;
        }
        return format;
    }

    public static void main(String[] args) throws Exception {
        int exitCode = ToolRunner.run(new DriveMain(), args);
        System.exit(exitCode);
    }
}
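The driver expects five positional arguments: the Hive database name, the table name, a partition spec, the field to aggregate on (sumField), and an HDFS output directory. To illustrate partitionFormat (the year/month/day/hour layout is simply what the keys array above assumes): a partition argument of 2017/06/01/12 becomes the HCatalog filter year='2017' and month='06' and day='01' and hour='12', while an argument that already uses a pt or dt partition key is passed to HCatInputFormat.setInput unchanged.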

2、Mapper

package com.kangaroo.hadoop.mapper;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hive.hcatalog.data.HCatRecord;
import org.apache.hive.hcatalog.data.schema.HCatSchema;
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@SuppressWarnings("rawtypes")
public class AggregateMapper extends Mapper<WritableComparable, HCatRecord, Text, Text> {

    private static final Logger logger = LoggerFactory.getLogger(AggregateMapper.class);

    private HCatSchema schema;
    private Text outKey;
    private Text outValue;

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        outKey = new Text();
        outValue = new Text();
        // The HCatalog table schema lets us read each HCatRecord field by name.
        schema = HCatInputFormat.getTableSchema(context.getConfiguration());
    }

    @Override
    protected void map(WritableComparable key, HCatRecord value, Context context) throws IOException, InterruptedException {
        String sumField = context.getConfiguration().get("sumField");
        Map<String, String> recordMap = new HashMap<String, String>();
        for (String fieldName : schema.getFieldNames()) {
            String fieldValue = value.get(fieldName, schema).toString();
            logger.info("fieldName={}, fieldValue={}", fieldName, fieldValue);
            recordMap.put(fieldName, fieldValue);
        }
        logger.info("recordMap={}", recordMap.toString());
        // Emit one (sumField value, "1") pair per record so the reducer can count
        // how many records share each distinct value of sumField.
        outKey.set(recordMap.get(sumField));
        outValue.set("1");
        context.write(outKey, outValue);
    }
}

3、Reducer

package com.kangaroo.hadoop.reducer;

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hive.hcatalog.data.schema.HCatSchema;
import org.apache.hive.hcatalog.mapreduce.HCatInputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class AggregateReducer extends Reducer<Text, Text, Text, Text> {

    protected static final Logger logger = LoggerFactory.getLogger(AggregateReducer.class);

    private HCatSchema schema;
    private Text outKey;
    private Text outValue;

    @Override
    protected void setup(Context context) throws IOException, InterruptedException {
        // Initialize the reusable output objects; leaving them null would throw a
        // NullPointerException on the first reduce() call.
        outKey = new Text();
        outValue = new Text();
        schema = HCatInputFormat.getTableSchema(context.getConfiguration());
    }

    @Override
    public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        // Sum the "1" values emitted by the mapper and write one result line per key.
        outKey.set(key);
        int sum = 0;
        for (Text value : values) {
            sum += Integer.parseInt(value.toString());
        }
        outValue.set(String.valueOf(sum));
        context.write(outKey, outValue);
    }
}
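To make the data flow concrete, suppose sumField=city (a hypothetical column): three input records whose city values are beijing, beijing and shanghai cause the mapper to emit (beijing, 1), (beijing, 1) and (shanghai, 1), and the single reducer then writes beijing 2 and shanghai 1 to the output directory.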

4、PropertiesUtil

package com.kangaroo.hadoop.utils;

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

public class PropertiesUtil {

    private String filePath;

    public PropertiesUtil() {
        this.filePath = "configure.properties";
    }

    public PropertiesUtil(String filePath) {
        this.filePath = filePath;
    }

    public Properties getProperties() throws IOException {
        // Load the properties file from the classpath (e.g. src/main/resources).
        InputStream inStream = null;
        try {
            inStream = PropertiesUtil.class.getClassLoader()
                    .getResourceAsStream(this.filePath);
            Properties prop = new Properties();
            prop.load(inStream);
            return prop;
        } finally {
            if (inStream != null) {
                inStream.close();
            }
        }
    }
}
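A minimal usage sketch of PropertiesUtil (the demo class name is made up; it assumes configure.properties sits on the classpath, e.g. under src/main/resources, with the content shown in section 5 below):

import java.util.Properties;
import com.kangaroo.hadoop.utils.PropertiesUtil;

public class PropertiesUtilDemo {
    public static void main(String[] args) throws Exception {
        // Reads configure.properties from the classpath and prints one of its mapred.* settings.
        Properties props = new PropertiesUtil("configure.properties").getProperties();
        System.out.println(props.getProperty("mapred.job.queue.name"));  // e.g. root.XXX
    }
}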

5、Configuration (configure.properties)

mapred.job.queue.name=root.XXX
mapred.jar=./XXX.jar
mapred.map.tasks=300
mapred.reduce.tasks=100
#mapred.map.capacity=1
#mapred.reduce.capacity=1
mapred.job.priority=HIGH
mapred.job.name=XXX
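
With the classes packaged into a jar and configure.properties on the classpath, the job can be launched roughly as follows (jar name, database, table, field and output path are placeholders; depending on the cluster, the Hive/HCatalog client jars may also need to be supplied via HADOOP_CLASSPATH or the -libjars generic option):

hadoop jar ./XXX.jar com.kangaroo.hadoop.drive.DriveMain my_db my_table 2017/06/01/12 city /user/XXX/output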
