I: UDF

1. Writing a custom UDF

  A plain UDF maps the columns of a single input row to a single output value, like the built-in lower or concat.
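  A minimal sketch, assuming a hypothetical ToLower class that down-cases a string (the class name and behavior are illustrative, not from the original post):

package org.apache.hadoop.hive_udf;

import org.apache.hadoop.hive.ql.exec.UDF;
import org.apache.hadoop.io.Text;

// Hive finds the evaluate() method by reflection;
// one evaluate() call handles one input row.
public class ToLower extends UDF {
    public Text evaluate(Text s) {
        if (s == null) {
            return null; // stay null-safe: Hive may pass NULL columns
        }
        return new Text(s.toString().toLowerCase());
    }
}

  It is packaged, added, and registered the same way as the UDAF below (steps 6-8), e.g.:

    create temporary function my_lower as 'org.apache.hadoop.hive_udf.ToLower';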

II: UDAF

2. UDAF

  A user-defined aggregate function takes the values of many input rows and produces one aggregated output value, like the built-in sum, count, and avg. A generic UDAF has two parts: a resolver that validates argument types, and an evaluator that performs the aggregation.

3. AbstractGenericUDAFResolver

  The resolver is the entry point of a UDAF. Its getEvaluator(GenericUDAFParameterInfo) method is called at query compile time to validate the argument types and return a GenericUDAFEvaluator instance that matches them.

4. GenericUDAFEvaluator

  The evaluator does the actual aggregation: init() is called once per stage, getNewAggregationBuffer()/reset() manage the intermediate buffer, iterate() consumes raw rows, terminatePartial() emits a partial aggregation, merge() combines partial aggregations, and terminate() emits the final result. Which methods run depends on the Mode passed to init():

    PARTIAL1 (map side): iterate -> terminatePartial
    PARTIAL2 (combiner): merge -> terminatePartial
    FINAL (reduce side): merge -> terminate
    COMPLETE (map-only query): iterate -> terminate

5. The program

package org.apache.hadoop.hive_udf;

import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.udf.generic.AbstractGenericUDAFResolver;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFParameterInfo;
import org.apache.hadoop.hive.serde2.io.DoubleWritable;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.AbstractPrimitiveWritableObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
import org.apache.hadoop.io.LongWritable;

/**
 * Requirement: implement a sum function that supports int and double inputs.
 */
public class UdafProject extends AbstractGenericUDAFResolver {

    @Override
    public GenericUDAFEvaluator getEvaluator(GenericUDAFParameterInfo info)
            throws SemanticException {
        // Reject "*" as an argument
        if (info.isAllColumns()) {
            throw new SemanticException("* is not supported as an argument");
        }

        // Exactly one argument is allowed
        ObjectInspector[] inspector = info.getParameterObjectInspectors();
        if (inspector.length != 1) {
            throw new SemanticException("exactly one argument is required");
        }

        // The input column must be a primitive type
        if (inspector[0].getCategory() != ObjectInspector.Category.PRIMITIVE) {
            throw new SemanticException("the argument must be a primitive type");
        }

        // Dispatch to the right evaluator for the primitive category
        AbstractPrimitiveWritableObjectInspector woi =
                (AbstractPrimitiveWritableObjectInspector) inspector[0];
        switch (woi.getPrimitiveCategory()) {
        case INT:
        case LONG:
        case BYTE:
        case SHORT:
            return new udafLong();
        case FLOAT:
        case DOUBLE:
            return new udafDouble();
        default:
            throw new SemanticException(
                    "the argument must be a numeric primitive type (string etc. is not supported)");
        }
    }

    /**
     * Sums integer-family inputs.
     */
    public static class udafLong extends GenericUDAFEvaluator {

        // Object inspector for the input data
        public PrimitiveObjectInspector inputor;

        // Custom aggregation buffer holding the running sum
        static class sumlongagg implements AggregationBuffer {
            long sum;
            boolean empty;
        }

        @Override
        public ObjectInspector init(Mode m, ObjectInspector[] parameters)
                throws HiveException {
            super.init(m, parameters);
            if (parameters.length != 1) {
                throw new UDFArgumentException("exactly one argument is required");
            }
            if (inputor == null) {
                this.inputor = (PrimitiveObjectInspector) parameters[0];
            }
            // The returned inspector must match the type that terminate() produces
            return PrimitiveObjectInspectorFactory.writableLongObjectInspector;
        }

        @Override
        public AggregationBuffer getNewAggregationBuffer() throws HiveException {
            sumlongagg slg = new sumlongagg();
            this.reset(slg);
            return slg;
        }

        @Override
        public void reset(AggregationBuffer agg) throws HiveException {
            sumlongagg slg = (sumlongagg) agg;
            slg.sum = 0;
            slg.empty = true;
        }

        @Override
        public void iterate(AggregationBuffer agg, Object[] parameters)
                throws HiveException {
            if (parameters.length != 1) {
                throw new UDFArgumentException("exactly one argument is required");
            }
            this.merge(agg, parameters[0]);
        }

        @Override
        public Object terminatePartial(AggregationBuffer agg) throws HiveException {
            return this.terminate(agg);
        }

        @Override
        public void merge(AggregationBuffer agg, Object partial) throws HiveException {
            sumlongagg slg = (sumlongagg) agg;
            if (partial != null) {
                slg.sum += PrimitiveObjectInspectorUtils.getLong(partial, inputor);
                slg.empty = false;
            }
        }

        @Override
        public Object terminate(AggregationBuffer agg) throws HiveException {
            sumlongagg slg = (sumlongagg) agg;
            if (slg.empty) {
                return null;
            }
            return new LongWritable(slg.sum);
        }
    }

    /**
     * Sums floating-point inputs.
     */
    public static class udafDouble extends GenericUDAFEvaluator {

        // Object inspector for the input data
        public PrimitiveObjectInspector input;

        // Custom aggregation buffer holding the running sum
        static class sumdoubleagg implements AggregationBuffer {
            double sum;
            boolean empty;
        }

        @Override
        public ObjectInspector init(Mode m, ObjectInspector[] parameters)
                throws HiveException {
            super.init(m, parameters);
            if (parameters.length != 1) {
                throw new UDFArgumentException("exactly one argument is required");
            }
            if (input == null) {
                this.input = (PrimitiveObjectInspector) parameters[0];
            }
            // The returned inspector must match the type that terminate() produces
            return PrimitiveObjectInspectorFactory.writableDoubleObjectInspector;
        }

        @Override
        public AggregationBuffer getNewAggregationBuffer() throws HiveException {
            sumdoubleagg sdg = new sumdoubleagg();
            this.reset(sdg);
            return sdg;
        }

        @Override
        public void reset(AggregationBuffer agg) throws HiveException {
            sumdoubleagg sdg = (sumdoubleagg) agg;
            sdg.sum = 0;
            sdg.empty = true;
        }

        @Override
        public void iterate(AggregationBuffer agg, Object[] parameters)
                throws HiveException {
            if (parameters.length != 1) {
                throw new UDFArgumentException("exactly one argument is required");
            }
            this.merge(agg, parameters[0]);
        }

        @Override
        public Object terminatePartial(AggregationBuffer agg) throws HiveException {
            return this.terminate(agg);
        }

        @Override
        public void merge(AggregationBuffer agg, Object partial) throws HiveException {
            sumdoubleagg sdg = (sumdoubleagg) agg;
            if (partial != null) {
                // Read the incoming partial value, not the buffer itself
                sdg.sum += PrimitiveObjectInspectorUtils.getDouble(partial, input);
                sdg.empty = false;
            }
        }

        @Override
        public Object terminate(AggregationBuffer agg) throws HiveException {
            sumdoubleagg sdg = (sumdoubleagg) agg;
            if (sdg.empty) {
                return null;
            }
            return new DoubleWritable(sdg.sum);
        }
    }
}

6. Package the classes into a jar

  and place it at: /etc/opt/datas/
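  For example (a sketch, assuming the compiled .class files sit under a bin/ directory and the jar is named af.jar to match the steps below):

    jar cvf af.jar -C bin .
    mv af.jar /etc/opt/datas/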

7. Add the jar to the Hive session

  Syntax:

    add jar linux_path;

  i.e.:

    add jar /etc/opt/datas/af.jar;

8. Create the function

  create temporary function af as 'org.apache.hadoop.hive_udf.UdafProject';

9. Run it in Hive (comparing the built-in sum with the custom af)

  select sum(id),af(id) from stu_info;

III: UDTF

1. UDTF

  A user-defined table-generating function takes one input row and emits zero or more output rows, each with one or more columns (like the built-in explode). It extends GenericUDTF: initialize() declares the output column names and types, process() is called once per input row and emits rows via forward(), and close() runs after the last row.

2. The program

package org.apache.hadoop.hive.udf;

import java.util.ArrayList;

import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class UDTFtest extends GenericUDTF {

    @Override
    public StructObjectInspector initialize(StructObjectInspector argOIs)
            throws UDFArgumentException {
        // Exactly one argument is allowed
        if (argOIs.getAllStructFieldRefs().size() != 1) {
            throw new UDFArgumentException("exactly one argument is required");
        }
        // Declare the names and types of the two output columns
        ArrayList<String> fieldname = new ArrayList<String>();
        fieldname.add("name");
        fieldname.add("email");
        ArrayList<ObjectInspector> fieldio = new ArrayList<ObjectInspector>();
        fieldio.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);
        fieldio.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);
        return ObjectInspectorFactory.getStandardStructObjectInspector(fieldname, fieldio);
    }

    @Override
    public void process(Object[] args) throws HiveException {
        // Called once per input row; forward() emits one output row
        if (args.length == 1) {
            String name = args[0].toString();
            String email = name + "@ibeifneg.com";
            super.forward(new String[] { name, email });
        }
    }

    @Override
    public void close() throws HiveException {
        // Called after the last input row; emits one trailing marker row
        super.forward(new String[] { "complete", "finish" });
    }
}

3. Follow the same steps as above: package the jar, add it to the session, and create a temporary function (here named tf)

4. Run it in Hive

  select tf(ename) as (name,email) from emp;
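
  A UDTF can also be joined back to the columns of its source row with LATERAL VIEW, for example (a sketch assuming the same tf function and emp table):

    select e.ename, t.name, t.email
    from emp e
    lateral view tf(e.ename) t as name, email;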
