I: UDF

1. Custom UDF

  A plain UDF processes a single row and returns a single value per call, like the built-in lower() or concat(). The classic way to write one is to extend org.apache.hadoop.hive.ql.exec.UDF and define one or more evaluate() methods; Hive picks the matching overload by reflection.
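
  As a minimal sketch (this section is otherwise left blank in the original; the class name Lower and the lower-casing behavior are illustrative, not from the original):

 package org.apache.hadoop.hive_udf;

 import org.apache.hadoop.hive.ql.exec.UDF;
import org.apache.hadoop.io.Text;

/**
 * Minimal UDF example: lower-cases a string column.
 */
public class Lower extends UDF {
    public Text evaluate(Text input) {
        if (input == null) {
            return null;
        }
        return new Text(input.toString().toLowerCase());
    }
}

  Such a class is packaged and registered the same way as the UDAF below (add jar + create temporary function, see steps 6 to 8).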

II: UDAF

2. UDAF

  A UDAF (user-defined aggregate function) consumes many input rows and produces a single aggregated value, like the built-in sum(), count() and avg(). Writing a generic UDAF involves two parts: a resolver that validates the argument types and picks an evaluator, and the evaluator that performs the actual aggregation.

3. Introduction to AbstractGenericUDAFResolver

  AbstractGenericUDAFResolver is the entry point of a generic UDAF. You override getEvaluator(GenericUDAFParameterInfo info), check the number and types of the arguments there, and return the GenericUDAFEvaluator implementation that matches the input type, throwing SemanticException for unsupported arguments.

4. Introduction to GenericUDAFEvaluator

  GenericUDAFEvaluator does the actual aggregation. The key methods are init() (declares the output type for the current mode), getNewAggregationBuffer() and reset() (create and clear the intermediate buffer), iterate() (consume one raw input row), terminatePartial() (emit a partial result on the map side), merge() (fold a partial result into the buffer) and terminate() (emit the final result). The Mode passed to init() tells you which phase is running: PARTIAL1 (raw input to partial), PARTIAL2 (partial to partial), FINAL (partial to final) and COMPLETE (raw input to final, map-only).

5. Code

 package org.apache.hadoop.hive_udf;

 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.parse.SemanticException;
import org.apache.hadoop.hive.ql.udf.generic.AbstractGenericUDAFResolver;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFParameterInfo;
import org.apache.hadoop.hive.serde2.io.DoubleWritable;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.AbstractPrimitiveWritableObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils;
import org.apache.hadoop.io.LongWritable;

/**
 * Requirement: implement a sum function that supports int and double types.
 */
public class UdafProject extends AbstractGenericUDAFResolver {

    @Override
    public GenericUDAFEvaluator getEvaluator(GenericUDAFParameterInfo info)
            throws SemanticException {
        // Reject the * (all columns) form
        if (info.isAllColumns()) {
            throw new SemanticException("The * argument is not supported");
        }
        // Exactly one argument is required
        ObjectInspector[] inspector = info.getParameterObjectInspectors();
        if (inspector.length != 1) {
            throw new SemanticException("Exactly one argument is allowed");
        }
        // The input column must be a primitive type
        if (inspector[0].getCategory() != ObjectInspector.Category.PRIMITIVE) {
            throw new SemanticException("The argument must be a primitive type");
        }
        AbstractPrimitiveWritableObjectInspector woi =
                (AbstractPrimitiveWritableObjectInspector) inspector[0];
        // Dispatch on the concrete primitive type
        switch (woi.getPrimitiveCategory()) {
        case INT:
        case LONG:
        case BYTE:
        case SHORT:
            return new udafLong();
        case FLOAT:
        case DOUBLE:
            return new udafDouble();
        default:
            throw new SemanticException(
                    "The argument must be a numeric primitive type; string and similar types are not supported");
        }
    }

    /**
     * Sums integral types.
     */
    public static class udafLong extends GenericUDAFEvaluator {
        // Object inspector for the input values
        public PrimitiveObjectInspector inputor;

        // Custom aggregation buffer
        static class sumlongagg implements AggregationBuffer {
            long sum;
            boolean empty;
        }

        @Override
        public ObjectInspector init(Mode m, ObjectInspector[] parameters)
                throws HiveException {
            super.init(m, parameters);
            if (parameters.length != 1) {
                throw new UDFArgumentException("Exactly one argument is allowed");
            }
            if (inputor == null) {
                this.inputor = (PrimitiveObjectInspector) parameters[0];
            }
            // The returned inspector must match the type of the final sum
            return PrimitiveObjectInspectorFactory.writableLongObjectInspector;
        }

        @Override
        public AggregationBuffer getNewAggregationBuffer() throws HiveException {
            sumlongagg slg = new sumlongagg();
            this.reset(slg);
            return slg;
        }

        @Override
        public void reset(AggregationBuffer agg) throws HiveException {
            sumlongagg slg = (sumlongagg) agg;
            slg.sum = 0;
            slg.empty = true;
        }

        @Override
        public void iterate(AggregationBuffer agg, Object[] parameters)
                throws HiveException {
            if (parameters.length != 1) {
                throw new UDFArgumentException("Wrong number of arguments");
            }
            this.merge(agg, parameters[0]);
        }

        @Override
        public Object terminatePartial(AggregationBuffer agg) throws HiveException {
            return this.terminate(agg);
        }

        @Override
        public void merge(AggregationBuffer agg, Object partial) throws HiveException {
            sumlongagg slg = (sumlongagg) agg;
            if (partial != null) {
                slg.sum += PrimitiveObjectInspectorUtils.getLong(partial, inputor);
                slg.empty = false;
            }
        }

        @Override
        public Object terminate(AggregationBuffer agg) throws HiveException {
            sumlongagg slg = (sumlongagg) agg;
            if (slg.empty) {
                return null;
            }
            return new LongWritable(slg.sum);
        }
    }

    /**
     * Sums floating-point types.
     */
    public static class udafDouble extends GenericUDAFEvaluator {
        // Object inspector for the input values
        public PrimitiveObjectInspector input;

        // Custom aggregation buffer
        static class sumdoubleagg implements AggregationBuffer {
            double sum;
            boolean empty;
        }

        @Override
        public ObjectInspector init(Mode m, ObjectInspector[] parameters)
                throws HiveException {
            super.init(m, parameters);
            if (parameters.length != 1) {
                throw new UDFArgumentException("Exactly one argument is allowed");
            }
            if (input == null) {
                this.input = (PrimitiveObjectInspector) parameters[0];
            }
            // The returned inspector must match the type of the final sum
            return PrimitiveObjectInspectorFactory.writableDoubleObjectInspector;
        }

        @Override
        public AggregationBuffer getNewAggregationBuffer() throws HiveException {
            sumdoubleagg sdg = new sumdoubleagg();
            this.reset(sdg);
            return sdg;
        }

        @Override
        public void reset(AggregationBuffer agg) throws HiveException {
            sumdoubleagg sdg = (sumdoubleagg) agg;
            sdg.sum = 0;
            sdg.empty = true;
        }

        @Override
        public void iterate(AggregationBuffer agg, Object[] parameters)
                throws HiveException {
            if (parameters.length != 1) {
                throw new UDFArgumentException("Wrong number of arguments");
            }
            this.merge(agg, parameters[0]);
        }

        @Override
        public Object terminatePartial(AggregationBuffer agg) throws HiveException {
            return this.terminate(agg);
        }

        @Override
        public void merge(AggregationBuffer agg, Object partial) throws HiveException {
            sumdoubleagg sdg = (sumdoubleagg) agg;
            if (partial != null) {
                // Read the value out of the partial result, not the buffer
                sdg.sum += PrimitiveObjectInspectorUtils.getDouble(partial, input);
                sdg.empty = false;
            }
        }

        @Override
        public Object terminate(AggregationBuffer agg) throws HiveException {
            sumdoubleagg sdg = (sumdoubleagg) agg;
            if (sdg.empty) {
                return null;
            }
            return new DoubleWritable(sdg.sum);
        }
    }
}

6. Package into a jar

  and place it at: /etc/opt/datas/

7. Add the jar to the Hive session

  Syntax:

    add jar linux_path;

  That is:

    add jar /etc/opt/datas/af.jar;

8. Create the function

  create temporary function af as 'org.apache.hadoop.hive_udf.UdafProject';

9. Run in Hive

  select sum(id),af(id) from stu_info;

III: UDTF

1. UDTF

  A UDTF (user-defined table-generating function) turns one input row into multiple output rows and/or columns, like the built-in explode(). You extend GenericUDTF and implement initialize() (declares the output columns), process() (emits rows via forward()) and close().

2. Code

 package org.apache.hadoop.hive.udf;

 import java.util.ArrayList;

 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class UDTFtest extends GenericUDTF {

    @Override
    public StructObjectInspector initialize(StructObjectInspector argOIs)
            throws UDFArgumentException {
        // Exactly one argument is required
        if (argOIs.getAllStructFieldRefs().size() != 1) {
            throw new UDFArgumentException("Exactly one argument is allowed");
        }
        // Declare the names and types of the output columns
        ArrayList<String> fieldname = new ArrayList<String>();
        fieldname.add("name");
        fieldname.add("email");
        ArrayList<ObjectInspector> fieldio = new ArrayList<ObjectInspector>();
        fieldio.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);
        fieldio.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector);
        return ObjectInspectorFactory.getStandardStructObjectInspector(fieldname, fieldio);
    }

    @Override
    public void process(Object[] args) throws HiveException {
        // Emit one output row (name, email) per input row
        if (args.length == 1) {
            String name = args[0].toString();
            String email = name + "@ibeifneg.com";
            super.forward(new String[] { name, email });
        }
    }

    @Override
    public void close() throws HiveException {
        // Emit one final marker row when processing finishes
        super.forward(new String[] { "complete", "finish" });
    }
}

3. Follow the same steps as above: package the class into a jar, add the jar in Hive, and create a temporary function (here named tf).

4. Run in Hive

  select tf(ename) as (name,email) from emp;
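
  Invoked this way, the UDTF cannot be mixed with other expressions in the same SELECT list. To combine its output with the source columns, LATERAL VIEW is the usual pattern; a sketch assuming the same emp table and the tf function created above:

    select e.ename, t.name, t.email from emp e lateral view tf(ename) t as name, email;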
