(Repost) Pig: Writing Custom Load and Store Function UDFs
Pig's built-in PigStorage cannot specify a record (row) delimiter, so I wrote a simple UDF class that lets you specify both the field delimiter and the record delimiter. I had worked out a simpler version before (http://blog.csdn.net/ruishenh/article/details/12048067), but it had serious drawbacks, so this is a rewrite.
Steps: package the classes into a jar, upload it to the server, and register it:
grunt> register /home/pig/pig-0.11.0/udflib/myStorage.jar
grunt> cat student;
1,xiaohouzi,25/2,xiaohouzi2,24/3,xiaohouzi3,23
grunt> a = load 'student' using com.hcr.hadoop.pig.MyStorage(',','/');
grunt> dump a;
(1,xiaohouzi,25)
(2,xiaohouzi2,24)
(3,xiaohouzi3,23)
grunt> store a into 'myStorageOut' using com.hcr.hadoop.pig.MyStorage(',','/');
After the job reports success, check the output:
grunt> cat myStorageOut
1,xiaohouzi,25/2,xiaohouzi2,24/3,xiaohouzi3,23/
(The trailing '/' appears because MyRecordWriter writes the record delimiter after every tuple, including the last one.)
The source code:
package com.hcr.hadoop.pig;

import java.io.DataOutputStream;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.compress.BZip2Codec;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.pig.Expression;
import org.apache.pig.LoadFunc;
import org.apache.pig.LoadMetadata;
import org.apache.pig.PigException;
import org.apache.pig.ResourceSchema;
import org.apache.pig.ResourceSchema.ResourceFieldSchema;
import org.apache.pig.ResourceStatistics;
import org.apache.pig.StoreFunc;
import org.apache.pig.StoreFuncInterface;
import org.apache.pig.backend.executionengine.ExecException;
import org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigSplit;
import org.apache.pig.data.DataType;
import org.apache.pig.data.Tuple;
import org.apache.pig.data.TupleFactory;
import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema;
import org.apache.pig.impl.util.StorageUtil;

public class MyStorage extends LoadFunc implements StoreFuncInterface, LoadMetadata {

    private static final Log LOG = LogFactory.getLog(MyStorage.class);
    private static final String utf8 = "UTF-8";
    private static String fieldDel = "\t";
    private static String recordDel = "\n";

    protected RecordReader recordReader = null;
    protected RecordWriter writer = null;

    public MyStorage() {
    }

    public MyStorage(String fieldDel) {
        this(fieldDel, "\n");
    }

    public MyStorage(String fieldDel, String recordDel) {
        this.fieldDel = fieldDel;
        this.recordDel = recordDel;
    }

    @Override
    public void setLocation(String s, Job job) throws IOException {
        FileInputFormat.setInputPaths(job, s);
    }

    @Override
    public InputFormat getInputFormat() throws IOException {
        return new MyStorageInputFormat(recordDel);
    }

    @Override
    public void prepareToRead(RecordReader recordReader, PigSplit pigSplit)
            throws IOException {
        this.recordReader = recordReader;
    }

    @Override
    public Tuple getNext() throws IOException {
        try {
            boolean flag = recordReader.nextKeyValue();
            if (!flag) {
                return null;
            }
            Text value = (Text) recordReader.getCurrentValue();
            String[] strArray = value.toString().split(fieldDel);
            List lst = new ArrayList<String>();
            int i = 0;
            for (String singleItem : strArray) {
                lst.add(i++, singleItem);
            }
            return TupleFactory.getInstance().newTuple(lst);
        } catch (InterruptedException e) {
            throw new ExecException("Read data error",
                    PigException.REMOTE_ENVIRONMENT, e);
        }
    }

    @Override
    public String relToAbsPathForStoreLocation(String location, Path curDir)
            throws IOException {
        return LoadFunc.getAbsolutePath(location, curDir);
    }

    @Override
    public OutputFormat getOutputFormat() throws IOException {
        return new MyStorageOutputFormat(StorageUtil.parseFieldDel(fieldDel),
                this.recordDel);
    }

    @Override
    public void setStoreLocation(String location, Job job) throws IOException {
        job.getConfiguration().set("mapred.textoutputformat.separator", "");
        FileOutputFormat.setOutputPath(job, new Path(location));
        if ("true".equals(job.getConfiguration().get(
                "output.compression.enabled"))) {
            FileOutputFormat.setCompressOutput(job, true);
            String codec = job.getConfiguration().get(
                    "output.compression.codec");
            try {
                FileOutputFormat.setOutputCompressorClass(job,
                        (Class<? extends CompressionCodec>) Class
                                .forName(codec));
            } catch (ClassNotFoundException e) {
                throw new RuntimeException("Class not found: " + codec);
            }
        } else {
            // This makes it so that storing to a directory ending with ".gz" or
            // ".bz2" works.
            setCompression(new Path(location), job);
        }
    }

    private void setCompression(Path path, Job job) {
        String location = path.getName();
        if (location.endsWith(".bz2") || location.endsWith(".bz")) {
            FileOutputFormat.setCompressOutput(job, true);
            FileOutputFormat.setOutputCompressorClass(job, BZip2Codec.class);
        } else if (location.endsWith(".gz")) {
            FileOutputFormat.setCompressOutput(job, true);
            FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);
        } else {
            FileOutputFormat.setCompressOutput(job, false);
        }
    }

    @Override
    public void checkSchema(ResourceSchema s) throws IOException {
        // TODO Auto-generated method stub
    }

    @Override
    public void prepareToWrite(RecordWriter writer) throws IOException {
        this.writer = writer;
    }

    @Override
    public void putNext(Tuple t) throws IOException {
        try {
            writer.write(null, t);
        } catch (InterruptedException e) {
            throw new IOException(e);
        }
    }

    @Override
    public void setStoreFuncUDFContextSignature(String signature) {
        // TODO Auto-generated method stub
    }

    @Override
    public void cleanupOnFailure(String location, Job job) throws IOException {
        StoreFunc.cleanupOnFailureImpl(location, job);
    }

    @Override
    public void cleanupOnSuccess(String location, Job job) throws IOException {
        // TODO Auto-generated method stub
    }

    @Override
    public ResourceSchema getSchema(String location, Job job)
            throws IOException {
        ResourceSchema rs = new ResourceSchema();
        FieldSchema c1 = new FieldSchema("c1", DataType.INTEGER);
        FieldSchema c2 = new FieldSchema("c2", DataType.INTEGER);
        FieldSchema c3 = new FieldSchema("c3", DataType.DOUBLE);
        ResourceFieldSchema fs1 = new ResourceFieldSchema(c1);
        ResourceFieldSchema fs2 = new ResourceFieldSchema(c2);
        ResourceFieldSchema fs3 = new ResourceFieldSchema(c3);
        rs.setFields(new ResourceFieldSchema[] { fs1, fs2, fs3 });
        return rs;
    }

    @Override
    public ResourceStatistics getStatistics(String location, Job job)
            throws IOException {
        // TODO Auto-generated method stub
        return null;
    }

    @Override
    public String[] getPartitionKeys(String location, Job job)
            throws IOException {
        // TODO Auto-generated method stub
        return null;
    }

    @Override
    public void setPartitionFilter(Expression partitionFilter)
            throws IOException {
        // TODO Auto-generated method stub
    }
}

class MyStorageInputFormat extends TextInputFormat {

    private final String recordDel;

    public MyStorageInputFormat(String recordDel) {
        this.recordDel = recordDel;
    }

    @Override
    public RecordReader<LongWritable, Text> createRecordReader(
            InputSplit split, TaskAttemptContext context) {
        String delimiter = context.getConfiguration().get(
                "textinputformat.record.delimiter");
        if (recordDel != null) {
            delimiter = recordDel;
        }
        byte[] recordDelimiterBytes = null;
        if (null != delimiter) {
            try {
                recordDelimiterBytes = decode(delimiter).getBytes("UTF-8");
            } catch (UnsupportedEncodingException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            } catch (IOException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
        }
        return new LineRecordReader(recordDelimiterBytes);
    }
    /**
     * The delimiter passed in from the workflow may be a special character,
     * expressed in octal or hexadecimal form (for example \001 or 0x01).
     *
     * @throws IOException
     */
    public static String decode(String str) throws IOException {
        String re = str;
        if (str != null && str.startsWith("\\")) {
            str = str.substring(1, str.length());
            String[] chars = str.split("\\\\");
            byte[] bytes = new byte[chars.length];
            for (int i = 0; i < chars.length; i++) {
                if (chars[i].equals("t")) {
                    bytes[i] = 9;
                } else if (chars[i].equals("r")) {
                    bytes[i] = 13;
                } else if (chars[i].equals("n")) {
                    bytes[i] = 10;
                } else if (chars[i].equals("b")) {
                    bytes[i] = 8;
                } else {
                    bytes[i] = Byte.decode(chars[i]);
                }
            }
            try {
                re = new String(bytes, "UTF-8");
            } catch (UnsupportedEncodingException e) {
                throw new IOException(str, e);
            }
        }
        return re;
    }
}

class MyStorageOutputFormat extends TextOutputFormat<WritableComparable, Tuple> {

    private final byte fieldDel;
    private final String recordDel;

    public MyStorageOutputFormat(byte delimiter) {
        this(delimiter, "\n");
    }

    public MyStorageOutputFormat(byte delimiter, String recordDel) {
        this.fieldDel = delimiter;
        this.recordDel = recordDel;
    }

    protected static class MyRecordWriter extends
            TextOutputFormat.LineRecordWriter<WritableComparable, Tuple> {

        private static byte[] newline;
        private final byte fieldDel;

        public MyRecordWriter(DataOutputStream out, byte fieldDel)
                throws UnsupportedEncodingException {
            this(out, fieldDel, "\n".getBytes("UTF-8"));
        }

        public MyRecordWriter(DataOutputStream out, byte fieldDel, byte[] record) {
            super(out);
            this.fieldDel = fieldDel;
            this.newline = record;
        }

        public synchronized void write(WritableComparable key, Tuple value)
                throws IOException {
            int sz = value.size();
            for (int i = 0; i < sz; i++) {
                StorageUtil.putField(out, value.get(i));
                if (i != sz - 1) {
                    out.writeByte(fieldDel);
                }
            }
            out.write(newline);
        }
    }

    @Override
    public RecordWriter<WritableComparable, Tuple> getRecordWriter(
            TaskAttemptContext job) throws IOException, InterruptedException {
        Configuration conf = job.getConfiguration();
        boolean isCompressed = getCompressOutput(job);
        CompressionCodec codec = null;
        String extension = "";
        if (isCompressed) {
            Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(
                    job, GzipCodec.class);
            codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass,
                    conf);
            extension = codec.getDefaultExtension();
        }
        Path file = getDefaultWorkFile(job, extension);
        FileSystem fs = file.getFileSystem(conf);
        if (!isCompressed) {
            FSDataOutputStream fileOut = fs.create(file, false);
            return new MyRecordWriter(fileOut, fieldDel,
                    this.recordDel.getBytes());
        } else {
            FSDataOutputStream fileOut = fs.create(file, false);
            return new MyRecordWriter(new DataOutputStream(
                    codec.createOutputStream(fileOut)), fieldDel,
                    this.recordDel.getBytes());
        }
    }
}
grunt> register /home/pig/pig-0.11.0/udflib/myStorage.jar
grunt> cat X;
keyDataKNZKCZY:ZDKJS:616150:AFS:3842708d_20131219194420-642464756keyDataKNZKCZY:ZDKJS:616614:AFS:3843920d_20131219194420-642464756keyDataKNZKCZY:ZDKJS:616661:AFS:3844040d_20131219194420-642464756
grunt> a = load 'X' using com.hcr.hadoop.pig.MyStorage('\\001','\\002');
grunt> dump a;
(keyData,KNZKCZY:ZDKJS:616150:AFS:3842708,d_20131219194420-642464756)
(keyData,KNZKCZY:ZDKJS:616614:AFS:3843920,d_20131219194420-642464756)
(keyData,KNZKCZY:ZDKJS:616661:AFS:3844040,d_20131219194420-642464756)
grunt>
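A note on the escaped delimiters above: the record delimiter '\\002' is passed through MyStorageInputFormat.decode before being handed to LineRecordReader, which turns the octal escape into the raw byte 0x02, while the field delimiter '\\001' ends up in String.split, where the regex engine itself interprets \001 as an octal escape. Below is a minimal standalone check of decode (a sketch only; it sits in the same package because MyStorageInputFormat is package-private, and the DecodeCheck class name is just for illustration):

package com.hcr.hadoop.pig;

import java.io.IOException;

public class DecodeCheck {
    public static void main(String[] args) throws IOException {
        // In Java source, "\\001" is the four characters \001 --
        // the same value the Pig script passes as '\\001'.
        String decoded = MyStorageInputFormat.decode("\\001");
        // decode() strips the leading backslash and feeds "001" to
        // Byte.decode, which parses it as octal, so the result is a
        // single character with code point 1.
        System.out.println(decoded.length());        // prints 1
        System.out.println((int) decoded.charAt(0)); // prints 1
    }
}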
Sometimes you don't want to spell out a concrete schema in the load statement (for example, there are too many fields, or the schema isn't something you want every script to repeat) and would rather have the loader supply one itself. To do that, implement the LoadMetadata interface and override:
@Override
public ResourceSchema getSchema(String location, Job job) throws IOException {
    ResourceSchema rs = new ResourceSchema();
    FieldSchema c1 = new FieldSchema("c1", DataType.INTEGER);
    FieldSchema c2 = new FieldSchema("c2", DataType.INTEGER);
    FieldSchema c3 = new FieldSchema("c3", DataType.DOUBLE);
    ResourceFieldSchema fs1 = new ResourceFieldSchema(c1);
    ResourceFieldSchema fs2 = new ResourceFieldSchema(c2);
    ResourceFieldSchema fs3 = new ResourceFieldSchema(c3);
    rs.setFields(new ResourceFieldSchema[] { fs1, fs2, fs3 });
    return rs;
}
In this simple example getSchema just returns a fixed schema, which the load statement then picks up directly:
grunt> register /home/pig/pig-0.11.0/udflib/myStorage.jar
grunt> a = load 'student' using com.hcr.hadoop.pig.MyStorage(',','/');
grunt> describe a;
a: {c1: int,c2: int,c3: double}
grunt> b = foreach a generate c1,c2,c3;
grunt> describe b;
b: {c1: int,c2: int,c3: double}
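As a side note, the same three-column schema can be built from a schema string instead of assembling FieldSchema objects by hand. The following is only a sketch of an alternative getSchema body; it assumes org.apache.pig.impl.util.Utils.getSchemaFromString is available in your Pig build (it ships with Pig 0.11), and it uses a broad catch because the declared exception type differs across Pig versions:

// Alternative body for MyStorage.getSchema; requires
// import org.apache.pig.impl.util.Utils; in the class.
@Override
public ResourceSchema getSchema(String location, Job job) throws IOException {
    try {
        // Parse "name:type" pairs into a Schema, then wrap it as a ResourceSchema.
        return new ResourceSchema(Utils.getSchemaFromString("c1:int, c2:int, c3:double"));
    } catch (Exception e) {
        throw new IOException("Could not parse schema string", e);
    }
}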
Source: http://blog.csdn.net/ruishenh/article/details/12192391
前言 大家都知道 Redis 是单线程的.真正的内行会告诉你,实际上 Redis 并不是完全单线程,因为在执行磁盘上的特定慢操作时会有多线程.目前为止多线程操作绝大部分集中在 I/O 上以至于在不同线 ...