1. Read data from HBase and write the computed results back into HBase
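
The job below scans a source table family3 (column family cf) and counts rows per distinct value of the cf:GroupID qualifier, writing the counts into a new table family_sum_by_groupid. As an illustration only (the class name LoadFamily3Sample, the row keys and the GroupID values are hypothetical, not from the original post; the table name, column family and qualifier are taken from the job code), a minimal sketch for loading a few sample rows into family3 with the same old-style client API used by the job might look like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class LoadFamily3Sample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // same ZooKeeper quorum as in the job below
        conf.set("hbase.zookeeper.quorum", "192.168.56.101,192.168.56.102,192.168.56.103");
        // family3 with column family cf is assumed to exist already (e.g. created via HBaseAdmin)
        HTable table = new HTable(conf, "family3");
        String[] groups = {"g1", "g2", "g1", "g3", "g2"};   // hypothetical GroupID values
        for (int i = 0; i < groups.length; i++) {
            Put put = new Put(Bytes.toBytes("row" + i));
            // each row carries a cf:GroupID cell; the mapper counts rows per GroupID value
            put.add(Bytes.toBytes("cf"), Bytes.toBytes("GroupID"), Bytes.toBytes(groups[i]));
            table.put(put);
        }
        table.close();
    }
}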

package com.yeliang;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;

/**
 * Read data from HBase, and after the analysis insert the results back into HBase.
 * @author liang.ye
 */
public class FamilyHBase {

    public static class Map extends TableMapper<Text, IntWritable> {
        @Override
        protected void map(ImmutableBytesWritable key, Result value, Context context)
                throws IOException, InterruptedException {
            // Emit (GroupID value, 1) for every cell whose qualifier is "GroupID"
            for (Cell cell : value.rawCells()) {
                if (new String(CellUtil.cloneQualifier(cell)).equals("GroupID")) {
                    context.write(new Text(new String(CellUtil.cloneValue(cell))), new IntWritable(1));
                }
            }
        }
    }

    public static class Reduce extends TableReducer<Text, IntWritable, NullWritable> {
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable i : values) {
                sum += i.get();
            }
            byte[] keyBytes = Bytes.toBytes(key.toString());
            if (keyBytes.length > 0) {
                // One row per GroupID: column family "content", qualifier "count", value is the count
                Put put = new Put(keyBytes);
                put.add(Bytes.toBytes("content"), Bytes.toBytes("count"),
                        Bytes.toBytes(String.valueOf(sum)));
                context.write(NullWritable.get(), put);
            }
        }
    }

    public static void createHBaseTable(String tableName) throws IOException {
        HTableDescriptor htd = new HTableDescriptor(tableName);
        HColumnDescriptor col = new HColumnDescriptor("content");
        htd.addFamily(col);
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.56.101,192.168.56.102,192.168.56.103");
        HBaseAdmin admin = new HBaseAdmin(conf);
        if (admin.tableExists(tableName)) {
            System.out.println("table exists, trying to recreate table......");
            admin.disableTable(tableName);
            admin.deleteTable(tableName);
        }
        System.out.println("create new table:" + tableName);
        admin.createTable(htd);
        admin.close();
    }

    public static void main(String[] args) throws IOException,
            InterruptedException, ClassNotFoundException {
        String tableName = "family_sum_by_groupid";
        Configuration conf = HBaseConfiguration.create();
        // conf.set("hbase.zookeeper.quorum", "192.168.56.101,192.168.56.102,192.168.56.103");
        createHBaseTable(tableName);
        Job job = new Job(conf, "family_sum_by_groupid");
        job.setJarByClass(FamilyHBase.class);
        Scan scan = new Scan();
        scan.addFamily(Bytes.toBytes("cf"));
        TableMapReduceUtil.initTableMapperJob("family3", scan, Map.class, Text.class, IntWritable.class, job);
        TableMapReduceUtil.initTableReducerJob(tableName, Reduce.class, job);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
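
After the job finishes, each GroupID ends up as a row key in family_sum_by_groupid with its count stored under content:count. A minimal sketch for reading the results back with the same client API (the class name PrintGroupCounts is hypothetical and not from the original post):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class PrintGroupCounts {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.56.101,192.168.56.102,192.168.56.103");
        HTable table = new HTable(conf, "family_sum_by_groupid");
        ResultScanner scanner = table.getScanner(new Scan());
        for (Result r : scanner) {
            // the row key is the GroupID; content:count holds the count written by the reducer
            byte[] count = r.getValue(Bytes.toBytes("content"), Bytes.toBytes("count"));
            System.out.println(Bytes.toString(r.getRow()) + " -> " + Bytes.toString(count));
        }
        scanner.close();
        table.close();
    }
}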

  2. Read data from HDFS and insert the computed results into HBase

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

/**
 * Read data from HDFS, analyze it, and insert the results into HBase.
 * @author liang.ye
 */
public class WordCountHBase {

    public static class Map extends Mapper<LongWritable, Text, Text, IntWritable> {
        private IntWritable i = new IntWritable(1);

        public void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Split each input line on spaces and emit (word, 1)
            String s[] = value.toString().trim().split(" ");
            for (String m : s) {
                context.write(new Text(m), i);
            }
        }
    }

    public static class Reduce extends TableReducer<Text, IntWritable, NullWritable> {
        public void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable i : values) {
                sum += i.get();
            }
            byte[] keyBytes = Bytes.toBytes(key.toString());
            if (keyBytes.length > 0) {
                // One row per word: column family "content", qualifier "count", value is the count
                Put put = new Put(keyBytes);
                put.add(Bytes.toBytes("content"), Bytes.toBytes("count"),
                        Bytes.toBytes(String.valueOf(sum)));
                context.write(NullWritable.get(), put);
            }
        }
    }

    public static void createHBaseTable(String tableName) throws IOException {
        HTableDescriptor htd = new HTableDescriptor(tableName);
        HColumnDescriptor col = new HColumnDescriptor("content");
        htd.addFamily(col);
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.56.101,192.168.56.102,192.168.56.103");
        HBaseAdmin admin = new HBaseAdmin(conf);
        if (admin.tableExists(tableName)) {
            System.out.println("table exists, trying to recreate table......");
            admin.disableTable(tableName);
            admin.deleteTable(tableName);
        }
        System.out.println("create new table:" + tableName);
        admin.createTable(htd);
        admin.close();
    }

    public static void main(String[] args) throws IOException,
            InterruptedException, ClassNotFoundException {
        String tableName = "WordCount2";
        // use an HBase-aware configuration so the job carries the HBase/ZooKeeper settings
        Configuration conf = HBaseConfiguration.create();
        conf.set(TableOutputFormat.OUTPUT_TABLE, tableName);
        createHBaseTable(tableName);
        String input = args[0];
        Job job = new Job(conf, "WordCount table with " + input);
        job.setJarByClass(WordCountHBase.class);
        job.setNumReduceTasks(3);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setInputFormatClass(TextInputFormat.class);
        job.setOutputFormatClass(TableOutputFormat.class);
        FileInputFormat.addInputPath(job, new Path(input));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
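
Note that this second job wires the HBase sink by hand: it puts the table name into the configuration via TableOutputFormat.OUTPUT_TABLE and sets TableOutputFormat as the output format class. An equivalent and slightly shorter wiring, as used in the first example, lets TableMapReduceUtil do that work. A sketch of the relevant lines of main() under that assumption (same table, mapper and reducer as above; it additionally requires importing org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil):

Job job = new Job(conf, "WordCount table with " + input);
job.setJarByClass(WordCountHBase.class);
job.setMapperClass(Map.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(IntWritable.class);
job.setInputFormatClass(TextInputFormat.class);
FileInputFormat.addInputPath(job, new Path(input));
// sets the reducer class, TableOutputFormat and the output table name in one call
TableMapReduceUtil.initTableReducerJob(tableName, Reduce.class, job);
System.exit(job.waitForCompletion(true) ? 0 : 1);

Either way, the job is packaged into a jar and launched with hadoop jar; the exact command depends on how the project is built.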

  
