1. Read data from HBase and write the computed results back into HBase

package com.yeliang;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

/**
 * Read data from HBase, and after the analysis write the results back into HBase.
 * @author liang.ye
 *
 */
public class FamilyHBase {

public static class Map extends TableMapper<Text, IntWritable> {

@Override
protected void map(ImmutableBytesWritable key, Result value, Context context)
throws IOException, InterruptedException {
for (Cell cell : value.rawCells())
{
if(new String(CellUtil.cloneQualifier(cell)).equals("GroupID")){
context.write(new Text(new String(CellUtil.cloneValue(cell))), new IntWritable(1));
}
}
}
}

public static class Reduce extends TableReducer<Text, IntWritable, NullWritable> {
public void reduce(Text key, Iterable<IntWritable> values,
Context context) throws IOException, InterruptedException {
int sum = 0;
for (IntWritable i : values) {
sum += i.get();
}
byte[] keyBytes = Bytes.toBytes(key.toString());
if(keyBytes.length>0){
Put put = new Put(keyBytes);
// Instantiate a Put; each group ID is stored as its own row
put.add(Bytes.toBytes("content"), Bytes.toBytes("count"),
Bytes.toBytes(String.valueOf(sum)));
// Column family "content", qualifier "count", value is the count
context.write(NullWritable.get(), put);
}
}
}

public static void createHBaseTable(String tableName) throws IOException {
HTableDescriptor htd = new HTableDescriptor(tableName);
HColumnDescriptor col = new HColumnDescriptor("content");
htd.addFamily(col);
Configuration conf = HBaseConfiguration.create();
conf.set("hbase.zookeeper.quorum","192.168.56.101,192.168.56.102,192.168.56.103");
HBaseAdmin admin = new HBaseAdmin(conf);
if (admin.tableExists(tableName)) {
System.out.println("table exists, trying to recreate table......");
admin.disableTable(tableName);
admin.deleteTable(tableName);
}
System.out.println("create new table:" + tableName);
admin.createTable(htd);
}

public static void main(String[] args) throws IOException,
InterruptedException, ClassNotFoundException {
String tableName = "family_sum_by_groupid";
Configuration conf = HBaseConfiguration.create();
//conf.set("hbase.zookeeper.quorum","192.168.56.101,192.168.56.102,192.168.56.103");
createHBaseTable(tableName);
Job job = new Job(conf, "family_sum_by_groupid ");
job.setJarByClass(FamilyHBase.class);
Scan scan = new Scan();
scan.addFamily(Bytes.toBytes("cf"));
TableMapReduceUtil.initTableMapperJob("family3", scan, Map.class, Text.class, IntWritable.class, job);
TableMapReduceUtil.initTableReducerJob(tableName, Reduce.class, job);
System.exit(job.waitForCompletion(true) ? 0 : 1);
}
}
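
To give this first job something to count, the source table family3 must already contain a cf:GroupID column, which is what the Map class above scans for. Below is a minimal sketch for loading a few rows by hand, using the same old-style HBase client API as the code above (HTable, put.add); the class name, row keys and group IDs are made-up sample values, and the ZooKeeper quorum is assumed to come from hbase-site.xml (or the commented-out quorum setting above).

package com.yeliang;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class SeedFamilyTable {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Same layout as the job expects: table "family3", family "cf", qualifier "GroupID"
        HTable table = new HTable(conf, "family3");
        // Placeholder rows for illustration only: row key = user, value = group ID
        String[][] rows = { {"user1", "g01"}, {"user2", "g01"}, {"user3", "g02"} };
        for (String[] r : rows) {
            Put put = new Put(Bytes.toBytes(r[0]));
            put.add(Bytes.toBytes("cf"), Bytes.toBytes("GroupID"), Bytes.toBytes(r[1]));
            table.put(put);
        }
        table.close();
    }
}

With data like this in place, running FamilyHBase should produce one row per distinct GroupID in family_sum_by_groupid, with the total in content:count.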

2. Read data from HDFS and write the computed results into HBase

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
import org.apache.hadoop.hbase.mapreduce.TableReducer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

/**
 * Read data from HDFS, analyze it, and write the results into HBase.
 * @author liang.ye
 *
 */
public class WordCountHBase {

public static class Map extends Mapper<LongWritable, Text, Text, IntWritable> {

private IntWritable i = new IntWritable(1);

public void map(LongWritable key, Text value, Context context)
throws IOException, InterruptedException {
String s[] = value.toString().trim().split(" ");
// Split each input line on spaces
for (String m : s) {
context.write(new Text(m), i);
}
}
}

public static class Reduce extends TableReducer<Text, IntWritable, NullWritable> {
public void reduce(Text key, Iterable<IntWritable> values,
Context context) throws IOException, InterruptedException {
int sum = 0;
for (IntWritable i : values) {
sum += i.get();
}
byte[] keyBytes = Bytes.toBytes(key.toString());
if(keyBytes.length>0){
Put put = new Put(keyBytes);
// Instantiate a Put; each word is stored as its own row
put.add(Bytes.toBytes("content"), Bytes.toBytes("count"),
Bytes.toBytes(String.valueOf(sum)));
// Column family "content", qualifier "count", value is the count
context.write(NullWritable.get(), put);
}
}
}

public static void createHBaseTable(String tableName) throws IOException {
HTableDescriptor htd = new HTableDescriptor(tableName);
HColumnDescriptor col = new HColumnDescriptor("content");
htd.addFamily(col);
Configuration conf = HBaseConfiguration.create();
conf.set("hbase.zookeeper.quorum","192.168.56.101,192.168.56.102,192.168.56.103");
HBaseAdmin admin = new HBaseAdmin(conf);
if (admin.tableExists(tableName)) {
System.out.println("table exists, trying to recreate table......");
admin.disableTable(tableName);
admin.deleteTable(tableName);
}
System.out.println("create new table:" + tableName);
admin.createTable(htd);
}

public static void main(String[] args) throws IOException,
InterruptedException, ClassNotFoundException {
String tableName = "WordCount2";
Configuration conf = new Configuration();
conf.set(TableOutputFormat.OUTPUT_TABLE, tableName);
createHBaseTable(tableName);
String input = args[0];
Job job = new Job(conf, "WordCount table with " + input);
job.setJarByClass(WordCountHBase.class);
job.setNumReduceTasks(3);
job.setMapperClass(Map.class);
job.setReducerClass(Reduce.class);
job.setMapOutputKeyClass(Text.class);
job.setMapOutputValueClass(IntWritable.class);
job.setInputFormatClass(TextInputFormat.class);
job.setOutputFormatClass(TableOutputFormat.class);
FileInputFormat.addInputPath(job, new Path(input));
System.exit(job.waitForCompletion(true) ? 0 : 1);
}
}
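
After either job finishes, the quickest check is scan 'family_sum_by_groupid' or scan 'WordCount2' in the HBase shell. The same check in Java is just a scan over the output table; the sketch below (again using the old HTable client API, with the class name DumpCounts chosen only for illustration and the output table name passed as an argument) prints every row key together with its content:count value.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class DumpCounts {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // args[0] is the output table, e.g. "family_sum_by_groupid" or "WordCount2"
        HTable table = new HTable(conf, args[0]);
        Scan scan = new Scan();
        scan.addColumn(Bytes.toBytes("content"), Bytes.toBytes("count"));
        ResultScanner scanner = table.getScanner(scan);
        for (Result r : scanner) {
            byte[] count = r.getValue(Bytes.toBytes("content"), Bytes.toBytes("count"));
            // Row key is the group ID (job 1) or the word (job 2); the count was stored as a string
            System.out.println(Bytes.toString(r.getRow()) + " = " + Bytes.toString(count));
        }
        scanner.close();
        table.close();
    }
}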

  
