HBase version: 1.3.1

Purpose: show how to use the newer HBase Java client API.

The following Java API usage patterns were tried and verified:

1. Create a table

2. Create a table (pre-split regions)

3. Single put

4. Batch put

5. Batch put (client-side write buffer)

6. Single get

7. Batch get

8. Simple scan

9. Mixed operations

■Sample code

https://github.com/quchunhui/hbase_sample

■pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <artifactId>hbase_sample</artifactId>
    <groupId>hbase_sample</groupId>
    <version>1.0</version>
    <modelVersion>4.0.0</modelVersion>
    <dependencies>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-client</artifactId>
            <version>1.3.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-server</artifactId>
            <version>1.3.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hbase</groupId>
            <artifactId>hbase-common</artifactId>
            <version>1.3.1</version>
        </dependency>
    </dependencies>
    <build>
        <sourceDirectory>src/main/java</sourceDirectory>
        <outputDirectory>target/classes</outputDirectory>
        <plugins>
            <plugin>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.1</version>
                <configuration>
                    <source>1.7</source>
                    <target>1.7</target>
                </configuration>
            </plugin>
            <plugin>
                <artifactId>maven-assembly-plugin</artifactId>
                <version>2.4</version>
                <configuration>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>single</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
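
All of the samples below reference a small constants helper class from the sample repository that holds the table name and column family names. Its real contents are in the GitHub project linked above; a minimal sketch, with the field values assumed from how they are used, might look like:

package api;

// Minimal sketch of the constants helper referenced by the samples.
// The actual values live in the sample repository; the ones below are assumptions.
public class constants {
    public static final String TABLE_NAME = "TEST1";
    public static final String COLUMN_FAMILY_DF = "df";
    public static final String COLUMN_FAMILY_EX = "ex";
}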

===1. Create a table===

package api;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;

public class create_table_sample1 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin();

        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("TEST1"));
        //MemStore flush size. Default 128M; must not be smaller than 1M.
        desc.setMemStoreFlushSize(2097152L);
        //Maximum HFile size. Default 10G; must not be smaller than 2M.
        desc.setMaxFileSize(10485760L);
        //WAL durability: write the log synchronously or asynchronously.
        desc.setDurability(Durability.SYNC_WAL);

        HColumnDescriptor family1 = new HColumnDescriptor(constants.COLUMN_FAMILY_DF.getBytes());
        family1.setTimeToLive(2 * 60 * 60 * 24); //TTL in seconds
        family1.setMaxVersions(2); //number of versions
        family1.setBlockCacheEnabled(true);
        desc.addFamily(family1);

        HColumnDescriptor family2 = new HColumnDescriptor(constants.COLUMN_FAMILY_EX.getBytes());
        //time to live of the data, in seconds
        family2.setTimeToLive(3 * 60 * 60 * 24);
        //minimum number of versions to keep; default 0, only meaningful together with TTL
        family2.setMinVersions(2);
        //maximum number of versions; default 1
        family2.setMaxVersions(3);
        //Bloom filter type: ROW or ROWCOL; ROWCOL indexes row key plus column qualifier. Default ROW.
        family2.setBloomFilterType(BloomType.ROW);
        //data block size in bytes; default 65536
        family2.setBlocksize(65536);
        //cache this family's data blocks in the block cache on read; default true
        family2.setBlockCacheEnabled(true);
        // //cache Bloom filter blocks on write; default false
        // family2.setCacheBloomsOnWrite(false);
        // //cache index blocks on write; default false
        // family2.setCacheIndexesOnWrite(false);
        // //compression algorithm used for storage; default NONE
        // family2.setCompressionType(Compression.Algorithm.NONE);
        // //compression algorithm used during compaction; default NONE
        // family2.setCompactionCompressionType(Compression.Algorithm.NONE);
        // //data block encoding applied in memory and on disk, distinct from codecs such as Snappy; default NONE
        // family2.setDataBlockEncoding(DataBlockEncoding.NONE);
        // //evict this family's cached blocks when an HFile is closed; default false
        // family2.setEvictBlocksOnClose(false);
        // //give this family's blocks higher priority in the LRU block cache; default false
        // family2.setInMemory(false);
        // //replication scope; REPLICATION_SCOPE_LOCAL (0, the default) means the family is not replicated
        // family2.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
        desc.addFamily(family2);

        admin.createTable(desc);
        admin.close();
        connection.close();
    }
}
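
As a quick sanity check, the schema that was just created can be read back through the Admin API. A minimal sketch (the class name describe_table_sample is made up here; the quorum and table name are taken from the sample above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class describe_table_sample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin();
        //read the descriptor of the table created above and print each family's settings
        HTableDescriptor desc = admin.getTableDescriptor(TableName.valueOf("TEST1"));
        for (HColumnDescriptor family : desc.getColumnFamilies()) {
            System.out.println(family.getNameAsString()
                    + " ttl=" + family.getTimeToLive()
                    + " maxVersions=" + family.getMaxVersions()
                    + " bloom=" + family.getBloomFilterType());
        }
        admin.close();
        connection.close();
    }
}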

===2. Create a table (pre-split regions)===

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class create_table_sample2 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin();

        //drop the table first if it already exists
        TableName table_name = TableName.valueOf("TEST1");
        if (admin.tableExists(table_name)) {
            admin.disableTable(table_name);
            admin.deleteTable(table_name);
        }

        HTableDescriptor desc = new HTableDescriptor(table_name);
        HColumnDescriptor family1 = new HColumnDescriptor(constants.COLUMN_FAMILY_DF.getBytes());
        family1.setTimeToLive(3 * 60 * 60 * 24); //TTL in seconds
        family1.setBloomFilterType(BloomType.ROW); //row-level Bloom filter
        family1.setMaxVersions(3); //number of versions
        desc.addFamily(family1);
        HColumnDescriptor family2 = new HColumnDescriptor(constants.COLUMN_FAMILY_EX.getBytes());
        family2.setTimeToLive(2 * 60 * 60 * 24); //TTL in seconds
        family2.setBloomFilterType(BloomType.ROW); //row-level Bloom filter
        family2.setMaxVersions(2); //number of versions
        desc.addFamily(family2);

        //split keys that define the region boundaries created up front
        byte[][] splitKeys = {
                Bytes.toBytes("row01"),
                Bytes.toBytes("row02"),
                Bytes.toBytes("row04"),
                Bytes.toBytes("row06"),
                Bytes.toBytes("row08"),
        };
        admin.createTable(desc, splitKeys);
        admin.close();
        connection.close();
    }
}
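
Besides passing explicit split keys, Admin.createTable also accepts a start key, an end key and a region count, and computes evenly spaced split points itself. A minimal sketch, assuming the same desc and admin as in the sample above:

//alternative: let HBase compute 6 evenly spaced regions between the two boundary keys
admin.createTable(desc, Bytes.toBytes("row01"), Bytes.toBytes("row08"), 6);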

===3. Single put===

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import java.util.Random;

public class table_put_sample1 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf(constants.TABLE_NAME));

        //candidate values for the randomly generated test row
        Random random = new Random();
        String[] rows = new String[] {"01", "02", "03"};
        String[] names = new String[] {"zhang san", "li si", "wang wu", "wei liu"};
        String[] sexs = new String[] {"men", "women"};
        String[] heights = new String[] {"165cm", "170cm", "175cm", "180cm"};
        String[] weights = new String[] {"50kg", "55kg", "60kg", "65kg", "70kg", "75kg", "80kg"};

        //build a single Put with columns in both families and write it
        Put put = new Put(Bytes.toBytes("row" + rows[random.nextInt(rows.length)]));
        String name = names[random.nextInt(names.length)];
        put.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes(), name.getBytes());
        String sex = sexs[random.nextInt(sexs.length)];
        put.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "sex".getBytes(), sex.getBytes());
        String height = heights[random.nextInt(heights.length)];
        put.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "height".getBytes(), height.getBytes());
        String weight = weights[random.nextInt(weights.length)];
        put.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes(), weight.getBytes());
        table.put(put);

        table.close();
        connection.close();
    }
}
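
One design note: the samples mix String.getBytes() with the HBase utility Bytes.toBytes(). Bytes.toBytes(String) always encodes as UTF-8 regardless of the platform default charset, so the addColumn calls above could equally be written as, for example:

put.addColumn(Bytes.toBytes(constants.COLUMN_FAMILY_DF), Bytes.toBytes("name"), Bytes.toBytes(name));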

===4. Batch put===

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;

public class table_put_sample2 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf(constants.TABLE_NAME));

        //candidate values for the randomly generated test rows
        Random random = new Random();
        String[] rows = new String[] {"01", "02", "03"};
        String[] names = new String[] {"zhang san", "li si", "wang wu", "wei liu"};
        String[] sexs = new String[] {"men", "women"};
        String[] heights = new String[] {"165cm", "170cm", "175cm", "180cm"};
        String[] weights = new String[] {"50kg", "55kg", "60kg", "65kg", "70kg", "75kg", "80kg"};

        //build one Put per row and send them to the server in a single call
        List<Put> puts = new ArrayList<>();
        for (String row : rows) {
            Put put = new Put(Bytes.toBytes("row" + row));
            String name = names[random.nextInt(names.length)];
            put.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes(), name.getBytes());
            String sex = sexs[random.nextInt(sexs.length)];
            put.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "sex".getBytes(), sex.getBytes());
            String height = heights[random.nextInt(heights.length)];
            put.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "height".getBytes(), height.getBytes());
            String weight = weights[random.nextInt(weights.length)];
            put.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes(), weight.getBytes());
            puts.add(put);
        }
        table.put(puts);

        table.close();
        connection.close();
    }
}
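
When a batch put partially fails, the client throws RetriesExhaustedWithDetailsException, which reports each row that could not be written. A minimal sketch of how the table.put(puts) call above could be wrapped (assuming org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException and Bytes are imported):

try {
    table.put(puts);
} catch (RetriesExhaustedWithDetailsException e) {
    //inspect each failed mutation individually
    for (int i = 0; i < e.getNumExceptions(); i++) {
        System.out.println("failed row=" + Bytes.toString(e.getRow(i).getRow())
                + " cause=" + e.getCause(i));
    }
}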

===5. Batch put (client-side write buffer)===

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;

public class table_put_sample4 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        conf.set("hbase.client.write.buffer", "1048576"); //1 MB client-side write buffer
        Connection connection = ConnectionFactory.createConnection(conf);
        //BufferedMutator buffers mutations on the client and sends them in batches
        BufferedMutator table = connection.getBufferedMutator(TableName.valueOf(constants.TABLE_NAME));
        System.out.print("[--------]write buffer size = " + table.getWriteBufferSize());

        //candidate values for the randomly generated test rows
        Random random = new Random();
        String[] rows = new String[] {"01", "02", "03", "04", "05"};
        String[] names = new String[] {"zhang san", "li si", "wang wu", "wei liu"};
        String[] sexs = new String[] {"men", "women"};
        String[] heights = new String[] {"165cm", "170cm", "175cm", "180cm"};
        String[] weights = new String[] {"50kg", "55kg", "60kg", "65kg", "70kg", "75kg", "80kg"};

        List<Mutation> batch = new ArrayList<>();
        for (String row : rows) {
            Put put = new Put(Bytes.toBytes("row" + row));
            String name = names[random.nextInt(names.length)];
            put.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes(), name.getBytes());
            String sex = sexs[random.nextInt(sexs.length)];
            put.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "sex".getBytes(), sex.getBytes());
            String height = heights[random.nextInt(heights.length)];
            put.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "height".getBytes(), height.getBytes());
            String weight = weights[random.nextInt(weights.length)];
            put.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes(), weight.getBytes());
            batch.add(put);
        }
        //mutate() only queues the mutations; flush() forces them to the server
        table.mutate(batch);
        table.flush();
        table.close();
        connection.close();
    }
}
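
Instead of setting hbase.client.write.buffer on the Configuration, the buffer size (and a listener for asynchronous write failures) can also be configured per mutator through BufferedMutatorParams. A minimal sketch, assuming the same connection and constants as above:

BufferedMutatorParams params = new BufferedMutatorParams(TableName.valueOf(constants.TABLE_NAME))
        .writeBufferSize(1048576) //1 MB client-side buffer
        .listener(new BufferedMutator.ExceptionListener() {
            @Override
            public void onException(RetriesExhaustedWithDetailsException e, BufferedMutator mutator) {
                //called asynchronously when buffered mutations fail after all retries
                System.out.println("failed to write " + e.getNumExceptions() + " mutations");
            }
        });
BufferedMutator mutator = connection.getBufferedMutator(params);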

===6. Single get===

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;

public class table_get_sample3 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf(constants.TABLE_NAME));

        //read selected columns of row01 (assumed to have been written by the put samples)
        Get get = new Get(("row01").getBytes());
        get.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes());
        get.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "sex".getBytes());
        get.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "height".getBytes());
        get.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes());

        //getValue returns null for any cell that does not exist
        Result result = table.get(get);
        byte[] name = result.getValue(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes());
        byte[] sex = result.getValue(constants.COLUMN_FAMILY_DF.getBytes(), "sex".getBytes());
        byte[] height = result.getValue(constants.COLUMN_FAMILY_EX.getBytes(), "height".getBytes());
        byte[] weight = result.getValue(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes());
        System.out.print("[------]name=" + new String(name) + "\n");
        System.out.print("[------]sex=" + new String(sex) + "\n");
        System.out.print("[------]height=" + new String(height) + "\n");
        System.out.print("[------]weight=" + new String(weight) + "\n");

        table.close();
        connection.close();
    }
}

===7. Batch get===

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import java.util.ArrayList;
import java.util.List;

public class table_get_sample4 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf(constants.TABLE_NAME));

        //first get: fetch both whole families of row01
        List<Get> gets = new ArrayList<>();
        Get get1 = new Get(("row01").getBytes());
        get1.addFamily(constants.COLUMN_FAMILY_DF.getBytes());
        get1.addFamily(constants.COLUMN_FAMILY_EX.getBytes());
        gets.add(get1);
        //second get: fetch selected columns of row02
        Get get2 = new Get(("row02").getBytes());
        get2.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes());
        get2.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "sex".getBytes());
        get2.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "height".getBytes());
        get2.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes());
        gets.add(get2);

        //a single RPC round trip returns one Result per Get
        Result[] results = table.get(gets);
        for (Result result : results) {
            byte[] name = result.getValue(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes());
            byte[] sex = result.getValue(constants.COLUMN_FAMILY_DF.getBytes(), "sex".getBytes());
            byte[] height = result.getValue(constants.COLUMN_FAMILY_EX.getBytes(), "height".getBytes());
            byte[] weight = result.getValue(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes());
            System.out.print("[------]name=" + new String(name) + "\n");
            System.out.print("[------]sex=" + new String(sex) + "\n");
            System.out.print("[------]height=" + new String(height) + "\n");
            System.out.print("[------]weight=" + new String(weight) + "\n");
        }

        table.close();
        connection.close();
    }
}

===8. Simple scan===

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;

public class table_scan_sample3 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf(constants.TABLE_NAME));

        //scan the name column of COLUMN_FAMILY_DF plus the whole COLUMN_FAMILY_EX family
        Scan scan = new Scan();
        scan.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes());
        scan.addFamily(constants.COLUMN_FAMILY_EX.getBytes());

        ResultScanner rs = table.getScanner(scan);
        for (Result r = rs.next(); r != null; r = rs.next()) {
            byte[] row_key = r.getRow();
            System.out.print("[------]row_key=" + new String(row_key) + "\n");
            byte[] name = r.getValue(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes());
            System.out.print("[------]name=" + new String(name) + "\n");
            byte[] weight = r.getValue(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes());
            System.out.print("[------]weight=" + new String(weight) + "\n");
        }
        rs.close();

        table.close();
        connection.close();
    }
}
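
Beyond the simple full-table scan above, the Scan object can also restrict the row range and control how many rows each RPC fetches. A minimal sketch, reusing the Table from the sample above plus org.apache.hadoop.hbase.util.Bytes (the row keys are the ones used by the put samples):

Scan scan = new Scan();
scan.setStartRow(Bytes.toBytes("row02")); //inclusive start row
scan.setStopRow(Bytes.toBytes("row04"));  //exclusive stop row
scan.setCaching(100);                     //rows fetched per RPC from the RegionServer
scan.addFamily(constants.COLUMN_FAMILY_DF.getBytes());
ResultScanner rs = table.getScanner(scan);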

===9. Mixed operations===

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import java.util.ArrayList;
import java.util.List;

public class table_batch_sample2 {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "192.168.1.80,192.168.1.81,192.168.1.82");
        conf.set("hbase.client.write.buffer", "1048576"); //1 MB client-side write buffer
        Connection connection = ConnectionFactory.createConnection(conf);
        BufferedMutator mutator = connection.getBufferedMutator(TableName.valueOf(constants.TABLE_NAME));

        //random: helper from the sample project that generates random test values
        List<Mutation> batch = new ArrayList<>();
        byte[] row_key = random.getRowKey();
        Put put = new Put(row_key);
        put.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes(), random.getName());
        put.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "sex".getBytes(), random.getSex());
        put.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "height".getBytes(), random.getHeight());
        put.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes(), random.getWeight());
        batch.add(put);
        Delete delete = new Delete(row_key);
        delete.addFamily(constants.COLUMN_FAMILY_DF.getBytes());
        delete.addColumn(constants.COLUMN_FAMILY_EX.getBytes(), "weight".getBytes());
        batch.add(delete);
        mutator.mutate(batch);

        //first read: the mutations above are still buffered on the client, so this get may not see them yet
        Table table = connection.getTable(TableName.valueOf(constants.TABLE_NAME));
        Get get = new Get(row_key);
        Result result1 = table.get(get);
        System.out.print("[------]name=" + getValue(result1, constants.COLUMN_FAMILY_DF, "name") + "\n");
        System.out.print("[------]sex=" + getValue(result1, constants.COLUMN_FAMILY_DF, "sex") + "\n");
        System.out.print("[------]height=" + getValue(result1, constants.COLUMN_FAMILY_EX, "height") + "\n");
        System.out.print("[------]weight=" + getValue(result1, constants.COLUMN_FAMILY_EX, "weight") + "\n");

        //second read: after flush() the Put and the Delete have reached the server
        mutator.flush();
        Result result2 = table.get(get);
        System.out.print("[------]name=" + getValue(result2, constants.COLUMN_FAMILY_DF, "name") + "\n");
        System.out.print("[------]sex=" + getValue(result2, constants.COLUMN_FAMILY_DF, "sex") + "\n");
        System.out.print("[------]height=" + getValue(result2, constants.COLUMN_FAMILY_EX, "height") + "\n");
        System.out.print("[------]weight=" + getValue(result2, constants.COLUMN_FAMILY_EX, "weight") + "\n");

        table.close();
        mutator.close();
        connection.close();
    }

    private static String getValue(Result rs, String family, String column) {
        byte[] value = rs.getValue(family.getBytes(), column.getBytes());
        if (value == null) {
            return "";
        } else {
            return new String(value);
        }
    }
}

===Supplementary notes===

1) HTableDescriptor properties

Table-level properties can be set through the HTableDescriptor object, for example:

//WAL durability: whether the log is written synchronously or asynchronously
tb.setDurability(Durability.SYNC_WAL);
//maximum region size: when the largest store file of a region reaches this size, the region starts to split
tb.setMaxFileSize(1024*1024*1024);
//MemStore flush size: when a memstore reaches this value, its data is flushed to disk
tb.setMemStoreFlushSize(256*1024*1024);

Because of the MemStore, HBase writes data to memory first and only flushes it to disk once the in-memory threshold is reached.

So if a RegionServer goes down before the data has been flushed, the data still in memory is lost.

The WAL (write-ahead log) durability level is what protects against this scenario, e.g. tb.setDurability(Durability.SYNC_WAL);

setDurability(Durability d) is available on three classes: HTableDescriptor, Delete and Put (see the sketch after the list below).

For Delete and Put the method is inherited from their common parent class org.apache.hadoop.hbase.client.Mutation.

They set the WAL write level for the table, for put operations and for delete operations respectively.

Note that Delete and Put do not inherit the table's Durability level (verified by testing).

Durability is an enum; if no level is set explicitly through this method, the default is USE_DEFAULT.

USE_DEFAULT    //use the global default WAL level, i.e. SYNC_WAL

ASYNC_WAL    //write the WAL asynchronously when data changes

SYNC_WAL    //write the WAL synchronously when data changes

FSYNC_WAL    //write the WAL synchronously and force it to disk

SKIP_WAL    //do not write the WAL at all
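
A minimal sketch of setting the durability per operation (assuming an open Table named table and the constants helper, as in the samples above):

Put put = new Put(Bytes.toBytes("row01"));
put.addColumn(constants.COLUMN_FAMILY_DF.getBytes(), "name".getBytes(), "zhang san".getBytes());
put.setDurability(Durability.ASYNC_WAL); //this put only asks for an asynchronous WAL write
table.put(put);

Delete delete = new Delete(Bytes.toBytes("row01"));
delete.setDurability(Durability.SKIP_WAL); //this delete skips the WAL entirely, so it may be lost on a crash
table.delete(delete);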

2) HColumnDescriptor properties

Column-family properties can be set through the HColumnDescriptor object, for example:

//data block encoding for data in memory and in store files; default NONE (no encoding)
tb.setDataBlockEncoding(DataBlockEncoding.PREFIX);
//Bloom filter: NONE, ROW (default) or ROWCOL; ROWCOL indexes row key plus column qualifier
tb.setBloomFilterType(BloomType.ROW);
//replication scope: with REPLICATION_SCOPE_LOCAL (the default) the family is not replicated between clusters
tb.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
//maximum number of versions to keep; default 1
tb.setMaxVersions(3);
//minimum number of versions to keep; default 0, used together with TTL
tb.setMinVersions(1);
//time to live (TTL) of the data, in seconds
tb.setTimeToLive(18000);
//compression type for store files; default NONE (no compression)
tb.setCompressionType(Algorithm.SNAPPY);
//whether to keep cells that have already been deleted
tb.setKeepDeletedCells(false);
//keep this family in memory (higher block-cache priority) for faster reads
tb.setInMemory(true);
//whether data blocks of this family are cached in the block cache; default true
tb.setBlockCacheEnabled(true);
//block size in bytes; default 65536
tb.setBlocksize(64*1024);
--END--
