Kafka Java client example code (kafka_2.11-0.8.2.2)
1. Sending messages to Kafka with the Producer API
This API was replaced by KafkaProducer starting with version 0.9.
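For reference, a minimal sketch of the same send against the newer KafkaProducer API (0.9+) might look like the following. The broker addresses and topic are carried over from the 0.8 example below; the class name and message content are hypothetical and not part of the original project:

package cn.ljh.kafka.kafka_helloworld;

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

// Hypothetical sketch of the 0.9+ producer API, for comparison only.
public class NewApiProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // bootstrap.servers replaces metadata.broker.list in the new API
        props.put("bootstrap.servers", "192.168.137.176:9092,192.168.137.176:9093");
        props.put("acks", "1");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        Producer<String, String> producer = new KafkaProducer<String, String>(props);
        // topic, key, value mirror the KeyedMessage in the 0.8 example below
        producer.send(new ProducerRecord<String, String>("page_visits", "192.168.2.1", "hello"));
        producer.close();
    }
}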
HelloWorldProducer.java
package cn.ljh.kafka.kafka_helloworld;

import java.util.Date;
import java.util.Properties;
import java.util.Random;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class HelloWorldProducer {
    public static void main(String[] args) {
        long events = Long.parseLong(args[0]);
        Random rnd = new Random();

        Properties props = new Properties();
        // Broker addresses of the Kafka cluster. Configure at least two so that one
        // can fail, but the full list is not needed; the client discovers the leader
        // for each partition automatically.
        props.put("metadata.broker.list", "192.168.137.176:9092,192.168.137.176:9093");
        // Serializer class for message values.
        // The key serializer (key.serializer.class) can be configured separately;
        // it defaults to the value serializer.
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        // Partitioner selection strategy (optional).
        props.put("partitioner.class", "cn.ljh.kafka.kafka_helloworld.SimplePartitioner");
        props.put("request.required.acks", "1");

        ProducerConfig config = new ProducerConfig(props);
        Producer<String, String> producer = new Producer<String, String>(config);

        for (long nEvents = 0; nEvents < events; nEvents++) {
            long runtime = new Date().getTime();
            String ip = "192.168.2." + rnd.nextInt(255);
            String msg = runtime + ",www.example.com," + ip;
            KeyedMessage<String, String> data = new KeyedMessage<String, String>("page_visits", ip, msg);
            producer.send(data);
        }
        producer.close();
    }
}
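HelloWorldProducer takes the number of messages to send as its only command-line argument. Each message is keyed by a random IP address, which the SimplePartitioner below uses to pick a partition of the page_visits topic.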
SimplePartitioner.java
package cn.ljh.kafka.kafka_helloworld;

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

public class SimplePartitioner implements Partitioner {
    public SimplePartitioner(VerifiableProperties props) {
    }

    public int partition(Object key, int a_numPartitions) {
        int partition = 0;
        String stringKey = (String) key;
        int offset = stringKey.lastIndexOf('.');
        if (offset > 0) {
            partition = Integer.parseInt(stringKey.substring(offset + 1)) % a_numPartitions;
        }
        return partition;
    }
}
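As a quick sanity check, the hypothetical snippet below (not in the original post) shows how the partitioner maps a key: it takes the last octet of the IP-address key modulo the partition count, so "192.168.2.33" with 5 partitions lands on partition 33 % 5 = 3. This assumes VerifiableProperties' no-argument constructor, which exists in this Kafka version:

package cn.ljh.kafka.kafka_helloworld;

import kafka.utils.VerifiableProperties;

// Hypothetical helper, only to illustrate SimplePartitioner's mapping.
public class SimplePartitionerDemo {
    public static void main(String[] args) {
        SimplePartitioner partitioner = new SimplePartitioner(new VerifiableProperties());
        // Last octet is 33; with 5 partitions, 33 % 5 = 3.
        System.out.println(partitioner.partition("192.168.2.33", 5)); // prints 3
    }
}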
2. Receiving messages with the Kafka High Level Consumer API
ConsumerGroupExample.java
package cn.ljh.kafka.kafka_helloworld;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ConsumerGroupExample {
    private final ConsumerConnector consumer;
    private final String topic;
    private ExecutorService executor;

    public ConsumerGroupExample(String a_zookeeper, String a_groupId, String a_topic) {
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
                createConsumerConfig(a_zookeeper, a_groupId));
        this.topic = a_topic;
    }

    public void shutdown() {
        if (consumer != null) consumer.shutdown();
        if (executor != null) executor.shutdown();
        try {
            // Guard against a null executor in case run() was never called.
            if (executor != null && !executor.awaitTermination(5000, TimeUnit.MILLISECONDS)) {
                System.out.println("Timed out waiting for consumer threads to shut down, exiting uncleanly");
            }
        } catch (InterruptedException e) {
            System.out.println("Interrupted during shutdown, exiting uncleanly");
        }
    }

    public void run(int a_numThreads) {
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, new Integer(a_numThreads));
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
        List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

        // now launch all the threads
        executor = Executors.newFixedThreadPool(a_numThreads);

        // now create an object to consume the messages
        int threadNumber = 0;
        for (final KafkaStream stream : streams) {
            executor.submit(new ConsumerTest(stream, threadNumber));
            threadNumber++;
        }
    }

    private static ConsumerConfig createConsumerConfig(String a_zookeeper, String a_groupId) {
        Properties props = new Properties();
        props.put("zookeeper.connect", a_zookeeper);
        props.put("group.id", a_groupId);
        props.put("zookeeper.session.timeout.ms", "400");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");
        return new ConsumerConfig(props);
    }

    public static void main(String[] args) {
        // String zooKeeper = args[0];
        // String groupId = args[1];
        // String topic = args[2];
        // int threads = Integer.parseInt(args[3]);
        String zooKeeper = "192.168.137.176:2181,192.168.137.176:2182,192.168.137.176:2183";
        String groupId = "group1";
        String topic = "page_visits";
        int threads = 5;

        ConsumerGroupExample example = new ConsumerGroupExample(zooKeeper, groupId, topic);
        example.run(threads);

        try {
            Thread.sleep(10000);
        } catch (InterruptedException ie) {
        }
        example.shutdown();
    }
}
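The example lets the consumer threads run for 10 seconds and then calls shutdown(). Shutting down the ConsumerConnector causes hasNext() in each ConsumerTest below to return false, so the worker threads drain and exit on their own.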
ConsumerTest.java
package cn.ljh.kafka.kafka_helloworld;

import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;

public class ConsumerTest implements Runnable {
    private KafkaStream m_stream;
    private int m_threadNumber;

    public ConsumerTest(KafkaStream a_stream, int a_threadNumber) {
        m_threadNumber = a_threadNumber;
        m_stream = a_stream;
    }

    public void run() {
        ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
        // hasNext() blocks until a message arrives (or the connector shuts down)
        while (it.hasNext())
            System.out.println("Thread " + m_threadNumber + ": " + new String(it.next().message()));
        System.out.println("Shutting down Thread: " + m_threadNumber);
    }
}
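For comparison, a minimal sketch of the same consumer group against the 0.9+ KafkaConsumer API (which replaces the high-level consumer) might look as follows. The addresses, group id, and topic are taken from the example above; the class name is hypothetical:

package cn.ljh.kafka.kafka_helloworld;

import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

// Hypothetical sketch of the 0.9+ consumer API, for comparison only.
public class NewApiConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // The new consumer talks to the brokers directly, not to ZooKeeper.
        props.put("bootstrap.servers", "192.168.137.176:9092,192.168.137.176:9093");
        props.put("group.id", "group1");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Arrays.asList("page_visits"));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(1000);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.offset() + ": " + record.value());
            }
        }
    }
}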
3. Receiving messages with the Kafka SimpleConsumer API
SimpleConsumerExample.java
package cn.ljh.kafka.kafka_helloworld;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.cluster.BrokerEndPoint;
import kafka.common.ErrorMapping;
import kafka.common.TopicAndPartition;
import kafka.javaapi.*;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/*
Why use SimpleConsumer?
The main reason to use a SimpleConsumer implementation is
you want greater control over partition consumption than Consumer Groups give you.
For example you want to:
1.Read a message multiple times
2.Consume only a subset of the partitions in a topic in a process
3.Manage transactions to make sure a message is processed once and only once
Downsides of using SimpleConsumer
The SimpleConsumer does require a significant amount of work not needed in the Consumer Groups:
1.You must keep track of the offsets in your application to know where you left off consuming.
2.You must figure out which Broker is the lead Broker for a topic and partition
3.You must handle Broker leader changes
Steps for using a SimpleConsumer
1.Find an active Broker and find out which Broker is the leader for your topic and partition
2.Determine who the replica Brokers are for your topic and partition
3.Build the request defining what data you are interested in
4.Fetch the data
5.Identify and recover from leader changes
You can change the following items if necessary.
1.Maximum number of messages to read (so we don’t loop forever)
2.Topic to read from
3.Partition to read from
4.One broker to use for Metadata lookup
5.Port the brokers listen on
*/
public class SimpleConsumerExample {

    public static void main(String args[]) {
        SimpleConsumerExample example = new SimpleConsumerExample();
        // Maximum number of messages to read (so we don't loop forever)
        long maxReads = 500;
        // Topic to read from
        String topic = "page_visits";
        // Partition to read from
        int partition = 2;
        // One broker to use for Metadata lookup
        List<String> seeds = new ArrayList<String>();
        seeds.add("192.168.137.176");
        // Port the brokers listen on
        List<Integer> ports = new ArrayList<Integer>();
        ports.add(9092);
        try {
            example.run(maxReads, topic, partition, seeds, ports);
        } catch (Exception e) {
            System.out.println("Oops:" + e);
            e.printStackTrace();
        }
    }

    private List<String> m_replicaBrokers = new ArrayList<String>();
    private List<Integer> m_replicaPorts = new ArrayList<Integer>();

    public SimpleConsumerExample() {
        m_replicaBrokers = new ArrayList<String>();
        m_replicaPorts = new ArrayList<Integer>();
    }

    public void run(long a_maxReads, String a_topic, int a_partition, List<String> a_seedBrokers, List<Integer> a_ports) throws Exception {
        // find the metadata about the topic and partition we are interested in
        PartitionMetadata metadata = findLeader(a_seedBrokers, a_ports, a_topic, a_partition);
        if (metadata == null) {
            System.out.println("Can't find metadata for Topic and Partition. Exiting");
            return;
        }
        if (metadata.leader() == null) {
            System.out.println("Can't find Leader for Topic and Partition. Exiting");
            return;
        }
        String leadBroker = metadata.leader().host();
        int a_port = metadata.leader().port();
        String clientName = "Client_" + a_topic + "_" + a_partition;

        SimpleConsumer consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
        // kafka.api.OffsetRequest.EarliestTime() finds the beginning of the data in the logs and starts streaming from there
        long readOffset = getLastOffset(consumer, a_topic, a_partition, kafka.api.OffsetRequest.EarliestTime(), clientName);

        int numErrors = 0;
        while (a_maxReads > 0) {
            if (consumer == null) {
                consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
            }
            // Note: this fetchSize of 100000 might need to be increased if large batches are written to Kafka
            FetchRequest req = new FetchRequestBuilder()
                    .clientId(clientName)
                    .addFetch(a_topic, a_partition, readOffset, 100000)
                    .build();
            FetchResponse fetchResponse = consumer.fetch(req);

            // Identify and recover from leader changes
            if (fetchResponse.hasError()) {
                numErrors++;
                // Something went wrong!
                short code = fetchResponse.errorCode(a_topic, a_partition);
                System.out.println("Error fetching data from the Broker:" + leadBroker + " Reason: " + code);
                if (numErrors > 5) break;
                if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                    // We asked for an invalid offset. For simple case ask for the last element to reset
                    readOffset = getLastOffset(consumer, a_topic, a_partition, kafka.api.OffsetRequest.LatestTime(), clientName);
                    continue;
                }
                consumer.close();
                consumer = null;
                // look up the new leader
                metadata = findNewLeader(leadBroker, a_topic, a_partition, a_port);
                leadBroker = metadata.leader().host();
                a_port = metadata.leader().port();
                continue;
            }
            numErrors = 0;

            // Fetch the data
            long numRead = 0;
            for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(a_topic, a_partition)) {
                if (a_maxReads > 0) {
                    long currentOffset = messageAndOffset.offset();
                    // This is needed since if Kafka is compressing the messages,
                    // the fetch request will return an entire compressed block even if the requested offset isn't the beginning of the compressed block.
                    if (currentOffset < readOffset) {
                        System.out.println("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
                        continue;
                    }
                    readOffset = messageAndOffset.nextOffset();
                    ByteBuffer payload = messageAndOffset.message().payload();

                    byte[] bytes = new byte[payload.limit()];
                    payload.get(bytes);
                    System.out.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
                    numRead++;
                    a_maxReads--;
                }
            }

            // If we didn't read anything on the last request we go to sleep for a second so we aren't hammering Kafka when there is no data.
            if (numRead == 0) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException ie) {
                }
            }
        }
        if (consumer != null) consumer.close();
    }

    public static long getLastOffset(SimpleConsumer consumer, String topic, int partition,
                                     long whichTime, String clientName) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
                requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
        OffsetResponse response = consumer.getOffsetsBefore(request);

        if (response.hasError()) {
            System.out.println("Error fetching offset data from the Broker. Reason: " + response.errorCode(topic, partition));
            return 0;
        }
        long[] offsets = response.offsets(topic, partition);
        return offsets[0];
    }

    private PartitionMetadata findNewLeader(String a_oldLeader, String a_topic, int a_partition, int a_oldLeader_port) throws Exception {
        for (int i = 0; i < 3; i++) {
            boolean goToSleep = false;
            PartitionMetadata metadata = findLeader(m_replicaBrokers, m_replicaPorts, a_topic, a_partition);
            if (metadata == null) {
                goToSleep = true;
            } else if (metadata.leader() == null) {
                goToSleep = true;
            } else if (a_oldLeader.equalsIgnoreCase(metadata.leader().host()) &&
                    a_oldLeader_port == metadata.leader().port() && i == 0) {
                // first time through if the leader hasn't changed, give ZooKeeper a second to recover
                // second time, assume the broker did recover before failover, or it was a non-Broker issue
                goToSleep = true;
            } else {
                return metadata;
            }
            if (goToSleep) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException ie) {
                }
            }
        }
        System.out.println("Unable to find new leader after Broker failure. Exiting");
        throw new Exception("Unable to find new leader after Broker failure. Exiting");
    }

    private PartitionMetadata findLeader(List<String> a_seedBrokers, List<Integer> a_port, String a_topic, int a_partition) {
        PartitionMetadata returnMetaData = null;
        loop:
        for (int i = 0; i < a_seedBrokers.size(); i++) {
            String seed = a_seedBrokers.get(i);
            SimpleConsumer consumer = null;
            try {
                consumer = new SimpleConsumer(seed, a_port.get(i), 100000, 64 * 1024, "leaderLookup");
                List<String> topics = Collections.singletonList(a_topic);
                TopicMetadataRequest req = new TopicMetadataRequest(topics);
                kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);

                List<TopicMetadata> metaData = resp.topicsMetadata();
                for (TopicMetadata item : metaData) {
                    for (PartitionMetadata part : item.partitionsMetadata()) {
                        if (part.partitionId() == a_partition) {
                            returnMetaData = part;
                            break loop;
                        }
                    }
                }
            } catch (Exception e) {
                System.out.println("Error communicating with Broker [" + seed + "] to find Leader for [" + a_topic
                        + ", " + a_partition + "] Reason: " + e);
            } finally {
                if (consumer != null) consumer.close();
            }
        }
        if (returnMetaData != null) {
            m_replicaBrokers.clear();
            m_replicaPorts.clear();
            for (BrokerEndPoint replica : returnMetaData.replicas()) {
                m_replicaBrokers.add(replica.host());
                m_replicaPorts.add(replica.port());
            }
        }
        return returnMetaData;
    }
}
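These examples assume the kafka_2.11 client artifact on the classpath; a sketch of the Maven dependency, using the version from the title (adjust to your build):

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>0.8.2.2</version>
</dependency>

One caveat: if the kafka.cluster.BrokerEndPoint import in SimpleConsumerExample does not resolve against 0.8.2.2, try the 0.8-era type kafka.cluster.Broker instead, which offers the same host() and port() accessors; BrokerEndPoint appears in later client versions.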