kafkaconsumer SimpleExample
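This example uses the old low-level SimpleConsumer API (Kafka 0.8.x) to read a handful of messages from a single topic partition. It queries the seed brokers for the partition's lead broker, asks the leader for a starting offset, then issues fetch requests in a loop, handling offset-out-of-range errors and leader failover along the way.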
package kafka.simple;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.ErrorMapping;
import kafka.common.TopicAndPartition;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;

public class SimpleExample {

    private List<String> m_replicaBrokers = new ArrayList<String>();

    public SimpleExample() {
        m_replicaBrokers = new ArrayList<String>();
    }

    public static void main(String[] args) {
SimpleExample example = new SimpleExample();
        // Maximum number of messages to read
        long maxReads = 3;
        // Topic to subscribe to
        String topic = "test";
        // Partition to read from
        int partition = 0;
        // Broker seed hosts
        List<String> seeds = new ArrayList<String>();
        seeds.add("localhost");
        //seeds.add("192.168.4.31");
        //seeds.add("192.168.4.32");
        // Broker port
        int port = 9092;
try {
example.run(maxReads, topic, partition, seeds, port);
} catch (Exception e) {
System.out.println("Oops:" + e);
e.printStackTrace();
}
    }

    public void run(long a_maxReads, String a_topic, int a_partition, List<String> a_seedBrokers,
            int a_port) throws Exception {
        // Find the metadata (and therefore the lead broker) for the given topic partition
        PartitionMetadata metadata = findLeader(a_seedBrokers, a_port, a_topic, a_partition);
if (metadata == null) {
System.out.println("Can't find metadata for Topic and Partition. Exiting");
return;
}
if (metadata.leader() == null) {
System.out.println("Can't find Leader for Topic and Partition. Exiting");
return;
}
String leadBroker = metadata.leader().host();
        String clientName = "Client_" + a_topic + "_" + a_partition;

        SimpleConsumer consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
long readOffset = getLastOffset(consumer, a_topic, a_partition,
kafka.api.OffsetRequest.EarliestTime(), clientName);
int numErrors = 0;
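        // Fetch loop: keep issuing fetch requests to the lead broker until a_maxReads messages have been read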
while (a_maxReads > 0) {
if (consumer == null) {
consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
}
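            // Ask for up to 100000 bytes from the partition, starting at readOffset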
FetchRequest req = new FetchRequestBuilder().clientId(clientName)
.addFetch(a_topic, a_partition, readOffset, 100000).build();
            FetchResponse fetchResponse = consumer.fetch(req);

            if (fetchResponse.hasError()) {
numErrors++;
// Something went wrong!
short code = fetchResponse.errorCode(a_topic, a_partition);
System.out
.println("Error fetching data from the Broker:" + leadBroker + " Reason: " + code);
if (numErrors > 5) {
break;
}
if (code == ErrorMapping.OffsetOutOfRangeCode()) {
// We asked for an invalid offset. For simple case ask for
// the last element to reset
readOffset = getLastOffset(consumer, a_topic, a_partition,
kafka.api.OffsetRequest.LatestTime(), clientName);
continue;
}
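                // Any other error: drop the connection and look for a new lead broker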
consumer.close();
consumer = null;
leadBroker = findNewLeader(leadBroker, a_topic, a_partition, a_port);
continue;
}
            numErrors = 0;

            long numRead = 0;
for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(a_topic, a_partition)) {
long currentOffset = messageAndOffset.offset();
if (currentOffset < readOffset) {
System.out.println("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
continue;
                }
                readOffset = messageAndOffset.nextOffset();

                ByteBuffer payload = messageAndOffset.message().payload();
                byte[] bytes = new byte[payload.limit()];
payload.get(bytes);
System.out
.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
numRead++;
a_maxReads--;
            }

            if (numRead == 0) {
try {
Thread.sleep(1000);
} catch (InterruptedException ie) {
}
}
}
if (consumer != null) {
consumer.close();
}
    }

    public static long getLastOffset(SimpleConsumer consumer, String topic, int partition,
long whichTime, String clientName) {
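        // Request the last offset before the given time (EarliestTime or LatestTime) for this partition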
TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo,
kafka.api.OffsetRequest.CurrentVersion(), clientName);
        OffsetResponse response = consumer.getOffsetsBefore(request);

        if (response.hasError()) {
            System.out.println("Error fetching offset data from the Broker. Reason: " + response
                    .errorCode(topic, partition));
return 0;
}
long[] offsets = response.offsets(topic, partition);
return offsets[0];
    }

    /**
     * Finds a new lead broker for the topic partition after the previous leader has failed.
     *
     * @return the host name of the new lead broker
     * @throws Exception if no new leader can be found
     */
private String findNewLeader(String a_oldLeader, String a_topic, int a_partition, int a_port)
throws Exception {
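        // Retry a few times, re-querying the known replica brokers for the new leader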
for (int i = 0; i < 3; i++) {
boolean goToSleep = false;
PartitionMetadata metadata = findLeader(m_replicaBrokers, a_port, a_topic, a_partition);
if (metadata == null) {
goToSleep = true;
} else if (metadata.leader() == null) {
goToSleep = true;
} else if (a_oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0) {
                // First time through, if the leader hasn't changed, give
                // ZooKeeper a second to recover.
                // Second time through, assume the broker did recover before
                // failover, or that it was a non-broker issue.
goToSleep = true;
} else {
return metadata.leader().host();
}
if (goToSleep) {
try {
Thread.sleep(1000);
} catch (InterruptedException ie) {
}
}
}
System.out.println("Unable to find new leader after Broker failure. Exiting");
throw new Exception("Unable to find new leader after Broker failure. Exiting");
    }

    private PartitionMetadata findLeader(List<String> a_seedBrokers, int a_port, String a_topic,
            int a_partition) {
PartitionMetadata returnMetaData = null;
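        // Ask each seed broker for topic metadata until the requested partition is found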
loop:
for (String seed : a_seedBrokers) {
SimpleConsumer consumer = null;
try {
consumer = new SimpleConsumer(seed, a_port, 100000, 64 * 1024, "leaderLookup");
List<String> topics = Collections.singletonList(a_topic);
TopicMetadataRequest req = new TopicMetadataRequest(topics);
                kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
                List<TopicMetadata> metaData = resp.topicsMetadata();
for (TopicMetadata item : metaData) {
for (PartitionMetadata part : item.partitionsMetadata()) {
if (part.partitionId() == a_partition) {
returnMetaData = part;
break loop;
}
}
}
} catch (Exception e) {
System.out.println(
"Error communicating with Broker [" + seed + "] to find Leader for [" + a_topic + ", "
+ a_partition + "] Reason: " + e);
} finally {
if (consumer != null) {
consumer.close();
}
}
}
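        // Remember this partition's replica brokers so findNewLeader() can query them after a failure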
if (returnMetaData != null) {
m_replicaBrokers.clear();
for (kafka.cluster.Broker replica : returnMetaData.replicas()) {
m_replicaBrokers.add(replica.host());
}
}
return returnMetaData;
}
}
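The example builds against the old Scala client, which still ships the SimpleConsumer and the kafka.javaapi classes used above. The corresponding Maven dependency: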
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka_2.10</artifactId>
<version>0.8.2.1</version>
</dependency>
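Note that kafka_2.10 0.8.2.1 is the legacy Scala client; the SimpleConsumer API was later deprecated and removed. For comparison, here is a minimal sketch of the same single-partition read using the newer org.apache.kafka.clients.consumer.KafkaConsumer, assuming a recent kafka-clients dependency, a broker on localhost:9092, and UTF-8 string values; the topic "test" and partition 0 mirror the example above:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class NewConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        // Manually assign the partition instead of subscribing, so no consumer group is needed
        TopicPartition tp = new TopicPartition("test", 0);
        consumer.assign(Collections.singletonList(tp));
        // Start from the earliest available offset, like EarliestTime() in the example above
        consumer.seekToBeginning(Collections.singletonList(tp));

        long maxReads = 3;
        while (maxReads > 0) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.offset() + ": " + record.value());
                maxReads--;
                if (maxReads == 0) {
                    break;
                }
            }
        }
        consumer.close();
    }
}

Leader discovery, offset lookups, and failover are all handled inside the client here, which is why the newer API needs none of the findLeader/findNewLeader plumbing from the SimpleConsumer example.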