After deleting the previously stored offsets and restarting, Spark Streaming from Kafka fails with the error "numRecords must not be negative". The batch record count is computed as untilOffset - fromOffset, so when the fromOffset recovered from ZooKeeper is larger than the partition's current lastOffset (for example, after the topic was deleted and recreated), the count goes negative and the job aborts. The fix is to correct the recovered offsets before creating the stream:

- Fetch each topic/partition's fromOffset from ZooKeeper (the original post links out for the retrieval method; a hedged sketch follows this list).
- Use SimpleConsumer to fetch each partition's lastOffset (the untilOffset).
- Compare each partition's lastOffset with its fromOffset.
- When lastOffset < fromOffset, reset fromOffset to 0.
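
For the first step, a minimal sketch of reading a committed offset from ZooKeeper. This is not the linked code: it assumes offsets were committed under the classic high-level-consumer path /consumers/<group>/offsets/<topic>/<partition>, and uses the org.I0Itec ZkClient bundled with Kafka 0.8, configured with a String serializer such as Kafka's ZKStringSerializer:

import kafka.common.TopicAndPartition;
import org.I0Itec.zkclient.ZkClient;

public class ZkOffsetReader {
  /**
   * Read the committed fromOffset of one partition from ZooKeeper.
   * Returns 0 when no offset has been committed yet.
   */
  public static long getFromOffset(ZkClient zkClient, String groupId, TopicAndPartition tp) {
    String path = "/consumers/" + groupId + "/offsets/" + tp.topic() + "/" + tp.partition();
    // the second argument 'true' returns null instead of throwing when the node is missing
    String data = zkClient.readData(path, true);
    return data == null ? 0L : Long.parseLong(data);
  }
}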
Code to fetch the lastOffset of each Kafka topic/partition:
package org.frey.example.utils.kafka;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.cluster.Broker;
import kafka.common.TopicAndPartition;
import kafka.javaapi.*;
import kafka.javaapi.consumer.SimpleConsumer;

import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * KafkaOffsetTool
 *
 * @author angel
 * @date 2016/4/11
 */
public class KafkaOffsetTool {

  private static KafkaOffsetTool instance;
  final int TIMEOUT = 100000;
  final int BUFFERSIZE = 64 * 1024;

  private KafkaOffsetTool() {
  }

  public static synchronized KafkaOffsetTool getInstance() {
    if (instance == null) {
      instance = new KafkaOffsetTool();
    }
    return instance;
  }

  public Map<TopicAndPartition, Long> getLastOffset(String brokerList, List<String> topics,
      String groupId) {
    Map<TopicAndPartition, Long> topicAndPartitionLongMap = Maps.newHashMap();
    Map<TopicAndPartition, Broker> topicAndPartitionBrokerMap =
        KafkaOffsetTool.getInstance().findLeader(brokerList, topics);

    for (Map.Entry<TopicAndPartition, Broker> topicAndPartitionBrokerEntry : topicAndPartitionBrokerMap
        .entrySet()) {
      // get leader broker
      Broker leaderBroker = topicAndPartitionBrokerEntry.getValue();
      SimpleConsumer simpleConsumer = new SimpleConsumer(leaderBroker.host(), leaderBroker.port(),
          TIMEOUT, BUFFERSIZE, groupId);
      long readOffset = getTopicAndPartitionLastOffset(simpleConsumer,
          topicAndPartitionBrokerEntry.getKey(), groupId);
      topicAndPartitionLongMap.put(topicAndPartitionBrokerEntry.getKey(), readOffset);
      // close the consumer once this partition's offset has been read
      simpleConsumer.close();
    }
    return topicAndPartitionLongMap;
  }

  /**
   * Find the leader broker of every TopicAndPartition of the given topics.
   *
   * @param brokerList comma-separated host:port pairs
   * @param topics     topics to look up
   * @return map of TopicAndPartition to its leader broker
   */
  private Map<TopicAndPartition, Broker> findLeader(String brokerList, List<String> topics) {
    // get broker's host array
    String[] brokerUrlArray = getBrokerUrlFromBrokerList(brokerList);
    // get broker's port map
    Map<String, Integer> brokerPortMap = getPortFromBrokerList(brokerList);

    Map<TopicAndPartition, Broker> topicAndPartitionBrokerMap = Maps.newHashMap();

    for (String broker : brokerUrlArray) {
      SimpleConsumer consumer = null;
      try {
        // new instance of simple Consumer
        consumer = new SimpleConsumer(broker, brokerPortMap.get(broker), TIMEOUT, BUFFERSIZE,
            "leaderLookup" + new Date().getTime());
        TopicMetadataRequest req = new TopicMetadataRequest(topics);
        TopicMetadataResponse resp = consumer.send(req);
        List<TopicMetadata> metaData = resp.topicsMetadata();
        for (TopicMetadata item : metaData) {
          for (PartitionMetadata part : item.partitionsMetadata()) {
            TopicAndPartition topicAndPartition =
                new TopicAndPartition(item.topic(), part.partitionId());
            topicAndPartitionBrokerMap.put(topicAndPartition, part.leader());
          }
        }
      } catch (Exception e) {
        e.printStackTrace();
      } finally {
        if (consumer != null)
          consumer.close();
      }
    }
    return topicAndPartitionBrokerMap;
  }

  /**
   * Get the last offset of one partition.
   *
   * @param consumer          consumer connected to the partition's leader
   * @param topicAndPartition the partition to query
   * @param clientName        client id sent with the request
   * @return the latest offset, or 0 on error
   */
  private long getTopicAndPartitionLastOffset(SimpleConsumer consumer,
      TopicAndPartition topicAndPartition, String clientName) {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
    requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(
        kafka.api.OffsetRequest.LatestTime(), 1));

    OffsetRequest request = new OffsetRequest(
        requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);

    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
      System.out.println("Error fetching offset data from the broker. Reason: "
          + response.errorCode(topicAndPartition.topic(), topicAndPartition.partition()));
      return 0;
    }
    long[] offsets = response.offsets(topicAndPartition.topic(), topicAndPartition.partition());
    return offsets[0];
  }

  /**
   * Get every broker host from the broker list.
   *
   * @param brokerlist comma-separated host:port pairs
   * @return array of broker hosts
   */
  private String[] getBrokerUrlFromBrokerList(String brokerlist) {
    String[] brokers = brokerlist.split(",");
    for (int i = 0; i < brokers.length; i++) {
      brokers[i] = brokers[i].split(":")[0];
    }
    return brokers;
  }

  /**
   * Map each broker host to its port.
   *
   * @param brokerlist comma-separated host:port pairs
   * @return map of host to port
   */
  private Map<String, Integer> getPortFromBrokerList(String brokerlist) {
    Map<String, Integer> map = new HashMap<String, Integer>();
    String[] brokers = brokerlist.split(",");
    for (String item : brokers) {
      String[] itemArr = item.split(":");
      if (itemArr.length > 1) {
        map.put(itemArr[0], Integer.parseInt(itemArr[1]));
      }
    }
    return map;
  }

  public static void main(String[] args) {
    List<String> topics = Lists.newArrayList();
    topics.add("ys");
    topics.add("bugfix");
    Map<TopicAndPartition, Long> topicAndPartitionLongMap =
        KafkaOffsetTool.getInstance().getLastOffset(
            "broker001:9092,broker002:9092", topics, "my.group.id");
    for (Map.Entry<TopicAndPartition, Long> entry : topicAndPartitionLongMap.entrySet()) {
      System.out.println(
          entry.getKey().topic() + "-" + entry.getKey().partition() + ":" + entry.getValue());
    }
  }
}

Core offset-correction code:
/** offset correction begins */
// get the lastOffset of every topic/partition
Map<TopicAndPartition, Long> topicAndPartitionLongMap =
    KafkaOffsetTool.getInstance().getLastOffset(
        kafkaParams.get("metadata.broker.list"), topicList, "my.group.id");

// iterate over every topic/partition recovered into fromOffsets
for (Map.Entry<TopicAndPartition, Long> topicAndPartitionLongEntry : fromOffsets.entrySet()) {
  // when fromOffset > lastOffset
  if (topicAndPartitionLongEntry.getValue() >
      topicAndPartitionLongMap.get(topicAndPartitionLongEntry.getKey())) {
    // reset fromOffset to the initial offset, 0
    topicAndPartitionLongEntry.setValue(0L);
  }
}
/** offset correction ends */
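
For context, a minimal sketch of where this correction sits: the repaired fromOffsets map is handed to the direct stream. This is not code from the original post; it assumes the Spark Streaming Kafka 0.8 Java API (spark-streaming-kafka), a hypothetical JavaStreamingContext named jssc, and String keys and values:

import kafka.message.MessageAndMetadata;
import kafka.serializer.StringDecoder;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.kafka.KafkaUtils;

// messageHandler decides what each record becomes; here, just the message payload
JavaInputDStream<String> stream = KafkaUtils.createDirectStream(
    jssc,                                    // hypothetical JavaStreamingContext
    String.class, String.class,              // key / value types
    StringDecoder.class, StringDecoder.class,
    String.class,                            // record type produced by messageHandler
    kafkaParams,                             // Map<String, String> with metadata.broker.list
    fromOffsets,                             // the corrected Map<TopicAndPartition, Long>
    new Function<MessageAndMetadata<String, String>, String>() {
      @Override
      public String call(MessageAndMetadata<String, String> mmd) {
        return mmd.message();
      }
    });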