Kafka Study Notes: Simple Java Operations
Maven dependencies:
```xml
<!-- new producer / (stub) new consumer API: org.apache.kafka.clients.* -->
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>0.8.2.1</version>
</dependency>
<!-- core Scala classes, including kafka.javaapi.consumer.SimpleConsumer -->
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>0.8.2.1</version>
</dependency>
```
 
The producer code is as follows:
```java
import java.util.Properties;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class KafkaProducerTest {

    private static final Logger LOG = LoggerFactory.getLogger(KafkaProducerTest.class);

    private static Properties properties = null;

    static {
        properties = new Properties();
        properties.put("bootstrap.servers", "centos.master:9092,centos.slave1:9092,centos.slave2:9092");
        properties.put("producer.type", "sync");
        properties.put("request.required.acks", "1");
        properties.put("serializer.class", "kafka.serializer.DefaultEncoder");
        properties.put("partitioner.class", "kafka.producer.DefaultPartitioner");
        properties.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
        // properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
        // properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    }

    public void produce() {
        KafkaProducer<byte[], byte[]> kafkaProducer = new KafkaProducer<byte[], byte[]>(properties);
        ProducerRecord<byte[], byte[]> kafkaRecord = new ProducerRecord<byte[], byte[]>(
                "test", "kkk".getBytes(), "vvv".getBytes());
        kafkaProducer.send(kafkaRecord, new Callback() {
            public void onCompletion(RecordMetadata metadata, Exception e) {
                if (null != e) {
                    LOG.error(e.getMessage(), e);
                } else {
                    LOG.info("the offset of the sent record is {}", metadata.offset());
                }
                LOG.info("complete!");
            }
        });
        kafkaProducer.close();
    }

    public static void main(String[] args) {
        KafkaProducerTest kafkaProducerTest = new KafkaProducerTest();
        for (int i = 0; i < 10; i++) {
            kafkaProducerTest.produce();
        }
    }
}
```
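Note that `produce()` above opens and closes a new `KafkaProducer` for every message, so the loop in `main` pays connection setup ten times. A minimal sketch of a variant method (hypothetical; it would live in `KafkaProducerTest` and reuse the `properties` and `LOG` fields above) that keeps a single producer open for all sends:

```java
public void produceBatch(int count) {
    // Reuse a single producer for all sends; the client batches and pipelines internally.
    KafkaProducer<byte[], byte[]> producer = new KafkaProducer<byte[], byte[]>(properties);
    try {
        for (int i = 0; i < count; i++) {
            ProducerRecord<byte[], byte[]> record = new ProducerRecord<byte[], byte[]>(
                    "test", ("key-" + i).getBytes(), ("value-" + i).getBytes());
            producer.send(record, new Callback() {
                public void onCompletion(RecordMetadata metadata, Exception e) {
                    if (e != null) {
                        LOG.error(e.getMessage(), e);
                    } else {
                        LOG.info("sent to partition {} at offset {}", metadata.partition(), metadata.offset());
                    }
                }
            });
        }
    } finally {
        // close() flushes any records still buffered in the client.
        producer.close();
    }
}
```

With this, `main` would call `produceBatch(10)` once instead of calling `produce()` in a loop.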
 
The consumer code, written against the new-consumer API in the same kafka-clients 0.8.2.1:

```java
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class KafkaConsumerTest {

    private static final Logger LOG = LoggerFactory.getLogger(KafkaConsumerTest.class);

    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
                "centos.master:9092,centos.slave1:9092,centos.slave2:9092");
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "test-consumer-group");
        properties.put(ConsumerConfig.SESSION_TIMEOUT_MS, "1000");
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        properties.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY, "range");
        // properties.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY, "roundrobin");
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "10000");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        KafkaConsumer<byte[], byte[]> kafkaConsumer = new KafkaConsumer<byte[], byte[]>(properties);
        kafkaConsumer.subscribe("test");
        // kafkaConsumer.subscribe("*");

        boolean isRunning = true;
        while (isRunning) {
            Map<String, ConsumerRecords<byte[], byte[]>> results = kafkaConsumer.poll(100);
            if (null != results) {
                for (Map.Entry<String, ConsumerRecords<byte[], byte[]>> entry : results.entrySet()) {
                    LOG.info("topic {}", entry.getKey());
                    ConsumerRecords<byte[], byte[]> consumerRecords = entry.getValue();
                    List<ConsumerRecord<byte[], byte[]>> records = consumerRecords.records();
                    for (int i = 0, len = records.size(); i < len; i++) {
                        ConsumerRecord<byte[], byte[]> consumerRecord = records.get(i);
                        LOG.info("topic {} partition {}", consumerRecord.topic(), consumerRecord.partition());
                        try {
                            LOG.info("offset {} value {}", consumerRecord.offset(), new String(consumerRecord.value()));
                        } catch (Exception e) {
                            LOG.error(e.getMessage(), e);
                        }
                    }
                }
            }
        }
        kafkaConsumer.close();
    }
}
```
 
It turned out that in this version KafkaConsumer's poll method is not implemented:
```java
@Override
public Map<String, ConsumerRecords<K, V>> poll(long timeout) {
    // TODO Auto-generated method stub
    return null;
}
```
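For reference, the new consumer is actually implemented from the 0.9.0 clients onward; there, subscribe takes a collection of topics and poll returns a ConsumerRecords object directly rather than a Map. A minimal sketch against a 0.9+ kafka-clients dependency (the class name and the reuse of the broker addresses above are assumptions):

```java
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class NewConsumerLoop {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "centos.master:9092,centos.slave1:9092,centos.slave2:9092");
        props.put("group.id", "test-consumer-group");
        props.put("enable.auto.commit", "true");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<byte[], byte[]>(props);
        // in 0.9+ subscribe takes a collection of topic names
        consumer.subscribe(Collections.singletonList("test"));
        try {
            while (true) {
                // poll returns the records fetched since the last call, across all assigned partitions
                ConsumerRecords<byte[], byte[]> records = consumer.poll(100);
                for (ConsumerRecord<byte[], byte[]> record : records) {
                    System.out.println(record.topic() + " " + record.partition() + " "
                            + record.offset() + " " + new String(record.value()));
                }
            }
        } finally {
            consumer.close();
        }
    }
}
```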
 
I then rewrote the consumer against kafka.javaapi.consumer.SimpleConsumer, which runs correctly:
```java
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.cluster.Broker;
import kafka.common.ErrorMapping;
import kafka.common.TopicAndPartition;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.OffsetRequest;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;

public class KafkaSimpleConsumerTest {

    private List<String> brokerList = new ArrayList<String>();

    public KafkaSimpleConsumerTest() {
        brokerList = new ArrayList<String>();
    }

    public static void main(String args[]) {
        KafkaSimpleConsumerTest kafkaSimpleConsumer = new KafkaSimpleConsumerTest();
        // maximum number of messages to read
        long maxReadNum = Long.parseLong("3");
        // topic to consume
        String topic = "test";
        // partition to read from
        int partition = Integer.parseInt("0");
        // seed broker nodes
        List<String> seeds = new ArrayList<String>();
        seeds.add("centos.master");
        seeds.add("centos.slave1");
        seeds.add("centos.slave2");
        // broker port
        int port = Integer.parseInt("9092");
        try {
            kafkaSimpleConsumer.run(maxReadNum, topic, partition, seeds, port);
        } catch (Exception e) {
            System.out.println("Oops:" + e);
            e.printStackTrace();
        }
    }

    public void run(long maxReadNum, String topic, int partition, List<String> seedBrokers, int port) throws Exception {
        // look up the metadata for the given topic and partition
        PartitionMetadata metadata = findLeader(seedBrokers, port, topic, partition);
        if (metadata == null) {
            System.out.println("can't find metadata for topic and partition. exit");
            return;
        }
        if (metadata.leader() == null) {
            System.out.println("can't find leader for topic and partition. exit");
            return;
        }

        String leadBroker = metadata.leader().host();
        String clientName = "client_" + topic + "_" + partition;
        SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
        long readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.EarliestTime(), clientName);

        int numErrors = 0;
        while (maxReadNum > 0) {
            if (consumer == null) {
                consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
            }
            FetchRequest req = new FetchRequestBuilder().clientId(clientName)
                    .addFetch(topic, partition, readOffset, 100000).build();
            FetchResponse fetchResponse = consumer.fetch(req);

            if (fetchResponse.hasError()) {
                numErrors++;
                short code = fetchResponse.errorCode(topic, partition);
                System.out.println("error fetching data from the broker:" + leadBroker + " reason: " + code);
                if (numErrors > 5)
                    break;
                if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                    readOffset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
                    continue;
                }
                consumer.close();
                consumer = null;
                leadBroker = findNewLeader(leadBroker, topic, partition, port);
                continue;
            }
            numErrors = 0;

            long numRead = 0;
            for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
                long currentOffset = messageAndOffset.offset();
                if (currentOffset < readOffset) {
                    System.out.println("found an old offset: " + currentOffset + " expecting: " + readOffset);
                    continue;
                }
                readOffset = messageAndOffset.nextOffset();
                ByteBuffer payload = messageAndOffset.message().payload();
                byte[] bytes = new byte[payload.limit()];
                payload.get(bytes);
                System.out.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
                numRead++;
                maxReadNum--;
            }

            if (numRead == 0) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException ie) {
                }
            }
        }
        if (consumer != null)
            consumer.close();
    }

    /**
     * Find the leader broker for the given topic and partition among the live brokers.
     * @param seedBrokers
     * @param port
     * @param topic
     * @param partition
     * @return
     */
    private PartitionMetadata findLeader(List<String> seedBrokers, int port, String topic, int partition) {
        PartitionMetadata partitionMetadata = null;
        loop: for (String seedBroker : seedBrokers) {
            SimpleConsumer consumer = null;
            try {
                consumer = new SimpleConsumer(seedBroker, port, 100000, 64 * 1024, "leaderLookup");
                List<String> topics = Collections.singletonList(topic);
                TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(topics);
                TopicMetadataResponse topicMetadataResponse = consumer.send(topicMetadataRequest);
                List<TopicMetadata> topicMetadatas = topicMetadataResponse.topicsMetadata();
                for (TopicMetadata topicMetadata : topicMetadatas) {
                    for (PartitionMetadata pMetadata : topicMetadata.partitionsMetadata()) {
                        if (pMetadata.partitionId() == partition) {
                            partitionMetadata = pMetadata;
                            break loop;
                        }
                    }
                }
            } catch (Exception e) {
                System.out.println("error communicating with broker [" + seedBroker + "] to find leader for [" + topic + ", " + partition + "] reason: " + e);
            } finally {
                if (consumer != null)
                    consumer.close();
            }
        }
        if (partitionMetadata != null) {
            // remember the replica brokers so we can fail over if the leader changes
            brokerList.clear();
            for (Broker replica : partitionMetadata.replicas()) {
                brokerList.add(replica.host());
            }
        }
        return partitionMetadata;
    }

    public static long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime, String clientName) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
        OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
        OffsetResponse response = consumer.getOffsetsBefore(request);
        if (response.hasError()) {
            System.out.println("error fetching offset data from the broker. reason: " + response.errorCode(topic, partition));
            return 0;
        }
        long[] offsets = response.offsets(topic, partition);
        return offsets[0];
    }

    private String findNewLeader(String oldLeader, String topic, int partition, int port) throws Exception {
        for (int i = 0; i < 3; i++) {
            boolean goToSleep = false;
            PartitionMetadata metadata = findLeader(brokerList, port, topic, partition);
            if (metadata == null) {
                goToSleep = true;
            } else if (metadata.leader() == null) {
                goToSleep = true;
            } else if (oldLeader.equalsIgnoreCase(metadata.leader().host()) && i == 0) {
                goToSleep = true;
            } else {
                return metadata.leader().host();
            }
            if (goToSleep) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException ie) {
                }
            }
        }
        System.out.println("unable to find new leader after broker failure. exit");
        throw new Exception("unable to find new leader after broker failure. exit");
    }
}
```
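Besides SimpleConsumer, the 0.8.x release also ships the ZooKeeper-based high-level consumer, which handles leader discovery, rebalancing, and offset commits itself. A minimal sketch (the ZooKeeper address centos.master:2181 and the class name are assumptions, not taken from the cluster config above):

```java
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

public class KafkaHighLevelConsumerTest {
    public static void main(String[] args) {
        Properties props = new Properties();
        // assumed ZooKeeper address; adjust to the actual ensemble
        props.put("zookeeper.connect", "centos.master:2181");
        props.put("group.id", "test-consumer-group");
        props.put("auto.commit.interval.ms", "10000");

        ConsumerConnector connector = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
        // one stream (one consuming thread) for topic "test"
        Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                connector.createMessageStreams(Collections.singletonMap("test", 1));
        ConsumerIterator<byte[], byte[]> it = streams.get("test").get(0).iterator();
        while (it.hasNext()) {
            System.out.println(new String(it.next().message()));
        }
        connector.shutdown();
    }
}
```

The trade-off is less control: unlike SimpleConsumer, this API does not let you target a specific partition or starting offset.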
 