1. Sending messages to Kafka with the Producer API

This (Scala-based) Producer API was superseded by KafkaProducer starting with version 0.9; a sketch of the newer API appears at the end of this section.

HelloWorldProducer.java

package cn.ljh.kafka.kafka_helloworld;

import java.util.Date;
import java.util.Properties;
import java.util.Random;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

public class HelloWorldProducer {
    public static void main(String[] args) {
        long events = Long.parseLong(args[0]);
        Random rnd = new Random();

        Properties props = new Properties();
        // Broker list for the Kafka cluster. Configuring two or more is recommended in case
        // one goes down; you need not list every broker, as the client discovers leader
        // nodes automatically.
        props.put("metadata.broker.list", "192.168.137.176:9092,192.168.137.176:9093");
        // Serializer class for message values.
        // The key serializer (key.serializer.class) can be configured separately;
        // it defaults to the value serializer.
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        // Partitioner selection strategy (optional).
        props.put("partitioner.class", "cn.ljh.kafka.kafka_helloworld.SimplePartitioner");
        props.put("request.required.acks", "1");

        ProducerConfig config = new ProducerConfig(props);
        Producer<String, String> producer = new Producer<String, String>(config);

        for (long nEvents = 0; nEvents < events; nEvents++) {
            long runtime = new Date().getTime();
            String ip = "192.168.2." + rnd.nextInt(255);
            String msg = runtime + ",www.example.com," + ip;
            KeyedMessage<String, String> data = new KeyedMessage<String, String>("page_visits", ip, msg);
            producer.send(data);
        }
        producer.close();
    }
}

SimplePartitioner.java

package cn.ljh.kafka.kafka_helloworld;

import kafka.producer.Partitioner;
import kafka.utils.VerifiableProperties;

public class SimplePartitioner implements Partitioner {
    public SimplePartitioner(VerifiableProperties props) {
    }

    // Route each message by the last octet of its IP key, modulo the partition count.
    public int partition(Object key, int a_numPartitions) {
        int partition = 0;
        String stringKey = (String) key;
        int offset = stringKey.lastIndexOf('.');
        if (offset > 0) {
            partition = Integer.parseInt(stringKey.substring(offset + 1)) % a_numPartitions;
        }
        return partition;
    }
}
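As noted above, this producer was replaced in 0.9 by org.apache.kafka.clients.producer.KafkaProducer. For comparison, a minimal sketch of the same send against the new API, assuming a 0.9+ kafka-clients jar on the classpath; the class name NewApiProducerSketch and the single hard-coded record are illustrative only:

package cn.ljh.kafka.kafka_helloworld;

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class NewApiProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // bootstrap.servers replaces metadata.broker.list in the new client.
        props.put("bootstrap.servers", "192.168.137.176:9092,192.168.137.176:9093");
        // acks replaces request.required.acks.
        props.put("acks", "1");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        Producer<String, String> producer = new KafkaProducer<String, String>(props);
        // Keyed sends hash the key to choose a partition, much as SimplePartitioner does above.
        producer.send(new ProducerRecord<String, String>("page_visits", "192.168.2.1",
                System.currentTimeMillis() + ",www.example.com,192.168.2.1"));
        producer.close();
    }
}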

2. Receiving messages with the Kafka High Level Consumer API

ConsumerGroupExample.java

package cn.ljh.kafka.kafka_helloworld;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ConsumerGroupExample {
    private final ConsumerConnector consumer;
    private final String topic;
    private ExecutorService executor;

    public ConsumerGroupExample(String a_zookeeper, String a_groupId, String a_topic) {
        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
                createConsumerConfig(a_zookeeper, a_groupId));
        this.topic = a_topic;
    }

    public void shutdown() {
        if (consumer != null) consumer.shutdown();
        if (executor != null) executor.shutdown();
        try {
            // Guard against run() never having been called, which would leave executor null.
            if (executor != null && !executor.awaitTermination(5000, TimeUnit.MILLISECONDS)) {
                System.out.println("Timed out waiting for consumer threads to shut down, exiting uncleanly");
            }
        } catch (InterruptedException e) {
            System.out.println("Interrupted during shutdown, exiting uncleanly");
        }
    }

    public void run(int a_numThreads) {
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, new Integer(a_numThreads));
        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
        List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

        // Now launch all the threads.
        executor = Executors.newFixedThreadPool(a_numThreads);

        // Now create an object to consume the messages on each stream.
        int threadNumber = 0;
        for (final KafkaStream stream : streams) {
            executor.submit(new ConsumerTest(stream, threadNumber));
            threadNumber++;
        }
    }

    private static ConsumerConfig createConsumerConfig(String a_zookeeper, String a_groupId) {
        Properties props = new Properties();
        props.put("zookeeper.connect", a_zookeeper);
        props.put("group.id", a_groupId);
        props.put("zookeeper.session.timeout.ms", "400");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");
        return new ConsumerConfig(props);
    }

    public static void main(String[] args) {
        // String zooKeeper = args[0];
        // String groupId = args[1];
        // String topic = args[2];
        // int threads = Integer.parseInt(args[3]);
        String zooKeeper = "192.168.137.176:2181,192.168.137.176:2182,192.168.137.176:2183";
        String groupId = "group1";
        String topic = "page_visits";
        int threads = 5;

        ConsumerGroupExample example = new ConsumerGroupExample(zooKeeper, groupId, topic);
        example.run(threads);

        try {
            Thread.sleep(10000);
        } catch (InterruptedException ie) {
        }
        example.shutdown();
    }
}

ConsumerTest.java

package cn.ljh.kafka.kafka_helloworld;

import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;

public class ConsumerTest implements Runnable {
    private KafkaStream m_stream;
    private int m_threadNumber;

    public ConsumerTest(KafkaStream a_stream, int a_threadNumber) {
        m_threadNumber = a_threadNumber;
        m_stream = a_stream;
    }

    public void run() {
        ConsumerIterator<byte[], byte[]> it = m_stream.iterator();
        // hasNext() blocks, so the thread waits here until a message arrives.
        while (it.hasNext())
            System.out.println("Thread " + m_threadNumber + ": " + new String(it.next().message()));
        System.out.println("Shutting down Thread: " + m_threadNumber);
    }
}
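The high-level consumer shown here was likewise superseded in 0.9 by org.apache.kafka.clients.consumer.KafkaConsumer, which bootstraps from the brokers rather than ZooKeeper. A minimal sketch under the same assumptions as the producer sketch above (0.9+ kafka-clients jar; the class name NewApiConsumerSketch is illustrative):

package cn.ljh.kafka.kafka_helloworld;

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class NewApiConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // The new consumer connects to the brokers directly, not to ZooKeeper.
        props.put("bootstrap.servers", "192.168.137.176:9092,192.168.137.176:9093");
        props.put("group.id", "group1");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Collections.singletonList("page_visits"));
        try {
            while (true) {
                // poll() replaces the blocking ConsumerIterator of the old API.
                ConsumerRecords<String, String> records = consumer.poll(1000);
                for (ConsumerRecord<String, String> record : records)
                    System.out.println(record.offset() + ": " + record.value());
            }
        } finally {
            consumer.close();
        }
    }
}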

3. Receiving messages with the Kafka SimpleConsumer API

SimpleConsumerExample.java

package cn.ljh.kafka.kafka_helloworld;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.cluster.BrokerEndPoint;
import kafka.common.ErrorMapping;
import kafka.common.TopicAndPartition;
import kafka.javaapi.*;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/*
Why use SimpleConsumer?
The main reason to use a SimpleConsumer implementation is that you want greater control
over partition consumption than Consumer Groups give you. For example, you want to:
1. Read a message multiple times
2. Consume only a subset of the partitions in a topic in a process
3. Manage transactions to make sure a message is processed once and only once

Downsides of using SimpleConsumer
The SimpleConsumer does require a significant amount of work not needed in Consumer Groups:
1. You must keep track of the offsets in your application to know where you left off consuming.
2. You must figure out which Broker is the lead Broker for a topic and partition.
3. You must handle Broker leader changes.

Steps for using a SimpleConsumer
1. Find an active Broker and find out which Broker is the leader for your topic and partition
2. Determine who the replica Brokers are for your topic and partition
3. Build the request defining what data you are interested in
4. Fetch the data
5. Identify and recover from leader changes

You can change the following items if necessary:
1. Maximum number of messages to read (so we don't loop forever)
2. Topic to read from
3. Partition to read from
4. One broker to use for metadata lookup
5. Port the brokers listen on
*/
public class SimpleConsumerExample {

    public static void main(String args[]) {
        SimpleConsumerExample example = new SimpleConsumerExample();
        // Maximum number of messages to read (so we don't loop forever)
        long maxReads = 500;
        // Topic to read from
        String topic = "page_visits";
        // Partition to read from
        int partition = 2;
        // One broker to use for metadata lookup
        List<String> seeds = new ArrayList<String>();
        seeds.add("192.168.137.176");
        // Port the brokers listen on
        List<Integer> ports = new ArrayList<Integer>();
        ports.add(9092);
        try {
            example.run(maxReads, topic, partition, seeds, ports);
        } catch (Exception e) {
            System.out.println("Oops:" + e);
            e.printStackTrace();
        }
    }

    private List<String> m_replicaBrokers = new ArrayList<String>();
    private List<Integer> m_replicaPorts = new ArrayList<Integer>();

    public SimpleConsumerExample() {
        m_replicaBrokers = new ArrayList<String>();
        m_replicaPorts = new ArrayList<Integer>();
    }

    public void run(long a_maxReads, String a_topic, int a_partition, List<String> a_seedBrokers,
            List<Integer> a_ports) throws Exception {
        // Find the metadata about the topic and partition we are interested in.
        PartitionMetadata metadata = findLeader(a_seedBrokers, a_ports, a_topic, a_partition);
        if (metadata == null) {
            System.out.println("Can't find metadata for Topic and Partition. Exiting");
            return;
        }
        if (metadata.leader() == null) {
            System.out.println("Can't find Leader for Topic and Partition. Exiting");
            return;
        }
        String leadBroker = metadata.leader().host();
        int a_port = metadata.leader().port();
        String clientName = "Client_" + a_topic + "_" + a_partition;

        SimpleConsumer consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
        // kafka.api.OffsetRequest.EarliestTime() finds the beginning of the data in the logs
        // and starts streaming from there.
        long readOffset = getLastOffset(consumer, a_topic, a_partition,
                kafka.api.OffsetRequest.EarliestTime(), clientName);

        int numErrors = 0;
        while (a_maxReads > 0) {
            if (consumer == null) {
                consumer = new SimpleConsumer(leadBroker, a_port, 100000, 64 * 1024, clientName);
            }
            // Note: this fetchSize of 100000 might need to be increased if large batches
            // are written to Kafka.
            FetchRequest req = new FetchRequestBuilder()
                    .clientId(clientName)
                    .addFetch(a_topic, a_partition, readOffset, 100000)
                    .build();
            FetchResponse fetchResponse = consumer.fetch(req);

            // Identify and recover from leader changes.
            if (fetchResponse.hasError()) {
                numErrors++;
                // Something went wrong!
                short code = fetchResponse.errorCode(a_topic, a_partition);
                System.out.println("Error fetching data from the Broker:" + leadBroker + " Reason: " + code);
                if (numErrors > 5) break;
                if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                    // We asked for an invalid offset. For the simple case, ask for the last
                    // element to reset.
                    readOffset = getLastOffset(consumer, a_topic, a_partition,
                            kafka.api.OffsetRequest.LatestTime(), clientName);
                    continue;
                }
                consumer.close();
                consumer = null;
                // Find the new leader.
                metadata = findNewLeader(leadBroker, a_topic, a_partition, a_port);
                leadBroker = metadata.leader().host();
                a_port = metadata.leader().port();
                continue;
            }
            numErrors = 0;

            // Fetch the data.
            long numRead = 0;
            for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(a_topic, a_partition)) {
                if (a_maxReads > 0) {
                    long currentOffset = messageAndOffset.offset();
                    // This is needed because if Kafka is compressing the messages, the fetch
                    // request will return an entire compressed block even if the requested
                    // offset isn't the beginning of the compressed block.
                    if (currentOffset < readOffset) {
                        System.out.println("Found an old offset: " + currentOffset + " Expecting: " + readOffset);
                        continue;
                    }
                    readOffset = messageAndOffset.nextOffset();
                    ByteBuffer payload = messageAndOffset.message().payload();

                    byte[] bytes = new byte[payload.limit()];
                    payload.get(bytes);
                    System.out.println(String.valueOf(messageAndOffset.offset()) + ": " + new String(bytes, "UTF-8"));
                    numRead++;
                    a_maxReads--;
                }
            }

            // If we didn't read anything on the last request, sleep for a second so we
            // aren't hammering Kafka when there is no data.
            if (numRead == 0) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException ie) {
                }
            }
        }
        if (consumer != null) consumer.close();
    }

    public static long getLastOffset(SimpleConsumer consumer, String topic, int partition,
            long whichTime, String clientName) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
                new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
                requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
        OffsetResponse response = consumer.getOffsetsBefore(request);

        if (response.hasError()) {
            System.out.println("Error fetching offset data from the Broker. Reason: "
                    + response.errorCode(topic, partition));
            return 0;
        }
        long[] offsets = response.offsets(topic, partition);
        return offsets[0];
    }

    private PartitionMetadata findNewLeader(String a_oldLeader, String a_topic, int a_partition,
            int a_oldLeader_port) throws Exception {
        for (int i = 0; i < 3; i++) {
            boolean goToSleep = false;
            PartitionMetadata metadata = findLeader(m_replicaBrokers, m_replicaPorts, a_topic, a_partition);
            if (metadata == null) {
                goToSleep = true;
            } else if (metadata.leader() == null) {
                goToSleep = true;
            } else if (a_oldLeader.equalsIgnoreCase(metadata.leader().host()) &&
                    a_oldLeader_port == metadata.leader().port() && i == 0) {
                // First time through, if the leader hasn't changed, give ZooKeeper a second
                // to recover. Second time, assume the broker did recover before failover,
                // or it was a non-Broker issue.
                goToSleep = true;
            } else {
                return metadata;
            }
            if (goToSleep) {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException ie) {
                }
            }
        }
        System.out.println("Unable to find new leader after Broker failure. Exiting");
        throw new Exception("Unable to find new leader after Broker failure. Exiting");
    }

    private PartitionMetadata findLeader(List<String> a_seedBrokers, List<Integer> a_port,
            String a_topic, int a_partition) {
        PartitionMetadata returnMetaData = null;
        loop:
        for (int i = 0; i < a_seedBrokers.size(); i++) {
            String seed = a_seedBrokers.get(i);
            SimpleConsumer consumer = null;
            try {
                consumer = new SimpleConsumer(seed, a_port.get(i), 100000, 64 * 1024, "leaderLookup");
                List<String> topics = Collections.singletonList(a_topic);
                TopicMetadataRequest req = new TopicMetadataRequest(topics);
                kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);

                List<TopicMetadata> metaData = resp.topicsMetadata();
                for (TopicMetadata item : metaData) {
                    for (PartitionMetadata part : item.partitionsMetadata()) {
                        if (part.partitionId() == a_partition) {
                            returnMetaData = part;
                            break loop;
                        }
                    }
                }
            } catch (Exception e) {
                System.out.println("Error communicating with Broker [" + seed + "] to find Leader for ["
                        + a_topic + ", " + a_partition + "] Reason: " + e);
            } finally {
                if (consumer != null) consumer.close();
            }
        }
        if (returnMetaData != null) {
            m_replicaBrokers.clear();
            m_replicaPorts.clear();
            for (BrokerEndPoint replica : returnMetaData.replicas()) {
                m_replicaBrokers.add(replica.host());
                m_replicaPorts.add(replica.port());
            }
        }
        return returnMetaData;
    }
}
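To compile these examples, the Scala core jar (which contains the kafka.javaapi.* classes) must be on the classpath. A likely Maven dependency for the kafka_2.11-0.8.2.2 build named in the title is sketched below. Note that the kafka.cluster.BrokerEndPoint import in SimpleConsumerExample.java only exists from 0.9 onward; when compiling against 0.8.2.2, iterate over kafka.cluster.Broker in findLeader() instead.

<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.11</artifactId>
    <version>0.8.2.2</version>
</dependency>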
