Using Kafka in Spring Boot
Note: Kafka consumption is configured per topic, and each message in a topic is consumed by only one consumer within a given consumer group (groupId). For example, if consumers A and B both belong to group test, each message on the topic is delivered to either A or B, never to both.
1. Add the following Kafka dependencies to pom.xml:
<!-- kafka -->
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>1.1.1.RELEASE</version>
</dependency>
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka_2.10</artifactId>
    <version>0.10.0.1</version>
</dependency>
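Note: spring-kafka releases are tied to specific Kafka client versions — the 1.1.x line is built against the 0.10.0.x clients used here — so the two versions above should generally be upgraded together.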
2. Add the following to application.properties:
# Consumer: raw data read from Kafka
# Consumer cluster address list (host:port)
kafka.consumer.servers=IP:9092,IP:9092
# Auto-commit offsets
kafka.consumer.enable.auto.commit=true
# Session timeout (ms)
kafka.consumer.session.timeout=20000
# Offset auto-commit interval (ms)
kafka.consumer.auto.commit.interval=100
# latest: consume messages as they are produced; do not replay from the beginning
kafka.consumer.auto.offset.reset=latest
# Topic to consume
kafka.consumer.topic=result
# Consumer group
kafka.consumer.group.id=test
# Number of consumer threads
kafka.consumer.concurrency=10

# Producer: converted data written back to Kafka
# Producer cluster address list (host:port)
kafka.producer.servers=IP:9092,IP:9092
# Topic to produce to
kafka.producer.topic=result
kafka.producer.retries=0
# Batch size in bytes
kafka.producer.batch.size=4096
# Linger time (ms)
kafka.producer.linger=1
# Send buffer size in bytes
kafka.producer.buffer.memory=40960
3. Producer configuration class:
package com.mapbar.track_storage.config;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

import java.util.HashMap;
import java.util.Map;

/**
 * Kafka producer configuration
 * @author Lvjiapeng
 */
@Configuration
@EnableKafka
public class KafkaProducerConfig {

    @Value("${kafka.producer.servers}")
    private String servers;
    @Value("${kafka.producer.retries}")
    private int retries;
    @Value("${kafka.producer.batch.size}")
    private int batchSize;
    @Value("${kafka.producer.linger}")
    private int linger;
    @Value("${kafka.producer.buffer.memory}")
    private int bufferMemory;

    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        props.put(ProducerConfig.RETRIES_CONFIG, retries);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        props.put(ProducerConfig.LINGER_MS_CONFIG, linger);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<String, String>(producerFactory());
    }
}
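With this configuration in place, producing is just a matter of injecting the template. Below is a minimal usage sketch (the service class and its names are illustrative, not from the original post); in spring-kafka 1.1.x, send() returns a ListenableFuture, so the outcome can be observed asynchronously:

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Service;
import org.springframework.util.concurrent.ListenableFuture;

@Service
public class TrackProducerService {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    public void send(String topic, String message) {
        // send() returns immediately; the callback fires when the broker acks the record.
        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(topic, message);
        future.addCallback(
                result -> System.out.println("sent to partition " + result.getRecordMetadata().partition()),
                ex -> System.err.println("send failed: " + ex.getMessage()));
    }
}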
4. Consumer configuration class:
package com.mapbar.track_storage.config;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

import java.util.HashMap;
import java.util.Map;

/**
 * Kafka consumer configuration
 * @author Lvjiapeng
 */
@Configuration
@EnableKafka
public class KafkaConsumerConfig {

    @Value("${kafka.consumer.servers}")
    private String servers;
    @Value("${kafka.consumer.enable.auto.commit}")
    private boolean enableAutoCommit;
    @Value("${kafka.consumer.session.timeout}")
    private String sessionTimeout;
    @Value("${kafka.consumer.auto.commit.interval}")
    private String autoCommitInterval;
    @Value("${kafka.consumer.group.id}")
    private String groupId;
    @Value("${kafka.consumer.auto.offset.reset}")
    private String autoOffsetReset;
    @Value("${kafka.consumer.concurrency}")
    private int concurrency;

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(concurrency);
        factory.getContainerProperties().setPollTimeout(1500);
        return factory;
    }

    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    public Map<String, Object> consumerConfigs() {
        Map<String, Object> propsMap = new HashMap<>();
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        return propsMap;
    }

    /**
     * Register the Kafka listener bean
     * @return
     */
    @Bean
    public RawDataListener listener() {
        return new RawDataListener();
    }
}
5. Test producer:
package com.mapbar.track_storage.controller;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;

import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;

@RequestMapping(value = "/kafka")
@Controller
public class ProducerController {

    @Autowired
    private KafkaTemplate kafkaTemplate;

    @RequestMapping(value = "/producer", method = RequestMethod.GET)
    public void consume(HttpServletRequest request, HttpServletResponse response) throws IOException {
        String value = "{\"code\":200,\"dataVersion\":\"17q1\",\"message\":\"\",\"id\":\"364f79f28eea48eefeca8c85477a10d3\",\"source\":\"didi\",\"tripList\":[{\"subTripList\":[{\"startTimeStamp\":1519879598,\"schemeList\":[{\"distance\":0.0,\"ids\":\"94666702,\",\"schemeId\":0,\"linkList\":[{\"score\":72,\"distance\":1,\"gpsList\":[{\"origLonLat\":\"116.321343,40.43242\",\"grabLonLat\":\"112.32312,40.32132\",\"timestamp\":1515149926000}]}]}],\"endTimeStamp\":1519879598,\"subTripId\":0},{\"startTimeStamp\":1519879727,\"schemeList\":[{\"distance\":1395.0,\"ids\":\"94666729,7298838,7291709,7291706,88613298,88613297,7297542,7297541,94698785,94698786,94698778,94698780,94698779,94698782,\",\"schemeId\":0,\"linkList\":[{\"score\":72,\"distance\":1,\"gpsList\":[{\"origLonLat\":\"116.321343,40.43242\",\"grabLonLat\":\"112.32312,40.32132\",\"timestamp\":1515149926000}]}]}],\"endTimeStamp\":1519879812,\"subTripId\":1},{\"startTimeStamp\":1519879836,\"schemeList\":[{\"distance\":0.0,\"ids\":\"54123007,\",\"schemeId\":0,\"linkList\":[{\"score\":72,\"distance\":1,\"gpsList\":[{\"origLonLat\":\"116.321343,40.43242\",\"grabLonLat\":\"112.32312,40.32132\",\"timestamp\":1515149926000}]}]}],\"endTimeStamp\":1519879904,\"subTripId\":2},{\"startTimeStamp\":1519879959,\"schemeList\":[{\"distance\":0.0,\"ids\":\"54190443,\",\"schemeId\":0,\"linkList\":[{\"score\":72,\"distance\":1,\"gpsList\":[{\"origLonLat\":\"116.321343,40.43242\",\"grabLonLat\":\"112.32312,40.32132\",\"timestamp\":1515149926000}]}]}],\"endTimeStamp\":1519879959,\"subTripId\":3},{\"startTimeStamp\":1519880088,\"schemeList\":[{\"distance\":2885.0,\"ids\":\"94698824,94698822,94698789,94698786,54123011,54123012,54123002,94698763,94698727,94698722,94698765,54123006,54123004,\",\"schemeId\":0,\"linkList\":[{\"score\":72,\"distance\":1,\"gpsList\":[{\"origLonLat\":\"116.321343,40.43242\",\"grabLonLat\":\"112.32312,40.32132\",\"timestamp\":1515149926000}]}]}],\"endTimeStamp\":1519880300,\"subTripId\":4},{\"startTimeStamp\":1519880393,\"schemeList\":[{\"distance\":2398.0,\"ids\":\"7309441,7303680,54123061,54123038,7309478,7309477,94698204,94698203,94698273,94698274,94698288,94698296,94698295,94698289,94698310,\",\"schemeId\":0,\"linkList\":[{\"score\":72,\"distance\":1,\"gpsList\":[{\"origLonLat\":\"116.321343,40.43242\",\"grabLonLat\":\"112.32312,40.32132\",\"timestamp\":1515149926000}]}]}],\"endTimeStamp\":1519880636,\"subTripId\":5},{\"startTimeStamp\":1519881064,\"schemeList\":[{\"distance\":35.0,\"ids\":\"7309474,\",\"schemeId\":0,\"linkList\":[{\"score\":72,\"distance\":1,\"gpsList\":[{\"origLonLat\":\"116.321343,40.43242\",\"grabLonLat\":\"112.32312,40.32132\",\"timestamp\":1515149926000}]}]}],\"endTimeStamp\":1519881204,\"subTripId\":6},{\"startTimeStamp\":1519881204,\"schemeList\":[{\"distance\":28.0,\"ids\":\"7309476,\",\"schemeId\":0,\"linkList\":[{\"score\":72,\"distance\":1,\"gpsList\":[{\"origLonLat\":\"116.321343,40.43242\",\"grabLonLat\":\"112.32312,40.32132\",\"timestamp\":1515149926000}]}]}],\"endTimeStamp\":1519881266,\"subTripId\":7},{\"startTimeStamp\":1519881291,\"schemeList\":[{\"distance\":463.0,\"ids\":\"7303683,\",\"schemeId\":0,\"linkList\":[{\"score\":72,\"distance\":1,\"gpsList\":[{\"origLonLat\":\"116.321343,40.43242\",\"grabLonLat\":\"112.32312,40.32132\",\"timestamp\":1515149926000}]}]}],\"endTimeStamp\":1519881329,\"subTripId\":8}],\"startTimeStamp\":1519879350,\"unUseTime\":1201,\"totalTime\":2049,\"endTimeStamp\":1519881399,\"tripId\":0}]}";
        for (int i = 1; i <= 500; i++) {
            kafkaTemplate.send("result", value);
        }
    }
}
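To exercise the producer once the application is up, hit the endpoint (assuming the default server port 8080): a GET to http://localhost:8080/kafka/producer pushes the sample payload to the result topic 500 times, and the listener from step 6 prints each message as it is consumed.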
6. Test consumer:
import net.sf.json.JSONObject;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.log4j.Logger;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

import java.io.IOException;
import java.util.List;

/**
 * Kafka listener
 * @author shangzz
 */
@Component
public class RawDataListener {

    Logger logger = Logger.getLogger(RawDataListener.class);

    @Autowired
    private MatchRoadService matchRoadService;

    /**
     * Consume Kafka data in real time (each message produced to the monitored topic is consumed as soon as it arrives)
     * @param record
     * @throws IOException
     */
    @KafkaListener(topics = {"${kafka.consumer.topic}"})
    public void listen(ConsumerRecord<?, ?> record) throws IOException {
        String value = (String) record.value();
        System.out.println(value);
    }
}
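The test listener above only prints the payload. As a hedged sketch of where the injected MatchRoadService would come in (the service method name is hypothetical; JSONObject.fromObject is json-lib's standard parsing entry point), the listener body could parse the track JSON before persisting it:

@KafkaListener(topics = {"${kafka.consumer.topic}"})
public void listen(ConsumerRecord<?, ?> record) throws IOException {
    String value = (String) record.value();
    // Parse the track payload and extract the fields of interest.
    JSONObject json = JSONObject.fromObject(value);
    String id = json.getString("id");
    // Hand the parsed trips to the service layer (hypothetical method name):
    // matchRoadService.saveTrips(id, json.getJSONArray("tripList"));
    logger.info("received track " + id + " from " + json.getString("source"));
}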
Summary:
① With the producer configuration in place, @Autowired a KafkaTemplate and call its send method to produce messages.
② With the consumer configuration in place, annotate a method with @KafkaListener(topics = {"${kafka.consumer.topic}"}) and take a ConsumerRecord<?, ?> record parameter; the topic is then consumed automatically.
③ Any other Kafka setting follows the same pattern: add, change, or remove the property in application.properties and make the corresponding change in the configuration classes.
Part 2: How do you let one topic be consumed by multiple different groups?
Don't configure the groupId through the configuration file. If you look carefully at the @KafkaListener annotation, you'll find it has a containerFactory attribute, which lets you specify which listener container factory to use.
Example:
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

@Configuration
public class KafkaConsumerConfig {

    private String brokers = "192.168.52.130:9092,192.168.52.131:9092,192.168.52.133:9092";
    private String group1 = "test1";
    private String group2 = "test2";

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory1() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<String, String>();
        factory.setConsumerFactory(consumerFactory1());
        factory.setConcurrency(4);
        factory.getContainerProperties().setPollTimeout(4000);
        return factory;
    }

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory2() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<String, String>();
        factory.setConsumerFactory(consumerFactory2());
        factory.setConcurrency(4);
        factory.getContainerProperties().setPollTimeout(4000);
        return factory;
    }

    public Map<String, Object> getCommonProperties() {
        Map<String, Object> properties = new HashMap<String, Object>();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "100");
        properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // Default group id; each consumer factory below overrides this for its own group.
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, group1);
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        return properties;
    }

    public ConsumerFactory<String, String> consumerFactory1() {
        Map<String, Object> properties = getCommonProperties();
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, group1);
        return new DefaultKafkaConsumerFactory<String, String>(properties);
    }

    public ConsumerFactory<String, String> consumerFactory2() {
        Map<String, Object> properties = getCommonProperties();
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, group2);
        return new DefaultKafkaConsumerFactory<String, String>(properties);
    }
}
Finally, reference the container factory by name in @KafkaListener:
@KafkaListener(id="test1",topics = "test-topic", containerFactory="kafkaListenerContainerFactory1")
@KafkaListener(id="test2",topics = "test-topic", containerFactory="kafkaListenerContainerFactory2")
In newer versions of spring-kafka (1.3 and later), the @KafkaListener annotation also has a groupId attribute that can be set directly.
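For example, assuming such a version, the two extra container factories above are unnecessary:

@KafkaListener(id = "listener1", topics = "test-topic", groupId = "group1")
@KafkaListener(id = "listener2", topics = "test-topic", groupId = "group2")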
--------------------------------------------------------------------------------------------------
Reposted from: https://blog.csdn.net/lv_1093964643/article/details/83177280