Differences between spark-streaming-kafka-0-8 and spark-streaming-kafka-0-10

In short: the 0-8 integration is configured with metadata.broker.list and kafka.serializer.StringDecoder classes, its createDirectStream returns (key, value) tuples, and the 0-8 package also still offers a receiver-based createStream API; the 0-10 integration is configured with bootstrap.servers, Kafka deserializer classes and a mandatory group.id, its createDirectStream takes a LocationStrategy and a ConsumerStrategy and returns ConsumerRecord objects, and it can commit offsets back to Kafka itself. The two sections below show a minimal working consumer for each version.
I. spark-streaming-kafka-0-8_2.11-2.0.2.jar
1. pom.xml
<!-- compile (default) scope: these Spark classes are referenced directly in the consumer code below -->
<!-- https://mvnrepository.com/artifact/org.apache.spark/spark-core_2.11 -->
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-core_2.11</artifactId>
    <version>2.0.2</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.spark/spark-streaming_2.11 -->
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming_2.11</artifactId>
    <version>2.0.2</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.spark/spark-streaming-kafka-0-8_2.11 -->
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming-kafka-0-8_2.11</artifactId>
    <version>2.0.2</version>
</dependency>
2. Kafka consumer class
package com.spark.main;

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaPairInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka.KafkaUtils;

import kafka.serializer.StringDecoder;
import scala.Tuple2;

public class KafkaConsumer {
    public static void main(String[] args) throws InterruptedException {
        /**
         * setMaster("local[2]"): at least two threads are needed locally,
         * one to receive messages and one to process them.
         * The batch interval is 500 ms, i.e. Kafka is read every half second.
         */
        SparkConf sparkConf = new SparkConf().setAppName("KafkaConsumer").setMaster("local[2]");
        JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, Durations.milliseconds(500));
        jssc.checkpoint("hdfs://192.168.168.200:9000/checkpoint/KafkaConsumer");

        /**
         * Kafka connection parameters
         */
        Set<String> topicsSet = new HashSet<String>(Arrays.asList("TestTopic"));
        Map<String, String> kafkaParams = new HashMap<String, String>();
        kafkaParams.put("metadata.broker.list", "192.168.168.200:9092");
        kafkaParams.put("auto.offset.reset", "smallest"); // smallest: start from the earliest offset; largest: start from the latest
        kafkaParams.put("fetch.message.max.bytes", "524288");

        JavaPairInputDStream<String, String> messages = KafkaUtils.createDirectStream(jssc, String.class, String.class,
                StringDecoder.class, StringDecoder.class, kafkaParams, topicsSet);

        /**
         * _2() returns the second element of the tuple, i.e. the message value
         */
        JavaDStream<String> lines = messages.map(new Function<Tuple2<String, String>, String>() {
            public String call(Tuple2<String, String> tuple2) {
                return tuple2._2();
            }
        });

        lines.foreachRDD(new VoidFunction<JavaRDD<String>>() {
            public void call(JavaRDD<String> rdd) throws Exception {
                rdd.foreach(new VoidFunction<String>() {
                    public void call(String s) throws Exception {
                        System.out.println(s);
                    }
                });
            }
        });

        // Start the computation
        jssc.start();
        jssc.awaitTermination();
    }
}
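With the 0-8 direct approach, offsets are tracked only in the Spark checkpoint; nothing is written back to ZooKeeper or Kafka. If you need to record the consumed offsets yourself (e.g. in ZooKeeper or a database), you can read them from each batch via HasOffsetRanges. A minimal sketch, assuming the messages stream declared above; it additionally needs imports of org.apache.spark.api.java.JavaPairRDD, org.apache.spark.streaming.kafka.HasOffsetRanges and org.apache.spark.streaming.kafka.OffsetRange:

messages.foreachRDD(new VoidFunction<JavaPairRDD<String, String>>() {
    public void call(JavaPairRDD<String, String> rdd) throws Exception {
        // the RDDs produced by a direct stream implement HasOffsetRanges
        OffsetRange[] offsetRanges = ((HasOffsetRanges) rdd.rdd()).offsetRanges();
        for (OffsetRange o : offsetRanges) {
            System.out.println(o.topic() + " partition " + o.partition()
                    + " offsets [" + o.fromOffset() + ", " + o.untilOffset() + ")");
        }
        // store the ranges externally here if you need your own offset bookkeeping
    }
});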
II. spark-streaming-kafka-0-10_2.11-2.0.2.jar
1. pom.xml
<!-- compile (default) scope: these Spark classes are referenced directly in the consumer code below -->
<!-- https://mvnrepository.com/artifact/org.apache.spark/spark-core_2.11 -->
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-core_2.11</artifactId>
    <version>2.0.2</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.spark/spark-streaming_2.11 -->
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming_2.11</artifactId>
    <version>2.0.2</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.spark/spark-streaming-kafka-0-10_2.11 -->
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming-kafka-0-10_2.11</artifactId>
    <version>2.0.2</version>
</dependency>
2. Kafka consumer class
package com.spark.main;

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaDStream;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;

public class Kafka10Consumer {
    public static void main(String[] args) throws InterruptedException {
        /**
         * setMaster("local[2]"): at least two threads are needed locally,
         * one to receive messages and one to process them.
         * The batch interval is 500 ms, i.e. Kafka is read every half second.
         */
        SparkConf sparkConf = new SparkConf().setAppName("Kafka10Consumer").setMaster("local[2]");
        JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, Durations.milliseconds(500));
        jssc.checkpoint("hdfs://192.168.168.200:9000/checkpoint/Kafka10Consumer");

        /**
         * Kafka connection parameters
         */
        Set<String> topicsSet = new HashSet<String>(Arrays.asList("TestTopic"));
        Map<String, Object> kafkaParams = new HashMap<String, Object>();
        kafkaParams.put("bootstrap.servers", "192.168.168.200:9092");
        kafkaParams.put("key.deserializer", StringDeserializer.class);
        kafkaParams.put("value.deserializer", StringDeserializer.class);
        kafkaParams.put("group.id", "Kafka10Consumer");
        kafkaParams.put("auto.offset.reset", "earliest"); // earliest: start from the earliest offset; latest: start from the latest
        kafkaParams.put("enable.auto.commit", false);

        // KafkaUtils.createDirectStream(...) creates the Kafka stream; the connection settings come from kafkaParams
        JavaInputDStream<ConsumerRecord<String, String>> messages = KafkaUtils.createDirectStream(
                jssc,
                LocationStrategies.PreferConsistent(),
                ConsumerStrategies.<String, String>Subscribe(topicsSet, kafkaParams)
        );

        /**
         * ConsumerRecord.value() returns the message value
         */
        JavaDStream<String> lines = messages.map(new Function<ConsumerRecord<String, String>, String>() {
            @Override
            public String call(ConsumerRecord<String, String> consumerRecord) throws Exception {
                return consumerRecord.value();
            }
        });

        lines.foreachRDD(new VoidFunction<JavaRDD<String>>() {
            public void call(JavaRDD<String> rdd) throws Exception {
                rdd.foreach(new VoidFunction<String>() {
                    public void call(String s) throws Exception {
                        System.out.println(s);
                    }
                });
            }
        });

        // Start the computation
        jssc.start();
        jssc.awaitTermination();
    }
}
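Because enable.auto.commit is set to false, the class above never commits any offsets to Kafka; on restart it relies entirely on the checkpoint. Unlike 0-8, the 0-10 integration lets you commit offsets back to Kafka yourself once a batch has been processed. A minimal sketch, assuming the messages stream declared above; it additionally needs imports of CanCommitOffsets, HasOffsetRanges and OffsetRange from org.apache.spark.streaming.kafka010:

messages.foreachRDD(new VoidFunction<JavaRDD<ConsumerRecord<String, String>>>() {
    public void call(JavaRDD<ConsumerRecord<String, String>> rdd) throws Exception {
        // the RDDs produced by a direct stream implement HasOffsetRanges
        OffsetRange[] offsetRanges = ((HasOffsetRanges) rdd.rdd()).offsetRanges();
        // ... process the batch here ...
        // then commit this batch's offsets back to Kafka asynchronously
        ((CanCommitOffsets) messages.inputDStream()).commitAsync(offsetRanges);
    }
});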