package com.gm.hive.SparkHive;

import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.Optional;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.streaming.Durations;
import org.apache.spark.streaming.api.java.JavaInputDStream;
import org.apache.spark.streaming.api.java.JavaPairDStream;
import org.apache.spark.streaming.api.java.JavaStreamingContext;
import org.apache.spark.streaming.kafka010.ConsumerStrategies;
import org.apache.spark.streaming.kafka010.KafkaUtils;
import org.apache.spark.streaming.kafka010.LocationStrategies;

import scala.Tuple2;

public class App {

    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("streamingTest");
        JavaSparkContext sc = new JavaSparkContext(conf);
        sc.setLogLevel("ERROR");
        // A checkpoint directory is required because updateStateByKey is used below
        sc.setCheckpointDir("./checkpoint");
        JavaStreamingContext ssc = new JavaStreamingContext(sc, Durations.seconds(10));

        // Kafka parameters; all of these are required, leaving any out causes an error
        Map<String, Object> kafkaParams = new HashMap<>();
        kafkaParams.put("bootstrap.servers", "192.168.174.200:9092");
        kafkaParams.put("key.deserializer", StringDeserializer.class);
        kafkaParams.put("value.deserializer", StringDeserializer.class);
        kafkaParams.put("group.id", "newgroup2");
        kafkaParams.put("auto.offset.reset", "latest");
        kafkaParams.put("enable.auto.commit", false);

        Collection<String> topics = Arrays.asList("test");

        JavaInputDStream<ConsumerRecord<String, String>> stream = KafkaUtils
                .createDirectStream(ssc, LocationStrategies.PreferConsistent(),
                        ConsumerStrategies.<String, String>Subscribe(topics, kafkaParams));

        // Note: each element of the stream is a ConsumerRecord, so the message body comes from value()
        JavaPairDStream<String, Integer> counts = stream
                .flatMap(x -> Arrays.asList(x.value().split(" ")).iterator())
                .mapToPair(x -> new Tuple2<String, Integer>(x, 1))
                .reduceByKey((x, y) -> x + y);
        // counts.print();

        JavaPairDStream<String, Integer> result = counts
                .updateStateByKey(new Function2<List<Integer>, Optional<Integer>, Optional<Integer>>() {
                    private static final long serialVersionUID = 1L;

                    @Override
                    public Optional<Integer> call(List<Integer> values, Optional<Integer> state) throws Exception {
                        /**
                         * values: the new values for this key in the current batch, e.g. [1,1,1,1,1]
                         * state:  the accumulated state of this key from earlier batches
                         */
                        Integer updateValue = 0;
                        if (state.isPresent()) {
                            updateValue = state.get();
                        }
                        for (Integer value : values) {
                            updateValue += value;
                        }
                        return Optional.of(updateValue);
                    }
                });

        // Database connection settings
        String url = "jdbc:postgresql://192.168.174.200:5432/postgres?charSet=utf-8";
        Properties connectionProperties = new Properties();
        connectionProperties.put("user", "postgres");
        connectionProperties.put("password", "postgres");
        connectionProperties.put("driver", "org.postgresql.Driver");

        result.print();
        result.foreachRDD(new VoidFunction<JavaPairRDD<String, Integer>>() {
            public void call(JavaPairRDD<String, Integer> rdd) throws Exception {
                // Convert each (word, count) pair into a ResultRow bean
                JavaRDD<ResultRow> rowRDD = rdd.map(new Function<Tuple2<String, Integer>, ResultRow>() {
                    public ResultRow call(Tuple2<String, Integer> arg0) throws Exception {
                        ResultRow rr = new ResultRow();
                        rr.setTypeid(arg0._1);
                        rr.setKczs(arg0._2);
                        return rr;
                    }
                });
                SparkSession spark = SparkSession.builder().config(rdd.context().getConf()).getOrCreate();
                Dataset<Row> data = spark.createDataFrame(rowRDD, ResultRow.class);
                // Save the data to the table, overwriting its previous contents
                data.write().mode(SaveMode.Overwrite).jdbc(url, "kcssqktj", connectionProperties);
            }
        });

        ssc.start();
        try {
            ssc.awaitTermination();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        ssc.close();
    }
}
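Note that enable.auto.commit is set to false above and the job never commits offsets back to Kafka, so the consumer group's position is not persisted between runs; after a restart, consumption simply follows auto.offset.reset. If you want Kafka to remember the consumed offsets, a minimal sketch (an illustration, not part of the original App class) is to commit them once per batch through the kafka-0-10 integration, placed in main() before ssc.start():

// Illustrative sketch, not in the original code: commit offsets after each batch.
// Extra imports needed at the top of the file:
//   import org.apache.spark.streaming.kafka010.CanCommitOffsets;
//   import org.apache.spark.streaming.kafka010.HasOffsetRanges;
//   import org.apache.spark.streaming.kafka010.OffsetRange;
// Uses the same `stream` variable created in main().
stream.foreachRDD(rdd -> {
    // Offset ranges are only available on the RDD that comes directly from the Kafka direct stream
    OffsetRange[] offsetRanges = ((HasOffsetRanges) rdd.rdd()).offsetRanges();
    // Asynchronously commit this batch's offsets back to Kafka for group "newgroup2"
    ((CanCommitOffsets) stream.inputDStream()).commitAsync(offsetRanges);
});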
package com.gm.hive.SparkHive;

import java.io.Serializable;

public class ResultRow implements Serializable {
    private static final long serialVersionUID = 6681372116317508248L;

    String typeid;
    int kczs;

    public String getTypeid() {
        return typeid;
    }

    public void setTypeid(String typeid) {
        this.typeid = typeid;
    }

    public int getKczs() {
        return kczs;
    }

    public void setKczs(int kczs) {
        this.kczs = kczs;
    }
}
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.test</groupId>
<artifactId>kcssqktj_spark</artifactId>
<version>0.0.1-SNAPSHOT</version>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
</properties>
<dependencies>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>3.8.1</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>1.7.22</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>2.8.0</version>
<exclusions>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_2.11</artifactId>
<version>2.0.0</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-hive_2.11</artifactId>
<version>2.0.0</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming_2.11</artifactId>
<version>2.0.0</version>
<exclusions>
<exclusion>
<artifactId>slf4j-log4j12</artifactId>
<groupId>org.slf4j</groupId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_2.11</artifactId>
<version>2.0.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hive/hive-jdbc -->
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-jdbc</artifactId>
<version>2.1.1</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.hive/hive-exec -->
<dependency>
<groupId>org.apache.hive</groupId>
<artifactId>hive-exec</artifactId>
<version>2.1.1</version>
</dependency>
<dependency>
<groupId>org.postgresql</groupId>
<artifactId>postgresql</artifactId>
<version>9.4-1201-jdbc4</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming-kafka-0-10_2.11</artifactId>
<version>2.0.0</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<configuration>
<source>1.8</source>
<target>1.8</target>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<shadedArtifactAttached>true</shadedArtifactAttached>
<shadedClassifierName>allinone</shadedClassifierName>
<artifactSet>
<includes>
<include>*:*</include>
</includes>
</artifactSet>
<filters>
<filter>
<artifact>*:*</artifact>
<excludes>
<exclude>META-INF/*.SF</exclude>
<exclude>META-INF/*.DSA</exclude>
<exclude>META-INF/*.RSA</exclude>
</excludes>
</filter>
</filters>
<transformers>
<transformer
implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
<resource>reference.conf</resource>
</transformer>
<transformer
implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
<resource>META-INF/spring.handlers</resource>
</transformer>
<transformer
implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
<resource>META-INF/spring.schemas</resource>
</transformer>
<transformer
implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
<manifestEntries>
<Main-Class></Main-Class>
</manifestEntries>
</transformer>
</transformers>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
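With these dependencies in place (kafka-clients is pulled in transitively through spark-streaming-kafka-0-10_2.11), you can feed the word count some input by sending space-separated lines to the test topic. The producer below is a sketch for illustration only; the class name TestProducer and the sample messages are made up, while the broker address and topic name come from App.

package com.gm.hive.SparkHive;

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

// Hypothetical helper, not part of the original post: sends a few
// space-separated lines to the "test" topic consumed by App.
public class TestProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.174.200:9092");
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("test", "hello spark streaming"));
            producer.send(new ProducerRecord<>("test", "hello kafka"));
            producer.flush();
        }
    }
}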
