spark java api数据分析实战
1 spark关键包
<!--spark-->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_2.10</artifactId>
<version>1.5.1</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming_2.10</artifactId>
<version>1.5.1</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming-kafka_2.10</artifactId>
<version>1.5.1</version>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_2.10</artifactId>
<version>1.5.1</version>
</dependency>
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
<version>2.10.4</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-core</artifactId>
<version>2.7.2</version>
</dependency>
<dependency>
<groupId>com.esotericsoftware.kryo</groupId>
<artifactId>kryo</artifactId>
<version>2.21</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.5</version>
</dependency>
2 分析模型昼伏夜出 spark-java
package com.xinyi.spark.analysis.tasks;
import com.google.common.base.Optional;
import com.xinyi.spark.analysis.utils.dbhelper.DBHelper;
import com.xinyi.xframe.base.utils.StringUtils;
import com.xinyi.xframe.base.utils.UUIDGenerator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Base64;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.*;
/**
 * Spark batch job that flags "nocturnal" tenants (昼伏夜出 -- active by night,
 * hidden by day): tenant ids whose HBase "recordinfo" rows appear more than
 * 3 times inside a task's configured night-hour window but fewer than once
 * during the day-time window.
 *
 * Flow visible in this class: read pending tasks (status='0') from the Oracle
 * table recordinfo_task, mark them running, fan a rowkey-salted HBase scan over
 * the task's time range (night vs. day hourlong filters), count occurrences per
 * tenementid, left-outer-join night counts with day counts, keep the nocturnal
 * hits, insert them into recordinfo_result and mark the task done.
 */
public class RecordInfoSparkAnalsis {
// Shared helper for the Oracle task/result tables (datasource "xinyidb").
private static DBHelper dbHelper = new DBHelper("xinyidb");
// Rowkeys are salted with a leading digit; this is the highest salt value, so
// every time-range query below is fanned out over prefixes 0..9.
private final static String endNum = "9";
/**
 * Job entry point.
 * Side effects: updates recordinfo_task.status ('2' while running, '1' when
 * done), inserts rows into recordinfo_result, prints progress to stdout.
 * @param args unused
 */
public static void main(String[] args) {
// Pending tasks; timestamps rendered as yyyymmddhh24miss strings by Oracle.
String sql ="select id,to_char(starttime,'yyyymmddhh24miss') starttime," +
"to_char(endtime,'yyyymmddhh24miss') endtime,starthour,endhour," +
"to_char(createtime,'yyyymmddhh24miss') createtime from recordinfo_task where status='0'";
List<Map<String,Object>> taskList = dbHelper.query(sql);
System.out.println(taskList);
if(taskList.isEmpty()){
System.out.println("任务列表为空!");
return;
}
// Claim every pending task up front by flipping it to status '2' (running).
for(Map<String,Object> task :taskList){
String taskid = String.valueOf(task.get("ID"));
updateRecordTask(taskid,"2");
}
// Initialize the Spark context with Kryo serialization.
SparkConf conf = new SparkConf().setAppName("RecordInfoSparkAnalsis");
conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
conf.set("spark.kryo.registrator", "com.xinyi.spark.analysis.utils.MyRegistrator");
// NOTE(review): Spark expects a size suffix here (e.g. "256m"); bare "256"
// may be interpreted differently across versions -- confirm.
conf.set("spark.kryoserializer.buffer.max", "256");
JavaSparkContext jsc = new JavaSparkContext(conf);
// Build the Spark<->HBase configuration from Spark's Hadoop configuration.
Configuration hbConf = HBaseConfiguration.create(jsc.hadoopConfiguration());
// Highest rowkey salt prefix to iterate (scans cover prefixes 0..endInt).
int endInt = Integer.valueOf(endNum);
for(Map<String,Object> task :taskList){
Object startObj = task.get("STARTTIME");
Object endObj = task.get("ENDTIME");
// Skip tasks missing either end of the time range.
if(!StringUtils.isEmpty(startObj)&&!StringUtils.isEmpty(endObj)){
long s = System.currentTimeMillis();
String startTime = String.valueOf(startObj);
String endTime = String.valueOf(endObj);
// STARTHOUR/ENDHOUR bound the "night" window; compared as byte strings by
// the HBase filters, so presumably hhmmss-formatted -- TODO confirm.
String blackStartHour = String.valueOf(task.get("STARTHOUR"));
String blackEndHour = String.valueOf(task.get("ENDHOUR"));
System.out.println(blackStartHour+"---"+blackEndHour);
// Accumulator RDDs unioned across all salt prefixes: white = day, black = night.
JavaPairRDD<String, Long> white = null;
JavaPairRDD<String, Long> black = null;
for (int i = 0; i <= endInt; i++) {
// Rowkey range for this salt prefix: "<digit><yyyymmddhh24miss>".
String startkey = String.valueOf(i) + startTime;
String endkey = String.valueOf(i) +endTime;
System.out.println(startkey);
System.out.println(endkey);
// Night-time rows: hourlong strictly between blackStartHour and blackEndHour.
JavaPairRDD<String, Long> reduceRdd2 = getStringLongJavaPairRDD(jsc, hbConf, startkey, endkey,blackStartHour,blackEndHour);
if(black==null){
black = reduceRdd2;
}else {
black = black.union(reduceRdd2);
}
// Day-time rows: hourlong strictly between blackEndHour and end of day.
JavaPairRDD<String, Long> whiteReduceRdd = getStringLongJavaPairRDD(jsc, hbConf, startkey, endkey,blackEndHour,"235959");
if(white==null){
white = whiteReduceRdd;
}else {
white = white.union(whiteReduceRdd);
}
}
// Debug dump: collectAsMap keeps only one value per key (this runs before the
// reduce) and pulls the whole RDD to the driver -- expensive on large data.
System.out.println(black.collectAsMap());
// Sum night-time occurrences per tenementid.
black = black.reduceByKey(new Function2<Long, Long, Long>() {
public Long call(Long a1, Long a2) throws Exception {
return a1 + a2;
}
});
// Sum day-time occurrences per tenementid.
white = white.reduceByKey(new Function2<Long, Long, Long>() {
public Long call(Long a1, Long a2) throws Exception {
return a1 + a2;
}
});
// Left outer join on tenementid: keeps every night-active key; the day count
// is absent (empty Optional) when the tenant was never seen by day.
JavaPairRDD<String,Tuple2<Long,Optional<Long>>> joinRdd = black.leftOuterJoin(white);
// Keep tenants seen more than 3 times at night and fewer than once by day.
joinRdd = joinRdd.filter(new Function<Tuple2<String, Tuple2<Long, Optional<Long>>>, Boolean>() {
@Override
public Boolean call(Tuple2<String, Tuple2<Long, Optional<Long>>> stringTuple2Tuple2) throws Exception {
Long val1 = stringTuple2Tuple2._2._1;
Long val2 = 0l;
// asSet() is empty when there was no day-time match; otherwise the loop
// extracts the single day count into val2.
Set valSet = stringTuple2Tuple2._2._2.asSet();
for(Object val:valSet){
val2= Long.valueOf(val.toString());
}
//System.out.println(val1+"--"+val2);
if(valSet.isEmpty()&&val1>3){
return true;
}else if(val2<1&&val1>3){
return true;
}
return false;
}
});
// Materialize the hits on the driver for the JDBC inserts below.
Map<String,Tuple2<Long,Optional<Long>>> collectMap = joinRdd.collectAsMap();
System.out.println(collectMap);
String taskid = String.valueOf(task.get("ID"));
// Persist the hits, then mark the task finished (status '1').
insert2RecordResult(taskid,collectMap);
updateRecordTask(taskid,"1");
long se = System.currentTimeMillis();
System.out.println("共耗时:"+(se-s));
}
}
jsc.stop();
}
/**
 * Updates recordinfo_task.status for one task.
 * NOTE(review): SQL is built by string concatenation; taskid/status come from
 * this job's own DB reads here, but parameterized SQL would be safer.
 * @param taskid task row id
 * @param status new status value ('1' done, '2' running)
 */
private static void updateRecordTask(String taskid,String status){
String sql = "update recordinfo_task set status='"+status+"' where id='"+taskid+"'";
dbHelper.update(sql);
System.out.println("任务表状态已更新!");
}
/**
 * Inserts the analysis hits into the Oracle result table recordinfo_result.
 * Each row gets a fresh UUID; only the night count (vals._1) is stored.
 * NOTE(review): concatenated SQL; the key originates from HBase cell values,
 * so bind variables would be safer.
 * @param taskid  owning task id
 * @param results tenementid -> (night count, optional day count)
 */
private static void insert2RecordResult(String taskid, Map<String, Tuple2<Long, Optional<Long>>> results){
Set<String> keySet = results.keySet();
for(String key :keySet){
Tuple2<Long, Optional<Long>> vals = results.get(key);
String id= UUIDGenerator.generateOriginnalUUID();
String sql = "insert into recordinfo_result (id,taskid,tenementid,num) values ('"+id+"','"+taskid+"','"+key+"','"+vals._1+"')";
dbHelper.update(sql);
}
System.out.println("结果集已插入数据库");
}
/**
 * Sums values per key, then keeps only entries whose total compares to
 * {@code val} with the given operator; any other operator keeps nothing.
 * NOTE(review): not referenced anywhere in this class as shown -- possibly dead code.
 * @param black   input (key, count) pairs
 * @param val     threshold the summed value is compared against
 * @param compare comparison operator, ">" or "<"
 * @return reduced and filtered RDD
 */
private static JavaPairRDD<String, Long> getStringLongJavaPairRDD(JavaPairRDD<String, Long> black,final int val,final String compare) {
// Sum all values for the same key.
black = black.reduceByKey(new Function2<Long, Long, Long>() {
@Override
public Long call(Long a1, Long a2) throws Exception {
return a1 + a2;
}
});
// Keep only keys whose summed value passes the requested comparison.
black = black.filter(new Function<Tuple2<String, Long>, Boolean>() {
@Override
public Boolean call(Tuple2<String, Long> stringLongTuple2) throws Exception {
if(">".equals(compare)){
if(stringLongTuple2._2>val){
//System.out.println(stringLongTuple2._1+"---"+stringLongTuple2._2);
return true;
}
}else if("<".equals(compare)){
if(stringLongTuple2._2<val){
//System.out.println(stringLongTuple2._1+"==="+stringLongTuple2._2);
return true;
}
}
return false;
}
});
return black;
}
/**
 * Scans the HBase table "recordinfo" over [startkey, endkey) with an hourlong
 * window filter, mapping each matching row to (tenementid, 1L).
 * Both hour bounds are exclusive (GREATER/LESS) and compared as raw bytes.
 * @param jsc       active Spark context
 * @param hbConf    HBase configuration; mutated here (table name + serialized scan)
 * @param startkey  scan start rowkey (inclusive)
 * @param endkey    scan end rowkey (exclusive)
 * @param startHour lower info:hourlong bound (exclusive)
 * @param endHour   upper info:hourlong bound (exclusive)
 * @return one (tenementid, 1L) pair per matching row; not yet reduced
 */
private static JavaPairRDD<String, Long> getStringLongJavaPairRDD(JavaSparkContext jsc, Configuration hbConf, String startkey, String endkey,String startHour,String endHour) {
Scan scan = new Scan(Bytes.toBytes(startkey), Bytes.toBytes(endkey));
// Scan scan = new Scan();
scan.setCacheBlocks(true);
scan.setCaching(10000);
// Redundant: the constructor above already set the start row.
scan.setStartRow(Bytes.toBytes(startkey));
scan.addFamily(Bytes.toBytes("info"));// column family
// Hour-of-day window: startHour < info:hourlong < endHour (byte-wise compare).
FilterList filterList = new FilterList();
Filter gtfilter = new SingleColumnValueFilter(Bytes.toBytes("info"),Bytes.toBytes("hourlong"), CompareFilter.CompareOp.GREATER,Bytes.toBytes(startHour));
filterList.addFilter(gtfilter);
Filter ltfilter = new SingleColumnValueFilter(Bytes.toBytes("info"),Bytes.toBytes("hourlong"), CompareFilter.CompareOp.LESS,Bytes.toBytes(endHour));
filterList.addFilter(ltfilter);
scan.setFilter(filterList);
// Serialize the scan so TableInputFormat can read it from the configuration.
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan proto = null;
try {
proto = ProtobufUtil.toScan(scan);
} catch (IOException e) {
// NOTE(review): only logged; a null proto would NPE on the next line.
e.printStackTrace();
}
String scanToString = Base64.encodeBytes(proto.toByteArray());
hbConf.set(TableInputFormat.INPUT_TABLE, "recordinfo");// table name
hbConf.set(TableInputFormat.SCAN, scanToString);
JavaPairRDD<ImmutableBytesWritable, Result> rdd = jsc.newAPIHadoopRDD(hbConf, TableInputFormat.class, ImmutableBytesWritable.class, Result.class);
// Map each row to (tenementid, 1L), scanning cells for the tenementid qualifier.
JavaPairRDD<String, Long> rddmap = rdd.mapToPair(new PairFunction<Tuple2<ImmutableBytesWritable, Result>, String, Long>() {
public Tuple2<String, Long> call(Tuple2<ImmutableBytesWritable, Result> item) throws Exception {
Iterator<Cell> it = item._2().listCells().iterator();
String tenementid = "";
while (it.hasNext()) {
Cell c = it.next();
String qualifier = Bytes.toString(CellUtil.cloneQualifier(c));
if (qualifier.equals("tenementid")) {
tenementid = Bytes.toString(CellUtil.cloneValue(c)).trim();
}
}
return new Tuple2<String, Long>(tenementid, 1L);
}
});
// Per-key accumulation happens in the caller via reduceByKey.
return rddmap;
}
}
spark java api数据分析实战的更多相关文章
- Spark Java API 计算 Levenshtein 距离
Spark Java API 计算 Levenshtein 距离 在上一篇文章中,完成了Spark开发环境的搭建,最终的目标是对用户昵称信息做聚类分析,找出违规的昵称.聚类分析需要一个距离,用来衡量两 ...
- Spark Java API 之 CountVectorizer
Spark Java API 之 CountVectorizer 由于在Spark中文本处理与分析的一些机器学习算法的输入并不是文本数据,而是数值型向量.因此,需要进行转换.而将文本数据转换成数值型的 ...
- 在 IntelliJ IDEA 中配置 Spark(Java API) 运行环境
1. 新建Maven项目 初始Maven项目完成后,初始的配置(pom.xml)如下: 2. 配置Maven 向项目里新建Spark Core库 <?xml version="1.0& ...
- spark (java API) 在Intellij IDEA中开发并运行
概述:Spark 程序开发,调试和运行,intellij idea开发Spark java程序. 分两部分,第一部分基于intellij idea开发Spark实例程序并在intellij IDEA中 ...
- spark java API 实现二次排序
package com.spark.sort; import java.io.Serializable; import scala.math.Ordered; public class SecondS ...
- 【Spark Java API】broadcast、accumulator
转载自:http://www.jianshu.com/p/082ef79c63c1 broadcast 官方文档描述: Broadcast a read-only variable to the cl ...
- Spark基础与Java Api介绍
原创文章,转载请注明: 转载自http://www.cnblogs.com/tovin/p/3832405.html 一.Spark简介 1.什么是Spark 发源于AMPLab实验室的分布式内存计 ...
- Elasticsearch 5.4.3实战--Java API调用:索引mapping创建
因为项目开发使用的是Java语言, 项目的开发架构是Spring MVC+ maven的jar包管理, 所以今天重点说说ES 5.4.3 的Java API的源码实战 1. pom.xml文件增加依 ...
- 利用SparkLauncher 类以JAVA API 编程的方式提交Spark job
一.环境说明和使用软件的版本说明: hadoop-version:hadoop-2.9.0.tar.gz spark-version:spark-2.2.0-bin-hadoop2.7.tgz jav ...
随机推荐
- MYSQL-用户密码修改
解决方法如下:1.终端中结束当前正在运行的mysql进程.# sudo /etc/init.d/mysql stop2.用mysql安全模式运行并跳过权限验证.# sudo /usr/bin/mysq ...
- Java 学习笔记之 Suspend和Resume
Suspend和Resume: Suspend和Resume使用方法: 以下例子证明了线程确实被暂停了,而且还可以恢复成运行状态. public class SuspendResumeThread e ...
- Java 学习笔记之 线程interrupt方法
线程interrupt方法: interrupt方法是用来停止线程的,但是他的使用效果并不像for+break那样,马上就停止循环. 调用interrupt()其实仅仅是在当前线程中打了一个停止标记, ...
- 使用echarts画一个类似组织结构图的图表
昨天,写了一篇关于圆环进度条的博客(请移步:Vue/React圆环进度条),已经烦不胜烦,今天又遇到了需要展示类似公司的组织结构图的功能需求,要冒了!!! 这种需求,自己用div+css也是可以实现的 ...
- deferred对象和promise对象(二)---deferred对象
早上醒来,继续讨论deferred对象和primise对象. deferred对象的的方法: 1.$.Deferred()-----生成一个deferred对象 2.deferred.done()-- ...
- COGS 2510. 拯救紫萱学姐
[题目描述] 其实在开考前半个小时题面并不是这样的. 由于明天要考试,同学们要把抽屉里的书都搬空,书很多而且办了走读不能回寝室的学长一眼就看到了回班撩他的学姐,于是就把学姐当学长用♂了:“帮我把这摞书 ...
- 安装高可用Hadoop生态 (四) 安装Spark
4. 安装Spark 4.1. 准备目录 -bin-without-hadoop.tgz -C /opt/cloud/packages/ -bin-without-hadoop /opt/clo ...
- C# 动态(不定)类型和不定参数数量,使用param写入CSV文档的最简单方法,提供excel(或记事本)阅读支持格式
在开发一个项目,使用C#写入CSV文件时,虽并未遇到太多阻碍,但是很多小伙伴估计和我有过同样的想法.简单的写入CSV,固定参数数量就好了很简单写完.但是如果遇到你得到的数据参数数量和参数类型未知或者动 ...
- python-Flask模版注入攻击SSTI(python沙盒逃逸)
一篇以python Flask 模版渲染为例子的SSTI注入教学~ 0x01 Flask使用和渲染 这里简化了flask使用和渲染的教程 只把在安全中我们需要关注的部分写出来 来一段最简单的FLASK ...
- MSSQL提权之xp_cmdshell
0x01 前提 getshell或者存在sql注入并且能够执行命令. sql server是system权限,sql server默认就是system权限. 0x02 xp_cmdshell 有了xp ...