1 Key Spark dependencies

<!--spark-->

<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-core_2.10</artifactId>
    <version>1.5.1</version>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming_2.10</artifactId>
    <version>1.5.1</version>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-streaming-kafka_2.10</artifactId>
    <version>1.5.1</version>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-sql_2.10</artifactId>
    <version>1.5.1</version>
</dependency>

<dependency>
    <groupId>org.scala-lang</groupId>
    <artifactId>scala-library</artifactId>
    <version>2.10.4</version>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-mapreduce-client-core</artifactId>
    <version>2.7.2</version>
</dependency>

<dependency>
    <groupId>com.esotericsoftware.kryo</groupId>
    <artifactId>kryo</artifactId>
    <version>2.21</version>
</dependency>

<dependency>
    <groupId>org.apache.commons</groupId>
    <artifactId>commons-lang3</artifactId>
    <version>3.5</version>
</dependency>
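
The kryo dependency above backs Spark's KryoSerializer. The job in section 2 also sets spark.kryo.registrator to com.xinyi.spark.analysis.utils.MyRegistrator, whose source is not included in this post; the sketch below shows what such a registrator typically looks like. The classes registered here are placeholders, not taken from the original project.

package com.xinyi.spark.analysis.utils;

import com.esotericsoftware.kryo.Kryo;
import org.apache.spark.serializer.KryoRegistrator;

// Minimal sketch of a custom Kryo registrator; register whatever types the job
// actually shuffles -- the two registrations below are only illustrative.
public class MyRegistrator implements KryoRegistrator {
    @Override
    public void registerClasses(Kryo kryo) {
        kryo.register(String[].class);
        kryo.register(java.util.HashMap.class);
    }
}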

2 Analysis model "active by night, hidden by day" (Spark Java)

The job below reads pending tasks from the Oracle table recordinfo_task, scans the HBase table recordinfo over each task's time window, and counts records per tenementid separately for the task's night-hour window and the remaining day-hour window. Tenants that appear more than 3 times at night but fewer than once during the day are written back to recordinfo_result.

package com.xinyi.spark.analysis.tasks;

import com.google.common.base.Optional;
import com.xinyi.spark.analysis.utils.dbhelper.DBHelper;
import com.xinyi.xframe.base.utils.StringUtils;
import com.xinyi.xframe.base.utils.UUIDGenerator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Base64;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.*;

public class RecordInfoSparkAnalsis {

    // DB helper used to query the task list
    private static DBHelper dbHelper = new DBHelper("xinyidb");
    // Rowkeys are salted with a leading digit 0-9; this is the last prefix to scan
    private final static String endNum = "9";

    public static void main(String[] args) {

        // Query the pending analysis tasks (status='0')
        String sql = "select id,to_char(starttime,'yyyymmddhh24miss') starttime," +
                "to_char(endtime,'yyyymmddhh24miss') endtime,starthour,endhour," +
                "to_char(createtime,'yyyymmddhh24miss') createtime from recordinfo_task where status='0'";
        List<Map<String, Object>> taskList = dbHelper.query(sql);
        System.out.println(taskList);
        if (taskList.isEmpty()) {
            System.out.println("Task list is empty!");
            return;
        }
        // Mark every task as running (status '2') before processing
        for (Map<String, Object> task : taskList) {
            String taskid = String.valueOf(task.get("ID"));
            updateRecordTask(taskid, "2");
        }

        // Initialize the Spark environment
        SparkConf conf = new SparkConf().setAppName("RecordInfoSparkAnalsis");
        conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
        conf.set("spark.kryo.registrator", "com.xinyi.spark.analysis.utils.MyRegistrator");
        conf.set("spark.kryoserializer.buffer.max", "256");
        JavaSparkContext jsc = new JavaSparkContext(conf);
        // Build the Spark-HBase configuration
        Configuration hbConf = HBaseConfiguration.create(jsc.hadoopConfiguration());
        // Number of rowkey salt prefixes to scan, derived from the rowkey storage design
        int endInt = Integer.valueOf(endNum);

        for (Map<String, Object> task : taskList) {
            Object startObj = task.get("STARTTIME");
            Object endObj = task.get("ENDTIME");
            if (!StringUtils.isEmpty(startObj) && !StringUtils.isEmpty(endObj)) {
                long s = System.currentTimeMillis();
                String startTime = String.valueOf(startObj);
                String endTime = String.valueOf(endObj);
                String blackStartHour = String.valueOf(task.get("STARTHOUR"));
                String blackEndHour = String.valueOf(task.get("ENDHOUR"));
                System.out.println(blackStartHour + "---" + blackEndHour);
                // RDDs accumulated across all rowkey prefixes
                JavaPairRDD<String, Long> white = null;   // day-time records
                JavaPairRDD<String, Long> black = null;   // night-time records
                for (int i = 0; i <= endInt; i++) {
                    // Build the start and end rowkeys from the salt prefix and the time range
                    String startkey = String.valueOf(i) + startTime;
                    String endkey = String.valueOf(i) + endTime;
                    System.out.println(startkey);
                    System.out.println(endkey);
                    // RDD of night-time records (hourlong between blackStartHour and blackEndHour)
                    JavaPairRDD<String, Long> reduceRdd2 = getStringLongJavaPairRDD(jsc, hbConf, startkey, endkey, blackStartHour, blackEndHour);
                    if (black == null) {
                        black = reduceRdd2;
                    } else {
                        black = black.union(reduceRdd2);
                    }
                    // RDD of day-time records (hourlong between blackEndHour and 235959)
                    JavaPairRDD<String, Long> whiteReduceRdd = getStringLongJavaPairRDD(jsc, hbConf, startkey, endkey, blackEndHour, "235959");
                    if (white == null) {
                        white = whiteReduceRdd;
                    } else {
                        white = white.union(whiteReduceRdd);
                    }
                }
                System.out.println(black.collectAsMap()); // debug output
                // Sum the per-tenant counts for night and day
                black = black.reduceByKey(new Function2<Long, Long, Long>() {
                    public Long call(Long a1, Long a2) throws Exception {
                        return a1 + a2;
                    }
                });
                white = white.reduceByKey(new Function2<Long, Long, Long>() {
                    public Long call(Long a1, Long a2) throws Exception {
                        return a1 + a2;
                    }
                });
                // Left outer join by key: night counts joined with (possibly missing) day counts
                JavaPairRDD<String, Tuple2<Long, Optional<Long>>> joinRdd = black.leftOuterJoin(white);
                // Keep tenants seen more than 3 times at night and less than once during the day
                joinRdd = joinRdd.filter(new Function<Tuple2<String, Tuple2<Long, Optional<Long>>>, Boolean>() {
                    @Override
                    public Boolean call(Tuple2<String, Tuple2<Long, Optional<Long>>> stringTuple2Tuple2) throws Exception {
                        Long val1 = stringTuple2Tuple2._2._1;
                        Long val2 = 0L;
                        Set valSet = stringTuple2Tuple2._2._2.asSet();
                        for (Object val : valSet) {
                            val2 = Long.valueOf(val.toString());
                        }
                        if (valSet.isEmpty() && val1 > 3) {
                            return true;
                        } else if (val2 < 1 && val1 > 3) {
                            return true;
                        }
                        return false;
                    }
                });

                Map<String, Tuple2<Long, Optional<Long>>> collectMap = joinRdd.collectAsMap();
                System.out.println(collectMap);

                String taskid = String.valueOf(task.get("ID"));
                // Save the results to the database and mark the task as finished
                insert2RecordResult(taskid, collectMap);
                updateRecordTask(taskid, "1");
                long se = System.currentTimeMillis();
                System.out.println("Total time: " + (se - s));
            }

        }
        jsc.stop();
    }

    private static void updateRecordTask(String taskid, String status) {
        String sql = "update recordinfo_task set status='" + status + "' where id='" + taskid + "'";
        dbHelper.update(sql);
        System.out.println("Task table status updated!");
    }

    /**
     * Insert the result set into the Oracle result table recordinfo_result
     * @param taskid
     * @param results
     */
    private static void insert2RecordResult(String taskid, Map<String, Tuple2<Long, Optional<Long>>> results) {
        Set<String> keySet = results.keySet();
        for (String key : keySet) {
            Tuple2<Long, Optional<Long>> vals = results.get(key);
            String id = UUIDGenerator.generateOriginnalUUID();
            String sql = "insert into recordinfo_result (id,taskid,tenementid,num) values ('" + id + "','" + taskid + "','" + key + "','" + vals._1 + "')";
            dbHelper.update(sql);
        }
        System.out.println("Result set inserted into the database");
    }

    /**
     * Sum the values of identical keys and keep only entries whose total passes the comparison
     * (not invoked by the main flow above)
     * @param black   input RDD
     * @param val     threshold to compare against
     * @param compare comparison operator, ">" or "<"
     * @return filtered RDD
     */
    private static JavaPairRDD<String, Long> getStringLongJavaPairRDD(JavaPairRDD<String, Long> black, final int val, final String compare) {
        black = black.reduceByKey(new Function2<Long, Long, Long>() {
            @Override
            public Long call(Long a1, Long a2) throws Exception {
                return a1 + a2;
            }
        });
        black = black.filter(new Function<Tuple2<String, Long>, Boolean>() {
            @Override
            public Boolean call(Tuple2<String, Long> stringLongTuple2) throws Exception {
                if (">".equals(compare)) {
                    if (stringLongTuple2._2 > val) {
                        return true;
                    }
                } else if ("<".equals(compare)) {
                    if (stringLongTuple2._2 < val) {
                        return true;
                    }
                }
                return false;
            }
        });
        return black;
    }

    /**
     * Scan HBase over the given rowkey range and hourlong range and return a JavaPairRDD
     * @param jsc
     * @param hbConf
     * @param startkey
     * @param endkey
     * @param startHour
     * @param endHour
     * @return (tenementid, 1L) pairs, one per matching row
     */
    private static JavaPairRDD<String, Long> getStringLongJavaPairRDD(JavaSparkContext jsc, Configuration hbConf, String startkey, String endkey, String startHour, String endHour) {
        Scan scan = new Scan(Bytes.toBytes(startkey), Bytes.toBytes(endkey));
        scan.setCacheBlocks(true);
        scan.setCaching(10000);
        scan.setStartRow(Bytes.toBytes(startkey));
        scan.addFamily(Bytes.toBytes("info")); // column family

        // Hour-of-day filter: startHour < hourlong < endHour
        FilterList filterList = new FilterList();
        Filter gtfilter = new SingleColumnValueFilter(Bytes.toBytes("info"), Bytes.toBytes("hourlong"), CompareFilter.CompareOp.GREATER, Bytes.toBytes(startHour));
        filterList.addFilter(gtfilter);
        Filter ltfilter = new SingleColumnValueFilter(Bytes.toBytes("info"), Bytes.toBytes("hourlong"), CompareFilter.CompareOp.LESS, Bytes.toBytes(endHour));
        filterList.addFilter(ltfilter);
        scan.setFilter(filterList);

        // Serialize the Scan so it can be passed through the Hadoop configuration
        org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan proto = null;
        try {
            proto = ProtobufUtil.toScan(scan);
        } catch (IOException e) {
            e.printStackTrace();
        }

        String scanToString = Base64.encodeBytes(proto.toByteArray());
        hbConf.set(TableInputFormat.INPUT_TABLE, "recordinfo"); // table name
        hbConf.set(TableInputFormat.SCAN, scanToString);

        JavaPairRDD<ImmutableBytesWritable, Result> rdd = jsc.newAPIHadoopRDD(hbConf, TableInputFormat.class, ImmutableBytesWritable.class, Result.class);
        // Map each HBase row to (tenementid, 1L); the caller later sums the 1s per key
        JavaPairRDD<String, Long> rddmap = rdd.mapToPair(new PairFunction<Tuple2<ImmutableBytesWritable, Result>, String, Long>() {
            public Tuple2<String, Long> call(Tuple2<ImmutableBytesWritable, Result> item) throws Exception {
                Iterator<Cell> it = item._2().listCells().iterator();
                String tenementid = "";
                while (it.hasNext()) {
                    Cell c = it.next();
                    String qualifier = Bytes.toString(CellUtil.cloneQualifier(c));
                    if (qualifier.equals("tenementid")) {
                        tenementid = Bytes.toString(CellUtil.cloneValue(c)).trim();
                    }
                }
                return new Tuple2<String, Long>(tenementid, 1L);
            }
        });
        return rddmap;
    }
}
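
The core of the detection logic is the reduceByKey / leftOuterJoin / filter chain in main. The standalone sketch below reproduces that chain on hard-coded data in local mode, which makes the "night count > 3, day count < 1" rule easy to verify without HBase or the Oracle tables. The class name and sample values are made up for illustration; the Optional is Guava's, as in the Spark 1.x Java API.

package com.xinyi.spark.analysis.tasks;

import com.google.common.base.Optional;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import scala.Tuple2;

import java.util.Arrays;

public class NightOnlyJoinDemo {

    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("NightOnlyJoinDemo");
        JavaSparkContext jsc = new JavaSparkContext(conf);

        // Toy per-record counts: (tenementid, 1L), as produced by the HBase scan above
        JavaPairRDD<String, Long> night = jsc.parallelizePairs(Arrays.asList(
                new Tuple2<String, Long>("tenantA", 1L), new Tuple2<String, Long>("tenantA", 1L),
                new Tuple2<String, Long>("tenantA", 1L), new Tuple2<String, Long>("tenantA", 1L),
                new Tuple2<String, Long>("tenantB", 1L)));
        JavaPairRDD<String, Long> day = jsc.parallelizePairs(Arrays.asList(
                new Tuple2<String, Long>("tenantB", 1L)));

        // Sum counts per tenant, as in the main job
        Function2<Long, Long, Long> sum = new Function2<Long, Long, Long>() {
            public Long call(Long a1, Long a2) { return a1 + a2; }
        };
        JavaPairRDD<String, Long> nightTotals = night.reduceByKey(sum);
        JavaPairRDD<String, Long> dayTotals = day.reduceByKey(sum);

        // Left outer join: every tenant seen at night, with an optional day count
        JavaPairRDD<String, Tuple2<Long, Optional<Long>>> joined = nightTotals.leftOuterJoin(dayTotals);

        // Keep tenants with night count > 3 and day count < 1 (or no day records at all)
        JavaPairRDD<String, Tuple2<Long, Optional<Long>>> suspicious =
                joined.filter(new Function<Tuple2<String, Tuple2<Long, Optional<Long>>>, Boolean>() {
                    public Boolean call(Tuple2<String, Tuple2<Long, Optional<Long>>> t) {
                        long nightCount = t._2()._1();
                        long dayCount = t._2()._2().or(0L);
                        return nightCount > 3 && dayCount < 1;
                    }
                });

        // Expect something like {tenantA=(4,Optional.absent())}: active at night, never seen by day
        System.out.println(suspicious.collectAsMap());
        jsc.stop();
    }
}

Either job is packaged into a jar and launched with spark-submit; the exact master and resource flags depend on the cluster, so they are not reproduced here.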
