Spark Practice
scala> import org.apache.spark.SparkContext
import org.apache.spark.SparkContext
scala> import org.apache.spark.SparkConf
import org.apache.spark.SparkConf
scala> import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.SQLContext
scala> import spark.implicits._
import spark.implicits._
scala> val mysqlcon=new SQLContext(sc)
warning: there was one deprecation warning; re-run with -deprecation for details
mysqlcon: org.apache.spark.sql.SQLContext = org.apache.spark.sql.SQLContext@3ac76ad9
scala> val mysqldf=mysqlcon.read.format("jdbc").options(Map("url"->"jdbc:mysql://localhost:3306/test","user"->"root","password"->"root","dbtable"->"Account_3004")).load()
mysqldf: org.apache.spark.sql.DataFrame = [AccountName: string, Accid: bigint ... 30 more fields]
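The SQLContext constructor used above is deprecated in Spark 2.x; the same JDBC read can be issued directly from the SparkSession with chained .option() calls. A minimal sketch, assuming the same local MySQL instance, credentials, and table as above:
// Equivalent read via the SparkSession reader; the connection details simply
// mirror the ones used above (adjust for your environment):
val mysqldf2 = spark.read
  .format("jdbc")
  .option("url", "jdbc:mysql://localhost:3306/test")
  .option("user", "root")
  .option("password", "root")
  .option("dbtable", "Account_3004")
  .load()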
scala> mysqldf.printSchema
root
|-- AccountName: string (nullable = false)
|-- Accid: long (nullable = false)
|-- platid: integer (nullable = false)
|-- DateID: integer (nullable = false)
|-- CreateTime: timestamp (nullable = false)
|-- Retention1: integer (nullable = false)
|-- Retention2: integer (nullable = true)
|-- Retention3: integer (nullable = true)
|-- Retention4: integer (nullable = true)
|-- Retention5: integer (nullable = true)
|-- Retention6: integer (nullable = true)
|-- Retention7: integer (nullable = true)
|-- Retention10: integer (nullable = true)
|-- Retention14: integer (nullable = true)
|-- Retention21: integer (nullable = true)
|-- Retention30: integer (nullable = true)
|-- GameID: integer (nullable = false)
|-- id: long (nullable = false)
|-- adcode: string (nullable = true)
|-- AddRMB1: double (nullable = true)
|-- AddRMB2: double (nullable = true)
|-- AddRMB3: double (nullable = true)
|-- AddRMB4: double (nullable = true)
|-- AddRMB5: double (nullable = true)
|-- AddRMB6: double (nullable = true)
|-- AddRMB7: double (nullable = true)
|-- AddRMB10: double (nullable = true)
|-- AddRMB14: double (nullable = true)
|-- AddRMB21: double (nullable = true)
|-- AddRMB30: double (nullable = true)
|-- LoginTimes: integer (nullable = true)
|-- LoginMinutes: integer (nullable = true)
scala> mysqldf.count()
res2: Long = 76813
scala> mysqldf.show(2)
+-----------+--------+------+--------+-------------------+----------+----------+----------+----------+----------+----------+----------+-----------+-----------+-----------+-----------+------+-----+------+-------+-------+-------+-------+-------+-------+-------+--------+--------+--------+--------+----------+------------+
|AccountName| Accid|platid| DateID| CreateTime|Retention1|Retention2|Retention3|Retention4|Retention5|Retention6|Retention7|Retention10|Retention14|Retention21|Retention30|GameID| id|adcode|AddRMB1|AddRMB2|AddRMB3|AddRMB4|AddRMB5|AddRMB6|AddRMB7|AddRMB10|AddRMB14|AddRMB21|AddRMB30|LoginTimes|LoginMinutes|
+-----------+--------+------+--------+-------------------+----------+----------+----------+----------+----------+----------+----------+-----------+-----------+-----------+-----------+------+-----+------+-------+-------+-------+-------+-------+-------+-------+--------+--------+--------+--------+----------+------------+
| | 1004210| 6|20180116|2018-01-16 10:39:50| 1| 0| 0| 0| 0| 0| 0| 0| 0| 0| 0| 3004|22438| | 0.0| 0.0| 0.0| 0.0| 0.0| 0.0| 0.0| 0.0| 0.0| 0.0| 0.0| 1| 7|
| |20946754| 0|20170913|2017-09-13 10:02:37| 1| 0| 0| 1| 0| 0| 0| 0| 0| 0| 0| 3004| 167| | 0.0| 0.0| 0.0| 0.0| 0.0| 0.0| 0.0| 0.0| 0.0| 0.0| 0.0| 3| 219|
+-----------+--------+------+--------+-------------------+----------+----------+----------+----------+----------+----------+----------+-----------+-----------+-----------+-----------+------+-----+------+-------+-------+-------+-------+-------+-------+-------+--------+--------+--------+--------+----------+------------+
only showing top 2 rows
scala> mysqldf.select("accid").show(2)
+--------+
| accid|
+--------+
|20964769|
|22235886|
+--------+
only showing top 2 rows
scala> mysqldf.select("accid","platid").show(2)
+--------+------+
| accid|platid|
+--------+------+
| 1004210| 6|
|20946754| 0|
+--------+------+
only showing top 2 rows
scala> mysqldf.filter($"dateid">20180510).count
res9: Long = 5101
scala> mysqldf.select($"accid",$"platid"+1000).show(2)
+--------+---------------+
| accid|(platid + 1000)|
+--------+---------------+
| 1004210| 1006|
|20946754| 1000|
+--------+---------------+
only showing top 2 rows
scala> mysqldf.groupBy("platid").count().show
+------+-----+
|platid|count|
+------+-----+
| 27| 7157|
| 93| 44|
| 291| 10|
| 1| 8503|
| 13| 290|
| 6| 4765|
| 3| 3281|
| 295| 2|
| 10000| 1|
| 191| 758|
| 24294| 9|
| 19| 1549|
| 15| 8838|
| 17| 6|
| 9| 365|
| 286| 1|
| 35| 4075|
| 4|10395|
| 247| 1|
| 277| 453|
+------+-----+
only showing top 20 rows
scala> mysqldf.filter($"dateid">20180520).groupBy("platid").count().show
+------+-----+
|platid|count|
+------+-----+
| 27| 131|
| 93| 14|
| 291| 2|
| 1| 333|
| 13| 25|
| 6| 116|
| 3| 36|
| 191| 136|
| 24294| 2|
| 19| 39|
| 15| 978|
| 9| 2|
| 35| 72|
| 4| 161|
| 277| 11|
| 50| 8|
| 38| 4|
| 289| 12|
| 21| 24|
| 60| 75|
+------+-----+
only showing top 20 rows
scala> mysqldf.createOrReplaceTempView("account")
scala> val sqldf=spark.sql("select platid,accid,dateid from account where dateid>=20180601" )
sqldf: org.apache.spark.sql.DataFrame = [platid: int, accid: bigint ... 1 more field]
scala> sqldf.show(2)
+------+--------+--------+
|platid| accid| dateid|
+------+--------+--------+
| 0|22514097|20180601|
| 36|22857594|20180601|
+------+--------+--------+
only showing top 2 rows
------------------------------------
Temporary views in Spark SQL are session-scoped: they disappear when the session that created them terminates. If you want a temporary view that is shared across all sessions and stays alive until the Spark application terminates, you can create a global temporary view. A global temporary view is tied to the system-preserved database global_temp, and you must use the qualified name to refer to it, e.g. select * from global_temp.view1.
--------------------------------------
scala> mysqldf.createOrReplaceGlobalTempView("tb_acc")
scala> val globaldf=spark.sql("select platid,accid,dateid from global_temp.tb_acc where dateid>=20180601" )
globaldf: org.apache.spark.sql.DataFrame = [platid: int, accid: bigint ... 1 more field]
scala> globaldf.show(2)
+------+--------+--------+
|platid| accid| dateid|
+------+--------+--------+
| 0|22514097|20180601|
| 36|22857594|20180601|
+------+--------+--------+
only showing top 2 rows
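Because global temporary views are application-scoped rather than session-scoped, they remain visible from other sessions of the same application. A quick sketch of querying the view registered above from a fresh session:
// Global temp views are shared across sessions; a new session can still
// reach the view through the global_temp database:
spark.newSession().sql("select count(*) from global_temp.tb_acc").show()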
--------------------------
Datasets are similar to RDDs, however, instead of using Java serialization or Kryo they use a specialized Encoder to serialize the objects for processing or transmitting over the network. While both encoders and standard serialization are responsible for turning an object into bytes, encoders are code generated dynamically and use a format that allows Spark to perform many operations like filtering, sorting and hashing without deserializing the bytes back into an object.
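A minimal sketch of such a typed Dataset, using a hypothetical Person case class; the Encoder is derived implicitly through spark.implicits._ (already imported above):
// Case-class Datasets get their Encoder from spark.implicits._;
// typed operations like the filter below work on the fields directly:
case class Person(name: String, age: Long)
val peopleDS = Seq(Person("Andy", 32), Person("Justin", 19)).toDS()
peopleDS.filter(_.age > 20).show()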
----------------------------------------------------------------
scala> val df1=spark.sql("select distinct platid,dateid from account where dateid>=20180601" )
df1: org.apache.spark.sql.DataFrame = [platid: int, dateid: int]
scala> val ds=df1.toDF
ds: org.apache.spark.sql.DataFrame = [platid: int, dateid: int]
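Note that toDF on a DataFrame is effectively a no-op, which is why ds above is still a DataFrame. To obtain a typed Dataset, map the rows onto a case class with as[T]; a sketch, with PlatDate as an illustrative name:
// as[T] turns the untyped DataFrame into a Dataset[PlatDate], so lambdas
// can access the columns as plain fields (PlatDate is hypothetical):
case class PlatDate(platid: Int, dateid: Int)
val typedDS = df1.as[PlatDate]
typedDS.filter(_.dateid >= 20180610).show(2)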
scala> mysqldf.where("dateid>20180601").count()
res36: Long = 2249
scala> mysqldf.filter("dateid>20180601").count()
res37: Long = 2249
scala> mysqldf.apply("accid")
res38: org.apache.spark.sql.Column = accid
scala> mysqldf.filter("dateid>20180601").orderBy(mysqldf("dateid")).show       // ascending
scala> mysqldf.filter("dateid>20180601").orderBy(-mysqldf("dateid")).show      // descending
scala> mysqldf.filter("dateid>20180601").orderBy(mysqldf("dateid").desc).show  // descending
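The same descending sort can also be written with the asc/desc column functions, which extend naturally to multiple sort keys; a sketch:
// desc()/asc() from org.apache.spark.sql.functions compose cleanly when
// sorting on more than one column:
import org.apache.spark.sql.functions.{asc, desc}
mysqldf.filter("dateid>20180601").orderBy(desc("dateid"), asc("platid")).show(2)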
scala> mysqldf.groupBy("platid").agg(max("dateid"),min("dateid")).show(2)
+------+-----------+-----------+
|platid|max(dateid)|min(dateid)|
+------+-----------+-----------+
| 27| 20180619| 20170906|
| 93| 20180615| 20180314|
+------+-----------+-----------+
only showing top 2 rows
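agg also accepts a Map from column name to aggregate-function name, a compact way to request several aggregates over different columns at once; a sketch:
// Map-style agg: each entry pairs a column with the aggregate to apply to it
// (duplicate keys would collapse, so use one function per column here):
mysqldf.groupBy("platid").agg(Map("dateid" -> "max", "LoginTimes" -> "avg")).show(2)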