Spark SQL 01
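The program below uses the Spark 1.4 Java API to load a JSON file into a DataFrame and run the DataFrame equivalents of a few basic SQL queries (select, projection, filter, group by). The input is the people.json sample that ships with Spark, copied here to c:/resources/; each line is a self-contained JSON object, and judging from the output further down it contains:

{"name":"Michael"}
{"name":"Andy", "age":30}
{"name":"Justin", "age":19}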
package sql;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.SQLContext;

/**
 * Reads a JSON file into a DataFrame and runs the DataFrame
 * equivalents of some basic SQL queries (Spark 1.4, Java API).
 */
public class DataFrameReadJsonOps2 {

    public static void main(String[] args) {
        // Create a SparkConf: reads system configuration and sets the application name
        SparkConf conf = new SparkConf().setAppName("DataFrameOps").setMaster("local");
        // Create the JavaSparkContext instance, the cornerstone of the whole Driver
        JavaSparkContext sc = new JavaSparkContext(conf);
        // Set the log level to WARN
        sc.setLogLevel("WARN");
        // Create the SQLContext used to analyze SQL
        SQLContext sqlContext = new SQLContext(sc);
        // Create the DataFrame; a DataFrame can simply be thought of as a table
        DataFrame df = sqlContext.read().json("c:/resources/people.json");

        // select * from table
        df.show();
        // desc table
        df.printSchema();
        // select name from table
        df.select(df.col("name")).show();
        // select name, age + 10 from table
        df.select(df.col("name"), df.col("age").plus(10)).show();
        // select * from table where age > 21
        df.filter(df.col("age").gt(21)).show();
        // select age, count(1) from table group by age
        df.groupBy("age").count().show(); // equivalent: df.groupBy(df.col("age")).count().show();
    }
}
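Running the class locally (Spark 1.4.0 on Windows here) produces the console output below: SLF4J and startup INFO logs first, then the result of each call.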
//
//SLF4J: Class path contains multiple SLF4J bindings.
//SLF4J: Found binding in [jar:file:/E:/bigdata/spark-1.4.0-bin-hadoop2.6/lib/spark-assembly-1.4.0-hadoop2.6.0.jar!/org/slf4j/impl/StaticLoggerBinder.class]
//SLF4J: Found binding in [jar:file:/E:/bigdata/spark-1.4.0-bin-hadoop2.6/lib/spark-examples-1.4.0-hadoop2.6.0.jar!/org/slf4j/impl/StaticLoggerBinder.class]
//SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
//SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
//Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
//17/12/29 14:15:10 INFO SparkContext: Running Spark version 1.4.0
//17/12/29 14:15:24 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
//17/12/29 14:15:28 INFO SecurityManager: Changing view acls to: alamps
//17/12/29 14:15:28 INFO SecurityManager: Changing modify acls to: alamps
//17/12/29 14:15:28 INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(alamps); users with modify permissions: Set(alamps)
//17/12/29 14:15:37 INFO Slf4jLogger: Slf4jLogger started
//17/12/29 14:15:39 INFO Remoting: Starting remoting
//17/12/29 14:15:44 INFO Remoting: Remoting started; listening on addresses :[akka.tcp://sparkDriver@172.18.3.7:55458]
//17/12/29 14:15:44 INFO Utils: Successfully started service 'sparkDriver' on port 55458.
//17/12/29 14:15:45 INFO SparkEnv: Registering MapOutputTracker
//17/12/29 14:15:46 INFO SparkEnv: Registering BlockManagerMaster
//17/12/29 14:15:46 INFO DiskBlockManager: Created local directory at C:\Users\alamps\AppData\Local\Temp\spark-cd3ecbc3-41b5-4d8b-8e78-8c2c368ce80b\blockmgr-660894dd-39d3-4c8a-bf25-ae1d3850953d
//17/12/29 14:15:46 INFO MemoryStore: MemoryStore started with capacity 467.6 MB
//17/12/29 14:15:47 INFO HttpFileServer: HTTP File server directory is C:\Users\alamps\AppData\Local\Temp\spark-cd3ecbc3-41b5-4d8b-8e78-8c2c368ce80b\httpd-106ce90e-d496-4e96-a383-b471aeb5a224
//17/12/29 14:15:47 INFO HttpServer: Starting HTTP Server
//17/12/29 14:15:48 INFO Utils: Successfully started service 'HTTP file server' on port 55464.
//17/12/29 14:15:48 INFO SparkEnv: Registering OutputCommitCoordinator
//17/12/29 14:15:49 INFO Utils: Successfully started service 'SparkUI' on port 4040.
//17/12/29 14:15:49 INFO SparkUI: Started SparkUI at http://172.18.3.7:4040
//17/12/29 14:15:49 INFO Executor: Starting executor ID driver on host localhost
//17/12/29 14:15:50 INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port 55483.
//17/12/29 14:15:50 INFO NettyBlockTransferService: Server created on 55483
//17/12/29 14:15:50 INFO BlockManagerMaster: Trying to register BlockManager
//17/12/29 14:15:50 INFO BlockManagerMasterEndpoint: Registering block manager localhost:55483 with 467.6 MB RAM, BlockManagerId(driver, localhost, 55483)
//17/12/29 14:15:50 INFO BlockManagerMaster: Registered BlockManager
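//
// df.show():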
//+----+-------+
//| age| name|
//+----+-------+
//|null|Michael|
//| 30| Andy|
//| 19| Justin|
//+----+-------+
//
// df.printSchema():
//root
// |-- age: long (nullable = true)
// |-- name: string (nullable = true)
//
// df.select(df.col("name")).show():
//+-------+
//| name|
//+-------+
//|Michael|
//| Andy|
//| Justin|
//+-------+
//
// df.select(df.col("name"), df.col("age").plus(10)).show():
//+-------+----------+
//| name|(age + 10)|
//+-------+----------+
//|Michael| null|
//| Andy| 40|
//| Justin| 29|
//+-------+----------+
//
// df.filter(df.col("age").gt(21)).show():
//+---+----+
//|age|name|
//+---+----+
//| 30|Andy|
//+---+----+
//
// df.groupBy("age").count().show():
//+----+-----+
//| age|count|
//+----+-----+
//|null| 1|
//| 19| 1|
//| 30| 1|
//+----+-----+
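Every call above mirrors the SQL statement in the comment next to it. With the same 1.4 API you can also run those SQL strings directly by registering the DataFrame as a temporary table. A minimal sketch, meant to be appended inside main() above (the table name "people" is arbitrary):

// Register the DataFrame so SQL strings can reference it as a table
df.registerTempTable("people");
sqlContext.sql("SELECT name, age + 10 FROM people").show();
sqlContext.sql("SELECT * FROM people WHERE age > 21").show();
sqlContext.sql("SELECT age, count(1) FROM people GROUP BY age").show();

(For readers on Spark 2.x or later: SQLContext and the Java DataFrame class have been superseded by SparkSession and Dataset&lt;Row&gt;, and registerTempTable by createOrReplaceTempView, but the code in this post matches the 1.4.0 release it was run on.)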