Scala MySQL JDBC operations (Spark)
package egsql

import java.util.Properties

import org.apache.spark.sql.types._
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}
object jdbc {
def main(args: Array[String]): Unit = {
val conf = new SparkConf().setAppName("JdbcOperation").setMaster("local")
val sc = new SparkContext(conf)
val sqlContext = new SQLContext(sc)
///////////////////////read///////////////////////////
val properties = new Properties()
properties.put("user","root")
properties.put("password","xxx")
val url = "jdbc:mysql://127.0.0.1:3306/spark?useUnicode=true&characterEncoding=gbk&zeroDateTimeBehavior=convertToNull"
val stud_scoreDF = sqlContext.read.jdbc(url,"dtspark",properties)
stud_scoreDF.show()

///////////////////////write///////////////////////////
// Create an RDD by parallelizing a local collection
val personRDD = sc.parallelize(Array("blm 5 144", "jerry 18 188", "kitty 5 166")).map(_.split(" "))
// Specify each field's schema directly with StructType
val schema = StructType(
List(
StructField("name",StringType,true),
StructField("age",IntegerType,true),
StructField("score",IntegerType,true)
)
)
// Map the RDD to an RDD of Row
val rowRDD = personRDD.map(p => Row(p(0).trim, p(1).toInt, p(2).toInt))
// Apply the schema to the row RDD
val personDataFrame = sqlContext.createDataFrame(rowRDD,schema)
// Append the data to the database (reusing the connection Properties created above)
personDataFrame.write.mode("append").jdbc("jdbc:mysql://127.0.0.1:3306/spark",
"dtspark",properties)
// Query the table again to verify the appended rows
stud_scoreDF.show()

// Stop the SparkContext
sc.stop()
}
}

================================
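The listing above uses the older SQLContext entry point; since the run log below reports Spark 2.2, the same read/append/read flow can also be expressed through SparkSession. A minimal sketch under that assumption (the object name jdbcWithSession and the explicit driver property are illustrative, not from the original post):

package egsql

import java.util.Properties

import org.apache.spark.sql.SparkSession

object jdbcWithSession {
  def main(args: Array[String]): Unit = {
    // Spark 2.x entry point; replaces SparkConf/SparkContext/SQLContext above
    val spark = SparkSession.builder()
      .appName("JdbcOperation")
      .master("local")
      .getOrCreate()

    val properties = new Properties()
    properties.put("user", "root")
    properties.put("password", "xxx")
    // Naming the driver class explicitly can avoid ClassNotFoundException: com.mysql.jdbc.Driver
    properties.put("driver", "com.mysql.jdbc.Driver")

    val url = "jdbc:mysql://127.0.0.1:3306/spark?useUnicode=true&characterEncoding=gbk&zeroDateTimeBehavior=convertToNull"

    // Read the existing table
    val studScoreDF = spark.read.jdbc(url, "dtspark", properties)
    studScoreDF.show()

    // Build the rows to append from a local Seq instead of RDD + StructType
    import spark.implicits._
    Seq(("blm", 5, 144), ("jerry", 18, 188), ("kitty", 5, 166))
      .toDF("name", "age", "score")
      .write.mode("append").jdbc(url, "dtspark", properties)

    // Read again to see the appended rows
    spark.read.jdbc(url, "dtspark", properties).show()
    spark.stop()
  }
}

The console output from running the original SQLContext version follows.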
Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
// :: INFO SparkContext: Running Spark version 2.2.
// :: INFO SparkContext: Submitted application: JdbcOperation
// :: INFO SecurityManager: Changing view acls to: fangping
// :: INFO SecurityManager: Changing modify acls to: fangping
// :: INFO SecurityManager: Changing view acls groups to:
// :: INFO SecurityManager: Changing modify acls groups to:
// :: INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(fangping); groups with view permissions: Set(); users with modify permissions: Set(fangping); groups with modify permissions: Set()
// :: INFO Utils: Successfully started service 'sparkDriver' on port .
// :: INFO SparkEnv: Registering MapOutputTracker
// :: INFO SparkEnv: Registering BlockManagerMaster
// :: INFO BlockManagerMasterEndpoint: Using org.apache.spark.storage.DefaultTopologyMapper for getting topology information
// :: INFO BlockManagerMasterEndpoint: BlockManagerMasterEndpoint up
// :: INFO DiskBlockManager: Created local directory at C:\Users\fangping\AppData\Local\Temp\blockmgr-b8154e0d-e77b-4ba9-818b-71d3ffb8c553
// :: INFO MemoryStore: MemoryStore started with capacity 339.6 MB
// :: INFO SparkEnv: Registering OutputCommitCoordinator
// :: INFO Utils: Successfully started service 'SparkUI' on port .
// :: INFO SparkUI: Bound SparkUI to 0.0.0.0, and started at http://172.18.3.13:4040
// :: INFO Executor: Starting executor ID driver on host localhost
// :: INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port .
// :: INFO NettyBlockTransferService: Server created on 172.18.3.13:
// :: INFO BlockManager: Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy
// :: INFO BlockManagerMaster: Registering BlockManager BlockManagerId(driver, 172.18.3.13, , None)
// :: INFO BlockManagerMasterEndpoint: Registering block manager 172.18.3.13: with 339.6 MB RAM, BlockManagerId(driver, 172.18.3.13, , None)
// :: INFO BlockManagerMaster: Registered BlockManager BlockManagerId(driver, 172.18.3.13, , None)
// :: INFO BlockManager: Initialized BlockManager: BlockManagerId(driver, 172.18.3.13, , None)
// :: INFO SharedState: Setting hive.metastore.warehouse.dir ('null') to the value of spark.sql.warehouse.dir ('file:/E:/back/scalaWs/Spark2Demo/spark-warehouse/').
// :: INFO SharedState: Warehouse path is 'file:/E:/back/scalaWs/Spark2Demo/spark-warehouse/'.
// :: INFO StateStoreCoordinatorRef: Registered StateStoreCoordinator endpoint
// :: INFO CodeGenerator: Code generated in 239.883814 ms
// :: INFO CodeGenerator: Code generated in 15.603579 ms
// :: INFO SparkContext: Starting job: show at jdbc.scala:
// :: INFO DAGScheduler: Got job (show at jdbc.scala:) with output partitions
// :: INFO DAGScheduler: Final stage: ResultStage (show at jdbc.scala:)
// :: INFO DAGScheduler: Parents of final stage: List()
// :: INFO DAGScheduler: Missing parents: List()
// :: INFO DAGScheduler: Submitting ResultStage (MapPartitionsRDD[] at show at jdbc.scala:), which has no missing parents
// :: INFO MemoryStore: Block broadcast_0 stored as values in memory (estimated size 7.5 KB, free 339.6 MB)
// :: INFO MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 4.2 KB, free 339.6 MB)
// :: INFO BlockManagerInfo: Added broadcast_0_piece0 in memory on 172.18.3.13: (size: 4.2 KB, free: 339.6 MB)
// :: INFO SparkContext: Created broadcast from broadcast at DAGScheduler.scala:
// :: INFO DAGScheduler: Submitting missing tasks from ResultStage (MapPartitionsRDD[] at show at jdbc.scala:) (first tasks are for partitions Vector())
// :: INFO TaskSchedulerImpl: Adding task set 0.0 with tasks
// :: INFO TaskSetManager: Starting task 0.0 in stage 0.0 (TID , localhost, executor driver, partition , PROCESS_LOCAL, bytes)
// :: INFO Executor: Running task 0.0 in stage 0.0 (TID )
// :: INFO JDBCRDD: closed connection
// :: INFO Executor: Finished task 0.0 in stage 0.0 (TID ). bytes result sent to driver
// :: INFO TaskSetManager: Finished task 0.0 in stage 0.0 (TID ) in ms on localhost (executor driver) (/)
// :: INFO TaskSchedulerImpl: Removed TaskSet 0.0, whose tasks have all completed, from pool
// :: INFO DAGScheduler: ResultStage (show at jdbc.scala:) finished in 0.224 s
// :: INFO DAGScheduler: Job finished: show at jdbc.scala:, took 2.466901 s
+-----+---+-----+
| name|age|score|
+-----+---+-----+
| swk|||
| blm| | |
|jerry| | |
|kitty| | |
+-----+---+-----+

// :: INFO SparkContext: Starting job: jdbc at jdbc.scala:
// :: INFO DAGScheduler: Got job (jdbc at jdbc.scala:) with output partitions
// :: INFO DAGScheduler: Final stage: ResultStage (jdbc at jdbc.scala:)
// :: INFO DAGScheduler: Parents of final stage: List()
// :: INFO DAGScheduler: Missing parents: List()
// :: INFO DAGScheduler: Submitting ResultStage (MapPartitionsRDD[] at jdbc at jdbc.scala:), which has no missing parents
// :: INFO MemoryStore: Block broadcast_1 stored as values in memory (estimated size 10.8 KB, free 339.6 MB)
// :: INFO MemoryStore: Block broadcast_1_piece0 stored as bytes in memory (estimated size 5.8 KB, free 339.6 MB)
// :: INFO BlockManagerInfo: Added broadcast_1_piece0 in memory on 172.18.3.13: (size: 5.8 KB, free: 339.6 MB)
// :: INFO SparkContext: Created broadcast from broadcast at DAGScheduler.scala:
// :: INFO DAGScheduler: Submitting missing tasks from ResultStage (MapPartitionsRDD[] at jdbc at jdbc.scala:) (first tasks are for partitions Vector())
// :: INFO TaskSchedulerImpl: Adding task set 1.0 with tasks
// :: INFO TaskSetManager: Starting task 0.0 in stage 1.0 (TID , localhost, executor driver, partition , PROCESS_LOCAL, bytes)
// :: INFO Executor: Running task 0.0 in stage 1.0 (TID )
// :: INFO CodeGenerator: Code generated in 13.199986 ms
// :: INFO CodeGenerator: Code generated in 86.854105 ms
// :: INFO Executor: Finished task 0.0 in stage 1.0 (TID ). bytes result sent to driver
// :: INFO TaskSetManager: Finished task 0.0 in stage 1.0 (TID ) in ms on localhost (executor driver) (/)
// :: INFO DAGScheduler: ResultStage (jdbc at jdbc.scala:) finished in 0.473 s
// :: INFO DAGScheduler: Job finished: jdbc at jdbc.scala:, took 0.496857 s
// :: INFO TaskSchedulerImpl: Removed TaskSet 1.0, whose tasks have all completed, from pool
// :: INFO SparkContext: Starting job: show at jdbc.scala:
// :: INFO DAGScheduler: Got job (show at jdbc.scala:) with output partitions
// :: INFO DAGScheduler: Final stage: ResultStage (show at jdbc.scala:)
// :: INFO DAGScheduler: Parents of final stage: List()
// :: INFO DAGScheduler: Missing parents: List()
// :: INFO DAGScheduler: Submitting ResultStage (MapPartitionsRDD[] at show at jdbc.scala:), which has no missing parents
// :: INFO MemoryStore: Block broadcast_2 stored as values in memory (estimated size 7.5 KB, free 339.6 MB)
// :: INFO MemoryStore: Block broadcast_2_piece0 stored as bytes in memory (estimated size 4.2 KB, free 339.6 MB)
// :: INFO BlockManagerInfo: Added broadcast_2_piece0 in memory on 172.18.3.13: (size: 4.2 KB, free: 339.6 MB)
// :: INFO SparkContext: Created broadcast from broadcast at DAGScheduler.scala:
// :: INFO DAGScheduler: Submitting missing tasks from ResultStage (MapPartitionsRDD[] at show at jdbc.scala:) (first tasks are for partitions Vector())
// :: INFO TaskSchedulerImpl: Adding task set 2.0 with tasks
// :: INFO TaskSetManager: Starting task 0.0 in stage 2.0 (TID , localhost, executor driver, partition , PROCESS_LOCAL, bytes)
// :: INFO Executor: Running task 0.0 in stage 2.0 (TID )
// :: INFO JDBCRDD: closed connection
// :: INFO Executor: Finished task 0.0 in stage 2.0 (TID ). bytes result sent to driver
// :: INFO TaskSetManager: Finished task 0.0 in stage 2.0 (TID ) in ms on localhost (executor driver) (/)
// :: INFO TaskSchedulerImpl: Removed TaskSet 2.0, whose tasks have all completed, from pool
// :: INFO DAGScheduler: ResultStage (show at jdbc.scala:) finished in 0.027 s
// :: INFO DAGScheduler: Job finished: show at jdbc.scala:, took 0.088025 s
+-----+---+-----+
| name|age|score|
+-----+---+-----+
| swk|||
| blm| | |
|jerry| | |
|kitty| | |
| blm| | |
|jerry| | |
|kitty| | |
+-----+---+-----+

// :: INFO SparkUI: Stopped Spark web UI at http://172.18.3.13:4040
// :: INFO MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped!
// :: INFO MemoryStore: MemoryStore cleared
// :: INFO BlockManager: BlockManager stopped
// :: INFO BlockManagerMaster: BlockManagerMaster stopped
// :: INFO OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: OutputCommitCoordinator stopped!
// :: INFO SparkContext: Successfully stopped SparkContext
// :: INFO ShutdownHookManager: Shutdown hook called
// :: INFO ShutdownHookManager: Deleting directory C:\Users\\AppData\Local\Temp\spark-e888fd39-2e41-43e5-829b-ca203b786cef
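Because the JDBC DataFrame is re-evaluated on each action, the second show() queries MySQL again and therefore returns the three appended rows on top of the original four. On the write side, mode("append") only inserts rows; as a hedged note worth verifying on your Spark version, SaveMode.Overwrite instead drops and recreates the target table by default. A small sketch reusing personDataFrame, url and properties from the listing above:

import org.apache.spark.sql.SaveMode

// Append: insert the new rows, keep whatever is already in dtspark (as in the example above)
personDataFrame.write.mode(SaveMode.Append).jdbc(url, "dtspark", properties)

// Overwrite: drop and recreate dtspark before writing
// (setting the "truncate" option keeps the existing table definition instead)
personDataFrame.write.mode(SaveMode.Overwrite).jdbc(url, "dtspark", properties)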