package egsql

import java.util.Properties

import org.apache.spark.sql.types._
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}
object jdbc {
def main(args: Array[String]): Unit = {
val conf = new SparkConf().setAppName("JdbcOperation").setMaster("local")
val sc = new SparkContext(conf)
val sqlContext = new SQLContext(sc)
 ///////////////////////read///////////////////////////

val properties = new Properties()
properties.put("user","root")
properties.put("password","xxx")
val url = "jdbc:mysql://127.0.0.1:3306/spark?useUnicode=true&characterEncoding=gbk&zeroDateTimeBehavior=convertToNull"
val stud_scoreDF = sqlContext.read.jdbc(url,"dtspark",properties)
stud_scoreDF.show()

///////////////////////write///////////////////////////
// Create an RDD by parallelizing a local collection
val personRDD = sc.parallelize(Array("blm 5 144", "jerry 18 188", "kitty 5 166")).map(_.split(" "))
// Specify the schema of each field directly via StructType
val schema = StructType(
  List(
    StructField("name", StringType, true),
    StructField("age", IntegerType, true),
    StructField("score", IntegerType, true)
  )
)
// Map the RDD to an RDD of Rows
val rowRDD = personRDD.map(p => Row(p(0).trim, p(1).toInt, p(2).toInt))
// Apply the schema to the row RDD
val personDataFrame = sqlContext.createDataFrame(rowRDD,schema)
// Reuse the Properties object created above for the connection credentials
// Append the data to the database table
personDataFrame.write.mode("append").jdbc("jdbc:mysql://127.0.0.1:3306/spark",
"dtspark",properties)
// Print the table again: the lazy DataFrame re-queries MySQL, so the appended rows show up
stud_scoreDF.show()
// Stop the SparkContext
sc.stop()
}
}
================================
Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
// :: INFO SparkContext: Running Spark version 2.2.
// :: INFO SparkContext: Submitted application: JdbcOperation
// :: INFO SecurityManager: Changing view acls to: fangping
// :: INFO SecurityManager: Changing modify acls to: fangping
// :: INFO SecurityManager: Changing view acls groups to:
// :: INFO SecurityManager: Changing modify acls groups to:
// :: INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(fangping); groups with view permissions: Set(); users with modify permissions: Set(fangping); groups with modify permissions: Set()
// :: INFO Utils: Successfully started service 'sparkDriver' on port .
// :: INFO SparkEnv: Registering MapOutputTracker
// :: INFO SparkEnv: Registering BlockManagerMaster
// :: INFO BlockManagerMasterEndpoint: Using org.apache.spark.storage.DefaultTopologyMapper for getting topology information
// :: INFO BlockManagerMasterEndpoint: BlockManagerMasterEndpoint up
// :: INFO DiskBlockManager: Created local directory at C:\Users\fangping\AppData\Local\Temp\blockmgr-b8154e0d-e77b-4ba9-818b-71d3ffb8c553
// :: INFO MemoryStore: MemoryStore started with capacity 339.6 MB
// :: INFO SparkEnv: Registering OutputCommitCoordinator
// :: INFO Utils: Successfully started service 'SparkUI' on port .
// :: INFO SparkUI: Bound SparkUI to 0.0.0.0, and started at http://172.18.3.13:4040
// :: INFO Executor: Starting executor ID driver on host localhost
// :: INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port .
// :: INFO NettyBlockTransferService: Server created on 172.18.3.13:
// :: INFO BlockManager: Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy
// :: INFO BlockManagerMaster: Registering BlockManager BlockManagerId(driver, 172.18.3.13, , None)
// :: INFO BlockManagerMasterEndpoint: Registering block manager 172.18.3.13: with 339.6 MB RAM, BlockManagerId(driver, 172.18.3.13, , None)
// :: INFO BlockManagerMaster: Registered BlockManager BlockManagerId(driver, 172.18.3.13, , None)
// :: INFO BlockManager: Initialized BlockManager: BlockManagerId(driver, 172.18.3.13, , None)
// :: INFO SharedState: Setting hive.metastore.warehouse.dir ('null') to the value of spark.sql.warehouse.dir ('file:/E:/back/scalaWs/Spark2Demo/spark-warehouse/').
// :: INFO SharedState: Warehouse path is 'file:/E:/back/scalaWs/Spark2Demo/spark-warehouse/'.
// :: INFO StateStoreCoordinatorRef: Registered StateStoreCoordinator endpoint
// :: INFO CodeGenerator: Code generated in 239.883814 ms
// :: INFO CodeGenerator: Code generated in 15.603579 ms
// :: INFO SparkContext: Starting job: show at jdbc.scala:
// :: INFO DAGScheduler: Got job (show at jdbc.scala:) with output partitions
// :: INFO DAGScheduler: Final stage: ResultStage (show at jdbc.scala:)
// :: INFO DAGScheduler: Parents of final stage: List()
// :: INFO DAGScheduler: Missing parents: List()
// :: INFO DAGScheduler: Submitting ResultStage (MapPartitionsRDD[] at show at jdbc.scala:), which has no missing parents
// :: INFO MemoryStore: Block broadcast_0 stored as values in memory (estimated size 7.5 KB, free 339.6 MB)
// :: INFO MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 4.2 KB, free 339.6 MB)
// :: INFO BlockManagerInfo: Added broadcast_0_piece0 in memory on 172.18.3.13: (size: 4.2 KB, free: 339.6 MB)
// :: INFO SparkContext: Created broadcast from broadcast at DAGScheduler.scala:
// :: INFO DAGScheduler: Submitting missing tasks from ResultStage (MapPartitionsRDD[] at show at jdbc.scala:) (first tasks are for partitions Vector())
// :: INFO TaskSchedulerImpl: Adding task set 0.0 with tasks
// :: INFO TaskSetManager: Starting task 0.0 in stage 0.0 (TID , localhost, executor driver, partition , PROCESS_LOCAL, bytes)
// :: INFO Executor: Running task 0.0 in stage 0.0 (TID )
// :: INFO JDBCRDD: closed connection
// :: INFO Executor: Finished task 0.0 in stage 0.0 (TID ). bytes result sent to driver
// :: INFO TaskSetManager: Finished task 0.0 in stage 0.0 (TID ) in ms on localhost (executor driver) (/)
// :: INFO TaskSchedulerImpl: Removed TaskSet 0.0, whose tasks have all completed, from pool
// :: INFO DAGScheduler: ResultStage (show at jdbc.scala:) finished in 0.224 s
// :: INFO DAGScheduler: Job finished: show at jdbc.scala:, took 2.466901 s
+-----+---+-----+
| name|age|score|
+-----+---+-----+
| swk|||
| blm| | |
|jerry| | |
|kitty| | |
+-----+---+-----+
// :: INFO SparkContext: Starting job: jdbc at jdbc.scala:
// :: INFO DAGScheduler: Got job (jdbc at jdbc.scala:) with output partitions
// :: INFO DAGScheduler: Final stage: ResultStage (jdbc at jdbc.scala:)
// :: INFO DAGScheduler: Parents of final stage: List()
// :: INFO DAGScheduler: Missing parents: List()
// :: INFO DAGScheduler: Submitting ResultStage (MapPartitionsRDD[] at jdbc at jdbc.scala:), which has no missing parents
// :: INFO MemoryStore: Block broadcast_1 stored as values in memory (estimated size 10.8 KB, free 339.6 MB)
// :: INFO MemoryStore: Block broadcast_1_piece0 stored as bytes in memory (estimated size 5.8 KB, free 339.6 MB)
// :: INFO BlockManagerInfo: Added broadcast_1_piece0 in memory on 172.18.3.13: (size: 5.8 KB, free: 339.6 MB)
// :: INFO SparkContext: Created broadcast from broadcast at DAGScheduler.scala:
// :: INFO DAGScheduler: Submitting missing tasks from ResultStage (MapPartitionsRDD[] at jdbc at jdbc.scala:) (first tasks are for partitions Vector())
// :: INFO TaskSchedulerImpl: Adding task set 1.0 with tasks
// :: INFO TaskSetManager: Starting task 0.0 in stage 1.0 (TID , localhost, executor driver, partition , PROCESS_LOCAL, bytes)
// :: INFO Executor: Running task 0.0 in stage 1.0 (TID )
// :: INFO CodeGenerator: Code generated in 13.199986 ms
// :: INFO CodeGenerator: Code generated in 86.854105 ms
// :: INFO Executor: Finished task 0.0 in stage 1.0 (TID ). bytes result sent to driver
// :: INFO TaskSetManager: Finished task 0.0 in stage 1.0 (TID ) in ms on localhost (executor driver) (/)
// :: INFO DAGScheduler: ResultStage (jdbc at jdbc.scala:) finished in 0.473 s
// :: INFO DAGScheduler: Job finished: jdbc at jdbc.scala:, took 0.496857 s
// :: INFO TaskSchedulerImpl: Removed TaskSet 1.0, whose tasks have all completed, from pool
// :: INFO SparkContext: Starting job: show at jdbc.scala:
// :: INFO DAGScheduler: Got job (show at jdbc.scala:) with output partitions
// :: INFO DAGScheduler: Final stage: ResultStage (show at jdbc.scala:)
// :: INFO DAGScheduler: Parents of final stage: List()
// :: INFO DAGScheduler: Missing parents: List()
// :: INFO DAGScheduler: Submitting ResultStage (MapPartitionsRDD[] at show at jdbc.scala:), which has no missing parents
// :: INFO MemoryStore: Block broadcast_2 stored as values in memory (estimated size 7.5 KB, free 339.6 MB)
// :: INFO MemoryStore: Block broadcast_2_piece0 stored as bytes in memory (estimated size 4.2 KB, free 339.6 MB)
// :: INFO BlockManagerInfo: Added broadcast_2_piece0 in memory on 172.18.3.13: (size: 4.2 KB, free: 339.6 MB)
// :: INFO SparkContext: Created broadcast from broadcast at DAGScheduler.scala:
// :: INFO DAGScheduler: Submitting missing tasks from ResultStage (MapPartitionsRDD[] at show at jdbc.scala:) (first tasks are for partitions Vector())
// :: INFO TaskSchedulerImpl: Adding task set 2.0 with tasks
// :: INFO TaskSetManager: Starting task 0.0 in stage 2.0 (TID , localhost, executor driver, partition , PROCESS_LOCAL, bytes)
// :: INFO Executor: Running task 0.0 in stage 2.0 (TID )
// :: INFO JDBCRDD: closed connection
// :: INFO Executor: Finished task 0.0 in stage 2.0 (TID ). bytes result sent to driver
// :: INFO TaskSetManager: Finished task 0.0 in stage 2.0 (TID ) in ms on localhost (executor driver) (/)
// :: INFO TaskSchedulerImpl: Removed TaskSet 2.0, whose tasks have all completed, from pool
// :: INFO DAGScheduler: ResultStage (show at jdbc.scala:) finished in 0.027 s
// :: INFO DAGScheduler: Job finished: show at jdbc.scala:, took 0.088025 s
+-----+---+-----+
| name|age|score|
+-----+---+-----+
| swk|||
| blm| | |
|jerry| | |
|kitty| | |
| blm| | |
|jerry| | |
|kitty| | |
+-----+---+-----+
// :: INFO SparkUI: Stopped Spark web UI at http://172.18.3.13:4040
// :: INFO MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped!
// :: INFO MemoryStore: MemoryStore cleared
// :: INFO BlockManager: BlockManager stopped
// :: INFO BlockManagerMaster: BlockManagerMaster stopped
// :: INFO OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: OutputCommitCoordinator stopped!
// :: INFO SparkContext: Successfully stopped SparkContext
// :: INFO ShutdownHookManager: Shutdown hook called
// :: INFO ShutdownHookManager: Deleting directory C:\Users\\AppData\Local\Temp\spark-e888fd39-2e41-43e5-829b-ca203b786cef
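
The listing above uses the Spark 1.x SQLContext entry point, which still works in Spark 2.x. For reference, here is a minimal sketch of the same read/append/read flow written against the SparkSession API; the URL, credentials, table name dtspark and sample rows are the same placeholders as above, and the object name JdbcSessionSketch is only illustrative.

package egsql

import java.util.Properties

import org.apache.spark.sql.types._
import org.apache.spark.sql.{Row, SparkSession}

object JdbcSessionSketch {
  def main(args: Array[String]): Unit = {
    // Spark 2.x entry point
    val spark = SparkSession.builder()
      .appName("JdbcOperation")
      .master("local")
      .getOrCreate()

    val props = new Properties()
    props.put("user", "root")
    props.put("password", "xxx")
    val url = "jdbc:mysql://127.0.0.1:3306/spark?useUnicode=true&characterEncoding=gbk&zeroDateTimeBehavior=convertToNull"

    // Read the existing table and show it
    val scores = spark.read.jdbc(url, "dtspark", props)
    scores.show()

    // Build a small DataFrame from local data and append it to the table
    val schema = StructType(List(
      StructField("name", StringType, true),
      StructField("age", IntegerType, true),
      StructField("score", IntegerType, true)
    ))
    val rows = spark.sparkContext
      .parallelize(Seq("blm 5 144", "jerry 18 188", "kitty 5 166"))
      .map(_.split(" "))
      .map(p => Row(p(0).trim, p(1).toInt, p(2).toInt))
    spark.createDataFrame(rows, schema)
      .write.mode("append").jdbc(url, "dtspark", props)

    // Show the table again, now including the appended rows
    scores.show()
    spark.stop()
  }
}

Either variant needs the MySQL JDBC driver on the classpath at runtime; with sbt that is typically a dependency along the lines of libraryDependencies += "mysql" % "mysql-connector-java" % "5.1.47" (the version here is only an example).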
