package egsql

import java.util.Properties

import org.apache.spark.sql.types._
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

object jdbc {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("JdbcOperation").setMaster("local")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    ///////////////////////read///////////////////////////
    // JDBC connection properties for MySQL
    val properties = new Properties()
    properties.put("user", "root")
    properties.put("password", "xxx")
    val url = "jdbc:mysql://127.0.0.1:3306/spark?useUnicode=true&characterEncoding=gbk&zeroDateTimeBehavior=convertToNull"
    // Read the existing table "dtspark" into a DataFrame
    val stud_scoreDF = sqlContext.read.jdbc(url, "dtspark", properties)
    stud_scoreDF.show()

    ///////////////////////write///////////////////////////
    // Create an RDD by parallelizing a local collection
    val personRDD = sc.parallelize(Array("blm 5 144", "jerry 18 188", "kitty 5 166")).map(_.split(" "))
    // Specify the schema of each field directly with StructType
    val schema = StructType(
      List(
        StructField("name", StringType, true),
        StructField("age", IntegerType, true),
        StructField("score", IntegerType, true)
      )
    )
    // Map the split RDD to an RDD of Rows
    val rowRDD = personRDD.map(p => Row(p(0).trim, p(1).trim.toInt, p(2).trim.toInt))
    // Apply the schema to the row RDD
    val personDataFrame = sqlContext.createDataFrame(rowRDD, schema)
    // Append the data to the database, reusing the Properties created above
    personDataFrame.write.mode("append").jdbc("jdbc:mysql://127.0.0.1:3306/spark",
      "dtspark", properties)
    // Print the table again, then stop the SparkContext
    stud_scoreDF.show()
    sc.stop()
  }
}

================================
Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
// :: INFO SparkContext: Running Spark version 2.2.
// :: INFO SparkContext: Submitted application: JdbcOperation
// :: INFO SecurityManager: Changing view acls to: fangping
// :: INFO SecurityManager: Changing modify acls to: fangping
// :: INFO SecurityManager: Changing view acls groups to:
// :: INFO SecurityManager: Changing modify acls groups to:
// :: INFO SecurityManager: SecurityManager: authentication disabled; ui acls disabled; users with view permissions: Set(fangping); groups with view permissions: Set(); users with modify permissions: Set(fangping); groups with modify permissions: Set()
// :: INFO Utils: Successfully started service 'sparkDriver' on port .
// :: INFO SparkEnv: Registering MapOutputTracker
// :: INFO SparkEnv: Registering BlockManagerMaster
// :: INFO BlockManagerMasterEndpoint: Using org.apache.spark.storage.DefaultTopologyMapper for getting topology information
// :: INFO BlockManagerMasterEndpoint: BlockManagerMasterEndpoint up
// :: INFO DiskBlockManager: Created local directory at C:\Users\fangping\AppData\Local\Temp\blockmgr-b8154e0d-e77b-4ba9-818b-71d3ffb8c553
// :: INFO MemoryStore: MemoryStore started with capacity 339.6 MB
// :: INFO SparkEnv: Registering OutputCommitCoordinator
// :: INFO Utils: Successfully started service 'SparkUI' on port .
// :: INFO SparkUI: Bound SparkUI to 0.0.0.0, and started at http://172.18.3.13:4040
// :: INFO Executor: Starting executor ID driver on host localhost
// :: INFO Utils: Successfully started service 'org.apache.spark.network.netty.NettyBlockTransferService' on port .
// :: INFO NettyBlockTransferService: Server created on 172.18.3.13:
// :: INFO BlockManager: Using org.apache.spark.storage.RandomBlockReplicationPolicy for block replication policy
// :: INFO BlockManagerMaster: Registering BlockManager BlockManagerId(driver, 172.18.3.13, , None)
// :: INFO BlockManagerMasterEndpoint: Registering block manager 172.18.3.13: with 339.6 MB RAM, BlockManagerId(driver, 172.18.3.13, , None)
// :: INFO BlockManagerMaster: Registered BlockManager BlockManagerId(driver, 172.18.3.13, , None)
// :: INFO BlockManager: Initialized BlockManager: BlockManagerId(driver, 172.18.3.13, , None)
// :: INFO SharedState: Setting hive.metastore.warehouse.dir ('null') to the value of spark.sql.warehouse.dir ('file:/E:/back/scalaWs/Spark2Demo/spark-warehouse/').
// :: INFO SharedState: Warehouse path is 'file:/E:/back/scalaWs/Spark2Demo/spark-warehouse/'.
// :: INFO StateStoreCoordinatorRef: Registered StateStoreCoordinator endpoint
// :: INFO CodeGenerator: Code generated in 239.883814 ms
// :: INFO CodeGenerator: Code generated in 15.603579 ms
// :: INFO SparkContext: Starting job: show at jdbc.scala:
// :: INFO DAGScheduler: Got job (show at jdbc.scala:) with output partitions
// :: INFO DAGScheduler: Final stage: ResultStage (show at jdbc.scala:)
// :: INFO DAGScheduler: Parents of final stage: List()
// :: INFO DAGScheduler: Missing parents: List()
// :: INFO DAGScheduler: Submitting ResultStage (MapPartitionsRDD[] at show at jdbc.scala:), which has no missing parents
// :: INFO MemoryStore: Block broadcast_0 stored as values in memory (estimated size 7.5 KB, free 339.6 MB)
// :: INFO MemoryStore: Block broadcast_0_piece0 stored as bytes in memory (estimated size 4.2 KB, free 339.6 MB)
// :: INFO BlockManagerInfo: Added broadcast_0_piece0 in memory on 172.18.3.13: (size: 4.2 KB, free: 339.6 MB)
// :: INFO SparkContext: Created broadcast from broadcast at DAGScheduler.scala:
// :: INFO DAGScheduler: Submitting missing tasks from ResultStage (MapPartitionsRDD[] at show at jdbc.scala:) (first tasks are for partitions Vector())
// :: INFO TaskSchedulerImpl: Adding task set 0.0 with tasks
// :: INFO TaskSetManager: Starting task 0.0 in stage 0.0 (TID , localhost, executor driver, partition , PROCESS_LOCAL, bytes)
// :: INFO Executor: Running task 0.0 in stage 0.0 (TID )
// :: INFO JDBCRDD: closed connection
// :: INFO Executor: Finished task 0.0 in stage 0.0 (TID ). bytes result sent to driver
// :: INFO TaskSetManager: Finished task 0.0 in stage 0.0 (TID ) in ms on localhost (executor driver) (/)
// :: INFO TaskSchedulerImpl: Removed TaskSet 0.0, whose tasks have all completed, from pool
// :: INFO DAGScheduler: ResultStage (show at jdbc.scala:) finished in 0.224 s
// :: INFO DAGScheduler: Job finished: show at jdbc.scala:, took 2.466901 s
+-----+---+-----+
| name|age|score|
+-----+---+-----+
| swk|||
| blm| | |
|jerry| | |
|kitty| | |
+-----+---+-----+
// :: INFO SparkContext: Starting job: jdbc at jdbc.scala:
// :: INFO DAGScheduler: Got job (jdbc at jdbc.scala:) with output partitions
// :: INFO DAGScheduler: Final stage: ResultStage (jdbc at jdbc.scala:)
// :: INFO DAGScheduler: Parents of final stage: List()
// :: INFO DAGScheduler: Missing parents: List()
// :: INFO DAGScheduler: Submitting ResultStage (MapPartitionsRDD[] at jdbc at jdbc.scala:), which has no missing parents
// :: INFO MemoryStore: Block broadcast_1 stored as values in memory (estimated size 10.8 KB, free 339.6 MB)
// :: INFO MemoryStore: Block broadcast_1_piece0 stored as bytes in memory (estimated size 5.8 KB, free 339.6 MB)
// :: INFO BlockManagerInfo: Added broadcast_1_piece0 in memory on 172.18.3.13: (size: 5.8 KB, free: 339.6 MB)
// :: INFO SparkContext: Created broadcast from broadcast at DAGScheduler.scala:
// :: INFO DAGScheduler: Submitting missing tasks from ResultStage (MapPartitionsRDD[] at jdbc at jdbc.scala:) (first tasks are for partitions Vector())
// :: INFO TaskSchedulerImpl: Adding task set 1.0 with tasks
// :: INFO TaskSetManager: Starting task 0.0 in stage 1.0 (TID , localhost, executor driver, partition , PROCESS_LOCAL, bytes)
// :: INFO Executor: Running task 0.0 in stage 1.0 (TID )
// :: INFO CodeGenerator: Code generated in 13.199986 ms
// :: INFO CodeGenerator: Code generated in 86.854105 ms
// :: INFO Executor: Finished task 0.0 in stage 1.0 (TID ). bytes result sent to driver
// :: INFO TaskSetManager: Finished task 0.0 in stage 1.0 (TID ) in ms on localhost (executor driver) (/)
// :: INFO DAGScheduler: ResultStage (jdbc at jdbc.scala:) finished in 0.473 s
// :: INFO DAGScheduler: Job finished: jdbc at jdbc.scala:, took 0.496857 s
// :: INFO TaskSchedulerImpl: Removed TaskSet 1.0, whose tasks have all completed, from pool
// :: INFO SparkContext: Starting job: show at jdbc.scala:
// :: INFO DAGScheduler: Got job (show at jdbc.scala:) with output partitions
// :: INFO DAGScheduler: Final stage: ResultStage (show at jdbc.scala:)
// :: INFO DAGScheduler: Parents of final stage: List()
// :: INFO DAGScheduler: Missing parents: List()
// :: INFO DAGScheduler: Submitting ResultStage (MapPartitionsRDD[] at show at jdbc.scala:), which has no missing parents
// :: INFO MemoryStore: Block broadcast_2 stored as values in memory (estimated size 7.5 KB, free 339.6 MB)
// :: INFO MemoryStore: Block broadcast_2_piece0 stored as bytes in memory (estimated size 4.2 KB, free 339.6 MB)
// :: INFO BlockManagerInfo: Added broadcast_2_piece0 in memory on 172.18.3.13: (size: 4.2 KB, free: 339.6 MB)
// :: INFO SparkContext: Created broadcast from broadcast at DAGScheduler.scala:
// :: INFO DAGScheduler: Submitting missing tasks from ResultStage (MapPartitionsRDD[] at show at jdbc.scala:) (first tasks are for partitions Vector())
// :: INFO TaskSchedulerImpl: Adding task set 2.0 with tasks
// :: INFO TaskSetManager: Starting task 0.0 in stage 2.0 (TID , localhost, executor driver, partition , PROCESS_LOCAL, bytes)
// :: INFO Executor: Running task 0.0 in stage 2.0 (TID )
// :: INFO JDBCRDD: closed connection
// :: INFO Executor: Finished task 0.0 in stage 2.0 (TID ). bytes result sent to driver
// :: INFO TaskSetManager: Finished task 0.0 in stage 2.0 (TID ) in ms on localhost (executor driver) (/)
// :: INFO TaskSchedulerImpl: Removed TaskSet 2.0, whose tasks have all completed, from pool
// :: INFO DAGScheduler: ResultStage (show at jdbc.scala:) finished in 0.027 s
// :: INFO DAGScheduler: Job finished: show at jdbc.scala:, took 0.088025 s
+-----+---+-----+
| name|age|score|
+-----+---+-----+
| swk|||
| blm| | |
|jerry| | |
|kitty| | |
| blm| | |
|jerry| | |
|kitty| | |
+-----+---+-----+
// :: INFO SparkUI: Stopped Spark web UI at http://172.18.3.13:4040
// :: INFO MapOutputTrackerMasterEndpoint: MapOutputTrackerMasterEndpoint stopped!
// :: INFO MemoryStore: MemoryStore cleared
// :: INFO BlockManager: BlockManager stopped
// :: INFO BlockManagerMaster: BlockManagerMaster stopped
// :: INFO OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: OutputCommitCoordinator stopped!
// :: INFO SparkContext: Successfully stopped SparkContext
// :: INFO ShutdownHookManager: Shutdown hook called
// :: INFO ShutdownHookManager: Deleting directory C:\Users\\AppData\Local\Temp\spark-e888fd39-2e41-43e5-829b-ca203b786cef
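A note on the MySQL driver: the example assumes the MySQL JDBC driver is already on the classpath; without it the job fails with ClassNotFoundException: com.mysql.jdbc.Driver. A minimal sketch of the sbt dependency (the version number is an assumption, pick one matching your MySQL server), plus the optional "driver" property Spark accepts when the driver class cannot be auto-detected:

// build.sbt -- hypothetical version, adjust to your environment
libraryDependencies += "mysql" % "mysql-connector-java" % "5.1.47"

// optionally tell Spark which driver class to load
properties.put("driver", "com.mysql.jdbc.Driver")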
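Since the log shows Spark 2.2, the same read/append round trip can also be written against SparkSession, the recommended entry point in Spark 2.x (SQLContext still works but is kept mainly for backward compatibility). A minimal sketch under that assumption; the extra row ("tom", 20, 99) is purely illustrative:

package egsql

import java.util.Properties

import org.apache.spark.sql.SparkSession

object jdbcSession {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("JdbcOperation")
      .master("local")
      .getOrCreate()
    import spark.implicits._

    val properties = new Properties()
    properties.put("user", "root")
    properties.put("password", "xxx")
    val url = "jdbc:mysql://127.0.0.1:3306/spark?useUnicode=true&characterEncoding=gbk&zeroDateTimeBehavior=convertToNull"

    // Read the "dtspark" table through the SparkSession's DataFrameReader
    val df = spark.read.jdbc(url, "dtspark", properties)
    df.show()

    // Build a small DataFrame from a local Seq (hypothetical row) and append it
    val newRows = Seq(("tom", 20, 99)).toDF("name", "age", "score")
    newRows.write.mode("append").jdbc(url, "dtspark", properties)

    spark.stop()
  }
}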
