SQLAlchemy source code analysis: how create_engine builds the engine
The engine is the core of SQLAlchemy: whether you work with SQL Core or the ORM, everything depends on an engine being created first, so let's dig into how the engine is actually constructed.
from sqlalchemy import create_engine

engine = create_engine(
    'mysql+pymysql://root:x@127.0.0.1/test',
    echo=True,          # if True, log the SQL statements that are emitted
    pool_size=5,        # initial size of the connection pool
    max_overflow=10,    # maximum overflow beyond pool_size; pool_size + max_overflow is the hard cap,
                        # and requests beyond it block for pool_timeout seconds (default 30)
    pool_recycle=7200,  # recycle (reconnect) connections after this many seconds
)
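Before diving into the source, here is a minimal usage sketch showing that both the Core and ORM code paths hang off this one engine object. The connection details are illustrative only, and the plain-string execute() reflects the 1.3-era API this article analyzes:

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine("mysql+pymysql://root:x@127.0.0.1/test", echo=True)

# SQL Core: check a connection out of the engine's pool and run a statement
with engine.connect() as conn:
    print(conn.execute("SELECT 1").scalar())

# ORM: the Session draws its connections from this same engine
Session = sessionmaker(bind=engine)
session = Session()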
create_engine builds the engine object; its source looks like this:
default_strategy = "plain"


def create_engine(*args, **kwargs):
    strategy = kwargs.pop("strategy", default_strategy)
    strategy = strategies.strategies[strategy]
    return strategy.create(*args, **kwargs)

Note the strategy keyword argument: it defaults to 'plain', and its value is used to look up the matching strategy instance dynamically. Which class does the default 'plain' strategy correspond to?

class PlainEngineStrategy(DefaultEngineStrategy):
    """Strategy for configuring a regular Engine."""

    name = "plain"
    engine_cls = base.Engine


PlainEngineStrategy()
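That last module-level call is what populates the lookup table: each strategy class registers itself when it is instantiated at import time. A minimal sketch of that registration pattern, simplified from engine/strategies.py (error handling and the other built-in strategies omitted):

strategies = {}


class EngineStrategy(object):
    """Instantiating a subclass registers it under its `name`."""

    def __init__(self):
        strategies[self.name] = self

    def create(self, *args, **kwargs):
        raise NotImplementedError()


class PlainEngineStrategy(EngineStrategy):
    name = "plain"

    def create(self, name_or_url, **kwargs):
        ...


PlainEngineStrategy()          # runs at module import time
print(strategies["plain"])     # -> the PlainEngineStrategy instance

So strategies.strategies["plain"] resolves to a PlainEngineStrategy instance, whose create() is inherited from DefaultEngineStrategy; that is where all the real work happens: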
class DefaultEngineStrategy(EngineStrategy):
    """Base class for built-in strategies."""

    def create(self, name_or_url, **kwargs):
        # create url.URL object
        u = url.make_url(name_or_url)

        plugins = u._instantiate_plugins(kwargs)

        u.query.pop("plugin", None)
        kwargs.pop("plugins", None)

        entrypoint = u._get_entrypoint()
        dialect_cls = entrypoint.get_dialect_cls(u)

        if kwargs.pop("_coerce_config", False):

            def pop_kwarg(key, default=None):
                value = kwargs.pop(key, default)
                if key in dialect_cls.engine_config_types:
                    value = dialect_cls.engine_config_types[key](value)
                return value

        else:
            pop_kwarg = kwargs.pop

        dialect_args = {}
        # consume dialect arguments from kwargs
        for k in util.get_cls_kwargs(dialect_cls):
            if k in kwargs:
                dialect_args[k] = pop_kwarg(k)

        dbapi = kwargs.pop("module", None)
        if dbapi is None:
            dbapi_args = {}
            for k in util.get_func_kwargs(dialect_cls.dbapi):
                if k in kwargs:
                    dbapi_args[k] = pop_kwarg(k)
            dbapi = dialect_cls.dbapi(**dbapi_args)

        dialect_args["dbapi"] = dbapi

        for plugin in plugins:
            plugin.handle_dialect_kwargs(dialect_cls, dialect_args)

        # create dialect
        dialect = dialect_cls(**dialect_args)

        # assemble connection arguments
        (cargs, cparams) = dialect.create_connect_args(u)
        cparams.update(pop_kwarg("connect_args", {}))
        cargs = list(cargs)  # allow mutability

        # look for existing pool or create
        pool = pop_kwarg("pool", None)
        if pool is None:

            def connect(connection_record=None):
                if dialect._has_events:
                    for fn in dialect.dispatch.do_connect:
                        connection = fn(
                            dialect, connection_record, cargs, cparams
                        )
                        if connection is not None:
                            return connection
                return dialect.connect(*cargs, **cparams)

            creator = pop_kwarg("creator", connect)

            poolclass = pop_kwarg("poolclass", None)
            if poolclass is None:
                poolclass = dialect_cls.get_pool_class(u)
            pool_args = {"dialect": dialect}

            # consume pool arguments from kwargs, translating a few of
            # the arguments
            translate = {
                "logging_name": "pool_logging_name",
                "echo": "echo_pool",
                "timeout": "pool_timeout",
                "recycle": "pool_recycle",
                "events": "pool_events",
                "use_threadlocal": "pool_threadlocal",
                "reset_on_return": "pool_reset_on_return",
                "pre_ping": "pool_pre_ping",
                "use_lifo": "pool_use_lifo",
            }
            for k in util.get_cls_kwargs(poolclass):
                tk = translate.get(k, k)
                if tk in kwargs:
                    pool_args[k] = pop_kwarg(tk)

            for plugin in plugins:
                plugin.handle_pool_kwargs(poolclass, pool_args)

            pool = poolclass(creator, **pool_args)
        else:
            if isinstance(pool, poollib.dbapi_proxy._DBProxy):
                pool = pool.get_pool(*cargs, **cparams)
            else:
                pool = pool

            pool._dialect = dialect

        # create engine.
        engineclass = self.engine_cls
        engine_args = {}
        for k in util.get_cls_kwargs(engineclass):
            if k in kwargs:
                engine_args[k] = pop_kwarg(k)

        _initialize = kwargs.pop("_initialize", True)

        # all kwargs should be consumed
        if kwargs:
            raise TypeError(
                "Invalid argument(s) %s sent to create_engine(), "
                "using configuration %s/%s/%s. Please check that the "
                "keyword arguments are appropriate for this combination "
                "of components."
                % (
                    ",".join("'%s'" % k for k in kwargs),
                    dialect.__class__.__name__,
                    pool.__class__.__name__,
                    engineclass.__name__,
                )
            )

        engine = engineclass(pool, dialect, u, **engine_args)

        if _initialize:
            do_on_connect = dialect.on_connect()
            if do_on_connect:

                def on_connect(dbapi_connection, connection_record):
                    conn = getattr(
                        dbapi_connection, "_sqla_unwrap", dbapi_connection
                    )
                    if conn is None:
                        return
                    do_on_connect(conn)

                event.listen(pool, "first_connect", on_connect)
                event.listen(pool, "connect", on_connect)

            def first_connect(dbapi_connection, connection_record):
                c = base.Connection(
                    engine, connection=dbapi_connection, _has_events=False
                )
                c._execution_options = util.immutabledict()
                dialect.initialize(c)
                dialect.do_rollback(c.connection)

            event.listen(pool, "first_connect", first_connect, once=True)

        dialect_cls.engine_created(engine)
        if entrypoint is not dialect_cls:
            entrypoint.engine_created(engine)

        for plugin in plugins:
            plugin.engine_created(engine)

        return engine

Let's walk through it piece by piece.
if kwargs.pop("_coerce_config", False): def pop_kwarg(key, default=None):
value = kwargs.pop(key, default)
if key in dialect_cls.engine_config_types:
value = dialect_cls.engine_config_types[key](value)
return value else:
pop_kwarg = kwargs.pop dialect_args = {}
# consume dialect arguments from kwargs
for k in util.get_cls_kwargs(dialect_cls):
if k in kwargs:
dialect_args[k] = pop_kwarg(k)
Nothing complicated here: it collects, from the keyword arguments, the complete set of parameters (dialect_args) that the dialect class needs.
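util.get_cls_kwargs inspects the dialect class to find out which constructor keywords it accepts. A simplified, hypothetical sketch of that idea (the real helper also handles **kwargs pass-through across the MRO) could look like this:

import inspect


def get_cls_kwargs_sketch(cls):
    """Collect constructor keyword names accepted anywhere in cls's MRO."""
    names = set()
    for klass in cls.__mro__:
        init = klass.__dict__.get("__init__")
        if init is None:
            continue
        try:
            sig = inspect.signature(init)
        except (TypeError, ValueError):
            continue
        for name, param in sig.parameters.items():
            if name != "self" and param.kind in (
                param.POSITIONAL_OR_KEYWORD,
                param.KEYWORD_ONLY,
            ):
                names.add(name)
    return names

pop_kwarg then removes each matching key from kwargs, and in the _coerce_config branch (used by engine_from_config, where every value arrives as a string) it also coerces the value to the proper type via dialect_cls.engine_config_types.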
dbapi = kwargs.pop("module", None)
if dbapi is None:
dbapi_args = {}
for k in util.get_func_kwargs(dialect_cls.dbapi):
if k in kwargs:
dbapi_args[k] = pop_kwarg(k)
dbapi = dialect_cls.dbapi(**dbapi_args) dialect_args["dbapi"] = dbapi
This part resolves the DBAPI object: you can inject a driver module yourself via the module argument, otherwise dialect_cls.dbapi() imports and returns the default driver for that dialect.
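For the mysql+pymysql URL used above, the dialect's dbapi() classmethod essentially just imports the pymysql module. A hedged illustration of the module= escape hatch (the effect is roughly equivalent to letting the dialect import the driver itself):

import pymysql
from sqlalchemy import create_engine

engine = create_engine(
    "mysql+pymysql://root:x@127.0.0.1/test",
    module=pymysql,   # bypass dialect_cls.dbapi() and use this module directly
)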
# create dialect
dialect = dialect_cls(**dialect_args)
(cargs, cparams) = dialect.create_connect_args(u)
cparams.update(pop_kwarg("connect_args", {}))
cargs = list(cargs) # allow mutability
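A quick aside before the pool is built: create_connect_args() is what turns the URL into arguments for the driver's connect() call, and connect_args lets you merge in extras. You can inspect the result directly; the exact keys depend on the dialect and driver version, so the values in the comments below are only indicative:

from sqlalchemy.engine import url

u = url.make_url("mysql+pymysql://root:x@127.0.0.1/test")
dialect_cls = u.get_dialect()       # -> the pymysql MySQL dialect class
dialect = dialect_cls()
cargs, cparams = dialect.create_connect_args(u)
print(cargs)    # positional args for the driver's connect(), typically []
print(cparams)  # e.g. {'host': '127.0.0.1', 'user': 'root', 'passwd': 'x', 'db': 'test'}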
pool = pop_kwarg("pool", None)
if pool is None: def connect(connection_record=None):
if dialect._has_events:
for fn in dialect.dispatch.do_connect:
connection = fn(
dialect, connection_record, cargs, cparams
)
if connection is not None:
return connection
return dialect.connect(*cargs, **cparams) creator = pop_kwarg("creator", connect) poolclass = pop_kwarg("poolclass", None)
if poolclass is None:
poolclass = dialect_cls.get_pool_class(u)
pool_args = {"dialect": dialect} # consume pool arguments from kwargs, translating a few of
# the arguments
translate = {
"logging_name": "pool_logging_name",
"echo": "echo_pool",
"timeout": "pool_timeout",
"recycle": "pool_recycle",
"events": "pool_events",
"use_threadlocal": "pool_threadlocal",
"reset_on_return": "pool_reset_on_return",
"pre_ping": "pool_pre_ping",
"use_lifo": "pool_use_lifo",
}
for k in util.get_cls_kwargs(poolclass):
tk = translate.get(k, k)
if tk in kwargs:
pool_args[k] = pop_kwarg(tk) for plugin in plugins:
plugin.handle_pool_kwargs(poolclass, pool_args) pool = poolclass(creator, **pool_args)
else:
if isinstance(pool, poollib.dbapi_proxy._DBProxy):
pool = pool.get_pool(*cargs, **cparams)
else:
pool = pool
pool._dialect = dialect
This builds the connection pool; unless you specify otherwise, the dialect's get_pool_class() returns pool.QueuePool.
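The translate dict above is how the create_engine() keywords pool_size / max_overflow / pool_timeout / pool_recycle become QueuePool constructor arguments, with creator being the internal connect closure. A hedged sketch of what the resulting pool construction roughly amounts to (driver argument names as in current PyMySQL):

import pymysql
from sqlalchemy.pool import QueuePool


def creator():
    # stand-in for the internal `connect` closure that calls dialect.connect()
    return pymysql.connect(host="127.0.0.1", user="root",
                           password="x", database="test")


pool = QueuePool(
    creator,
    pool_size=5,       # from create_engine(pool_size=5)
    max_overflow=10,   # from create_engine(max_overflow=10)
    timeout=30,        # from create_engine(pool_timeout=30)
    recycle=7200,      # from create_engine(pool_recycle=7200)
)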
# create engine.
engineclass = self.engine_cls
engine_args = {}
for k in util.get_cls_kwargs(engineclass):
    if k in kwargs:
        engine_args[k] = pop_kwarg(k)

_initialize = kwargs.pop("_initialize", True)

# all kwargs should be consumed
if kwargs:
    raise TypeError(
        "Invalid argument(s) %s sent to create_engine(), "
        "using configuration %s/%s/%s. Please check that the "
        "keyword arguments are appropriate for this combination "
        "of components."
        % (
            ",".join("'%s'" % k for k in kwargs),
            dialect.__class__.__name__,
            pool.__class__.__name__,
            engineclass.__name__,
        )
    )

engine = engineclass(pool, dialect, u, **engine_args)
From the above you can see that the heart of the engine is the connection pool plus the dialect: the pool looks after the connections, while the dialect encapsulates the database-specific behaviour. Any keyword argument that none of these components consumed triggers the TypeError shown above.
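Inspecting a finished engine shows these two collaborators directly (the class names in the comments are what a pymysql-backed engine typically reports):

from sqlalchemy import create_engine

engine = create_engine("mysql+pymysql://root:x@127.0.0.1/test")
print(type(engine.dialect).__name__)   # e.g. MySQLDialect_pymysql
print(type(engine.pool).__name__)      # e.g. QueuePool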
if _initialize:
    do_on_connect = dialect.on_connect()
    if do_on_connect:

        def on_connect(dbapi_connection, connection_record):
            conn = getattr(
                dbapi_connection, "_sqla_unwrap", dbapi_connection
            )
            if conn is None:
                return
            do_on_connect(conn)

        event.listen(pool, "first_connect", on_connect)
        event.listen(pool, "connect", on_connect)

    def first_connect(dbapi_connection, connection_record):
        c = base.Connection(
            engine, connection=dbapi_connection, _has_events=False
        )
        c._execution_options = util.immutabledict()
        dialect.initialize(c)
        dialect.do_rollback(c.connection)

    event.listen(pool, "first_connect", first_connect, once=True)

dialect_cls.engine_created(engine)
if entrypoint is not dialect_cls:
    entrypoint.engine_created(engine)

for plugin in plugins:
    plugin.engine_created(engine)
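This final block wires the dialect's on_connect handler and a one-time first_connect initializer onto the pool via event.listen(), then notifies the dialect class and any plugins that the engine exists before returning it. User code can hook the same pool-level events through the public event API; a hedged example mirroring what the 1.3-era code above registers internally:

from sqlalchemy import create_engine, event

engine = create_engine("mysql+pymysql://root:x@127.0.0.1/test")


@event.listens_for(engine, "connect")
def receive_connect(dbapi_connection, connection_record):
    # fires for every new DBAPI connection the pool creates
    print("new DBAPI connection")


@event.listens_for(engine, "first_connect")
def receive_first_connect(dbapi_connection, connection_record):
    # fires once for the very first connection, the same hook the code above
    # uses to run dialect.initialize()
    print("first connection established")

With the pool and dialect assembled, the events registered, and engine_created() hooks fired, create() hands the finished engine back to the caller.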