每天迁移MySQL历史数据到历史库Python脚本
#!/usr/bin/env python
# coding:utf-8
# __author__ = 'Logan'
"""Daily migration of MySQL history data into an archive database.

For a single calendar day (today minus --time_interval days) this script:
  1. creates the destination table as a clone of ``<source_tab>_template``,
  2. copies that day's rows from the source table to the destination table
     in primary-key chunks of ``step_size`` rows,
  3. verifies a ``count_min_max`` checksum between source and destination,
  4. removes the migrated rows from the source, either with chunked
     DELETE statements or by dropping the day's partition
     (``--delete_strategy=delete|drop``).

Exit codes: 88 = full success, 77 = verification/checksum failure,
254 = migration incomplete, 255 = destination table creation failed.
"""
import sys
import datetime
import time

# The MySQLdb driver (mysqlclient) is only required to actually run a
# migration; importing it lazily keeps this module importable (e.g. for
# testing the argument parser) on hosts without the driver installed.
try:
    import MySQLdb
except ImportError:
    MySQLdb = None


class ClassMigrate(object):
    """One migration run, fully configured from the command line."""

    def _get_argv(self):
        """Parse ``sys.argv`` into instance attributes; exit on bad usage.

        Recognised options: --source, --dest (host:port/db:tab/user/password),
        --delete_strategy (delete|drop), --primary_key, --date_col,
        --time_interval (days back from today).
        """
        self.usage = """
usage():
python daily_migration.py --source=192.168.1.4:3306/db_name:tab_name/proxy/password \\
--dest=192.168.1.150:13301/db_name_archive:tab_name_201601/proxy/password \\
--delete_strategy=delete --primary_key=auto_id --date_col=ut --time_interval=180
"""
        if len(sys.argv) == 1:
            print(self.usage)
            sys.exit(1)
        elif sys.argv[1] == '--help' or sys.argv[1] == '-h':
            print(self.usage)
            sys.exit()
        else:
            # BUG FIX: this branch was ``elif len(sys.argv) > 2``, so a
            # single non-help option was silently ignored and the
            # attributes below were never set (AttributeError later).
            for arg in sys.argv[1:]:
                # Split on the first '=' only, so passwords containing
                # '=' survive parsing.  (Passwords containing '/' are
                # still unsupported by the --source/--dest format.)
                _argv = arg.split('=', 1)
                if _argv[0] == '--source':
                    _list = _argv[1].split('/')
                    self.source_host = _list[0].split(':')[0]
                    self.source_port = int(_list[0].split(':')[1])
                    self.source_db = _list[1].split(':')[0]
                    self.source_tab = _list[1].split(':')[1]
                    self.source_user = _list[2]
                    self.source_password = _list[3]
                elif _argv[0] == '--dest':
                    _list = _argv[1].split('/')
                    self.dest_host = _list[0].split(':')[0]
                    self.dest_port = int(_list[0].split(':')[1])
                    self.dest_db = _list[1].split(':')[0]
                    self.dest_tab = _list[1].split(':')[1]
                    self.dest_user = _list[2]
                    self.dest_password = _list[3]
                elif _argv[0] == '--delete_strategy':
                    self.deleteStrategy = _argv[1]
                    if self.deleteStrategy not in ('delete', 'drop'):
                        print(self.usage)
                        sys.exit(1)
                elif _argv[0] == '--primary_key':
                    self.pk = _argv[1]
                elif _argv[0] == '--date_col':
                    self.date_col = _argv[1]
                elif _argv[0] == '--time_interval':
                    # kept as a string: it is only interpolated into SQL
                    self.interval = _argv[1]
                else:
                    print(self.usage)
                    sys.exit(1)

    def __init__(self):
        self._get_argv()
        if MySQLdb is None:
            raise RuntimeError("MySQLdb (mysqlclient) is required to run a migration")
        ## --------------------------------------------------------------------
        # Autocommit on both connections: each chunk is its own
        # transaction, so an interrupted run keeps completed chunks.
        self.sourcedb_conn_str = MySQLdb.connect(
            host=self.source_host, port=self.source_port,
            user=self.source_user, passwd=self.source_password,
            db=self.source_db, charset='utf8')
        self.sourcedb_conn_str.autocommit(True)
        self.destdb_conn_str = MySQLdb.connect(
            host=self.dest_host, port=self.dest_port,
            user=self.dest_user, passwd=self.dest_password,
            db=self.dest_db, charset='utf8')
        self.destdb_conn_str.autocommit(True)
        ## --------------------------------------------------------------------
        # Destination tables are cloned from <source_tab>_template.
        self.template_tab = self.source_tab + '_template'
        # Width of each primary-key range copied/deleted per iteration.
        self.step_size = 20000
        ## --------------------------------------------------------------------
        self._migCompleteState = False
        self._deleteCompleteState = False
        ## --------------------------------------------------------------------
        self.source_cnt = ''
        self.source_min_id = ''
        self.source_max_id = ''
        self.source_checksum = ''
        # BUG FIX: was ``self.dest_cn`` (typo) — do() prints
        # ``self.dest_cnt``, which was never initialised and raised
        # AttributeError whenever verification failed early.
        self.dest_cnt = ''
        ## --------------------------------------------------------------------
        self.today = time.strftime("%Y-%m-%d")
        # self.today = '2016-05-30 09:59:40'

    def sourcedb_query(self, sql, sql_type):
        """Run *sql* on the source connection.

        sql_type 'select' -> fetchall() tuple; 'dml' -> affected row
        count; anything else ('ddl', ...) -> True.  Returns False on any
        error (after printing it).
        """
        cr = None
        try:
            cr = self.sourcedb_conn_str.cursor()
            cr.execute(sql)
            if sql_type == 'select':
                return cr.fetchall()
            elif sql_type == 'dml':
                rows = self.sourcedb_conn_str.affected_rows()
                return rows
            else:
                return True
        except Exception as e:
            print(str(e) + "<br>")
            return False
        finally:
            # BUG FIX: if cursor() itself raised, ``cr`` was unbound and
            # this clause raised NameError, masking the real error.
            if cr is not None:
                cr.close()

    def destdb_query(self, sql, sql_type, values=''):
        """Run *sql* on the destination connection.

        sql_type 'select' -> fetchall() tuple; 'insertmany' ->
        executemany(sql, values) and return the affected row count;
        anything else -> True.  Returns False on any error.
        """
        cr = None
        try:
            cr = self.destdb_conn_str.cursor()
            if sql_type == 'select':
                cr.execute(sql)
                return cr.fetchall()
            elif sql_type == 'insertmany':
                cr.executemany(sql, values)
                rows = self.destdb_conn_str.affected_rows()
                return rows
            else:
                cr.execute(sql)
                return True
        except Exception as e:
            print(str(e) + "<br>")
            return False
        finally:
            # Same unbound-cursor fix as sourcedb_query.
            if cr is not None:
                cr.close()

    def create_table_from_source(self):
        """Clone the source table's DDL onto the destination.

        Unused in the normal flow: the archive table uses a different
        storage engine, so copying the source DDL verbatim is not
        suitable.  Kept for other purposes.
        """
        try:
            sql = "show create table %s;" % self.source_tab
            create_str = self.sourcedb_query(sql, 'select')[0][1]
            create_str = create_str.replace('CREATE TABLE', 'CREATE TABLE IF NOT EXISTS')
            self.destdb_query(create_str, 'ddl')
            return True
        except Exception as e:
            print(str(e) + "<br>")
            return False

    def create_table_from_template(self):
        """Create the destination table as a clone of the template table."""
        try:
            sql = 'CREATE TABLE IF NOT EXISTS %s like %s;' % (self.dest_tab, self.template_tab)
            state = self.destdb_query(sql, 'ddl')
            if state:
                return True
            else:
                return False
        except Exception as e:
            # BUG FIX: was ``str(e + "<br>")`` — adding a str to an
            # exception raised TypeError inside the handler.
            print(str(e) + "<br>")
            return False

    def get_min_max(self):
        """Record count/min/max of the day's matching rows in the source.

        Sets source_cnt / source_min_id / source_max_id and the combined
        source_checksum string.  Returns False when no rows match.

        NOTE(review): table/column names come straight from the command
        line and are interpolated unquoted into SQL — operators must be
        trusted (classic injection surface otherwise).
        """
        try:
            print("\nStarting Migrate at -- %s <br>" % datetime.datetime.now())
            sql = """select count(*),IFNULL(min(%s),-1),IFNULL(max(%s),-1) from %s where %s >= CONCAT(DATE_FORMAT(DATE_ADD('%s', INTERVAL -%s day),'%%Y-%%m-%%d'), ' 00:00:00') \
and %s <= CONCAT(DATE_FORMAT(DATE_ADD('%s', INTERVAL -%s day),'%%Y-%%m-%%d'), ' 23:59:59') """ \
                % (self.pk, self.pk, self.source_tab,
                   self.date_col, self.today, self.interval,
                   self.date_col, self.today, self.interval)
            q = self.sourcedb_query(sql, 'select')
            self.source_cnt = q[0][0]
            self.source_min_id = q[0][1]
            self.source_max_id = q[0][2]
            self.source_checksum = str(self.source_cnt) + '_' + str(self.source_min_id) + '_' + str(self.source_max_id)
            if self.source_cnt == 0 or self.source_min_id == -1 or self.source_max_id == -1:
                print("There is 0 record in source table been matched! <br>")
                return False
            else:
                return True
        except Exception as e:
            print(str(e) + "<br>")
            return False

    def migrate_2_destdb(self):
        """Copy the day's rows into the destination table in pk chunks."""
        try:
            if not self.get_min_max():
                return False
            k = self.source_min_id
            cols = self.sourcedb_query("desc %s;" % self.source_tab, 'select')
            # One %s placeholder per source column, consumed by executemany.
            fields = ",".join(["%s"] * len(cols))
            insert_sql = "insert into %s values (%s)" % (self.dest_tab, fields)
            while k <= self.source_max_id:
                sql = """select * from %s where %s >= %d and %s< %d \
and %s >= CONCAT(DATE_FORMAT(DATE_ADD('%s', INTERVAL -%s day),'%%Y-%%m-%%d'), ' 00:00:00') \
and %s <= CONCAT(DATE_FORMAT(DATE_ADD('%s', INTERVAL -%s day),'%%Y-%%m-%%d'), ' 23:59:59') """ \
                    % (self.source_tab, self.pk, k, self.pk, k + self.step_size,
                       self.date_col, self.today, self.interval,
                       self.date_col, self.today, self.interval)
                print("\n%s <br>" % sql)
                starttime = datetime.datetime.now()
                results = self.sourcedb_query(sql, 'select')
                rows = self.destdb_query(insert_sql, 'insertmany', results)
                # BUG FIX: was ``rows == False``, also true for rows == 0
                # (an empty chunk is not a failure).
                if rows is False:
                    print("Insert failed!! <br>")
                else:
                    print("Inserted %s rows. <br>" % rows)
                endtime = datetime.datetime.now()
                timeinterval = endtime - starttime
                print("Elapsed :" + str(timeinterval.seconds) + '.'
                      + str(timeinterval.microseconds) + " seconds <br>")
                k += self.step_size
            print("\nInsert complete at -- %s <br>" % datetime.datetime.now())
            return True
        except Exception as e:
            print(str(e) + "<br>")
            return False

    def verify_total_cnt(self):
        """Compare source vs destination checksums; exit(77) on mismatch."""
        try:
            sql = """select count(*),IFNULL(min(%s),-1),IFNULL(max(%s),-1) from %s where %s >= CONCAT(DATE_FORMAT(DATE_ADD('%s', INTERVAL -%s day),'%%Y-%%m-%%d'), ' 00:00:00') \
and %s <= CONCAT(DATE_FORMAT(DATE_ADD('%s', INTERVAL -%s day),'%%Y-%%m-%%d'), ' 23:59:59') """ \
                % (self.pk, self.pk, self.dest_tab,
                   self.date_col, self.today, self.interval,
                   self.date_col, self.today, self.interval)
            dest_result = self.destdb_query(sql, 'select')
            self.dest_cnt = dest_result[0][0]
            dest_checksum = str(self.dest_cnt) + '_' + str(dest_result[0][1]) + '_' + str(dest_result[0][2])
            print("source_checksum: %s, dest_checksum: %s <br>" % (self.source_checksum, dest_checksum))
            if self.source_cnt == dest_result[0][0] and dest_result[0][0] != 0 and self.source_checksum == dest_checksum:
                self._migCompleteState = True
                print("Verify successfully !!<br>")
            else:
                print("Verify failed !!<br>")
                # SystemExit is a BaseException, so it is NOT swallowed
                # by the ``except Exception`` below.
                sys.exit(77)
        except Exception as e:
            print(str(e) + "<br>")

    def drop_daily_partition(self):
        """Drop the source partition that held the migrated day.

        The partition is identified via EXPLAIN PARTITIONS and only
        dropped when its count/min/max checksum equals the migrated
        checksum — i.e. it contains exactly the migrated rows.
        """
        try:
            if self._migCompleteState:
                sql = """explain partitions select * from %s where %s >= CONCAT(DATE_FORMAT(DATE_ADD('%s', INTERVAL -%s day),'%%Y-%%m-%%d'), ' 00:00:00') and %s <= CONCAT(DATE_FORMAT(DATE_ADD('%s', INTERVAL -%s day),'%%Y-%%m-%%d'), ' 23:59:59') """ \
                    % (self.source_tab, self.date_col, self.today, self.interval,
                       self.date_col, self.today, self.interval)
                partition_name = self.sourcedb_query(sql, 'select')
                # Column 4 of the EXPLAIN PARTITIONS row is 'partitions'.
                partition_name = partition_name[0][3]
                sql = """select count(*),IFNULL(min(%s),-1),IFNULL(max(%s),-1) from %s partition (%s)""" \
                    % (self.pk, self.pk, self.source_tab, partition_name)
                q = self.sourcedb_query(sql, 'select')
                source_cnt = q[0][0]
                source_min_id = q[0][1]
                source_max_id = q[0][2]
                checksum = str(source_cnt) + '_' + str(source_min_id) + '_' + str(source_max_id)
                if source_cnt == 0 or source_min_id == -1 or source_max_id == -1:
                    print("There is 0 record in source PARTITION been matched! <br>")
                else:
                    if checksum == self.source_checksum:
                        drop_par_sql = "alter table %s drop partition %s;" % (self.source_tab, partition_name)
                        droped = self.sourcedb_query(drop_par_sql, 'ddl')
                        if droped:
                            print(drop_par_sql + " <br>")
                            print("\nDrop partition complete at -- %s <br>" % datetime.datetime.now())
                            self._deleteCompleteState = True
                        else:
                            print(drop_par_sql + " <br>")
                            print("Drop partition failed.. <br>")
                    else:
                        # Partition holds rows outside the migrated day:
                        # dropping it would lose data, so abort.
                        print("The partition %s checksum failed !! Drop failed !!" % partition_name)
                        sys.exit(77)
        except Exception as e:
            print(str(e) + "<br>")

    def delete_data(self):
        """Delete the migrated rows from the source in pk chunks."""
        try:
            if self._migCompleteState:
                k = self.source_min_id
                while k <= self.source_max_id:
                    sql = """delete from %s where %s >= %d and %s< %d \
and %s >= CONCAT(DATE_FORMAT(DATE_ADD('%s', INTERVAL -%s day),'%%Y-%%m-%%d'), ' 00:00:00') \
and %s <= CONCAT(DATE_FORMAT(DATE_ADD('%s', INTERVAL -%s day),'%%Y-%%m-%%d'), ' 23:59:59') """ \
                        % (self.source_tab, self.pk, k, self.pk, k + self.step_size,
                           self.date_col, self.today, self.interval,
                           self.date_col, self.today, self.interval)
                    print("\n%s <br>" % sql)
                    starttime = datetime.datetime.now()
                    rows = self.sourcedb_query(sql, 'dml')
                    # Same ``is False`` fix as migrate_2_destdb.
                    if rows is False:
                        print("Delete failed!! <br>")
                    else:
                        print("Deleted %s rows. <br>" % rows)
                    endtime = datetime.datetime.now()
                    timeinterval = endtime - starttime
                    print("Elapsed :" + str(timeinterval.seconds) + '.'
                          + str(timeinterval.microseconds) + " seconds <br>")
                    # Throttle between chunks to limit replication/IO impact.
                    time.sleep(1)
                    k += self.step_size
                print("\nDelete complete at -- %s <br>" % datetime.datetime.now())
                self._deleteCompleteState = True
        except Exception as e:
            print(str(e) + "<br>")

    def do(self):
        """Run the full create -> migrate -> verify -> purge pipeline."""
        tab_create = self.create_table_from_template()
        if tab_create:
            migration = self.migrate_2_destdb()
            if migration:
                self.verify_total_cnt()
                if self._migCompleteState:
                    if self.deleteStrategy == 'drop':
                        self.drop_daily_partition()
                    else:
                        self.delete_data()
            print("\n<br>")
            print("=====" * 5 + '<br>')
            print("source_total_cnt: %s <br>" % self.source_cnt)
            print("dest_total_cnt: %s <br>" % self.dest_cnt)
            print("=====" * 5 + '<br>')
            if self._deleteCompleteState:
                print("\nFinal result: Successfully !! <br>")
                sys.exit(88)
            else:
                print("\nFinal result: Failed !! <br>")
                sys.exit(254)
        else:
            print("Create table failed ! Exiting. . .")
            sys.exit(255)


if __name__ == '__main__':
    # Guarded so importing this module (e.g. in tests) has no side effects.
    f = ClassMigrate()
    f.do()
- 从零开始学安全(三十五)●mysql 盲注手工自定义python脚本
import requests import string #mysql 手动注入 通用脚本 适用盲注 可以跟具自己的需求更改 def home(): url="url" list ...
- 迁移mysql数据到oracle上
转自:http://www.cnblogs.com/Warmsunshine/p/4651283.html 我是生成的文件里面的master.sql里面的sql,一个一个拷出来的. 迁移mysql数据 ...
- 基于binlog来分析mysql的行记录修改情况(python脚本分析)
最近写完mysql flashback,突然发现还有有这种使用场景:有些情况下,可能会统计在某个时间段内,MySQL修改了多少数据量?发生了多少事务?主要是哪些表格发生变动?变动的数量是怎 ...
- MySQL利用binlog恢复误操作数据(python脚本)
在人工手动进行一些数据库写操作的时候(比方说数据订正),尤其是一些不可控的批量更新或删除,通常都建议备份后操作.不过不怕万一,就怕一万,有备无患总是好的.在线上或者测试环境误操作导致数据被删除或者更新 ...
- mysql更新(三)语句 库的操作 表的操作
04-初始mysql语句 本节课先对mysql的基本语法初体验. 操作文件夹(库) 增 create database db1 charset utf8; 查 # 查看当前创建的数据库 show ...
- HBase——使用Put迁移MySql数据到Hbase
先上code: /** * 功能:迁移mysql上电池历史数据到hbase * Created by liuhuichao on 2016/12/6. */ public class MySqlToH ...
- mysql数据库从删库到跑路之mysql基础
一 数据库是什么 之前所学,数据要永久保存,比如用户注册的用户信息,都是保存于文件中,而文件只能存在于某一台机器上. 如果我们不考虑从文件中读取数据的效率问题,并且假设我们的程序所有的组件都运行在一台 ...
- 使用第三方库连接MySql数据库:PyMysql库和Pandas库
使用PyMysql库和Pandas库链接Mysql 1 系统环境 系统版本:Win10 64位 Mysql版本: 8.0.15 MySQL Community Server - GPL pymysql ...
- 32.修改IK分词器源码来基于mysql热更新词库
主要知识点, 修改IK分词器源码来基于mysql热更新词库 一.IK增加新词的原因 在第32小节中学习到了直接在es的词库中增加词语,来扩充自已的词库,但是这样做有以下缺点: (1)每次添加完 ...
随机推荐
- odoo源生打印【web report】
https://www.odoo.com/documentation/12.0/reference/reports.html 具体的看官方文档 一.纸张格式设置: <record id= ...
- python算法学习--待续
几个算法网站 算法可视化网站:https://visualgo.net/en,通过动画展示算法实现过程 程序可视化网站:http://www.pythontutor.com/visualize.htm ...
- 第二十七篇 -- QTreeWidget总结
前言 之前写过几篇关于TreeWidget的文章,不过不方便查阅,特此重新整合作为总结.不过关于QtDesigner画图,还是不重新写了,看 第一篇 就OK. 准备工作 1. 用QtDesigner画 ...
- HTTP_CLIENT_IP、HTTP_X_FORWARDED_FOR、REMOTE_ADDR
REMOTE_ADDR 是你的客户端跟你的服务器"握手"时候的IP.如果使用了"匿名代理",REMOTE_ADDR将显示代理服务器的IP. HTTP_CLIEN ...
- CF201C Fragile Bridges TJ
本题解依旧发布于洛谷,如果您能点个赞的话--(逃 前言 题目链接 正解:动态规划 思路不是很好想,想出来了应该就没有多大问题了,但是需要处理的细节较多,再加上水水的样例,难度应该是偏难的.个人感觉应该 ...
- Flutter学习(7)——网络请求框架Dio简单使用
原文地址: Flutter学习(7)--网络请求框架Dio简单使用 | Stars-One的杂货小窝 Flutter系列学习之前都是在个人博客发布,感兴趣可以过去看看 网络请求一般APP都是需要的,在 ...
- UE4 Slate控件之TreeView 使用例子(一)
TreeView例子 先从Contruct中往子Slot添加Widget,先声明指向STreeView的指针,后续方便进行视图的一些操作 TSharedPtr<STreeView<TSha ...
- flink clickhouse-jdbc和flink-connector 写入数据到clickhouse因为jar包冲突导致的60 seconds.Please check if the requested resources are available in the YARN cluster和Could not resolve ResourceManager address akka报错血案
一.问题现象,使用flink on yarn 模式,写入数据到clickhouse,但是在yarn 集群充足的情况下一直报:Deployment took more than 60 seconds. ...
- MIT6.828 La5 File system, Spawn and Shell
Lab 5: File system, Spawn and Shell 1. File system preliminaries 在lab中我们要使用的文件系统比大多数"真实"文件 ...
- rabbitMQ批量删除指定的队列
首先进入到rabbitmq目录下的sbin目录 方法1: ./rabbitmqctl list_queues| grep helloQueue | awk '{print $1}' | xargs - ...