Inception is an automated MySQL operations tool that combines SQL auditing, execution, backup, and rollback-statement generation in one package. It supports mainstream versions such as MySQL 5.5, 5.6, and Percona Server.
Test environment: CentOS 6.9
Download the source code
[root@MySQL01 ~]# cd /fire/
[root@MySQL01 fire]# mkdir inception
[root@MySQL01 fire]# cd inception/
[root@MySQL01 inception]# git clone https://github.com/mysql-inception/inception.git
Initialized empty Git repository in /fire/inception/inception/.git/
remote: Counting objects: 2018, done.
remote: Total 2018 (delta 0), reused 0 (delta 0), pack-reused 2018
Receiving objects: 100% (2018/2018), 11.80 MiB | 37 KiB/s, done.
Resolving deltas: 100% (522/522), done.
Install the dependencies
yum install bison
yum install cmake
yum install ncurses-devel
yum install openssl-devel
yum install gcc-c++ libgcc gcc
Build and install by running the sh inception_build.sh debug script
[root@MySQL01 inception]# sh inception_build.sh debug
…
-- Installing: /fire/inception/inception/debug/mysql/man/man1/mysql.1
-- Installing: /fire/inception/inception/debug/mysql/man/man1/mysql_fix_extensions.1
-- Installing: /fire/inception/inception/debug/mysql/man/man1/mysqltest.1
-- Installing: /fire/inception/inception/debug/mysql/man/man1/mysql_client_test.1
-- Installing: /fire/inception/inception/debug/mysql/man/man1/mysql_waitpid.1
-- Installing: /fire/inception/inception/debug/mysql/man/man1/mysql.server.1
-- Installing: /fire/inception/inception/debug/mysql/man/man8/mysqld.8
-- Installing: /fire/inception/inception/debug/mysql/support-files/solaris/postinstall-solaris
By default, everything is installed under /fire/inception/inception/debug/mysql.
Edit the configuration file
[root@MySQL01 script]# vim /etc/inc.cnf
[inception]
general_log=1
general_log_file=/var/inception/log/inception.log
port=6690
socket=/fire/inception/inception/debug/mysql/inc.socket
character-set-client-handshake=0
character-set-server=utf8
# supported MySQL character set
inception_support_charset=utf8
inception_remote_backup_host=192.168.56.102
inception_remote_backup_port=3306
inception_remote_system_user=inception
inception_remote_system_password=inception
#inception_remote_charset=utf8mb4
inception_enable_nullable=0
inception_check_primary_key=1
inception_check_column_comment=1
inception_check_table_comment=1
inception_enable_blob_type=1
inception_check_column_default_value=1
# OSC
inception_osc_on=1
inception_osc_min_table_size=1
inception_osc_bin_dir=/script/percona-toolkit-3.0.2/bin
inception_osc_check_interval=5
inception_osc_chunk_time=0.1
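Since inception_osc_on=1 makes Inception run large ALTERs through pt-online-schema-change, it is worth confirming the binary actually exists under inception_osc_bin_dir before the first DDL. A minimal sketch (the helper script name is made up; the path is taken from the config above):
# check_osc_bin.py -- quick sanity check, not part of Inception itself
import os

# path taken from inception_osc_bin_dir in /etc/inc.cnf
osc_bin = os.path.join('/script/percona-toolkit-3.0.2/bin', 'pt-online-schema-change')

if os.access(osc_bin, os.X_OK):
    print 'pt-online-schema-change found and executable'
else:
    print 'missing or not executable: %s -- check inception_osc_bin_dir' % osc_bin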
Start Inception
# /fire/inception/inception/debug/mysql/bin/Inception --defaults-file=/etc/inc.cnf
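Inception speaks the MySQL protocol, so the simplest way to confirm the service is listening on port 6690 is to connect with any MySQL client or driver. A minimal MySQLdb sketch follows; the inception get variables statement is taken from the Inception documentation (if it is not available in your build, the inception get osc processlist statement used later in this article serves as a connectivity test as well):
# ping_inception.py -- verify the Inception service answers on port 6690
import MySQLdb

conn = MySQLdb.connect(host='127.0.0.1', port=6690, user='', passwd='', db='')
cur = conn.cursor()
cur.execute('inception get variables')   # lists Inception's own variables
for row in cur.fetchall():
    print row
cur.close()
conn.close()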
Execute SQL
Write a Python 2 script that adds a column to a large table containing 1,000,000 rows:
[root@MySQL01 script]# vim inception.py
#!/usr/bin/python2.6
# -*- coding: utf-8 -*-
import MySQLdb

# The magic comment carries the connection info and options for the target server;
# everything between inception_magic_start and inception_magic_commit is audited
# and, with --execute=1, executed.
sql = '/*--user=neo;--password=neo;--host=192.168.56.101;--execute=1;--port=3306;*/\
inception_magic_start;\
use test;\
alter table item_order add loc5 varchar(30) not null default \'xxx\' comment \'efg\';\
inception_magic_commit;'

try:
    # connect to the Inception service itself (port 6690), not the target MySQL instance
    conn = MySQLdb.connect(host='127.0.0.1', user='', passwd='', db='', port=6690)
    cur = conn.cursor()
    ret = cur.execute(sql)
    result = cur.fetchall()
    num_fields = len(cur.description)
    field_names = [i[0] for i in cur.description]
    print field_names
    for row in result:
        print row[0], '|', row[1], '|', row[2], '|', row[3], '|', row[4], '|', \
              row[5], '|', row[6], '|', row[7], '|', row[8], '|', row[9], '|', row[10]
    cur.close()
    conn.close()
except MySQLdb.Error, e:
    print "Mysql Error %d: %s" % (e.args[0], e.args[1])
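For audit-only runs (no execution, no backup), the Inception magic comment also supports a check mode; the upstream documentation shows it selected with --enable-check instead of an execute option. The flag below is therefore an assumption taken from those docs rather than from this article's setup, so verify the exact option name against your Inception build; everything else matches the script above:
# audit-only variant of the magic comment; --enable-check is assumed from the
# Inception documentation -- confirm the exact option name for your version
check_sql = '/*--user=neo;--password=neo;--host=192.168.56.101;--enable-check;--port=3306;*/\
inception_magic_start;\
use test;\
alter table item_order add loc5 varchar(30) not null default \'xxx\' comment \'efg\';\
inception_magic_commit;'
# feed check_sql through the same MySQLdb cursor as above; Inception returns the
# audit result set without touching the target data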
Run the script
[root@MySQL01 script]# python2.6 inception.py
Log in to Inception to watch the OSC execution progress
# /mysql_software_56/bin/mysql -uroot -h 127.0.0.1 -P 6690
mysql> inception get osc processlist\G
*************************** 1. row ***************************
DBNAME: test
TABLENAME: item_order
COMMAND: alter table item_order add loc5 varchar(30) not null default 'xxx' comment 'efg'
SQLSHA1: *862B1979B3751217FE56799A0216A2629F2FFD4C
PERCENT: 100
REMAINTIME: 00:00
INFOMATION: No slaves found. See --recursion-method if host MySQL01 has slaves.
Not checking slave lag because no slaves were found and --check-slave-lag was not specified.
Operation, tries, wait:
analyze_table, 10, 1
copy_rows, 10, 0.25
create_triggers, 10, 1
drop_triggers, 10, 1
swap_tables, 10, 1
update_foreign_keys, 10, 1
Not updating foreign keys because --alter-foreign-keys-method=none. Foreign keys that reference the table will no longer work.
Altering `test`.`item_order`...
Creating new table...
CREATE TABLE `test`.`_item_order_new` (
`order_id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '订单编号',
`loc_id` tinyint(3) unsigned NOT NULL COMMENT '地区编号',
`order_create_date` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '订单生成日期',
`order_expire_date` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00' COMMENT '订单过期日期',
`item_id` int(10) unsigned NOT NULL COMMENT '商品编号',
`item_cnt` int(10) unsigned NOT NULL COMMENT '商品数量',
`order_status` tinyint(3) unsigned NOT NULL COMMENT '订单状态,0-失效,1-交易成功',
`tran_amount` bigint(20) unsigned NOT NULL COMMENT '交易金额',
PRIMARY KEY (`order_id`),
KEY `idx_order_loc_status` (`loc_id`,`order_status`,`order_expire_date`),
KEY `idx_order_loc_exp` (`loc_id`,`order_expire_date`),
KEY `idx_order_stat_loc_item` (`order_status`,`loc_id`,`item_id`,`tran_amount`),
KEY `idx_item_id` (`item_id`)
) ENGINE=InnoDB AUTO_INCREMENT=1000001 DEFAULT CHARSET=utf8 COMMENT='订单表'
Created new table test._item_order_new OK.
Altering new table...
ALTER TABLE `test`.`_item_order_new` add loc5 varchar(30) not null default 'xxx' comment 'efg'
Altered `test`.`_item_order_new` OK.
2018-04-07T02:03:33 Creating triggers...
CREATE TRIGGER `pt_osc_test_item_order_del` AFTER DELETE ON `test`.`item_order` FOR EACH ROW DELETE IGNORE FROM `test`.`_item_order_new` WHERE `test`.`_item_order_new`.`order_id` = OLD.`order_id`
CREATE TRIGGER `pt_osc_test_item_order_upd` AFTER UPDATE ON `test`.`item_order` FOR EACH ROW BEGIN DELETE IGNORE FROM `test`.`_item_order_new` WHERE !(OLD.`order_id` = NEW.`order_id`) AND `test`.`_item_order_new`.`order_id` = OLD.`order_id`;REPLACE INTO `test`.`_item_order_new` (`order_id`, `loc_id`, `order_create_date`, `order_expire_date`, `item_id`, `item_cnt`, `order_status`, `tran_amount`) VALUES (NEW.`order_id`, NEW.`loc_id`, NEW.`order_create_date`, NEW.`order_expire_date`, NEW.`item_id`, NEW.`item_cnt`, NEW.`order_status`, NEW.`tran_amount`);END
CREATE TRIGGER `pt_osc_test_item_order_ins` AFTER INSERT ON `test`.`item_order` FOR EACH ROW REPLACE INTO `test`.`_item_order_new` (`order_id`, `loc_id`, `order_create_date`, `order_expire_date`, `item_id`, `item_cnt`, `order_status`, `tran_amount`) VALUES (NEW.`order_id`, NEW.`loc_id`, NEW.`order_create_date`, NEW.`order_expire_date`, NEW.`item_id`, NEW.`item_cnt`, NEW.`order_status`, NEW.`tran_amount`)
2018-04-07T02:03:33 Created triggers OK.
2018-04-07T02:03:33 Copying approximately 1000219 rows...
INSERT LOW_PRIORITY IGNORE INTO `test`.`_item_order_new` (`order_id`, `loc_id`, `order_create_date`, `order_expire_date`, `item_id`, `item_cnt`, `order_status`, `tran_amount`) SELECT `order_id`, `loc_id`, `order_create_date`, `order_expire_date`, `item_id`, `item_cnt`, `order_status`, `tran_amount` FROM `test`.`item_order` FORCE INDEX(`PRIMARY`) WHERE ((`order_id` >= ?)) AND ((`order_id` <= ?)) LOCK IN SHARE MODE /*pt-online-schema-change 5178 copy nibble*/
SELECT /*!40001 SQL_NO_CACHE */ `order_id` FROM `test`.`item_order` FORCE INDEX(`PRIMARY`) WHERE ((`order_id` >= ?)) ORDER BY `order_id` LIMIT ?, 2 /*next chunk boundary*/
2018-04-07T02:05:07 Copied rows OK.
2018-04-07T02:05:07 Swapping tables...
RENAME TABLE `test`.`item_order` TO `test`.`_item_order_old`, `test`.`_item_order_new` TO `test`.`item_order`
2018-04-07T02:05:10 Swapped original and new tables OK.
2018-04-07T02:05:10 Dropping old table...
SET foreign_key_checks=0
DROP TABLE IF EXISTS `test`.`_item_order_old`
2018-04-07T02:05:11 Dropped old table `test`.`_item_order_old` OK.
2018-04-07T02:05:11 Dropping triggers...
DROP TRIGGER IF EXISTS `test`.`pt_osc_test_item_order_del`;
DROP TRIGGER IF EXISTS `test`.`pt_osc_test_item_order_upd`;
DROP TRIGGER IF EXISTS `test`.`pt_osc_test_item_order_ins`;
2018-04-07T02:05:11 Dropped triggers OK.
# Event Count
# ====== =====
# INSERT 727
Successfully altered `test`.`item_order`.
1 row in set (0.00 sec)
The statement finishes executing
[root@MySQL01 script]# python2.6 inception.py
['ID', 'stage', 'errlevel', 'stagestatus', 'errormessage', 'SQL', 'Affected_rows', 'sequence', 'backup_dbname', 'execute_time', 'sqlsha1']
1 | RERUN | 0 | Execute Successfully | None | 2 | EXECUTED | 0 | Execute Successfully
Backup successfully | None |
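The raw tuples printed above are hard to read. Because cur.description already provides the column names (ID, stage, errlevel, stagestatus, errormessage, SQL, Affected_rows, sequence, backup_dbname, execute_time, sqlsha1), a small drop-in replacement for the print loop in inception.py can pair each value with its name:
# drop-in replacement for the print loop: show named fields for each result row
for row in result:
    record = dict(zip(field_names, row))
    print record['stage'], '|', record['stagestatus'], '|', record['errormessage'], '|', record['SQL']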
Conditions for generating backups and rollback statements:
The target (online) server must have binlog enabled; log_bin, log_bin_index and the other binlog-related parameters must be set at startup, otherwise no backup or rollback statements are generated.
The binlog_format parameter must be set to mixed or row.
The server_id parameter must be set to a value other than 0 or 1.
mysql> show global variables like '%log%bin%';
+---------------------------------+------------------------------------+
| Variable_name                   | Value                              |
+---------------------------------+------------------------------------+
| log_bin                         | ON                                 |
| log_bin_basename                | /mysql_56_3306/log/mysql-bin       |
| log_bin_index                   | /mysql_56_3306/log/mysql-bin.index |
| log_bin_trust_function_creators | ON                                 |
| log_bin_use_v1_row_events       | OFF                                |
| sql_log_bin                     | ON                                 |
+---------------------------------+------------------------------------+
6 rows in set (0.00 sec)
mysql> show global variables like 'binlog_format';
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| binlog_format | ROW   |
+---------------+-------+
1 row in set (0.00 sec)
mysql> show global variables like 'server_id';
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| server_id     | 102   |
+---------------+-------+
1 row in set (0.00 sec)
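The three prerequisites above can also be checked from a script against the target server (192.168.56.101 here). A minimal sketch, assuming the neo account used in the magic comment is allowed to run SHOW GLOBAL VARIABLES:
# verify_binlog_prereqs.py -- check the backup/rollback prerequisites on the target
import MySQLdb

conn = MySQLdb.connect(host='192.168.56.101', port=3306, user='neo', passwd='neo')
cur = conn.cursor()

def global_var(name):
    # MySQLdb interpolates the parameter client-side, producing e.g. LIKE 'log_bin'
    cur.execute("SHOW GLOBAL VARIABLES LIKE %s", (name,))
    row = cur.fetchone()
    return row[1] if row else None

print 'log_bin       :', global_var('log_bin')        # must be ON
print 'binlog_format :', global_var('binlog_format')  # must be MIXED or ROW
print 'server_id     :', global_var('server_id')      # must not be 0 or 1

cur.close()
conn.close()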
Check the backup tables
mysql> show databases;
+--------------------------+
| Database                 |
+--------------------------+
| information_schema       |
| 192_168_56_101_3306_test |
| inception                |
| mysql                    |
| performance_schema       |
| sale                     |
| test                     |
+--------------------------+
8 rows in set (0.00 sec)
mysql> use 192_168_56_101_3306_test
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A
Database changed
mysql> show tables;
+------------------------------------+
| Tables_in_192_168_56_101_3306_test |
+------------------------------------+
| $_$inception_backup_information$_$ |
| item_order                         |
+------------------------------------+
2 rows in set (0.00 sec)
mysql> select * from item_order;
+----+-----------------------------------------------------+-----------------+
| id | rollback_statement                                  | opid_time       |
+----+-----------------------------------------------------+-----------------+
|  1 | ALTER TABLE `test`.`item_order` DROP COLUMN `loc5`; | 1523037912_28_1 |
+----+-----------------------------------------------------+-----------------+
1 row in set (0.00 sec)
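The rollback_statement column holds a ready-to-run statement that undoes the change. Below is a minimal sketch of replaying it on the target server; the accounts come from this article's config and magic comment, and in practice you would select only the opid_time of the operation you actually want to undo:
# apply_rollback.py -- fetch rollback statements from the remote backup server and
# replay them on the target. Illustrative sketch only; review before running.
import MySQLdb

backup = MySQLdb.connect(host='192.168.56.102', port=3306, user='inception',
                         passwd='inception', db='192_168_56_101_3306_test')
target = MySQLdb.connect(host='192.168.56.101', port=3306, user='neo',
                         passwd='neo', db='test')
bcur, tcur = backup.cursor(), target.cursor()

# pick the rollback statement(s) of the operation to undo by its opid_time
bcur.execute("SELECT rollback_statement FROM item_order WHERE opid_time = %s",
             ('1523037912_28_1',))
for (stmt,) in bcur.fetchall():
    print 'executing rollback:', stmt
    tcur.execute(stmt.strip().rstrip(';'))  # strip the trailing semicolon before sending

for c in (bcur, tcur):
    c.close()
backup.close()
target.close()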