需要开放的端口:(可以先关闭防火墙和selinux)
3306 4444 4567 4568
Node Host IP
Node 1 pxc1 10.50.215.29
Node 2 pxc2 10.50.215.217
Node 3 pxc3 10.50.215.240
(注:节点IP需与下文配置文件中 wsrep_cluster_address 的地址保持一致)
setenforce 0;systemctl stop iptables;systemctl stop firewalld
一、所有节点的操作
1:配置Percona存储库
cd /opt;wget https://repo.percona.com/yum/percona-release-latest.noarch.rpm --no-check-certificate
rpm -ivh percona-release-latest.noarch.rpm
[root@beiyong opt]# yum list | grep percona-xtrabackup
percona-xtrabackup.x86_64 2.3.10-1.el7 percona-release-x86_64
percona-xtrabackup-22.x86_64 2.2.13-1.el7 percona-release-x86_64
percona-xtrabackup-22-debuginfo.x86_64 2.2.13-1.el7 percona-release-x86_64
percona-xtrabackup-24.x86_64 2.4.13-1.el7 percona-release-x86_64
percona-xtrabackup-24-debuginfo.x86_64 2.4.13-1.el7 percona-release-x86_64
percona-xtrabackup-debuginfo.x86_64 2.3.10-1.el7 percona-release-x86_64
percona-xtrabackup-test.x86_64 2.3.10-1.el7 percona-release-x86_64
percona-xtrabackup-test-22.x86_64 2.2.13-1.el7 percona-release-x86_64
percona-xtrabackup-test-24.x86_64 2.4.13-1.el7 percona-release-x86_64
2:安装Percona XtraDB Cluster软件包:
yum clean all;yum -y install epel-release
yum install Percona-XtraDB-Cluster-57 -y (如有报错某个软件包没有下载,请复制软件包名称继续yum下载)
3:修改配置文件(三台节点都修改好)
[mysqld]
port = 3306
log-error = /home/mysql/log/error.log
log_warnings = 2
slow_query_log = ON
slow_query_log_file = /home/mysql/log/slow.log
long_query_time = 2
datadir = /home/mysql/data
skip-external-locking
key_buffer_size = 256M
max_allowed_packet = 500M
table_open_cache = 1024
sort_buffer_size = 4M
net_buffer_length = 8K
read_buffer_size = 4M
read_rnd_buffer_size = 512K
myisam_sort_buffer_size = 64M
thread_cache_size = 128
query_cache_size = 128M
tmp_table_size = 128M
performance_schema_max_table_instances = 6000
sql-mode=NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
event_scheduler = 1 ##只一台开启即可
#设置自增ID
auto_increment_increment = 3
auto_increment_offset = 2 --保持id的增长值跳跃增长不冲突,每个节点
explicit_defaults_for_timestamp = true
max_connections = 1000
max_connect_errors = 100
open_files_limit = 65535
log_bin = /home/mysql/binlog/mysql-bin
server-id = 2 --保持id值不一样,每个节点
expire_logs_days = 5
innodb_file_per_table = 1
innodb_data_home_dir = /home/mysql/data
innodb_data_file_path = ibdata1:10M:autoextend
innodb_log_group_home_dir = /home/mysql/data
innodb_buffer_pool_size = 8G
innodb_log_file_size = 256M
innodb_log_buffer_size = 8M
innodb_flush_log_at_trx_commit = 1
innodb_lock_wait_timeout = 50
#PXC设置
wsrep_cluster_address = gcomm://10.50.215.29,10.50.215.217,10.50.215.240 #三台节点的ip,不分先后
wsrep_provider = /usr/lib64/galera3/libgalera_smm.so
wsrep_slave_threads = 2
wsrep_cluster_name = fbs2pxc --创建集群名称,随便名称
wsrep_node_name = pxcNode3 --每个节点不一样
wsrep_node_address = 10.50.215.217 --本机ip
wsrep_sst_auth = "sstuser:sstuserpwd" --SST同步账号,在下文启动第一个节点后创建(见第5步)
#wsrep_sst_method = rsync
wsrep_sst_method = xtrabackup-v2 --同步模式
#pxc_strict_mode=ENFORCING
binlog_format=ROW
default_storage_engine=InnoDB
innodb_autoinc_lock_mode=2
[mysqld_safe]
pid-file = /run/mysqld/mysql.pid
syslog
[mysqldump]
quick
max_allowed_packet = 16M
[mysql]
no-auto-rehash
[myisamchk]
key_buffer_size = 256M
sort_buffer_size = 4M
read_buffer = 2M
write_buffer = 2M
[mysqlhotcopy]
interactive-timeout
!includedir /etc/my.cnf.d
:wq
4:创建以上配置文件中需要的目录,并更改权限
mkdir -p /home/mysql/{data,log,binlog};chown -R mysql:mysql /home/mysql/
5:启动节点1--注意!!!这个只是第一个节点启动的命令
systemctl start [email protected]
(启动时多开一个窗口,观察有没有错误日志 tail -f /home/mysql/log/error.log)
获取初始密码: &7(,Sl=1sa&2
grep 'temporary password' /home/mysql/log/error.log
2019-04-17T13:47:48.758384Z 1 [Note] A temporary password is generated for root@localhost: &7(,Sl=1sa&2
使用初始密码进入数据库,设置root密码:本地登录授权(这边授权以后,其他的节点启动后,数据自动同步过去了)
mysql> ALTER USER 'root'@'localhost' IDENTIFIED BY '123456';flush privileges;
Query OK, 0 rows affected (0.00 sec)
Query OK, 0 rows affected (0.01 sec)
为了使用XtraBackup执行成功的状态快照传输,需要使用适当的权限设置新用户,(这边授权以后,其他的节点启动后,数据自动同步过去了)
mysql> CREATE USER 'sstuser'@'localhost' IDENTIFIED BY 'sstuserpwd';
mysql> GRANT RELOAD, LOCK TABLES, PROCESS, REPLICATION CLIENT ON *.* TO 'sstuser'@'localhost';FLUSH PRIVILEGES;
6:检查集群状态
mysql> show status like 'wsrep%';
+----------------------------------+---------------------------------------------------------+
| Variable_name | Value |
+----------------------------------+---------------------------------------------------------+
| wsrep_local_state_uuid | 67d01e5b-6117-11e9-a1db-3743261561ca |
| wsrep_protocol_version | 9 |
| wsrep_last_applied | 5 |
| wsrep_last_committed | 5 |
| wsrep_replicated | 5 |
| wsrep_replicated_bytes | 1160 |
| wsrep_repl_keys | 5 |
| wsrep_repl_keys_bytes | 160 |
| wsrep_repl_data_bytes | 661 |
| wsrep_repl_other_bytes | 0 |
| wsrep_received | 10 |
| wsrep_received_bytes | 797 |
| wsrep_local_commits | 0 |
| wsrep_local_cert_failures | 0 |
| wsrep_local_replays | 0 |
| wsrep_local_send_queue | 0 |
| wsrep_local_send_queue_max | 1 |
| wsrep_local_send_queue_min | 0 |
| wsrep_local_send_queue_avg | 0.000000 |
| wsrep_local_recv_queue | 0 |
| wsrep_local_recv_queue_max | 2 |
| wsrep_local_recv_queue_min | 0 |
| wsrep_local_recv_queue_avg | 0.100000 |
| wsrep_local_cached_downto | 1 |
| wsrep_flow_control_paused_ns | 0 |
| wsrep_flow_control_paused | 0.000000 |
| wsrep_flow_control_sent | 0 |
| wsrep_flow_control_recv | 0 |
| wsrep_flow_control_interval | [ 173, 173 ] |
| wsrep_flow_control_interval_low | 173 |
| wsrep_flow_control_interval_high | 173 |
| wsrep_flow_control_status | OFF |
| wsrep_cert_deps_distance | 1.000000 |
| wsrep_apply_oooe | 0.000000 |
| wsrep_apply_oool | 0.000000 |
| wsrep_apply_window | 1.000000 |
| wsrep_commit_oooe | 0.000000 |
| wsrep_commit_oool | 0.000000 |
| wsrep_commit_window | 1.000000 |
| wsrep_local_state | 4 |
| wsrep_local_state_comment | Synced | --节点处于Synced状态,它已完全连接并准备进行写入集复制
| wsrep_cert_index_size | 1 |
| wsrep_cert_bucket_count | 22 |
| wsrep_gcache_pool_size | 2952 |
| wsrep_causal_reads | 0 |
| wsrep_cert_interval | 0.000000 |
| wsrep_open_transactions | 0 |
| wsrep_open_connections | 0 |
| wsrep_ist_receive_status | |
| wsrep_ist_receive_seqno_start | 0 |
| wsrep_ist_receive_seqno_current | 0 |
| wsrep_ist_receive_seqno_end | 0 |
| wsrep_incoming_addresses | 10.50.215.29:3306,10.50.215.217:3306,10.50.215.240:3306 |
| wsrep_cluster_weight | 3 |
| wsrep_desync_count | 0 |
| wsrep_evs_delayed | |
| wsrep_evs_evict_list | |
| wsrep_evs_repl_latency | 0/0/0/0/0 |
| wsrep_evs_state | OPERATIONAL |
| wsrep_gcomm_uuid | 67cd0b3f-6117-11e9-bb6d-56841fec47a4 |
| wsrep_cluster_conf_id | 3 |
| wsrep_cluster_size | 1 | --目前只有自己一个节点,等下第二个节点启动成功后,自动加入到节点数字就是2,以此类推
| wsrep_cluster_state_uuid | 67d01e5b-6117-11e9-a1db-3743261561ca |
| wsrep_cluster_status | Primary | --集群正常状态
| wsrep_connected | ON |
| wsrep_local_bf_aborts | 0 |
| wsrep_local_index | 0 |
| wsrep_provider_name | Galera |
| wsrep_provider_vendor | Codership Oy <[email protected]> |
| wsrep_provider_version | 3.35(rddf9876) |
| wsrep_ready | ON |
+----------------------------------+---------------------------------------------------------+
6:启动其他的节点:(启动时多开一个窗口,观察有没有错误日志 tail -f /home/mysql/log/error.log)
systemctl start mysqld
查看是否成功加入集群中
mysql> show status like 'wsrep%';
wsrep_cluster_size 2 --第二个节点加入成功
wsrep_cluster_size 3 --第三个节点加入成功
7:数据同步测试
在某个节点上创建库和插入数据,看看其他节点上数据同步过来没有
[root@PXC-3 ~]# cat /home/mysql/data/grastate.dat
# GALERA saved state
version: 2.1
uuid: 67d01e5b-6117-11e9-a1db-3743261561ca
seqno: -1
safe_to_bootstrap: 0 --三台的值都一样,都为0
第一台启动时有报错:
2019-04-17T13:45:35.077513Z 0 [Note] Shutting down plugin 'ngram'
2019-04-17T13:45:35.077643Z 0 [Note] Shutting down plugin 'partition'
2019-04-17T13:45:35.077656Z 0 [Note] Shutting down plugin 'BLACKHOLE'
2019-04-17T13:45:35.077661Z 0 [Note] Shutting down plugin 'ARCHIVE'
2019-04-17T13:45:35.077666Z 0 [Note] Shutting down plugin 'INNODB_TABLESPACES_SCRUBBING'
2019-04-17T13:45:35.077671Z 0 [Note] Shutting down plugin 'INNODB_TABLESPACES_ENCRYPTION'
2019-04-17T13:45:35.077675Z 0 [Note] Shutting down plugin 'INNODB_SYS_VIRTUAL'
2019-04-17T13:45:35.077679Z 0 [Note] Shutting down plugin 'INNODB_CHANGED_PAGES'
2019-04-17T13:45:35.077683Z 0 [Note] Shutting down plugin 'INNODB_SYS_DATAFILES'
2019-04-17T13:45:35.077687Z 0 [Note] Shutting down plugin 'INNODB_SYS_TABLESPACES'
2019-04-17T13:45:35.077691Z 0 [Note] Shutting down plugin 'INNODB_SYS_FOREIGN_COLS'
2019-04-17T13:45:35.077695Z 0 [Note] Shutting down plugin 'INNODB_SYS_FOREIGN'
2019-04-17T13:45:35.077699Z 0 [Note] Shutting down plugin 'INNODB_SYS_FIELDS'
2019-04-17T13:45:35.077703Z 0 [Note] Shutting down plugin 'INNODB_SYS_COLUMNS'
2019-04-17T13:45:35.077707Z 0 [Note] Shutting down plugin 'INNODB_SYS_INDEXES'
2019-04-17T13:45:35.077710Z 0 [Note] Shutting down plugin 'INNODB_SYS_TABLESTATS'
2019-04-17T13:45:35.077714Z 0 [Note] Shutting down plugin 'INNODB_SYS_TABLES'
2019-04-17T13:45:35.077718Z 0 [Note] Shutting down plugin 'INNODB_FT_INDEX_TABLE'
2019-04-17T13:45:35.077722Z 0 [Note] Shutting down plugin 'INNODB_FT_INDEX_CACHE'
2019-04-17T13:45:35.077727Z 0 [Note] Shutting down plugin 'INNODB_FT_CONFIG'
2019-04-17T13:45:35.077731Z 0 [Note] Shutting down plugin 'INNODB_FT_BEING_DELETED'
2019-04-17T13:45:35.077735Z 0 [Note] Shutting down plugin 'INNODB_FT_DELETED'
2019-04-17T13:45:35.077746Z 0 [Note] Shutting down plugin 'INNODB_FT_DEFAULT_STOPWORD'
2019-04-17T13:45:35.077750Z 0 [Note] Shutting down plugin 'INNODB_METRICS'
2019-04-17T13:45:35.077755Z 0 [Note] Shutting down plugin 'INNODB_TEMP_TABLE_INFO'
2019-04-17T13:45:35.077759Z 0 [Note] Shutting down plugin 'INNODB_BUFFER_POOL_STATS'
2019-04-17T13:45:35.077762Z 0 [Note] Shutting down plugin 'INNODB_BUFFER_PAGE_LRU'
2019-04-17T13:45:35.077766Z 0 [Note] Shutting down plugin 'INNODB_BUFFER_PAGE'
2019-04-17T13:45:35.077771Z 0 [Note] Shutting down plugin 'INNODB_CMP_PER_INDEX_RESET'
2019-04-17T13:45:35.077775Z 0 [Note] Shutting down plugin 'INNODB_CMP_PER_INDEX'
2019-04-17T13:45:35.077779Z 0 [Note] Shutting down plugin 'INNODB_CMPMEM_RESET'
2019-04-17T13:45:35.077783Z 0 [Note] Shutting down plugin 'INNODB_CMPMEM'
解决:
cd /home/mysql/data
rm -rf ibdata1 ib_logfile0 ib_logfile1
再次启动:
systemctl start [email protected]
(启动时多开一个窗口,观察有没有错误日志 tail -f /home/mysql/log/error.log)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 故障模拟 单节点或者双节点故障 ~~~~~~~~~~~~~~~~
一、只停掉节点2:节点1为主
[root@PXC-2 ~]#systemctl stop mysqld
再在节点1和2上查看集群数量和状态:
mysql> show status like 'wsrep%';
…………
wsrep_cluster_size | 2 --剩下两个节点
wsrep_cluster_status | Primary --集群状态正常
恢复:
1:直接启动systemctl start mysqld:
2:如果其他节点的集群状态不正常(即 wsrep_cluster_status != Primary)
恢复:删除数据目录里面的全部内容
[root@PXC-2 ~]# ls /home/mysql/data/
auto.cnf ca.pem client-key.pem grastate.dat ibdata1 ib_logfile1 performance_schema public_key.pem server-key.pem xtrabackup_binlog_pos_innodb xtrabackup_info
ca-key.pem client-cert.pem galera.cache ib_buffer_pool ib_logfile0 mysql private_key.pem server-cert.pem sys xtrabackup_galera_info xtrabackup_master_key_id
[root@PXC-2 ~]# rm -rf /home/mysql/data/*
再启动:systemctl start mysqld
二、 只停掉节点2和3:节点1为主
再在节点1上查看集群数量和状态:
mysql> show status like 'wsrep%';
…………
wsrep_cluster_size | 1 --剩下1个节点
wsrep_cluster_status | Primary --集群状态正常
恢复:直接一个节点一个节点的恢复(数据量大的话,怕数据库压力太大)
systemctl start mysqld
如果集群状态不是正常的值: !=Primary
恢复:删除数据目录里面的全部内容
[root@PXC-2 ~]# ls /home/mysql/data/
auto.cnf ca.pem client-key.pem grastate.dat ibdata1 ib_logfile1 performance_schema public_key.pem server-key.pem xtrabackup_binlog_pos_innodb xtrabackup_info
ca-key.pem client-cert.pem galera.cache ib_buffer_pool ib_logfile0 mysql private_key.pem server-cert.pem sys xtrabackup_galera_info xtrabackup_master_key_id
[root@PXC-2 ~]# rm -rf /home/mysql/data/*
再启动:systemctl start mysqld
三、只停掉节点1,节点1为主
systemctl stop mysqld --这个命令没用的话,也没事,只是等下作为节点启动时先要systemctl stop mysqld再systemctl start mysqld
systemctl stop [email protected]
再在节点2和3上查看集群数量和状态:
mysql> show status like 'wsrep%';
…………
wsrep_cluster_size | 2 --剩下2个节点
wsrep_cluster_status | Primary --集群状态正常
恢复:不能直接这样启动--会报错,但是不影响现有的两个节点集群
[root@PXC-1 ~]# systemctl start [email protected]
Job for [email protected] failed because the control process exited with error code. See "systemctl status [email protected]" and "journalctl -xe" for details.
正确恢复:
[root@PXC-1 ~]# systemctl start mysqld
再在节点2和3上查看集群数量和状态:
mysql> show status like 'wsrep%';
…………
wsrep_cluster_size | 3 --剩下3个节点
wsrep_cluster_status | Primary --集群状态正常
四、如果在主节点1上不小心执行了:systemctl stop mysqld --这个不影响集群
如果在主节点1上不小心执行了:systemctl start mysqld --这个不影响集群
只是会报错一下:
[root@PXC-1 etc]# systemctl start mysqld
Job for mysql.service failed because the control process exited with error code. See "systemctl status mysql.service" and "journalctl -xe" for details.
五、以上模拟的故障这个值全部节点不变:
[root@PXC-3 ~]# cat /home/mysql/data/grastate.dat
# GALERA saved state
version: 2.1
uuid: 67d01e5b-6117-11e9-a1db-3743261561ca
seqno: 5 --值从 -1 变为 5(每停一个节点,某个节点的值变为5)
safe_to_bootstrap: 0 --值不变
六、如果还是节点1作为主启动会直接报错:
[root@PXC-1 ~]# systemctl start [email protected]
Job for [email protected] failed because the control process exited with error code. See "systemctl status [email protected]" and "journalctl -xe" for details.
总结:
[root@PXC-2 ~]# cat /home/mysql/data/grastate.dat
# GALERA saved state
version: 2.1
uuid: 67d01e5b-6117-11e9-a1db-3743261561ca
seqno: -1 --这个值为 -1 时,节点正在正常运行;为 5(正整数)时,节点已停止
safe_to_bootstrap: 0
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 故障模拟 全部节点故障 ~~~~~~~~~~~~~~~~
1:停掉主节点3,再停掉2,再停掉1 --这次节点3是主,这个停的时候用两个命令
[root@PXC-3 ~]# systemctl stop [email protected]
[root@PXC-3 ~]# systemctl stop mysqld --这个命令没用的话,也没事,只是等下作为节点启动时先要systemctl stop mysqld再systemctl start mysqld
再在节点1和2上:查看集群状态:wsrep_cluster_size 2,wsrep_cluster_status Primary
[root@PXC-2 ~]# systemctl stop mysqld
[root@PXC-1 ~]# systemctl stop mysqld
[root@PXC-3 ~]# cat /home/mysql/data/grastate.dat
# GALERA saved state
version: 2.1
uuid: 67d01e5b-6117-11e9-a1db-3743261561ca
seqno: 5 --从-1变成5
safe_to_bootstrap: 0 --没有变化
[root@PXC-2 ~]# cat /home/mysql/data/grastate.dat
# GALERA saved state
version: 2.1
uuid: 67d01e5b-6117-11e9-a1db-3743261561ca
seqno: 5 --从-1变成5
safe_to_bootstrap: 0 --没有变化
[root@PXC-1 ~]# cat /home/mysql/data/grastate.dat
# GALERA saved state
version: 2.1
uuid: 67d01e5b-6117-11e9-a1db-3743261561ca
seqno: 5 --没有变化
safe_to_bootstrap: 1 --最后一个停的值变为1了
恢复:按照正常顺序恢复:
[root@PXC-1 ~]# systemctl start [email protected] --没有报错,集群状态和数量也正常
[root@PXC-2 ~]# systemctl start mysqld --没有报错,集群状态和数量也正常
[root@PXC-3 ~]# systemctl start mysqld --没有报错,集群状态和数量也正常
[root@PXC-1 ~]# cat /home/mysql/data/grastate.dat
# GALERA saved state
version: 2.1
uuid: 67d01e5b-6117-11e9-a1db-3743261561ca
seqno: -1 --恢复到故障之前
safe_to_bootstrap: 0 --恢复到故障之前
[root@PXC-2 ~]# cat /home/mysql/data/grastate.dat
# GALERA saved state
version: 2.1
uuid: 67d01e5b-6117-11e9-a1db-3743261561ca
seqno: -1 --恢复到故障之前
safe_to_bootstrap: 0 --恢复到故障之前
[root@PXC-3 ~]# cat /home/mysql/data/grastate.dat
# GALERA saved state
version: 2.1
uuid: 67d01e5b-6117-11e9-a1db-3743261561ca
seqno: -1 --恢复到故障之前
safe_to_bootstrap: 0 --恢复到故障之前
总结:节点全部挂了时,选择某个节点作为主节点启动:
启动之前查看:
[root@PXC-1 ~]# cat /home/mysql/data/grastate.dat
# GALERA saved state
version: 2.1
uuid: 67d01e5b-6117-11e9-a1db-3743261561ca
seqno: 5 --为正整数(如5)代表节点已停止,为 -1 代表节点正在正常运行
safe_to_bootstrap: 1 --最后一个停的值为1,这个作为主节点启动
集群全部正常时:
[root@PXC-2 ~]# cat /home/mysql/data/grastate.dat
# GALERA saved state
version: 2.1
uuid: 67d01e5b-6117-11e9-a1db-3743261561ca
seqno: -1 --每个节点的值都一样
safe_to_bootstrap: 0 --每个节点的值都一样
更全点的配置:
[mysqld]
pid-file=/var/run/mysqld/mysqld.pid
user=mysql
#端口
port = 3306
binlog_format = ROW
innodb_data_file_path = ibdata1:500M:autoextend
innodb_buffer_pool_size = 3G --根据内存增加最好内存的50%-80%
innodb_buffer_pool_instances = 8
innodb_flush_log_at_trx_commit = 2
sync_binlog = 1
innodb_flush_method = O_DIRECT
innodb_log_files_in_group = 5
innodb_log_file_size = 256M
innodb_file_per_table = 1
innodb_open_files = 65535
#不允许读延迟数据,会影响读性能, ON/OFF
wsrep_causal_reads = OFF
innodb_autoinc_lock_mode = 2
innodb_locks_unsafe_for_binlog = 1
innodb_print_all_deadlocks = ON
innodb_rollback_on_timeout = ON
innodb_io_capacity = 2000
innodb_io_capacity_max = 4000
#ON BINLOG
log_bin = /home/mysql/binlog/mysql-bin
#setting deadlock timeout
innodb_lock_wait_timeout = 120
wait_timeout = 600
#innodb io_threads setting
innodb_write_io_threads = 8
innodb_read_io_threads = 8
#setting time out
interactive_timeout = 600
#setting sort buff
sort_buffer_size = 2M
net_buffer_length = 8K
read_buffer_size = 4M
read_rnd_buffer_size = 8M
myisam_sort_buffer_size = 32M
#关闭查询缓存
query_cache_size = 0
query_cache_type = 0
#BINLOG keep
expire_logs_days = 7
default_storage_engine = InnoDB
character-set-server = utf8
collation-server = utf8_general_ci
max_allowed_packet = 256m
max_connections = 5000
max_connect_errors = 2500
open_files_limit = 65535
log-error = /home/mysql/log/error.log
log_warnings = 2
slow_query_log = ON
slow_query_log_file = /home/mysql/log/slow.log
long_query_time = 2
log_bin_trust_function_creators= on
pxc_strict_mode = PERMISSIVE
wsrep_provider_options = "gcache.size=3G; gcache.page_size=2G; gcs.fc_limit = 256; gcs.fc_factor = 0.8;" --3G,2G,可根据硬盘容量再增加,建议 32G,8G(32G 是 8G 的四倍,32G 约为硬盘容量的 10% 左右)
innodb_sort_buffer_size = 64M
back_log = 5000
#只需要开启自增ID为1的那一台
event_scheduler = 1
datadir = /home/mysql/data #数据存储的路径
#PXC设置
wsrep_cluster_address = gcomm://10.50.215.29,10.50.215.217,10.50.215.240 #三台节点的ip,不分先后,需与前文一致
wsrep_provider = /usr/lib64/galera3/libgalera_smm.so
wsrep_slave_threads = 2
wsrep_cluster_name = fbs2pxc
wsrep_node_name = pxcNode2
wsrep_node_address = 10.50.215.217
wsrep_sst_auth = "sstuser:sstuserpwd"
#wsrep_sst_method = rsync
wsrep_sst_method = xtrabackup-v2
#设置自增ID
auto_increment_increment = 3
auto_increment_offset = 2
server-id = 2
skip-name-resolve
skip-external-locking
sql-mode = "NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION"
[mysqld_safe]
pid-file = /run/mysqld/mysql.pid
syslog
wsrep_recover = 1
#(注:wsrep_recover=1 与 wsrep_recover=on 等价,只需保留一行,重复设置无意义)
[client] #注:MySQL 客户端读取的是 [client] 组,[mysql_client] 不是标准的配置组名
socket = /tmp/mysql.sock