################################################################################
# This test evaluates that whenever the donor has purged GTIDs that cause
# errors in the joiner, a failover to another donor will happen.
#
# The test steps are:
#   0) The test requires three servers.
#   1) Bootstrap the group on server1. Add some data. Start GR on server2.
#   2) Purge some GTIDs on server1 by flushing and purging the binary log.
#      Create a replication user for recovery.
#   3) Start server3 with the configured replication user and watch it fail as:
#      1. Server1 has purged GTIDs
#      2. Server2 does not have the correct replication user
#   4) Create the recovery user on server2.
#      The joiner (server3) should now connect to server2 when failing over.
#   5) The joiner (server3) should become online. Validate data.
#   6) Clean up.
################################################################################

--source ../inc/have_group_replication_plugin.inc
--let $rpl_skip_group_replication_start= 1
--let $rpl_server_count= 3
--source ../inc/group_replication.inc
--source include/have_perfschema.inc

--let $recovery_user= recovery_user
--let $recovery_password= recovery_password

--echo #
--echo # Setup a new 2 member group
--echo #

--connection server1
--source ../inc/start_and_bootstrap_group_replication.inc

SET SESSION sql_log_bin= 0;
CALL mtr.add_suppression("Cannot replicate to server with server_uuid*");
SET SESSION sql_log_bin= 1;

# Insert some data
CREATE TABLE t1 (c1 INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);

--connection server2
--source include/start_group_replication.inc

--echo #
--echo # Purge member 1 binlog after a flush and create a replication user
--echo #

--connection server1

--let $server_binlog_file_prev= query_get_value(SHOW MASTER STATUS, File, 1)

FLUSH BINARY LOGS;

--let $server_binlog_file_cur= query_get_value(SHOW MASTER STATUS, File, 1)

#
# At this point the dump thread spawned to serve the recovery procedure
# for server2 must have stopped or is about to. We make sure that it has,
# so that we can purge the binary log successfully - note that
# start_group_replication.inc waits for the member to be ONLINE.
#
# Otherwise the test could fail, since the purge command might not be
# able to remove the file while the lingering dump thread is still
# clinging on to it.
#
--source include/stop_dump_threads.inc

#
# And just to make sure that the file is indeed not being used.
#
--let $wait_file_name=$server_binlog_file_prev
--source include/wait_for_file_closed.inc

--disable_query_log
--eval PURGE BINARY LOGS TO '$server_binlog_file_cur'
--enable_query_log

SET SESSION sql_log_bin= 0;
--eval CREATE USER "$recovery_user" IDENTIFIED BY "$recovery_password"
--eval GRANT REPLICATION SLAVE ON *.* TO "$recovery_user"
FLUSH PRIVILEGES;
SET SESSION sql_log_bin= 1;
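
#
# Optional sanity check (added here as a sketch, not part of the original
# test plan): after the purge, server1 should report a non-empty GTID_PURGED,
# which is exactly what will force the joiner's recovery to fail over to
# another donor. This assumes the purged binary log contained GTID-tagged
# transactions (the CREATE TABLE and INSERT above).
#
--let $assert_text= GTID_PURGED on server1 should not be empty after the purge
--let $assert_cond= [SELECT @@GLOBAL.GTID_PURGED != ""] = 1
--source include/assert.inc
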
--echo #
--echo # Start recovery and watch it fail for a bit as:
--echo # 1) Server 1 has purged GTIDs
--echo # 2) Server 2 does not have the correct replication user
--echo #

--connection server3

SET SESSION sql_log_bin= 0;
call mtr.add_suppression("There was an error when connecting to the donor*");
call mtr.add_suppression("For details please check performance_schema.replication_connection_status table and error log messages of Slave I/O for channel group_replication_recovery.");
call mtr.add_suppression("Slave I/O for channel 'group_replication_recovery': Got fatal error 1236*");
call mtr.add_suppression("Error while starting the group replication recovery receiver/applier threads");
call mtr.add_suppression("Slave I/O for channel 'group_replication_recovery': Master command COM_REGISTER_SLAVE failed: Access denied for user 'recovery_user'.*");
call mtr.add_suppression("Slave I/O for channel 'group_replication_recovery': Master command COM_REGISTER_SLAVE failed: failed registering on master, reconnecting to try again.*");
call mtr.add_suppression("Slave I/O thread couldn't register on master");
call mtr.add_suppression("Error while creating the group replication recovery channel with donor.*");
call mtr.add_suppression("Error when configuring the group recovery connection to the donor.*");
SET SESSION sql_log_bin= 1;

--disable_warnings
--eval CHANGE MASTER TO MASTER_USER= '$recovery_user', MASTER_PASSWORD= '$recovery_password' FOR CHANNEL 'group_replication_recovery'
--enable_warnings

SET @debug_save_rec_int= @@GLOBAL.group_replication_recovery_reconnect_interval;
--eval SET GLOBAL group_replication_recovery_reconnect_interval= 1 # seconds

--disable_query_log
--eval SET GLOBAL group_replication_group_name= "$group_replication_group_name";
--enable_query_log

--source include/start_group_replication_command.inc

# Give it time to fail several times
--sleep 5

--let $group_replication_member_state= RECOVERING
--source ../inc/gr_wait_for_member_state.inc

--echo #
--echo # Create the recovery user on server 2
--echo # The joiner should now be able to connect to server 2 when failing over.
--echo #

--connection server2

SET SESSION sql_log_bin= 0;
--eval CREATE USER "$recovery_user" IDENTIFIED BY "$recovery_password"
--eval GRANT REPLICATION SLAVE ON *.* TO "$recovery_user"
FLUSH PRIVILEGES;
SET SESSION sql_log_bin= 1;

--connection server3

--let $group_replication_member_state= ONLINE
--source ../inc/gr_wait_for_member_state.inc

--let $assert_text= On the recovered member, the table should exist and have 1 element
--let $assert_cond= [SELECT COUNT(*) FROM t1] = 1;
--source include/assert.inc

--echo #
--echo # Cleaning up
--echo #

SET @@GLOBAL.group_replication_recovery_reconnect_interval= @debug_save_rec_int;

DROP TABLE t1;

--connection server2
SET SESSION sql_log_bin= 0;
--eval DROP USER "$recovery_user"
SET SESSION sql_log_bin= 1;

--connection server1
SET SESSION sql_log_bin= 0;
--eval DROP USER "$recovery_user"
SET SESSION sql_log_bin= 1;

--source ../inc/group_replication_end.inc
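
#
# Debugging aid (kept as comments so the recorded result stays unchanged):
# if the failover misbehaves, the joiner's recovery channel and the group
# membership can be inspected on server3 with queries along these lines:
#
#   SELECT CHANNEL_NAME, SERVICE_STATE, LAST_ERROR_MESSAGE
#     FROM performance_schema.replication_connection_status
#     WHERE CHANNEL_NAME = 'group_replication_recovery';
#
#   SELECT MEMBER_HOST, MEMBER_PORT, MEMBER_STATE
#     FROM performance_schema.replication_group_members;
#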