################################################################################
# This test causes two consecutive group view changes on recovery in a failure
# situation. First the only donor leaves (first view change) and then, as a
# consequence, the joiner also leaves (second view change) as it cannot recover.
#
# Test:
# 0) The test requires two servers.
# 1) Start group replication on server1. Add some data for recovery.
# 2) Set a bad password on server2 so it cannot recover.
#    Also set only 2 recovery attempts with a sleep time of 10 seconds.
# 3) Start group replication on server2 and wait for it to fail once.
# 4) Make the donor (server1) leave.
# 5) Wait for the joiner (server2) to be in ERROR state.
# 6) Check all is fine.
# 7) Clean up.
################################################################################

--source include/have_debug.inc
--source include/have_debug_sync.inc
--let $group_replication_group_name= 6749cab0-93ae-11e5-a837-0800200c9a66
--source ../inc/have_group_replication_plugin.inc
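# Do not start group replication automatically on all servers; each server
# is started explicitly below so the failure scenario can be staged.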
--let $rpl_skip_group_replication_start= 1
--source ../inc/group_replication.inc


--echo #
--echo # Start group replication on server 1
--echo #

--connection server1
--echo server1
--source ../inc/start_and_bootstrap_group_replication.inc

# Add some data for recovery
CREATE TABLE t1 (c1 INT NOT NULL PRIMARY KEY) ENGINE=InnoDB;

--echo #
--echo # Set server 2 options to fail on recovery
--echo #

--connection server2
--echo server2

SET @debug_save_rec_int= @@GLOBAL.group_replication_recovery_reconnect_interval;
SET @debug_save_ret_count= @@GLOBAL.group_replication_recovery_retry_count;

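# CHANGE MASTER with an inline password raises a warning about storing
# credentials insecurely; it is expected here, so suppress it.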
--disable_warnings
CHANGE MASTER TO MASTER_PASSWORD='bad_password' FOR CHANNEL 'group_replication_recovery';
--enable_warnings

--eval SET GLOBAL group_replication_recovery_reconnect_interval= 10 # seconds
--eval SET GLOBAL group_replication_recovery_retry_count= 2
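# With these settings server2 gets two donor connection attempts, 10 seconds
# apart, before recovery gives up.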

--echo #
--echo # Configure server 2 so that 2 views are received when recovery fails
--echo #

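# The expected errors are suppressed with binary logging disabled so the
# mtr.add_suppression() calls are not replicated to the group.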
set session sql_log_bin=0;
call mtr.add_suppression("There was an error when connecting to the donor*");
call mtr.add_suppression("For details please check performance_schema.replication_connection_status table and error log messages of Slave I/O for channel group_replication_recovery.");
call mtr.add_suppression("Maximum number of retries when*");
call mtr.add_suppression("Fatal error during the Recovery process of Group Replication.*");
call mtr.add_suppression("The member is leaving a group without being on one");
# On slow runs (e.g. under valgrind) this can happen because the recovery
# thread's connection may take longer than 5 seconds, so server1 may
# already be stopped before the reconnection attempt.
# TODO: the sleeps below should be replaced by debug sync points.
# For now this ensures we get coverage in most runs and does not fail
# on valgrind.
call mtr.add_suppression("All donors left. Aborting group replication recovery.*");
set session sql_log_bin=1;

SET @debug_save= @@GLOBAL.DEBUG;
SET @@GLOBAL.DEBUG='d,recovery_thread_wait_before_cleanup';
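# The debug point makes the recovery thread pause before its cleanup phase;
# it is released later by the "signal.recovery_end_end" DEBUG_SYNC signal,
# letting the test control when recovery terminates.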

--eval SET GLOBAL group_replication_group_name= "$group_replication_group_name"
--source include/start_group_replication_command.inc

--echo #
--echo # Give server 2 time to fail the connection once
--echo # Stop group replication on the donor (server1)
--echo #

# give it time to fail
--sleep 5

--connection server1
--echo server1
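# Stopping the only donor triggers the first view change seen by the joiner.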
--source include/stop_group_replication.inc

# Give it time to fail and leave, as there are no donors.
--sleep 10

--echo #
--echo # Watch server 2 enter an error state.
--echo #

--connection server2
--echo server2

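# Release the recovery thread paused at recovery_thread_wait_before_cleanup
# and restore the default debug flags.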
SET DEBUG_SYNC= "now SIGNAL signal.recovery_end_end";
SET @@GLOBAL.DEBUG= @debug_save;

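# gr_wait_for_member_state.inc polls performance_schema until the member
# reaches the requested state, conceptually:
#   SELECT MEMBER_STATE FROM performance_schema.replication_group_members
#     WHERE MEMBER_ID= @@GLOBAL.server_uuid;  # expect ERROR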
--let $group_replication_member_state= ERROR
--source ../inc/gr_wait_for_member_state.inc

--echo #
--echo # Check all is fine
--echo #

--connection server1
--echo server1
--source ../inc/start_and_bootstrap_group_replication.inc

--connection server2
--echo server2

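# Restore valid recovery credentials and rejoin: with server1 back online
# as a donor, recovery now succeeds.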
--source include/stop_group_replication.inc
--disable_warnings
CHANGE MASTER TO MASTER_USER='root', MASTER_PASSWORD='' FOR CHANNEL 'group_replication_recovery';
--enable_warnings
--source include/start_group_replication.inc

INSERT INTO t1 VALUES (1);

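# Wait until the transaction is applied on all members before asserting.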
--source include/rpl_sync.inc

--let $assert_text= The table should contain 1 element
--let $assert_cond= [SELECT COUNT(*) FROM t1] = 1
--source include/assert.inc

DROP TABLE t1;

--echo #
--echo # Cleanup
--echo #

SET @@GLOBAL.group_replication_recovery_reconnect_interval= @debug_save_rec_int;
SET @@GLOBAL.group_replication_recovery_retry_count= @debug_save_ret_count;

--source ../inc/group_replication_end.inc
