Current File : /www/server/mysql/src/mysql-test/suite/ndb/t/ndb_addnode.test
# The include statement below is a temporary one for tests that are yet to
# be ported to run with InnoDB, but needs to be kept for tests that would
# need MyISAM in the future.
--source include/force_myisam_default.inc

-- source include/have_ndb.inc
-- source include/not_embedded.inc
--result_format 2
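
# Overview: this test exercises online add-node. A new nodegroup is created
# for the "new" data nodes, the existing tables are reorganized onto it, and
# dropping the nodegroup is shown to fail while its nodes still hold data.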

--exec $NDB_MGM -e show

CREATE LOGFILE GROUP lg_1
    ADD UNDOFILE 'undo_1.dat'
    INITIAL_SIZE 4M
    UNDO_BUFFER_SIZE 2M
    ENGINE NDB;

CREATE TABLESPACE ts_1
    ADD DATAFILE 'data_1.dat'
    USE LOGFILE GROUP lg_1
    INITIAL_SIZE 16M
    ENGINE NDB;

create table t1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
create table t2(id int NOT NULL PRIMARY KEY, data char(8))
TABLESPACE ts_1 STORAGE DISK engine=ndb;
# BUG#13714648
create table t5(id int NOT NULL PRIMARY KEY, data char(8)) max_rows=50000000 engine=ndb;
create table t6(id int not null primary key, dat blob, txt text) engine=ndb;

# Get the blob tables for table test.t6
let ndb_database= test;
let ndb_table= t6;
--source suite/ndb/include/ndb_get_blob_tables.inc

load data local infile 'suite/ndb/data/table_data10000.dat' into table t1 fields terminated by ' ' lines terminated by '\n';
load data local infile 'suite/ndb/data/table_data10000.dat' into table t2 fields terminated by ' ' lines terminated by '\n';
load data local infile 'suite/ndb/data/table_data10000.dat' into table t5 fields terminated by ' ' lines terminated by '\n';
load data local infile 'suite/ndb/data/table_data10000.dat' into table t6 fields terminated by ' ' lines terminated by '\n' (id, @data) set dat = repeat(@data, 100), txt = repeat(@data,100);

select count(1) as t1_part_count from information_schema.partitions where table_schema='test' and table_name='t1';
select count(1) as t2_part_count from information_schema.partitions where table_schema='test' and table_name='t2';
select @init_t5_part_count:= count(1) as t5_part_count from information_schema.partitions where table_schema='test' and table_name='t5';
select count(1) as t6_part_count from information_schema.partitions where table_schema='test' and table_name='t6';

## Check details of t5 partitioning
--exec $NDB_DESC -dtest -p -n t5

## Check details of t6 partitioning
--exec $NDB_DESC -dtest -p -n t6 '$bt_test_t6_dat' '$bt_test_t6_txt'

## Create nodegroup for "new" nodes
--exec $NDB_MGM -e "create nodegroup 3,4"

# Cluster is running after adding two ndbd nodes
--exec $NDB_MGM -e show

## Drop the newly created nodegroup
--exec $NDB_MGM -e "drop nodegroup 1"
## and create it again
--exec $NDB_MGM -e "create nodegroup 3,4"
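
# Tables created after the new nodegroup exists get partitions on the new
# nodes right away.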

create table t3(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
create table t4(id int NOT NULL PRIMARY KEY, data char(8))
TABLESPACE ts_1 STORAGE DISK engine=ndb;

insert into t3(id, data) VALUES 
(1,'new'), (2,'new'),(3,'new'),(4,'new'),(5,'new'),
(6,'new'),(7,'new'),(8,'new'),(9,'new'),(10,'new');
insert into t4(id, data) VALUES
(1,'new'), (2,'new'),(3,'new'),(4,'new'),(5,'new'),
(6,'new'),(7,'new'),(8,'new'),(9,'new'),(10,'new');
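
# Redistribute the existing tables onto the new nodegroup: add a column to t1
# in place, reorganize partitions online for t1/t2 (and t6 unless disabled),
# and raise max_rows on t5 so it also gets more partitions.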

alter table t1 algorithm=inplace, add column c int unsigned default null;
alter online table t1 reorganize partition;
alter online table t2 reorganize partition;
alter online table t5 max_rows=300000000;
if (!$DISABLE_BLOB_REORG)
{
  alter online table t6 reorganize partition;
}

select count(1) as t1_part_count from information_schema.partitions where table_schema='test' and table_name='t1';
select count(1) as t2_part_count from information_schema.partitions where table_schema='test' and table_name='t2';
select count(1) as t3_part_count from information_schema.partitions where table_schema='test' and table_name='t3';
select count(1) as t4_part_count from information_schema.partitions where table_schema='test' and table_name='t4';
select @reorg_t5_part_count:= count(1) as t5_part_count from information_schema.partitions where table_schema='test' and table_name='t5';
select count(1) as t6_part_count from information_schema.partitions where table_schema='test' and table_name='t6';

## Check details of t5 partitioning
--exec $NDB_DESC -dtest -p -n t5

--let $t5_part_diff=query_get_value('select @reorg_t5_part_count-@init_t5_part_count as Value',Value,1)

if (!$t5_part_diff)
{
  --die Table t5 was not reorganised
}

## Simple blob usage of t6
select count(0) as row_count, sum(length(dat)) as data_length, sum(length(txt)) as text_length from t6;

## Check details of t6 partitioning
--exec $NDB_DESC -dtest -p -n t6 '$bt_test_t6_dat' '$bt_test_t6_txt'

# Check that main table and blob tables have same hashmap.
let ndb_database= test;
let ndb_table= t6;
let ndb_die_on_error= 1;
--source suite/ndb/include/ndb_check_blob_tables.inc

## Dropping the nodegroup with "new" nodes is not allowed while there is data on those nodes
# NOTE: --error 0 is needed because exit codes do not work reliably on Windows
--error 0,255
--exec $NDB_MGM -e "drop nodegroup 1"

## Nodegroup with "new" nodes still exists after the attempted drop, as shown:
--exec $NDB_MGM -e show

drop table t1,t2,t3,t4,t5,t6;

## Drop nodegroup with "new" nodes now that the tables have been dropped
--exec $NDB_MGM -e "drop nodegroup 1"

## Nodegroup with "new" nodes is gone after dropping it, as shown:
--exec $NDB_MGM -e show

# Cleanup
ALTER TABLESPACE ts_1 DROP DATAFILE 'data_1.dat' ENGINE NDB;
DROP TABLESPACE ts_1 ENGINE NDB;
DROP LOGFILE GROUP lg_1 ENGINE NDB;

