--source include/have_ndb.inc
--source include/not_embedded.inc

# Test is using error insert, check that binaries support it
--source suite/ndb/t/have_ndb_error_insert.inc

# Use small LoadFactors to force sparse hash table
--exec $NDB_MGM --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -e "all error 3003" >> $NDB_TOOLS_OUTPUT

set max_heap_table_size = 286720000;
create table t1 (a int primary key) engine=memory;

load data local infile 'suite/ndb/data/table_data10000.dat'
  into table t1 columns terminated by ' ' (a, @col2);

let $i = 4;
let $b = 10000;
while ($i)
{
  --eval insert into t1 select a + $b from t1;
  let $b = $b * 2;
  dec $i;
}

select count(*) from t1;

alter table t1 engine=ndbcluster comment='NDB_TABLE=NOLOGGING'
  partition by key() partitions 1;

--exec $NDB_MGM --no-defaults --ndb-connectstring="$NDB_CONNECTSTRING" -e "all report memory" >> $NDB_TOOLS_OUTPUT

create table t2 (a int primary key) engine=memory;
insert into t2 select a from t1;

--echo the left join below should result in scanning t2 and doing pk lookups in t1
--replace_column 10 # 11 #
explain select if(isnull(t1.a),t2.a,NULL) missed, count(*) rows
  from t2 left join t1 on t1.a=t2.a
  group by if(isnull(t1.a),t2.a,NULL);

--echo the result rows with missed equal to NULL should count all rows (160000)
--echo the other rows are the failed lookups and there should not be any such
select if(isnull(t1.a),t2.a,NULL) missed, count(*) rows
  from t2 left join t1 on t1.a=t2.a
  group by if(isnull(t1.a),t2.a,NULL);

--echo verify that it is not possible to reinsert all rows in t1 to itself
--echo affected rows should be zero

# All the INSERT IGNORE statements below generate 10000 duplicate key
# warnings each. By default only the first 64 warnings are saved on the
# warning stack (and thus printed to the .result file). Since the order of
# the inserts is slightly random, the first 64 warnings are also random.
# Fix this by setting the number of saved warnings to zero, since the test
# is only interested in checking that no records are allowed to be inserted,
# which it does by looking at "affected_rows".
set @save_max_error_count = @@max_error_count;
set @@max_error_count = 0;

--enable_info
insert ignore into t1 select a from t2 limit 0, 10000;
insert ignore into t1 select a from t2 limit 10000, 10000;
insert ignore into t1 select a from t2 limit 20000, 10000;
insert ignore into t1 select a from t2 limit 30000, 10000;
insert ignore into t1 select a from t2 limit 40000, 10000;
insert ignore into t1 select a from t2 limit 50000, 10000;
insert ignore into t1 select a from t2 limit 60000, 10000;
insert ignore into t1 select a from t2 limit 70000, 10000;
insert ignore into t1 select a from t2 limit 80000, 10000;
insert ignore into t1 select a from t2 limit 90000, 10000;
insert ignore into t1 select a from t2 limit 100000, 10000;
insert ignore into t1 select a from t2 limit 110000, 10000;
insert ignore into t1 select a from t2 limit 120000, 10000;
insert ignore into t1 select a from t2 limit 130000, 10000;
insert ignore into t1 select a from t2 limit 140000, 10000;
insert ignore into t1 select a from t2 limit 150000, 10000;
--disable_info

set @@max_error_count = @save_max_error_count;

drop table t1, t2;
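
# Appendix note (editorial sketch, not part of the original test): the
# doubling loop above runs 4 iterations over the 10000 rows loaded from
# table_data10000.dat, so the expected population of t1 is
# 10000 * 2^4 = 160000 rows, matching the count the echoes refer to.
# A standalone check of that arithmetic:
select 10000 * pow(2, 4) as expected_rows;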
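
# Hedged sketch of the missed-lookup pattern used above, on two toy tables
# (`src` and `dst` are hypothetical names, not from the test). Every key in
# src is probed in dst via a left join; keys the join fails to find come
# back with dst.a NULL, so they surface as a non-NULL `missed` group. Here
# key 3 exists only in src, so the result is (NULL, 2) and (3, 1).
create table src (a int primary key);
create table dst (a int primary key);
insert into src values (1), (2), (3);
insert into dst values (1), (2);
select if(isnull(dst.a), src.a, NULL) missed, count(*) rows
  from src left join dst on dst.a = src.a
  group by if(isnull(dst.a), src.a, NULL);
drop table src, dst;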
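
# Hedged sketch of the affected-rows check (`demo` is a hypothetical table,
# not from the test): INSERT IGNORE converts duplicate-key errors into
# warnings, so re-inserting keys that already exist reports
# "affected rows: 0" under --enable_info, which is exactly what the
# sixteen INSERT IGNORE statements above verify for t1.
create table demo (a int primary key);
insert into demo values (1), (2);
--enable_info
insert ignore into demo values (1), (2);
--disable_info
drop table demo;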