List: Commits    « Previous Message | Next Message »
From: Pekka Nousiainen    Date: May 17 2011 1:41pm
Subject:bzr push into mysql-5.1-telco-7.0-wl4163 branch (pekka:4371 to 4373)
View as plain text  
 4373 Pekka Nousiainen	2011-05-17 [merge]
      merge to wl4163

    modified:
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
 4372 Pekka Nousiainen	2011-05-17 [merge]
      merge to wl4163

    removed:
      mysql-test/suite/ndb/include/add_six_nodes.inc
      mysql-test/suite/ndb/include/add_two_nodes.inc
      mysql-test/suite/ndb/include/reload_ndb_mgmd.inc
      mysql-test/suite/ndb/r/add_node01.result
      mysql-test/suite/ndb/r/add_node02.result
      mysql-test/suite/ndb/r/add_node03.result
      mysql-test/suite/ndb/t/add_node01.test
      mysql-test/suite/ndb/t/add_node02.test
      mysql-test/suite/ndb/t/add_node03.test
    added:
      mysql-test/suite/ndb/data/
      mysql-test/suite/ndb_rpl/
      mysql-test/suite/ndb_rpl/my.cnf
      mysql-test/suite/ndb_rpl/r/
      mysql-test/suite/ndb_rpl/t/
      mysql-test/suite/ndb_rpl/t/disabled.def
      storage/ndb/src/kernel/vm/NdbSeqLock.hpp
    renamed:
      mysql-test/suite/ndb/std_data/ => mysql-test/suite/ndb/backups/
      mysql-test/suite/ndb/std_data/ndb_backup50/ => mysql-test/suite/ndb/backups/50/
      mysql-test/suite/ndb/std_data/ndb_backup51/ => mysql-test/suite/ndb/backups/51/
      mysql-test/suite/ndb/std_data/ndb_backup51_d2_be/ => mysql-test/suite/ndb/backups/51_d2_be/
      mysql-test/suite/ndb/std_data/ndb_backup51_d2_le/ => mysql-test/suite/ndb/backups/51_d2_le/
      mysql-test/suite/ndb/std_data/ndb_backup51_data_be/ => mysql-test/suite/ndb/backups/51_data_be/
      mysql-test/suite/ndb/std_data/ndb_backup51_data_le/ => mysql-test/suite/ndb/backups/51_data_le/
      mysql-test/suite/ndb/std_data/ndb_backup51_dd/ => mysql-test/suite/ndb/backups/51_dd/
      mysql-test/suite/ndb/std_data/ndb_backup51_undolog_be/ => mysql-test/suite/ndb/backups/51_undolog_be/
      mysql-test/suite/ndb/std_data/ndb_backup51_undolog_le/ => mysql-test/suite/ndb/backups/51_undolog_le/
      mysql-test/suite/ndb/std_data/ndb_backup_before_native_default/ => mysql-test/suite/ndb/backups/before_native_default/
      mysql-test/suite/ndb/std_data/ndb_backup_bug54613/ => mysql-test/suite/ndb/backups/bug54613/
      mysql-test/suite/ndb/std_data/ndb_backup_hashmap/ => mysql-test/suite/ndb/backups/hashmap/
      mysql-test/suite/ndb/std_data/ndb_backup_packed/ => mysql-test/suite/ndb/backups/packed/
      mysql-test/suite/ndb/std_data/table_data10000.dat => mysql-test/suite/ndb/data/table_data10000.dat
      mysql-test/suite/ndb/std_data/table_data100000.dat => mysql-test/suite/ndb/data/table_data100000.dat
      mysql-test/suite/rpl_ndb/ndb_master-slave.inc => mysql-test/suite/ndb_rpl/ndb_master-slave.inc
      mysql-test/suite/rpl_ndb/ndb_master-slave_2ch.inc => mysql-test/suite/ndb_rpl/ndb_master-slave_2ch.inc
      mysql-test/suite/rpl_ndb/r/rpl_ndb_2innodb.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_2innodb.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_2myisam.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_2myisam.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_2ndb.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_2ndb.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_2other.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_2other.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_add_column.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_add_column.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_apply_status.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_apply_status.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_auto_inc.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_auto_inc.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_bank.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_bank.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_basic.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_basic.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_bitfield.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_bitfield.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_blob.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_blob.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_break_3_chain.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_break_3_chain.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_bug22045.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_bug22045.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_check_for_mixed.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_check_for_mixed.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_circular.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_circular.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_circular_2ch.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_circular_2ch.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_circular_simplex.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_circular_simplex.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_conflict.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_conflict_max.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict_max.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_conflict_max_delete_win.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict_max_delete_win.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_conflict_old.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict_old.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_ctype_ucs2_def.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_ctype_ucs2_def.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_dd_advance.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_dd_advance.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_dd_basic.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_dd_basic.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_dd_partitions.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_dd_partitions.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_do_db.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_do_db.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_do_table.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_do_table.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_empty_epoch.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_empty_epoch.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_gap_event.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_gap_event.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_idempotent.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_idempotent.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_ignore_db.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_ignore_db.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_innodb2ndb.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_innodb2ndb.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_innodb_trans.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_innodb_trans.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_load.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_load.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_logging.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_logging.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_mix_eng_trans.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_mix_eng_trans.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_mix_innodb.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_mix_innodb.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_mixed_tables.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_mixed_tables.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_multi.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_multi.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_myisam2ndb.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_myisam2ndb.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_rep_error.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_rep_error.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_rep_ignore.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_rep_ignore.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_skip_gap_event.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_skip_gap_event.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_slave_lsu.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_slave_lsu.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_slave_lsu_anyval.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_slave_lsu_anyval.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_slave_restart.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_slave_restart.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_stm_innodb.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_stm_innodb.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_sync.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_sync.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_ui.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_ui.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_ui2.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_ui2.result
      mysql-test/suite/rpl_ndb/r/rpl_ndb_ui3.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_ui3.result
      mysql-test/suite/rpl_ndb/r/rpl_ndbapi_multi.result => mysql-test/suite/ndb_rpl/r/ndb_rpl_ndbapi_multi.result
      mysql-test/suite/rpl_ndb/r/rpl_row_basic_7ndb.result => mysql-test/suite/rpl_ndb/r/rpl_ndb_row_basic.result
      mysql-test/suite/rpl_ndb/r/rpl_truncate_7ndb.result => mysql-test/suite/ndb_rpl/r/rpl_truncate_7ndb.result
      mysql-test/suite/rpl_ndb/r/rpl_truncate_7ndb_2.result => mysql-test/suite/ndb_rpl/r/rpl_truncate_7ndb_2.result
      mysql-test/suite/rpl_ndb/t/ndb_apply_status.frm => mysql-test/suite/ndb_rpl/t/ndb_apply_status.frm
      mysql-test/suite/rpl_ndb/t/ndb_conflict_info.inc => mysql-test/suite/ndb_rpl/t/ndb_conflict_info.inc
      mysql-test/suite/rpl_ndb/t/ndb_conflict_info_init.inc => mysql-test/suite/ndb_rpl/t/ndb_conflict_info_init.inc
      mysql-test/suite/rpl_ndb/t/rpl_ndb_2innodb-master.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_2innodb-master.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_2innodb-slave.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_2innodb-slave.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_2innodb.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_2innodb.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_2multi_basic.inc => mysql-test/suite/ndb_rpl/t/ndb_rpl_2multi_basic.inc
      mysql-test/suite/rpl_ndb/t/rpl_ndb_2multi_eng.inc => mysql-test/suite/ndb_rpl/t/ndb_rpl_2multi_eng.inc
      mysql-test/suite/rpl_ndb/t/rpl_ndb_2myisam-master.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_2myisam-master.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_2myisam-slave.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_2myisam-slave.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_2myisam.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_2myisam.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_2ndb-slave.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_2ndb-slave.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_2ndb.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_2ndb.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_2other-slave.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_2other-slave.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_2other.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_2other.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_add_column.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_add_column.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_apply_status-master.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_apply_status-master.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_apply_status.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_apply_status.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_auto_inc.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_auto_inc.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_bank.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_bank.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_basic.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_basic.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_bitfield.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_bitfield.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_blob.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_blob.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_break_3_chain.cnf => mysql-test/suite/ndb_rpl/t/ndb_rpl_break_3_chain.cnf
      mysql-test/suite/rpl_ndb/t/rpl_ndb_break_3_chain.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_break_3_chain.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_bug22045.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_bug22045.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_check_for_mixed.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_check_for_mixed.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_circular.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_circular.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_2ch.cnf => mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch.cnf
      mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_2ch.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_simplex.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_simplex.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict_1.inc => mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_1.inc
      mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict_max-master.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_max-master.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict_max.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_max.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict_max_delete_win-master.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_max_delete_win-master.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict_max_delete_win.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_max_delete_win.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict_old-master.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_old-master.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict_old.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_old.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_ctype_ucs2_def-master.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_ctype_ucs2_def-master.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_ctype_ucs2_def.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_ctype_ucs2_def.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_dd_advance.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_dd_advance.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_dd_basic.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_dd_basic.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_dd_partitions-master.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_dd_partitions-master.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_dd_partitions-slave.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_dd_partitions-slave.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_dd_partitions.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_dd_partitions.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_do_db-slave.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_do_db-slave.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_do_db.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_do_db.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_do_table-slave.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_do_table-slave.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_do_table.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_do_table.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_empty_epoch.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_empty_epoch.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_gap_event-master.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_gap_event-master.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_gap_event.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_gap_event.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_get_binlog_events.inc => mysql-test/suite/ndb_rpl/t/ndb_rpl_get_binlog_events.inc
      mysql-test/suite/rpl_ndb/t/rpl_ndb_idempotent.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_idempotent.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_ignore_db-master.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_ignore_db-master.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_ignore_db.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_ignore_db.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_innodb2ndb-master.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_innodb2ndb-master.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_innodb2ndb-slave.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_innodb2ndb-slave.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_innodb2ndb.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_innodb2ndb.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_innodb_trans-slave.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_innodb_trans-slave.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_innodb_trans.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_innodb_trans.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_load.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_load.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_logging.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_logging.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_mix_eng_trans-master.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_mix_eng_trans-master.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_mix_eng_trans-slave.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_mix_eng_trans-slave.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_mix_eng_trans.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_mix_eng_trans.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_mix_innodb-master.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_mix_innodb-master.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_mix_innodb.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_mix_innodb.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_mixed_tables-master.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_mixed_tables-master.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_mixed_tables-slave.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_mixed_tables-slave.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_mixed_tables.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_mixed_tables.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_multi.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_multi.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_binlog_update.cnf => mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.cnf
      mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_binlog_update.inc => mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.inc
      mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_update2-slave.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_update2-slave.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_myisam2ndb-slave.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_myisam2ndb-slave.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_myisam2ndb.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_myisam2ndb.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_relayrotate-slave.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_relayrotate-slave.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_rep_error.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_rep_error.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_rep_ignore-slave.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_rep_ignore-slave.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_rep_ignore.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_rep_ignore.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_skip_gap_event-slave.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_skip_gap_event-slave.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_skip_gap_event.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_skip_gap_event.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_slave_lsu.cnf => mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_lsu.cnf
      mysql-test/suite/rpl_ndb/t/rpl_ndb_slave_lsu.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_lsu.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_slave_lsu_anyval.cnf => mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_lsu_anyval.cnf
      mysql-test/suite/rpl_ndb/t/rpl_ndb_slave_lsu_anyval.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_lsu_anyval.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_slave_restart.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_restart.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_stm_innodb-master.opt => mysql-test/suite/ndb_rpl/t/ndb_rpl_stm_innodb-master.opt
      mysql-test/suite/rpl_ndb/t/rpl_ndb_stm_innodb.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_stm_innodb.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_sync.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_sync.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_ui.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_ui.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_ui2.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_ui2.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_ui3.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_ui3.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_wait_schema_logging.inc => mysql-test/suite/ndb_rpl/t/wait_schema_logging.inc
      mysql-test/suite/rpl_ndb/t/rpl_ndb_xxx_innodb.inc => mysql-test/suite/ndb_rpl/t/ndb_rpl_xxx_innodb.inc
      mysql-test/suite/rpl_ndb/t/rpl_ndbapi_multi.test => mysql-test/suite/ndb_rpl/t/ndb_rpl_ndbapi_multi.test
      mysql-test/suite/rpl_ndb/t/rpl_row_basic_7ndb.test => mysql-test/suite/rpl_ndb/t/rpl_ndb_row_basic.test
      mysql-test/suite/rpl_ndb/t/rpl_truncate_7ndb.test => mysql-test/suite/ndb_rpl/t/rpl_truncate_7ndb.test
      mysql-test/suite/rpl_ndb/t/rpl_truncate_7ndb_2.test => mysql-test/suite/ndb_rpl/t/rpl_truncate_7ndb_2.test
      mysql-test/suite/rpl_ndb/t/select_ndb_apply_status.inc => mysql-test/suite/ndb_rpl/t/select_ndb_apply_status.inc
    modified:
      CMakeLists.txt
      mysql-test/Makefile.am
      mysql-test/include/ndb_backup_id.inc
      mysql-test/lib/My/SysInfo.pm
      mysql-test/mysql-test-run.pl
      mysql-test/suite/ndb/r/ndb_add_partition.result
      mysql-test/suite/ndb/r/ndb_alter_table_online2.result
      mysql-test/suite/ndb/r/ndb_rename.result
      mysql-test/suite/ndb/t/disabled.def
      mysql-test/suite/ndb/t/ndb_add_partition.test
      mysql-test/suite/ndb/t/ndb_addnode.test
      mysql-test/suite/ndb/t/ndb_alter_table_backup.test
      mysql-test/suite/ndb/t/ndb_alter_table_online2.test
      mysql-test/suite/ndb/t/ndb_dd_restore_compat.test
      mysql-test/suite/ndb/t/ndb_native_default_support.test
      mysql-test/suite/ndb/t/ndb_rename.test
      mysql-test/suite/ndb/t/ndb_restore_compat_downward.test
      mysql-test/suite/ndb/t/ndb_restore_compat_endianness.test
      mysql-test/suite/ndb/t/ndb_restore_misc.test
      mysql-test/suite/ndb/t/ndb_restore_undolog.test
      mysql-test/suite/ndb/t/ndb_show_tables_result.inc
      mysql-test/suite/ndb_binlog/t/ndb_binlog_restore.test
      mysql-test/suite/ndb_binlog/t/ndb_binlog_variants.test
      mysql-test/suite/rpl_ndb/t/disabled.def
      mysql-test/suite/rpl_ndb/t/rpl_ndb_UUID.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_blob2.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_commit_afterflush.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_ddl.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_delete_nowhere.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_extra_col_master.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_extra_col_slave.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_func003.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_insert_ignore.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_update2.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_update3.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_relayrotate.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_row_001.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_set_null.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_sp003.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_sp006.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_trig004.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_typeconv_all.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_typeconv_lossy.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_typeconv_nonlossy.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_typeconv_strict.test
      storage/ndb/CMakeLists.txt
      storage/ndb/include/CMakeLists.txt
      storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp
      storage/ndb/include/kernel/signaldata/QueryTree.hpp
      storage/ndb/include/ndbapi/NdbReceiver.hpp
      storage/ndb/ndb_configure.m4
      storage/ndb/src/common/portlib/CMakeLists.txt
      storage/ndb/src/common/portlib/Makefile.am
      storage/ndb/src/common/portlib/NdbMutex.c
      storage/ndb/src/common/portlib/NdbTCP.cpp
      storage/ndb/src/kernel/blocks/backup/Backup.cpp
      storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
      storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp
      storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
      storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
      storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
      storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
      storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
      storage/ndb/src/kernel/vm/SimulatedBlock.cpp
      storage/ndb/src/kernel/vm/SimulatedBlock.hpp
      storage/ndb/src/kernel/vm/mt-asm.h
      storage/ndb/src/kernel/vm/mt.cpp
      storage/ndb/src/kernel/vm/mt.hpp
      storage/ndb/src/mgmsrv/MgmtSrvr.cpp
      storage/ndb/src/ndbapi/DictCache.cpp
      storage/ndb/src/ndbapi/Ndb.cpp
      storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
      storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
      storage/ndb/src/ndbapi/NdbOperationExec.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilder.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
      storage/ndb/src/ndbapi/NdbReceiver.cpp
      storage/ndb/src/ndbapi/NdbScanOperation.cpp
      storage/ndb/src/ndbapi/Ndbinit.cpp
      storage/ndb/src/ndbapi/ObjectMap.cpp
      storage/ndb/src/ndbapi/ObjectMap.hpp
      storage/ndb/src/ndbapi/ndberror.c
      storage/ndb/test/ndbapi/testBasic.cpp
      storage/ndb/test/run-test/daily-basic-tests.txt
      mysql-test/suite/ndb_rpl/r/ndb_rpl_ui2.result
      mysql-test/suite/ndb_rpl/t/ndb_rpl_2innodb.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_2myisam.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_2ndb.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_2other.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_add_column.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_apply_status.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_auto_inc.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_bank.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_basic.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_bitfield.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_blob.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_bug22045.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_check_for_mixed.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_simplex.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_1.inc
      mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_max.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_max_delete_win.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_old.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_ctype_ucs2_def.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_dd_advance.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_dd_basic.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_dd_partitions.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_do_db.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_do_table.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_empty_epoch.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_gap_event.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_idempotent.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_ignore_db.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_innodb2ndb.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_innodb_trans.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_load.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_logging.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_mix_eng_trans.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_mix_innodb.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_mixed_tables.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.inc
      mysql-test/suite/ndb_rpl/t/ndb_rpl_myisam2ndb.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_rep_error.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_rep_ignore.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_skip_gap_event.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_lsu.cnf
      mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_lsu.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_lsu_anyval.cnf
      mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_lsu_anyval.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_restart.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_stm_innodb.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_sync.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_ui.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_ui2.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_ui3.test
      mysql-test/suite/ndb_rpl/t/wait_schema_logging.inc
      mysql-test/suite/ndb_rpl/t/ndb_rpl_xxx_innodb.inc
      mysql-test/suite/ndb_rpl/t/ndb_rpl_ndbapi_multi.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_row_basic.test
      mysql-test/suite/ndb_rpl/t/rpl_truncate_7ndb.test
      mysql-test/suite/ndb_rpl/t/rpl_truncate_7ndb_2.test
 4371 Pekka Nousiainen	2011-05-17
      wl#4163 i05_fix.diff
      remove debug buffer from stack (MT build 8k stack)

    modified:
      storage/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxBuild.cpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp
=== modified file 'CMakeLists.txt'
--- a/CMakeLists.txt	2011-03-22 13:16:46 +0000
+++ b/CMakeLists.txt	2011-05-12 14:13:43 +0000
@@ -326,3 +326,20 @@ IF(WITH_EMBEDDED_SERVER)
   ADD_SUBDIRECTORY(libmysqld/examples)
 ENDIF(WITH_EMBEDDED_SERVER)
 ADD_SUBDIRECTORY(mysql-test/lib/My/SafeProcess)
+
+# Dump cmake's output and error log to help diagnosing
+# platform checks
+MACRO(DUMP_FILE filename)
+  IF(EXISTS ${filename})
+    FILE(READ ${filename} content)
+    MESSAGE(STATUS "=vvvv= Dumping ${filename} ")
+    MESSAGE(STATUS "${content}")
+    MESSAGE(STATUS "=^^^^=")
+  ELSE()
+    MESSAGE(STATUS "'${filename}' does not exist")
+  ENDIF()
+ENDMACRO()
+ 
+DUMP_FILE("${CMAKE_BINARY_DIR}/CMakeFiles/CMakeError.log")
+DUMP_FILE("${CMAKE_BINARY_DIR}/CMakeFiles/CMakeOutput.log")
+

=== modified file 'mysql-test/Makefile.am'
--- a/mysql-test/Makefile.am	2011-04-08 13:59:44 +0000
+++ b/mysql-test/Makefile.am	2011-05-16 07:20:54 +0000
@@ -75,19 +75,19 @@ EXTRA_DIST =	README \
 # List of directories containing test + result files and the
 # related test data files that should be copied
 TEST_DIRS = t r include std_data std_data/parts collections \
-	suite/ndb/std_data/ndb_backup50 \
-	suite/ndb/std_data/ndb_backup51 \
-	suite/ndb/std_data/ndb_backup51_data_be \
-	suite/ndb/std_data/ndb_backup51_data_le \
-	suite/ndb/std_data/ndb_backup51_dd \
-	suite/ndb/std_data/ndb_backup_packed \
-	suite/ndb/std_data/ndb_backup51_d2_be \
-	suite/ndb/std_data/ndb_backup51_d2_le \
-	suite/ndb/std_data/ndb_backup51_undolog_be \
-	suite/ndb/std_data/ndb_backup51_undolog_le \
-	suite/ndb/std_data/ndb_backup_hashmap \
-	suite/ndb/std_data/ndb_backup_before_native_default \
-	suite/ndb/std_data/ndb_backup_bug54613 \
+	suite/ndb/backups/50 \
+	suite/ndb/backups/51 \
+	suite/ndb/backups/51_data_be \
+	suite/ndb/backups/51_data_le \
+	suite/ndb/backups/51_dd \
+	suite/ndb/backups/packed \
+	suite/ndb/backups/51_d2_be \
+	suite/ndb/backups/51_d2_le \
+	suite/ndb/backups/51_undolog_be \
+	suite/ndb/backups/51_undolog_le \
+	suite/ndb/backups/hashmap \
+	suite/ndb/backups/before_native_default \
+	suite/ndb/backups/bug54613 \
 	std_data/funcs_1 \
 	extra/binlog_tests/ extra/rpl_tests \
 	suite/binlog suite/binlog/t suite/binlog/r suite/binlog/std_data \
@@ -105,11 +105,12 @@ TEST_DIRS = t r include std_data std_dat
 	suite/rpl suite/rpl/data suite/rpl/include suite/rpl/r \
 	suite/rpl/t \
 	suite/stress/include suite/stress/t suite/stress/r \
-	suite/ndb suite/ndb/t suite/ndb/r suite/ndb/include suite/ndb/std_data \
+	suite/ndb suite/ndb/t suite/ndb/r suite/ndb/include suite/ndb/data \
 	suite/ndb_big \
 	suite/ndb_binlog suite/ndb_binlog/t suite/ndb_binlog/r \
 	suite/ndb_team suite/ndb_team/t suite/ndb_team/r \
 	suite/rpl_ndb suite/rpl_ndb/t suite/rpl_ndb/r \
+	suite/ndb_rpl suite/ndb_rpl/t suite/ndb_rpl/r \
 	suite/parts suite/parts/t suite/parts/r suite/parts/inc \
 	suite/innodb suite/innodb/t suite/innodb/r suite/innodb/include \
         suite/innodb_plugin suite/innodb_plugin/t suite/innodb_plugin/r \

=== modified file 'mysql-test/include/ndb_backup_id.inc'
--- a/mysql-test/include/ndb_backup_id.inc	2009-07-14 07:52:35 +0000
+++ b/mysql-test/include/ndb_backup_id.inc	2011-05-04 13:00:44 +0000
@@ -1,6 +1,6 @@
 # there is no neat way to find the backupid, this is a hack to find it...
 --let $dump_file= $MYSQLTEST_VARDIR/tmp/select_all.txt
---exec $NDB_SELECT_ALL -d sys --delimiter=, SYSTAB_0 > $dump_file
+--exec $NDB_SELECT_ALL --no-defaults -d sys --delimiter=, SYSTAB_0 > $dump_file
 
 CREATE TEMPORARY TABLE test.backup_info(
   a BIGINT,

=== modified file 'mysql-test/lib/My/SysInfo.pm'
--- a/mysql-test/lib/My/SysInfo.pm	2011-04-08 12:48:50 +0000
+++ b/mysql-test/lib/My/SysInfo.pm	2011-05-06 08:05:58 +0000
@@ -168,6 +168,28 @@ sub num_cpus {
 }
 
 
+# Return the number of cores found
+#  - if there is a "core_id" attribute in the
+#    cpuinfo, use it to filter out only the count of
+#    cores, else return count of cpus 
+sub num_cores {
+  my ($self)= @_;
+  
+  my $cores = 0;
+  my %seen = (); # Hash with the core id's already seen 
+  foreach my $cpu (@{$self->{cpus}}) {
+    my $core_id = $cpu->{core_id};
+    
+    next if (defined $core_id and $seen{$core_id}++);
+ 
+    # Unknown core id or not seen this core before, count it
+    $cores++;
+  }
+  return $cores or
+    confess "INTERNAL ERROR: No cores!";
+}
+
+
 # Return the smallest bogomips value amongst the processors
 sub min_bogomips {
   my ($self)= @_;

=== modified file 'mysql-test/mysql-test-run.pl'
--- a/mysql-test/mysql-test-run.pl	2011-04-27 06:52:34 +0000
+++ b/mysql-test/mysql-test-run.pl	2011-05-16 10:02:42 +0000
@@ -158,7 +158,7 @@ my $path_config_file;           # The ge
 # executables will be used by the test suite.
 our $opt_vs_config = $ENV{'MTR_VS_CONFIG'};
 
-my $DEFAULT_SUITES= "ndb,ndb_binlog,rpl_ndb,main,binlog,federated,rpl,innodb,ndb_team";
+my $DEFAULT_SUITES= "ndb,ndb_binlog,rpl_ndb,ndb_rpl,main,binlog,federated,rpl,innodb,ndb_team";
 my $opt_suites;
 my $opt_extra_suites;
 
@@ -382,14 +382,11 @@ sub main {
     # Try to find a suitable value for number of workers
     my $sys_info= My::SysInfo->new();
 
-    $opt_parallel= $sys_info->num_cpus();
-    print "num_cpus: $opt_parallel, min_bogomips: " .
-      $sys_info->min_bogomips(). "\n";
+    $opt_parallel= $sys_info->num_cores();
     for my $limit (2000, 1500, 1000, 500){
       $opt_parallel-- if ($sys_info->min_bogomips() < $limit);
     }
     my $max_par= $ENV{MTR_MAX_PARALLEL} || 8;
-    print "max_par: $max_par\n";
     $opt_parallel= $max_par if ($opt_parallel > $max_par);
     $opt_parallel= $num_tests if ($opt_parallel > $num_tests);
     $opt_parallel= 1 if (IS_WINDOWS and $sys_info->isvm());
@@ -2632,6 +2629,41 @@ sub ndbcluster_wait_started($$){
 }
 
 
+sub ndbcluster_dump($) {
+  my ($cluster)= @_;
+
+  print "\n== Dumping cluster log files\n\n";
+
+  # ndb_mgmd(s)
+  foreach my $ndb_mgmd ( in_cluster($cluster, ndb_mgmds()) )
+  {
+    my $datadir = $ndb_mgmd->value('DataDir');
+
+    # Should find ndb_<nodeid>_cluster.log and ndb_mgmd.log
+    foreach my $file ( glob("$datadir/ndb*.log") )
+    {
+      print "$file:\n";
+      mtr_printfile("$file");
+      print "\n";
+    }
+  }
+
+  # ndb(s)
+  foreach my $ndbd ( in_cluster($cluster, ndbds()) )
+  {
+    my $datadir = $ndbd->value('DataDir');
+
+    # Should find ndbd.log
+    foreach my $file ( glob("$datadir/ndbd.log") )
+    {
+      print "$file:\n";
+      mtr_printfile("$file");
+      print "\n";
+    }
+  }
+}
+
+
 sub ndb_mgmd_wait_started($) {
   my ($cluster)= @_;
 
@@ -5085,6 +5117,13 @@ sub start_servers($) {
     {
       # failed to start
       $tinfo->{'comment'}= "Start of '".$cluster->name()."' cluster failed";
+
+      #
+      # Dump cluster log files to log file to help analyze the
+      # cause of the failed start
+      #
+      ndbcluster_dump($cluster);
+
       return 1;
     }
   }

=== renamed directory 'mysql-test/suite/ndb/std_data' => 'mysql-test/suite/ndb/backups'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup50' => 'mysql-test/suite/ndb/backups/50'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51' => 'mysql-test/suite/ndb/backups/51'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_d2_be' => 'mysql-test/suite/ndb/backups/51_d2_be'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_d2_le' => 'mysql-test/suite/ndb/backups/51_d2_le'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_data_be' => 'mysql-test/suite/ndb/backups/51_data_be'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_data_le' => 'mysql-test/suite/ndb/backups/51_data_le'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_dd' => 'mysql-test/suite/ndb/backups/51_dd'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_undolog_be' => 'mysql-test/suite/ndb/backups/51_undolog_be'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup51_undolog_le' => 'mysql-test/suite/ndb/backups/51_undolog_le'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup_before_native_default' => 'mysql-test/suite/ndb/backups/before_native_default'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup_bug54613' => 'mysql-test/suite/ndb/backups/bug54613'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup_hashmap' => 'mysql-test/suite/ndb/backups/hashmap'
=== renamed directory 'mysql-test/suite/ndb/std_data/ndb_backup_packed' => 'mysql-test/suite/ndb/backups/packed'
=== added directory 'mysql-test/suite/ndb/data'
=== renamed file 'mysql-test/suite/ndb/std_data/table_data10000.dat' => 'mysql-test/suite/ndb/data/table_data10000.dat'
=== renamed file 'mysql-test/suite/ndb/std_data/table_data100000.dat' => 'mysql-test/suite/ndb/data/table_data100000.dat'
=== removed file 'mysql-test/suite/ndb/include/add_six_nodes.inc'
--- a/mysql-test/suite/ndb/include/add_six_nodes.inc	2009-03-26 08:21:55 +0000
+++ b/mysql-test/suite/ndb/include/add_six_nodes.inc	1970-01-01 00:00:00 +0000
@@ -1,64 +0,0 @@
---perl
-
-my $vardir = $ENV{MYSQLTEST_VARDIR} or die "Need MYSQLTEST_VARDIR";
-my $file ="$vardir/my.cnf";
-my $file_new = "$vardir/my.cnf.new";
-
-open (IN, "$file") || die $!;
-open (OUT, ">$file_new") || die $!;
-
-while ($_ = <IN> ) {
-  if ($_ =~ /ndbd=localhost,localhost/i) 
-  {
-    # Replace text, all instances on a line (/g), case insensitive (/i)
-    $_ =~ s/ndbd=localhost,localhost/ndbd=localhost,localhost,localhost,localhost,localhost,localhost,localhost,localhost/gi;
-  }
-  print OUT "$_";
-  if ($_=~ /cluster_config.ndb_mgmd.1.1/i) 
-  {
-    print OUT "NodeId=3\n";
-  }
-}
-
-close IN;
-close OUT;
-
-open (OUT, ">>$file_new") || die $!;
-print OUT "[cluster_config.ndbd.3.1]\n";
-print OUT "Id=40\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.1\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "\n";
-print OUT "[cluster_config.ndbd.4.1]\n";
-print OUT "Id=41\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.2\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-print OUT "\n";
-print OUT "[cluster_config.ndbd.5.1]\n";
-print OUT "Id=42\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.1\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "\n";
-print OUT "[cluster_config.ndbd.6.1]\n";
-print OUT "Id=43\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.2\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-print OUT "\n";
-print OUT "[cluster_config.ndbd.7.1]\n";
-print OUT "Id=44\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.1\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "\n";
-print OUT "[cluster_config.ndbd.8.1]\n";
-print OUT "Id=45\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.2\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-
-close OUT;
-EOF
\ No newline at end of file

=== removed file 'mysql-test/suite/ndb/include/add_two_nodes.inc'
--- a/mysql-test/suite/ndb/include/add_two_nodes.inc	2009-03-26 08:21:55 +0000
+++ b/mysql-test/suite/ndb/include/add_two_nodes.inc	1970-01-01 00:00:00 +0000
@@ -1,39 +0,0 @@
---perl
-
-my $vardir = $ENV{MYSQLTEST_VARDIR} or die "Need MYSQLTEST_VARDIR";
-my $file ="$vardir/my.cnf";
-my $file_new = "$vardir/my.cnf.new";
-
-open (IN, "$file") || die $!;
-open (OUT, ">$file_new") || die $!;
-
-while ($_ = <IN> ) {
-  if ($_ =~ /ndbd=localhost,localhost/i) 
-  {
-    # Replace text, all instances on a line (/g), case insensitive (/i)
-    $_ =~ s/ndbd=localhost,localhost/ndbd=localhost,localhost,localhost,localhost/gi;
-  }
-  print OUT "$_";
-  if ($_=~ /cluster_config.ndb_mgmd.1.1/i) 
-  {
-    print OUT "NodeId=3\n";
-  }
-}
-
-close IN;
-close OUT;
-
-open (OUT, ">>$file_new") || die $!;
-print OUT "[cluster_config.ndbd.3.1]\n";
-print OUT "Id=40\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.1\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.1/uf\n";
-print OUT "\n";
-print OUT "[cluster_config.ndbd.4.1]\n";
-print OUT "Id=41\n";
-print OUT "DataDir=$vardir/mysql_cluster.1/ndbd.2\n";
-print OUT "BackupDataDir=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-print OUT "FileSystemPathDataFiles=$vardir/mysql_cluster.1/ndbd.2/uf\n";
-close OUT;
-EOF
\ No newline at end of file

=== removed file 'mysql-test/suite/ndb/include/reload_ndb_mgmd.inc'
--- a/mysql-test/suite/ndb/include/reload_ndb_mgmd.inc	2009-03-26 08:21:55 +0000
+++ b/mysql-test/suite/ndb/include/reload_ndb_mgmd.inc	1970-01-01 00:00:00 +0000
@@ -1,37 +0,0 @@
---perl
-
-use strict;
-use IO::Socket::INET;
-
-use lib "lib/";
-use My::Config;
-
-my $vardir = $ENV{MYSQLTEST_VARDIR} or die "Need MYSQLTEST_VARDIR";
-my $config= My::Config->new("$vardir/my.cnf");
-my $mgmd = $config->group("cluster_config.ndb_mgmd.1.1");
-my $server_port = $mgmd->value("PortNumber");
-#print "server_port: $server_port\n";
-
-my $server = new IO::Socket::INET
-(
- PeerAddr => 'localhost',
- PeerPort => $server_port,
- Proto    => 'tcp'
-);
-
-print $server "reload config\n";
-print $server "mycnf: 1\n";
-print $server "\n";
-
-my $result = "unkown error";
-while(my $line= <$server>){
-  if ($line =~ /result: (.*)/)
-  {
-    $result = $1;
-  }
-  last if ($line eq "\n");
-}
-die "reload failed, result: '$result'"
-    unless $result eq "Ok";
-
-EOF
\ No newline at end of file

=== removed file 'mysql-test/suite/ndb/r/add_node01.result'
--- a/mysql-test/suite/ndb/r/add_node01.result	2009-04-06 07:44:28 +0000
+++ b/mysql-test/suite/ndb/r/add_node01.result	1970-01-01 00:00:00 +0000
@@ -1,238 +0,0 @@
-result_format: 2
-## Make mtr.pl restart all servers after this test
-call mtr.force_restart();
-
-## Show cluster is started with one ndb_mgmd and two ndbd nodes
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	2 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-drop database if exists DB1;
-CREATE LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_1.dat'
-    INITIAL_SIZE 16M
-    UNDO_BUFFER_SIZE 2M
-    ENGINE NDB;
-
-ALTER LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_2.dat'
-    INITIAL_SIZE 12M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_1
-    ADD DATAFILE 'data_1.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_2
-    ADD DATAFILE 'data_2.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-create database DB1;
-use DB1;
-create table old_table1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table2(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table3(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_1 STORAGE DISK
-engine=ndb;
-create table old_table4(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_2 STORAGE DISK
-engine=ndb;
-
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table1 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table2 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table3 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table4 fields terminated by ' ' lines terminated by '\n';
-
-## Add two nodes to my.cnf
-## Reload ndb_mgmd
-## Restart the "old" ndbd nodes
-## Restart mysqld nodes
-
-
-
-
-
-## Initial start of "new" data nodes
-## Wait for added nodes started
-## Create nodegroup for "new" nodes
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	4 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-id=40	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-id=41	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-use DB1;
-create table new_table1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table new_table2(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_1 STORAGE DISK
-engine=ndb;
-insert into new_table1(id, data) VALUES(1,'new'), (2,'new'),(3,'new'),(4,'new'),(5,'new'),(6,'new'),(7,'new'),(8,'new'),(9,'new'),(10,'new');
-insert into new_table2(id, data) VALUES(1,'new'), (2,'new'),(3,'new'),(4,'new'),(5,'new'),(6,'new'),(7,'new'),(8,'new'),(9,'new'),(10,'new');
-
-## ndb_mgm dump shows old data resides only on "old" nodes and new data resides on all nodes in cluster log 
-alter online table old_table1 reorganize partition;
-alter online table old_table2 reorganize partition;
-alter online table old_table3 reorganize partition;
-alter online table old_table4 reorganize partition;
-
-select LOGFILE_GROUP_NAME,FILE_TYPE,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='undo_1.dat';
-LOGFILE_GROUP_NAME	FILE_TYPE	EXTRA
-lg_1	UNDO LOG	CLUSTER_NODE=1;UNDO_BUFFER_SIZE=2097152
-lg_1	UNDO LOG	CLUSTER_NODE=2;UNDO_BUFFER_SIZE=2097152
-lg_1	UNDO LOG	CLUSTER_NODE=40;UNDO_BUFFER_SIZE=2097152
-lg_1	UNDO LOG	CLUSTER_NODE=41;UNDO_BUFFER_SIZE=2097152
-select LOGFILE_GROUP_NAME,FILE_TYPE,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='undo_2.dat';
-LOGFILE_GROUP_NAME	FILE_TYPE	EXTRA
-lg_1	UNDO LOG	CLUSTER_NODE=1;UNDO_BUFFER_SIZE=2097152
-lg_1	UNDO LOG	CLUSTER_NODE=2;UNDO_BUFFER_SIZE=2097152
-lg_1	UNDO LOG	CLUSTER_NODE=40;UNDO_BUFFER_SIZE=2097152
-lg_1	UNDO LOG	CLUSTER_NODE=41;UNDO_BUFFER_SIZE=2097152
-select LOGFILE_GROUP_NAME,FILE_TYPE,TABLESPACE_NAME,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='data_1.dat';
-LOGFILE_GROUP_NAME	FILE_TYPE	TABLESPACE_NAME	EXTRA
-lg_1	DATAFILE	ts_1	CLUSTER_NODE=1
-lg_1	DATAFILE	ts_1	CLUSTER_NODE=2
-lg_1	DATAFILE	ts_1	CLUSTER_NODE=40
-lg_1	DATAFILE	ts_1	CLUSTER_NODE=41
-select LOGFILE_GROUP_NAME,FILE_TYPE,TABLESPACE_NAME,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='data_2.dat';
-LOGFILE_GROUP_NAME	FILE_TYPE	TABLESPACE_NAME	EXTRA
-lg_1	DATAFILE	ts_2	CLUSTER_NODE=1
-lg_1	DATAFILE	ts_2	CLUSTER_NODE=2
-lg_1	DATAFILE	ts_2	CLUSTER_NODE=40
-lg_1	DATAFILE	ts_2	CLUSTER_NODE=41
-
-## Drop nodegroup with "new" nodes is not allowed with data one those nodes
-## Nodegroup with "new" nodes still exist after dropping it as shown:
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	4 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-id=40	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-id=41	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-show databases;
-Database
-information_schema
-DB1
-mtr
-mysql
-test
-drop table old_table1,old_table2,old_table3,old_table4,new_table1,new_table2;
-drop database DB1;
-show databases;
-Database
-information_schema
-mtr
-mysql
-test
-
-## Drop nodegroup with "new" nodes
-## Nodegroup with "new" nodes still exists after dropping it as shown:
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	4 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-id=40	@127.0.0.1  (mysql ndb, no nodegroup)
-id=41	@127.0.0.1  (mysql ndb, no nodegroup)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-ALTER TABLESPACE ts_1
-    DROP DATAFILE 'data_1.dat'
-    ENGINE NDB;
-
-ALTER TABLESPACE ts_2
-    DROP DATAFILE 'data_2.dat'
-    ENGINE NDB;
-
-drop TABLESPACE ts_1 ENGINE NDB;
-drop TABLESPACE ts_2 ENGINE NDB;
-
-drop LOGFILE GROUP lg_1 ENGINE NDB;

=== removed file 'mysql-test/suite/ndb/r/add_node02.result'
--- a/mysql-test/suite/ndb/r/add_node02.result	2009-04-06 07:44:28 +0000
+++ b/mysql-test/suite/ndb/r/add_node02.result	1970-01-01 00:00:00 +0000
@@ -1,143 +0,0 @@
-result_format: 2
-## Make mtr.pl restart all servers after this test
-call mtr.force_restart();
-
-## Show cluster is started with one ndb_mgmd and two ndbd nodes
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	2 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-drop database if exists DB1;
-CREATE LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_1.dat'
-    INITIAL_SIZE 16M
-    UNDO_BUFFER_SIZE 2M
-    ENGINE NDB;
-
-ALTER LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_2.dat'
-    INITIAL_SIZE 12M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_1
-    ADD DATAFILE 'data_1.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_2
-    ADD DATAFILE 'data_2.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-create database DB1;
-use DB1;
-create table old_table1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table2(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table3(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_1 STORAGE DISK
-engine=ndb;
-create table old_table4(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_2 STORAGE DISK
-engine=ndb;
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table1 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table2 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table3 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table4 fields terminated by ' ' lines terminated by '\n';
-
-## Add two nodes to my.cnf
-## Reload ndb_mgmd
-## Restart the "old" ndbd nodes
-## Restart mysqld nodes
-
-
-
-
-
-## Initial start of "new" data nodes
-## Wait for added nodes started
-## Create nodegroup for "new" nodes
-## Cluster running after adding two ndbd nodes
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	4 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-id=40	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-id=41	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-######################################################
-######################################################
-CREATE TEMPORARY TABLE test.backup_info (id INT, backup_id INT) ENGINE = HEAP;
-
-LOAD DATA INFILE 'DUMP_FILE' INTO TABLE test.backup_info FIELDS TERMINATED BY ',';
-
-DROP TABLE test.backup_info;
-
-use DB1;
-drop table old_table1, old_table2, old_table3, old_table4;
-ALTER TABLESPACE ts_1
-    DROP DATAFILE 'data_1.dat'
-    ENGINE NDB;
-ALTER TABLESPACE ts_2
-    DROP DATAFILE 'data_2.dat'
-    ENGINE NDB;
-drop TABLESPACE ts_1 ENGINE NDB;
-drop TABLESPACE ts_2 ENGINE NDB;
-drop LOGFILE GROUP lg_1 ENGINE NDB;
-
-use DB1;
-drop table old_table1, old_table2, old_table3, old_table4;
-ALTER TABLESPACE ts_1
-    DROP DATAFILE 'data_1.dat'
-    ENGINE NDB;
-ALTER TABLESPACE ts_2
-    DROP DATAFILE 'data_2.dat'
-    ENGINE NDB;
-drop TABLESPACE ts_1 ENGINE NDB;
-drop TABLESPACE ts_2 ENGINE NDB;
-drop LOGFILE GROUP lg_1 ENGINE NDB;
-drop database DB1;

=== removed file 'mysql-test/suite/ndb/r/add_node03.result'
--- a/mysql-test/suite/ndb/r/add_node03.result	2009-04-06 07:44:28 +0000
+++ b/mysql-test/suite/ndb/r/add_node03.result	1970-01-01 00:00:00 +0000
@@ -1,76 +0,0 @@
-result_format: 2
-## Make mtr.pl restart all servers after this test
-call mtr.force_restart();
-
-## Show cluster is started with one ndb_mgmd and two ndbd nodes
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	2 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-
-## Add six nodes to my.cnf
-## Reload ndb_mgmd
-## Restart the "old" ndbd nodes
-## Restart mysqld nodes
-
-
-
-
-
-## Initial start of "new" data nodes
-## Wait for added nodes started
-## Create nodegroup for "new" nodes
-## Cluster running after adding six ndbd nodes:
-Connected to Management Server at: localhost
-Cluster Configuration
----------------------
-[ndbd(NDB)]	8 node(s)
-id=1	@127.0.0.1  (mysql ndb, Nodegroup: 0, Master)
-id=2	@127.0.0.1  (mysql ndb, Nodegroup: 0)
-id=40	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-id=41	@127.0.0.1  (mysql ndb, Nodegroup: 1)
-id=42	@127.0.0.1  (mysql ndb, Nodegroup: 2)
-id=43	@127.0.0.1  (mysql ndb, Nodegroup: 2)
-id=44	@127.0.0.1  (mysql ndb, Nodegroup: 3)
-id=45	@127.0.0.1  (mysql ndb, Nodegroup: 3)
-
-[ndb_mgmd(MGM)]	1 node(s)
-id=3	@127.0.0.1  (mysql ndb)
-
-[mysqld(API)]	14 node(s)
-id=4	@127.0.0.1  (mysql ndb)
-id=5	@127.0.0.1  (mysql ndb)
-id=6	@127.0.0.1  (mysql ndb)
-id=7	@127.0.0.1  (mysql ndb)
-id=8	@127.0.0.1  (mysql ndb)
-id=9	@127.0.0.1  (mysql ndb)
-id=10 (not connected, accepting connect from localhost)
-id=11 (not connected, accepting connect from localhost)
-id=12 (not connected, accepting connect from localhost)
-id=63 (not connected, accepting connect from localhost)
-id=127 (not connected, accepting connect from localhost)
-id=192 (not connected, accepting connect from localhost)
-id=228 (not connected, accepting connect from localhost)
-id=255 (not connected, accepting connect from localhost)
-

=== modified file 'mysql-test/suite/ndb/r/ndb_add_partition.result'
--- a/mysql-test/suite/ndb/r/ndb_add_partition.result	2009-05-09 15:49:27 +0000
+++ b/mysql-test/suite/ndb/r/ndb_add_partition.result	2011-05-12 11:31:21 +0000
@@ -144,8 +144,20 @@ a	b	c
 50	50	50
 alter online table t1 reorganize partition;
 alter online table t2 reorganize partition;
+partitions added to t1
+t1_added
+0
+partitions added to t2
+t2_added
+0
 alter online table t1 add partition partitions 1;
 alter online table t2 add partition partitions 4;
+partitions added to t1
+t1_added
+1
+partitions added to t2
+t2_added
+4
 alter online table t1 reorganize partition;
 ERROR HY000: REORGANIZE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs
 select count(*) from t1;
@@ -246,6 +258,12 @@ a	b	c
 50	50	50
 alter online table t1 add partition partitions 2;
 alter online table t2 add partition partitions 1;
+partitions added to t1
+t1_added
+3
+partitions added to t2
+t2_added
+5
 select count(*) from t1;
 count(*)
 100

=== modified file 'mysql-test/suite/ndb/r/ndb_alter_table_online2.result'
--- a/mysql-test/suite/ndb/r/ndb_alter_table_online2.result	2009-10-09 15:48:57 +0000
+++ b/mysql-test/suite/ndb/r/ndb_alter_table_online2.result	2011-05-13 11:42:59 +0000
@@ -75,6 +75,9 @@ name
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 update t1 set c= 0;
+select * from t1;
+pk	a	b	c
+1	5000	5000	5000
 
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 ~ Alter table t1 and try to add partitions

=== modified file 'mysql-test/suite/ndb/r/ndb_rename.result'
--- a/mysql-test/suite/ndb/r/ndb_rename.result	2007-06-27 12:28:02 +0000
+++ b/mysql-test/suite/ndb/r/ndb_rename.result	2011-05-12 09:01:21 +0000
@@ -21,4 +21,11 @@ SELECT * FROM ndbtest.t2 WHERE attr1 = 1
 pk1	attr1	attr2	attr3
 1	1	1	one
 drop table ndbtest.t2;
+create table t1 (
+pk1 INT NOT NULL PRIMARY KEY,
+b blob
+) engine = ndbcluster;
+alter table t1 rename ndbtest.t1;
+alter table ndbtest.t1 rename test.t1;
+drop table test.t1;
 drop database ndbtest;

=== removed file 'mysql-test/suite/ndb/t/add_node01.test'
--- a/mysql-test/suite/ndb/t/add_node01.test	2010-03-17 10:50:18 +0000
+++ b/mysql-test/suite/ndb/t/add_node01.test	1970-01-01 00:00:00 +0000
@@ -1,150 +0,0 @@
--- source include/have_ndb.inc
--- source include/not_embedded.inc
---result_format 2
-
-## Make mtr.pl restart all servers after this test
-call mtr.force_restart(); 
-
-## Show cluster is started with one ndb_mgmd and two ndbd nodes
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-
---disable_warnings
-drop database if exists DB1;
---enable_warnings
-
-CREATE LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_1.dat'
-    INITIAL_SIZE 16M
-    UNDO_BUFFER_SIZE 2M
-    ENGINE NDB;
-
-ALTER LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_2.dat'
-    INITIAL_SIZE 12M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_1
-    ADD DATAFILE 'data_1.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_2
-    ADD DATAFILE 'data_2.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-create database DB1;
-use DB1;
-create table old_table1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table2(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table3(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_1 STORAGE DISK
-engine=ndb;
-create table old_table4(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_2 STORAGE DISK
-engine=ndb;
-
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table1 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table2 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table3 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table4 fields terminated by ' ' lines terminated by '\n';
-
-## Add two nodes to my.cnf
-# Set ndb_mgmd with node id 3, otherwise the configuration will change and the
-# cluster may fail to restart
---source suite/ndb/include/add_two_nodes.inc
-
-## Reload ndb_mgmd
---source suite/ndb/include/reload_ndb_mgmd.inc
---exec $NDB_MGM -e show >> $NDB_TOOLS_OUTPUT
-
-## Restart the "old" ndbd nodes
---exec $NDB_MGM -e "1 restart" >> $NDB_TOOLS_OUTPUT
---exec $NDB_WAITER --nowait-nodes=40,41 >> $NDB_TOOLS_OUTPUT
---exec $NDB_MGM -e "2 restart" >> $NDB_TOOLS_OUTPUT
---exec $NDB_WAITER --nowait-nodes=40,41 >> $NDB_TOOLS_OUTPUT
-
-## Restart mysqld nodes
-let $mysqld_name=mysqld.1.1;
---source include/restart_mysqld.inc
-connect (mysqld_2_1,127.0.0.1,root,,test,$MASTER_MYPORT1,);
-connection mysqld_2_1;
-let $mysqld_name= mysqld.2.1;
---source include/restart_mysqld.inc
-connection default;
-
-## Initial start of "new" data nodes
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=40
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=41
-
-## Wait for added nodes started
---exec $NDB_WAITER --timeout=300 >> $NDB_TOOLS_OUTPUT
-
-## Create nodegroup for "new" nodes
---exec $NDB_MGM -e "create nodegroup 40,41" >> $NDB_TOOLS_OUTPUT
-
-# Cluster running after adding two ndbd nodes
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-
-use DB1;
-create table new_table1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table new_table2(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_1 STORAGE DISK
-engine=ndb;
-insert into new_table1(id, data) VALUES(1,'new'), (2,'new'),(3,'new'),(4,'new'),(5,'new'),(6,'new'),(7,'new'),(8,'new'),(9,'new'),(10,'new');
-insert into new_table2(id, data) VALUES(1,'new'), (2,'new'),(3,'new'),(4,'new'),(5,'new'),(6,'new'),(7,'new'),(8,'new'),(9,'new'),(10,'new');
-
-## ndb_mgm dump shows old data resides only on "old" nodes and new data resides on all nodes in cluster log 
---exec $NDB_MGM -e "all dump 18" >> $NDB_TOOLS_OUTPUT
-
-alter online table old_table1 reorganize partition;
-alter online table old_table2 reorganize partition;
-alter online table old_table3 reorganize partition;
-alter online table old_table4 reorganize partition;
-
-select LOGFILE_GROUP_NAME,FILE_TYPE,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='undo_1.dat';
-select LOGFILE_GROUP_NAME,FILE_TYPE,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='undo_2.dat';
-select LOGFILE_GROUP_NAME,FILE_TYPE,TABLESPACE_NAME,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='data_1.dat';
-select LOGFILE_GROUP_NAME,FILE_TYPE,TABLESPACE_NAME,EXTRA from INFORMATION_SCHEMA.FILES where FILE_NAME='data_2.dat';
-
-## Drop nodegroup with "new" nodes is not allowed with data one those nodes
---error 255
---exec $NDB_MGM -e "drop nodegroup 1" >> $NDB_TOOLS_OUTPUT
-
-## Nodegroup with "new" nodes still exist after dropping it as shown:
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-
-show databases;
-drop table old_table1,old_table2,old_table3,old_table4,new_table1,new_table2;
-drop database DB1;
-show databases;
-
-## Drop nodegroup with "new" nodes
---exec $NDB_MGM -e "drop nodegroup 1" >> $NDB_TOOLS_OUTPUT
-
-## Nodegroup with "new" nodes still exists after dropping it as shown:
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-
-# Cleanup
-ALTER TABLESPACE ts_1
-    DROP DATAFILE 'data_1.dat'
-    ENGINE NDB;
-
-ALTER TABLESPACE ts_2
-    DROP DATAFILE 'data_2.dat'
-    ENGINE NDB;
-
-drop TABLESPACE ts_1 ENGINE NDB;
-drop TABLESPACE ts_2 ENGINE NDB;
-
-drop LOGFILE GROUP lg_1 ENGINE NDB;

=== removed file 'mysql-test/suite/ndb/t/add_node02.test'
--- a/mysql-test/suite/ndb/t/add_node02.test	2010-03-17 10:50:18 +0000
+++ b/mysql-test/suite/ndb/t/add_node02.test	1970-01-01 00:00:00 +0000
@@ -1,124 +0,0 @@
--- source include/have_ndb.inc
--- source include/not_embedded.inc
---result_format 2
-
-## Make mtr.pl restart all servers after this test
-call mtr.force_restart(); 
-
-## Show cluster is started with one ndb_mgmd and two ndbd nodes
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/ 
---exec $NDB_MGM -e show
-
---disable_warnings
-drop database if exists DB1;
---enable_warnings
-
-CREATE LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_1.dat'
-    INITIAL_SIZE 16M
-    UNDO_BUFFER_SIZE 2M
-    ENGINE NDB;
-
-ALTER LOGFILE GROUP lg_1
-    ADD UNDOFILE 'undo_2.dat'
-    INITIAL_SIZE 12M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_1
-    ADD DATAFILE 'data_1.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-CREATE TABLESPACE ts_2
-    ADD DATAFILE 'data_2.dat'
-    USE LOGFILE GROUP lg_1
-    INITIAL_SIZE 32M
-    ENGINE NDB;
-
-create database DB1;
-use DB1;
-create table old_table1(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table2(id int NOT NULL PRIMARY KEY, data char(8)) engine=ndb;
-create table old_table3(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_1 STORAGE DISK
-engine=ndb;
-create table old_table4(id int NOT NULL PRIMARY KEY, data char(8))
-TABLESPACE ts_2 STORAGE DISK
-engine=ndb;
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table1 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table2 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table3 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table old_table4 fields terminated by ' ' lines terminated by '\n';
-
-## Add two nodes to my.cnf
-# Set ndb_mgmd with node id 3, otherwise the configuration will change and the
-# cluster may fail to restart
---source suite/ndb/include/add_two_nodes.inc
-
-## Reload ndb_mgmd
---source suite/ndb/include/reload_ndb_mgmd.inc
---exec $NDB_MGM -e show >> $NDB_TOOLS_OUTPUT
-
-## Restart the "old" ndbd nodes
---exec $NDB_MGM -e "1 restart" >> $NDB_TOOLS_OUTPUT
---exec $NDB_WAITER --nowait-nodes=40,41 >> $NDB_TOOLS_OUTPUT
---exec $NDB_MGM -e "2 restart" >> $NDB_TOOLS_OUTPUT
---exec $NDB_WAITER --nowait-nodes=40,41 >> $NDB_TOOLS_OUTPUT
-
-## Restart mysqld nodes
-let $mysqld_name=mysqld.1.1;
---source include/restart_mysqld.inc
-connect (mysqld_2_1,127.0.0.1,root,,test,$MASTER_MYPORT1,);
-connection mysqld_2_1;
-let $mysqld_name= mysqld.2.1;
---source include/restart_mysqld.inc
-connection default;
-
-## Initial start of "new" data nodes
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=40 --initial
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=41 --initial
-
-## Wait for added nodes started
---exec $NDB_WAITER --timeout=300 >> $NDB_TOOLS_OUTPUT
-
-## Create nodegroup for "new" nodes
---exec $NDB_MGM -e "create nodegroup 40,41" >> $NDB_TOOLS_OUTPUT
-
-## Cluster running after adding two ndbd nodes
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-
---source include/ndb_backup.inc
-use DB1;
-drop table old_table1, old_table2, old_table3, old_table4;
-ALTER TABLESPACE ts_1
-    DROP DATAFILE 'data_1.dat'
-    ENGINE NDB;
-ALTER TABLESPACE ts_2
-    DROP DATAFILE 'data_2.dat'
-    ENGINE NDB;
-drop TABLESPACE ts_1 ENGINE NDB;
-drop TABLESPACE ts_2 ENGINE NDB;
-drop LOGFILE GROUP lg_1 ENGINE NDB;
-
---exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUPS-$the_backup_id >> $NDB_TOOLS_OUTPUT
-
-# Cleanup
-use DB1;
-drop table old_table1, old_table2, old_table3, old_table4;
-ALTER TABLESPACE ts_1
-    DROP DATAFILE 'data_1.dat'
-    ENGINE NDB;
-ALTER TABLESPACE ts_2
-    DROP DATAFILE 'data_2.dat'
-    ENGINE NDB;
-drop TABLESPACE ts_1 ENGINE NDB;
-drop TABLESPACE ts_2 ENGINE NDB;
-drop LOGFILE GROUP lg_1 ENGINE NDB;
-drop database DB1;

=== removed file 'mysql-test/suite/ndb/t/add_node03.test'
--- a/mysql-test/suite/ndb/t/add_node03.test	2010-03-17 10:50:18 +0000
+++ b/mysql-test/suite/ndb/t/add_node03.test	1970-01-01 00:00:00 +0000
@@ -1,67 +0,0 @@
--- source include/have_ndb.inc
--- source include/not_embedded.inc
---result_format 2
-
-## Make mtr.pl restart all servers after this test
-call mtr.force_restart(); 
-
-## Show cluster is started with one ndb_mgmd and two ndbd nodes
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-
-## Add six nodes to my.cnf
-# Set ndb_mgmd with node id 3, otherwise the configuration will change and the
-# cluster may fail to restart
---source suite/ndb/include/add_six_nodes.inc
-
-## Reload ndb_mgmd
---source suite/ndb/include/reload_ndb_mgmd.inc
---exec $NDB_MGM -e show >> $NDB_TOOLS_OUTPUT
-
-## Restart the "old" ndbd nodes
---exec $NDB_MGM -e "1 restart" >> $NDB_TOOLS_OUTPUT
---exec $NDB_WAITER --nowait-nodes=40,41,42,43,44,45 >> $NDB_TOOLS_OUTPUT
---exec $NDB_MGM -e "2 restart" >> $NDB_TOOLS_OUTPUT
---exec $NDB_WAITER --nowait-nodes=40,41,42,43,44,45 >> $NDB_TOOLS_OUTPUT
-
-## Restart mysqld nodes
-let $mysqld_name=mysqld.1.1;
---source include/restart_mysqld.inc
-connect (mysqld_2_1,127.0.0.1,root,,test,$MASTER_MYPORT1,);
-connection mysqld_2_1;
-let $mysqld_name= mysqld.2.1;
---source include/restart_mysqld.inc
-connection default;
-
-## Initial start of "new" data nodes
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=40 --initial
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=41 --initial
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=42 --initial
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=43 --initial
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=44 --initial
---replace_regex /[0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]*/YYYY-MM-DD HH:MM:SS/
-/localhost:[0-9]*/localhost/ /generation: [0-9]*/generation: X/
---exec $NDB_NDBD --ndb-nodeid=45 --initial
-
-## Wait for added nodes started
---exec $NDB_WAITER --timeout=300 >> $NDB_TOOLS_OUTPUT
-
-## Create nodegroup for "new" nodes
---exec $NDB_MGM -e "create nodegroup 40,41" >> $NDB_TOOLS_OUTPUT
---exec $NDB_MGM -e "create nodegroup 42,43" >> $NDB_TOOLS_OUTPUT
---exec $NDB_MGM -e "create nodegroup 44,45" >> $NDB_TOOLS_OUTPUT
-
-## Cluster running after adding six ndbd nodes:
---replace_regex /mysql-[0-9]*.[0-9]*.[0-9]*/mysql/ /ndb-[0-9]*.[0-9]*.[0-9]*/ndb/ /localhost:[0-9]*/localhost/
---exec $NDB_MGM -e show
-

=== modified file 'mysql-test/suite/ndb/t/disabled.def'
--- a/mysql-test/suite/ndb/t/disabled.def	2010-04-28 10:33:09 +0000
+++ b/mysql-test/suite/ndb/t/disabled.def	2011-05-06 13:40:42 +0000
@@ -16,7 +16,3 @@ ndb_partition_error2 : Bug#40989 ndb_par
 ndb_cache_trans           : Bug#42197 Query cache and autocommit
 ndb_disconnect_ddl        : Bug#31853 flaky testcase...
 
-# the below testcase have detected the bugs that are still open
-add_node01    : disabled waiting for safe_process compatible spawn
-add_node02    : disabled waiting for safe_process compatible spawn
-add_node03    : disabled waiting for safe_process compatible spawn

=== modified file 'mysql-test/suite/ndb/t/ndb_add_partition.test'
--- a/mysql-test/suite/ndb/t/ndb_add_partition.test	2009-02-12 15:21:46 +0000
+++ b/mysql-test/suite/ndb/t/ndb_add_partition.test	2011-05-12 11:31:21 +0000
@@ -33,6 +33,10 @@ STORAGE DISK
 TABLESPACE ts1
 partition by key(a);
 
+let $t1_part_count_start = query_get_value(select count(*) as Value from information_schema.partitions where table_schema = 'test' and table_name = 't1', Value, 1);
+
+let $t2_part_count_start = query_get_value(select count(*) as Value from information_schema.partitions where table_schema = 'test' and table_name = 't2', Value, 1);
+
 INSERT INTO t1 VALUES
 (1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
 (6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10),
@@ -91,9 +95,31 @@ select * from t2 where b = 50;
 alter online table t1 reorganize partition;
 alter online table t2 reorganize partition;
 
+let $t1_part_count_now = query_get_value(select count(*) as Value from information_schema.partitions where table_schema = 'test' and table_name = 't1', Value, 1);
+
+let $t2_part_count_now = query_get_value(select count(*) as Value from information_schema.partitions where table_schema = 'test' and table_name = 't2', Value, 1);
+
+--disable_query_log
+--echo partitions added to t1
+eval select $t1_part_count_now - $t1_part_count_start as t1_added;
+--echo partitions added to t2
+eval select $t2_part_count_now - $t2_part_count_start as t2_added;
+--enable_query_log
+
 alter online table t1 add partition partitions 1;
 alter online table t2 add partition partitions 4;
 
+let $t1_part_count_now = query_get_value(select count(*) as Value from information_schema.partitions where table_schema = 'test' and table_name = 't1', Value, 1);
+
+let $t2_part_count_now = query_get_value(select count(*) as Value from information_schema.partitions where table_schema = 'test' and table_name = 't2', Value, 1);
+
+--disable_query_log
+--echo partitions added to t1
+eval select $t1_part_count_now - $t1_part_count_start as t1_added;
+--echo partitions added to t2
+eval select $t2_part_count_now - $t2_part_count_start as t2_added;
+--enable_query_log
+
 # reorganize partition not support if not default partitioning
 # and after a add partition it's no longer default
 --error ER_REORG_NO_PARAM_ERROR
@@ -134,6 +160,17 @@ select * from t2 where b = 50;
 alter online table t1 add partition partitions 2;
 alter online table t2 add partition partitions 1;
 
+let $t1_part_count_now = query_get_value(select count(*) as Value from information_schema.partitions where table_schema = 'test' and table_name = 't1', Value, 1);
+
+let $t2_part_count_now = query_get_value(select count(*) as Value from information_schema.partitions where table_schema = 'test' and table_name = 't2', Value, 1);
+
+--disable_query_log
+--echo partitions added to t1
+eval select $t1_part_count_now - $t1_part_count_start as t1_added;
+--echo partitions added to t2
+eval select $t2_part_count_now - $t2_part_count_start as t2_added;
+--enable_query_log
+
 select count(*) from t1;
 select count(*) from t2;
 --sorted_result

=== modified file 'mysql-test/suite/ndb/t/ndb_addnode.test'
--- a/mysql-test/suite/ndb/t/ndb_addnode.test	2010-01-27 10:08:37 +0000
+++ b/mysql-test/suite/ndb/t/ndb_addnode.test	2011-05-06 13:48:00 +0000
@@ -20,8 +20,8 @@ create table t1(id int NOT NULL PRIMARY
 create table t2(id int NOT NULL PRIMARY KEY, data char(8))
 TABLESPACE ts_1 STORAGE DISK engine=ndb;
 
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table t1 fields terminated by ' ' lines terminated by '\n';
-load data local infile 'suite/ndb/std_data/table_data10000.dat' into table t2 fields terminated by ' ' lines terminated by '\n';
+load data local infile 'suite/ndb/data/table_data10000.dat' into table t1 fields terminated by ' ' lines terminated by '\n';
+load data local infile 'suite/ndb/data/table_data10000.dat' into table t2 fields terminated by ' ' lines terminated by '\n';
 
 ## Create nodegroup for "new" nodes
 --exec $NDB_MGM -e "create nodegroup 3,4"

=== modified file 'mysql-test/suite/ndb/t/ndb_alter_table_backup.test'
--- a/mysql-test/suite/ndb/t/ndb_alter_table_backup.test	2010-10-25 09:15:03 +0000
+++ b/mysql-test/suite/ndb/t/ndb_alter_table_backup.test	2011-05-06 14:11:46 +0000
@@ -7,7 +7,7 @@
 --source include/have_ndb.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 ##############################
 # mix endian restore section #
@@ -22,8 +22,8 @@ DROP TABLE IF EXISTS t1;
 --echo *********************************
 --echo * restore tables w/ new column from little endian
 --echo *********************************
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup51_d2_le >> $NDB_TOOLS_OUTPUT 2>&1
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup51_d2_le >> $NDB_TOOLS_OUTPUT 2>&1
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51_d2_le >> $NDB_TOOLS_OUTPUT 2>&1
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51_d2_le >> $NDB_TOOLS_OUTPUT 2>&1
 SHOW TABLES;
 SHOW CREATE TABLE t1;
 SELECT * FROM t1 WHERE a = 1 or a = 10 or a = 20 or a = 30 ORDER BY a;
@@ -35,8 +35,8 @@ DROP TABLE t1;
 --echo *********************************
 --echo * restore tables w/ new column from big endian
 --echo *********************************
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup51_d2_be >> $NDB_TOOLS_OUTPUT 2>&1
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup51_d2_be >> $NDB_TOOLS_OUTPUT 2>&1
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51_d2_be >> $NDB_TOOLS_OUTPUT 2>&1
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51_d2_be >> $NDB_TOOLS_OUTPUT 2>&1
 SHOW TABLES;
 SHOW CREATE TABLE t1;
 SELECT * FROM t1 WHERE a = 1 or a = 10 or a = 20 or a = 30 ORDER BY a;

=== modified file 'mysql-test/suite/ndb/t/ndb_alter_table_online2.test'
--- a/mysql-test/suite/ndb/t/ndb_alter_table_online2.test	2009-10-09 15:48:57 +0000
+++ b/mysql-test/suite/ndb/t/ndb_alter_table_online2.test	2011-05-13 11:42:59 +0000
@@ -52,7 +52,7 @@ set @t1_id = (select id from ndb_show_ta
 --echo
 
 let $end_mysqlslap= 5000;
---exec $MYSQL_SLAP --silent --query="update test.t1 set a=a+1 where pk=1" -i $end_mysqlslap >> $NDB_TOOLS_OUTPUT &
+--exec $MYSQL_SLAP --query="update test.t1 set a=a+1 where pk=1" -i $end_mysqlslap >> $NDB_TOOLS_OUTPUT &
 
 # wait for 100 updates
 --disable_result_log
@@ -93,7 +93,7 @@ select name from ndb_show_tables_results
 --echo
 
 update t1 set b= 0;
---exec $MYSQL_SLAP --silent --query="update test.t1 set b=b+1 where pk=1" -i $end_mysqlslap >> $NDB_TOOLS_OUTPUT &
+--exec $MYSQL_SLAP --query="update test.t1 set b=b+1 where pk=1" -i $end_mysqlslap >> $NDB_TOOLS_OUTPUT &
 
 # wait for 100 updates
 --disable_result_log
@@ -134,22 +134,32 @@ select name from ndb_show_tables_results
 --echo
 
 update t1 set c= 0;
---exec $MYSQL_SLAP --silent --query="update test.t1 set c=c+1 where pk=1" -i $end_mysqlslap >> $NDB_TOOLS_OUTPUT &
+--exec $MYSQL_SLAP --query="update test.t1 set c=c+1 where pk=1" -i $end_mysqlslap >> $NDB_TOOLS_OUTPUT &
 
 # wait for mysqlslap to end
 --disable_result_log
 --disable_query_log
 --eval select @end:=$end_mysqlslap
 let $val= 1;
+# 10 minutes = 600s sleep 0.1 => 6000
+let $maxwait = 6000;
 while ($val)
 {
   --sleep 0.1
   select @val1:=a,@val2:=b,@val3:=c from t1 where pk=1;
   let $val= `select @end > @val1 || @end > @val2 || @end > @val3`;
+  dec $maxwait;
+
+  if (!$maxwait)
+  {
+    let $val = 0;
+  }
 }
 --enable_result_log
 --enable_query_log
 
+select * from t1;
+
 --echo
 --echo ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 --echo ~ Alter table t1 and try to add partitions

=== modified file 'mysql-test/suite/ndb/t/ndb_dd_restore_compat.test'
--- a/mysql-test/suite/ndb/t/ndb_dd_restore_compat.test	2010-10-25 09:15:03 +0000
+++ b/mysql-test/suite/ndb/t/ndb_dd_restore_compat.test	2011-05-06 14:11:46 +0000
@@ -1,10 +1,10 @@
 -- source include/have_ndb.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -p 1 -m -r $backup_data_dir/ndb_backup51_dd >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -e -b 1 -n 2 -p 1 -r $backup_data_dir/ndb_backup51_dd >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -p 1 -m -r $backup_data_dir/51_dd >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -e -b 1 -n 2 -p 1 -r $backup_data_dir/51_dd >> $NDB_TOOLS_OUTPUT
 
 # (priviliges differ on embedded and server so replace)
 --replace_column 18 #

=== modified file 'mysql-test/suite/ndb/t/ndb_native_default_support.test'
--- a/mysql-test/suite/ndb/t/ndb_native_default_support.test	2010-11-10 13:39:11 +0000
+++ b/mysql-test/suite/ndb/t/ndb_native_default_support.test	2011-05-06 14:11:46 +0000
@@ -8,7 +8,7 @@
 -- source include/ndb_default_cluster.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 --disable_warnings
 DROP TABLE IF EXISTS t1,bit1;
@@ -189,8 +189,8 @@ DROP DATABASE mysqltest;
 --echo * Restore the backup from 6.3 or 6.4, which don't support native default value
 --echo ******************************************************************************
 
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup_before_native_default >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup_before_native_default >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/before_native_default >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/before_native_default >> $NDB_TOOLS_OUTPUT
 
 ####
 # Bug# 53539 Ndb : MySQLD default values in frm embedded in backup not endian-converted
@@ -572,8 +572,8 @@ SHOW CREATE TABLE t1;
 --let ndb_desc_opts= -d test t1
 --source suite/ndb/include/ndb_desc_print.inc
 
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -r --promote-attribute --exclude-missing-columns $backup_data_dir/ndb_backup_before_native_default >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r --promote-attribute --exclude-missing-columns $backup_data_dir/ndb_backup_before_native_default >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -r --promote-attribute --exclude-missing-columns $backup_data_dir/before_native_default >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r --promote-attribute --exclude-missing-columns $backup_data_dir/before_native_default >> $NDB_TOOLS_OUTPUT
 
 SELECT i, j, f, d, d2, ch, HEX(b), HEX(vb), HEX(blob1), text1, timestamp_c, newOne, newTwo from t1 order by i;
 

=== modified file 'mysql-test/suite/ndb/t/ndb_rename.test'
--- a/mysql-test/suite/ndb/t/ndb_rename.test	2007-11-29 10:29:35 +0000
+++ b/mysql-test/suite/ndb/t/ndb_rename.test	2011-05-12 09:01:21 +0000
@@ -30,6 +30,17 @@ alter table t2 rename ndbtest.t2;
 SELECT * FROM ndbtest.t2 WHERE attr1 = 1;
 
 drop table ndbtest.t2;
+
+create table t1 (
+  pk1 INT NOT NULL PRIMARY KEY,
+  b blob
+) engine = ndbcluster;
+
+alter table t1 rename ndbtest.t1;
+alter table ndbtest.t1 rename test.t1;
+
+drop table test.t1;
+
 drop database ndbtest;
 
 # End of 4.1 tests

=== modified file 'mysql-test/suite/ndb/t/ndb_restore_compat_downward.test'
--- a/mysql-test/suite/ndb/t/ndb_restore_compat_downward.test	2011-02-22 03:29:24 +0000
+++ b/mysql-test/suite/ndb/t/ndb_restore_compat_downward.test	2011-05-06 14:11:46 +0000
@@ -6,7 +6,7 @@
 -- source include/have_case_sensitive_file_system.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 # This test currently requires case sensitive file system as the tables
 # are originally stored with uppercase
@@ -19,8 +19,8 @@ let $backup_data_dir=$MYSQL_TEST_DIR/sui
 DROP DATABASE IF EXISTS BANK;
 --enable_warnings
 CREATE DATABASE BANK default charset=latin1 default collate=latin1_bin;
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -p 1 -m -r $backup_data_dir/ndb_backup51 >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -e -b 1 -n 2 -p 1 -r $backup_data_dir/ndb_backup51 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -p 1 -m -r $backup_data_dir/51 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -e -b 1 -n 2 -p 1 -r $backup_data_dir/51 >> $NDB_TOOLS_OUTPUT
 USE BANK;
 --sorted_result
 SHOW TABLES;
@@ -56,8 +56,8 @@ TRUNCATE ACCOUNT_TYPE;
 --exec $NDB_DESC --no-defaults -d BANK ACCOUNT_TYPE | grep ForceVarPart
 
 # Restore data
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -p 1 -r $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -e -b 1 -n 2 -p 1 -r $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -p 1 -r $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -e -b 1 -n 2 -p 1 -r $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
 
 # Check data
 SELECT * FROM GL            ORDER BY TIME,ACCOUNT_TYPE;
@@ -65,8 +65,8 @@ SELECT * FROM ACCOUNT       ORDER BY ACC
 SELECT COUNT(*) FROM TRANSACTION;
 SELECT * FROM SYSTEM_VALUES ORDER BY SYSTEM_VALUES_ID;
 SELECT * FROM mysql.ndb_apply_status WHERE server_id=0;
---exec $NDB_RESTORE --no-defaults -b 2 -n 1 -m -p 1 -s -r $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 2 -n 2 -p 1 -s -r $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 2 -n 1 -m -p 1 -s -r $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 2 -n 2 -p 1 -s -r $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
 SELECT * FROM DESCRIPTION ORDER BY USERNAME;
 --exec $NDB_DESC --no-defaults -d BANK DESCRIPTION | grep SHORT_VAR
 --exec $NDB_DESC --no-defaults -d BANK DESCRIPTION | grep MEDIUM_VAR
@@ -78,8 +78,8 @@ DROP TABLE TRANSACTION;
 DROP TABLE SYSTEM_VALUES;
 DROP TABLE ACCOUNT_TYPE;
 
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -m $backup_data_dir/ndb_backup_packed >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -p 1 -r $backup_data_dir/ndb_backup_packed >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -m $backup_data_dir/packed >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -p 1 -r $backup_data_dir/packed >> $NDB_TOOLS_OUTPUT
 
 SELECT * FROM GL            ORDER BY TIME,ACCOUNT_TYPE;
 SELECT * FROM ACCOUNT       ORDER BY ACCOUNT_ID;
@@ -96,7 +96,7 @@ drop table t1;
 # bug#54613
 
 --error 1
---exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 2 -m --core=0 --include-databases=ham --skip-unknown-objects $backup_data_dir/ndb_backup_bug54613 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 2 -m --core=0 --include-databases=ham --skip-unknown-objects $backup_data_dir/bug54613 >> $NDB_TOOLS_OUTPUT
 
 --error 0
---exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 2 -m --core=0 --include-databases=ham --skip-unknown-objects --skip-broken-objects $backup_data_dir/ndb_backup_bug54613 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 2 -m --core=0 --include-databases=ham --skip-unknown-objects --skip-broken-objects $backup_data_dir/bug54613 >> $NDB_TOOLS_OUTPUT

=== modified file 'mysql-test/suite/ndb/t/ndb_restore_compat_endianness.test'
--- a/mysql-test/suite/ndb/t/ndb_restore_compat_endianness.test	2011-02-22 03:29:24 +0000
+++ b/mysql-test/suite/ndb/t/ndb_restore_compat_endianness.test	2011-05-06 14:11:46 +0000
@@ -6,7 +6,7 @@
 -- source include/ndb_default_cluster.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 #
 # Bug #27543 restore of backup from different endian does not work for blob column
@@ -151,8 +151,8 @@ let $backup_data_dir=$MYSQL_TEST_DIR/sui
 USE test;
 DROP TABLE IF EXISTS t_num,t_datetime,t_string_1,t_string_2,t_gis;
 --enable_warnings
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup51_data_le >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup51_data_le >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51_data_le >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51_data_le >> $NDB_TOOLS_OUTPUT
 --sorted_result
 SHOW TABLES;
 SHOW CREATE TABLE t_num;
@@ -173,8 +173,8 @@ SELECT AsText(t_geometrycollection), AsT
 #
 
 DROP TABLE t_num,t_datetime,t_string_1,t_string_2,t_gis;
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup51_data_be >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup51_data_be >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51_data_be >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51_data_be >> $NDB_TOOLS_OUTPUT
 --sorted_result
 SHOW TABLES;
 SHOW CREATE TABLE t_num;

=== modified file 'mysql-test/suite/ndb/t/ndb_restore_misc.test'
--- a/mysql-test/suite/ndb/t/ndb_restore_misc.test	2011-04-11 13:36:12 +0000
+++ b/mysql-test/suite/ndb/t/ndb_restore_misc.test	2011-05-06 14:11:46 +0000
@@ -6,7 +6,7 @@
 -- source include/ndb_default_cluster.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 #
 # Bug #27775 - mediumint auto inc not restored correctly
@@ -484,9 +484,9 @@ source include/ndb_backup_id.inc;
 #
 
 # ensure correct restore of epoch numbers in old versions
---exec $NDB_RESTORE --no-defaults --core=0 -e -b 1 -n 1 $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults --core=0 -e -b 1 -n 1 $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
 select epoch from mysql.ndb_apply_status where server_id=0;
---exec $NDB_RESTORE --no-defaults --core=0 -e -b 1 -n 1 $backup_data_dir/ndb_backup51 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults --core=0 -e -b 1 -n 1 $backup_data_dir/51 >> $NDB_TOOLS_OUTPUT
 select epoch from mysql.ndb_apply_status where server_id=0;
 # ensure correct restore of epoch numbers in current version
 # number should be "big"
@@ -497,12 +497,12 @@ select epoch > (1 << 32) from mysql.ndb_
 #
 # Bug#40428 core dumped when restore backup log file(redo log)
 #
---exec $NDB_RESTORE --print --print_meta -b 1 -n 1 $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --print --print_meta -b 1 -n 1 $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
 
 #
 # Bug #33040 ndb_restore crashes with --print_log
 #
---exec $NDB_RESTORE --print_log -b 1 -n 1 $backup_data_dir/ndb_backup50 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --print_log -b 1 -n 1 $backup_data_dir/50 >> $NDB_TOOLS_OUTPUT
 
 #
 # Bug#48005 ndb backup / restore does not restore the auto_increment
@@ -554,7 +554,7 @@ drop table ndb_show_tables_results;
 #
 # Bug#51432
 #
---exec $NDB_RESTORE --no-defaults --core=0 -e -b 1 -n 2 -m $backup_data_dir/ndb_backup_hashmap >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults --core=0 -e -b 1 -n 2 -m $backup_data_dir/hashmap >> $NDB_TOOLS_OUTPUT
 
 #
 # Bug#56285

=== modified file 'mysql-test/suite/ndb/t/ndb_restore_undolog.test'
--- a/mysql-test/suite/ndb/t/ndb_restore_undolog.test	2010-10-25 09:15:03 +0000
+++ b/mysql-test/suite/ndb/t/ndb_restore_undolog.test	2011-05-06 14:11:46 +0000
@@ -2,7 +2,7 @@
 -- source include/ndb_default_cluster.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 #
 # The table structure and data list below
@@ -440,8 +440,8 @@ let $backup_data_dir=$MYSQL_TEST_DIR/sui
 USE test;
 DROP TABLE IF EXISTS t_num,t_datetime,t_string_1,t_string_2,t_gis,t_string_3,t_string_4,t_string_5;
 --enable_warnings
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup51_undolog_le >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup51_undolog_le >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51_undolog_le >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51_undolog_le >> $NDB_TOOLS_OUTPUT
 --sorted_result
 SHOW TABLES;
 SHOW CREATE TABLE t_num;
@@ -480,8 +480,8 @@ ENGINE =NDB;
 #
 
 USE test;
---exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/ndb_backup51_undolog_be >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/ndb_backup51_undolog_be >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 1 -m -r $backup_data_dir/51_undolog_be >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults -b 1 -n 2 -r $backup_data_dir/51_undolog_be >> $NDB_TOOLS_OUTPUT
 --sorted_result
 SHOW TABLES;
 SHOW CREATE TABLE t_num;

=== modified file 'mysql-test/suite/ndb/t/ndb_show_tables_result.inc'
--- a/mysql-test/suite/ndb/t/ndb_show_tables_result.inc	2009-10-09 16:13:54 +0000
+++ b/mysql-test/suite/ndb/t/ndb_show_tables_result.inc	2011-05-04 13:00:44 +0000
@@ -2,7 +2,7 @@
 let $dump_file = $MYSQLTEST_VARDIR/tmp/ndb_show_tables.txt;
 
 # Dump the output of ndb_show_tables to file
---exec $NDB_SHOW_TABLES --p > $dump_file
+--exec $NDB_SHOW_TABLES --no-defaults --p > $dump_file
 
 TRUNCATE ndb_show_tables_results;
 

=== modified file 'mysql-test/suite/ndb_binlog/t/ndb_binlog_restore.test'
--- a/mysql-test/suite/ndb_binlog/t/ndb_binlog_restore.test	2010-10-25 09:15:03 +0000
+++ b/mysql-test/suite/ndb_binlog/t/ndb_binlog_restore.test	2011-05-06 14:11:46 +0000
@@ -2,7 +2,7 @@
 -- source include/have_binlog_format_mixed_or_row.inc
 
 # Directory containing the saved backup files
-let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/std_data;
+let $backup_data_dir=$MYSQL_TEST_DIR/suite/ndb/backups;
 
 --disable_warnings
 drop table if exists t1;
@@ -123,8 +123,8 @@ CREATE TABLE ACCOUNT_TYPE ( ACCOUNT_TYPE
 --echo #
 --echo # reset, restore and  binlog should _not_ happen
 reset master;
---exec $NDB_RESTORE --no-defaults --no-binlog -b 1 -n 1 -p 1 -r $backup_data_dir/ndb_backup51 >> $NDB_TOOLS_OUTPUT
---exec $NDB_RESTORE --no-defaults --no-binlog -b 1 -n 2 -p 1 -r $backup_data_dir/ndb_backup51 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults --no-binlog -b 1 -n 1 -p 1 -r $backup_data_dir/51 >> $NDB_TOOLS_OUTPUT
+--exec $NDB_RESTORE --no-defaults --no-binlog -b 1 -n 2 -p 1 -r $backup_data_dir/51 >> $NDB_TOOLS_OUTPUT
 
 select count(*) from TRANSACTION;
 --source include/show_binlog_events2.inc

=== modified file 'mysql-test/suite/ndb_binlog/t/ndb_binlog_variants.test'
--- a/mysql-test/suite/ndb_binlog/t/ndb_binlog_variants.test	2010-12-14 19:28:08 +0000
+++ b/mysql-test/suite/ndb_binlog/t/ndb_binlog_variants.test	2011-05-13 07:40:50 +0000
@@ -35,15 +35,15 @@ create table ba(ks int primary key, st i
 # Wait for each mysqld to startup binlogging
 --let $source_server=mysqld1
 --let $dest_server=mysqld2
-source suite/rpl_ndb/t/rpl_ndb_wait_schema_logging.inc;
+source suite/ndb_rpl/t/wait_schema_logging.inc;
 
 --let $source_server=mysqld1
 --let $dest_server=mysqld3
-source suite/rpl_ndb/t/rpl_ndb_wait_schema_logging.inc;
+source suite/ndb_rpl/t/wait_schema_logging.inc;
 
 --let $source_server=mysqld1
 --let $dest_server=mysqld4
-source suite/rpl_ndb/t/rpl_ndb_wait_schema_logging.inc;
+source suite/ndb_rpl/t/wait_schema_logging.inc;
 
 --disable_query_log
 connection mysqld1;

=== added directory 'mysql-test/suite/ndb_rpl'
=== added file 'mysql-test/suite/ndb_rpl/my.cnf'
--- a/mysql-test/suite/ndb_rpl/my.cnf	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_rpl/my.cnf	2011-05-13 07:40:50 +0000
@@ -0,0 +1,97 @@
+!include include/default_mysqld.cnf
+!include include/default_ndbd.cnf
+
+[cluster_config]
+TimeBetweenEpochsTimeout = 30000
+
+[cluster_config.1]
+NoOfReplicas=                  2
+ndbd=,
+ndb_mgmd=
+mysqld=,
+ndbapi=,,,,,,,,,
+
+[cluster_config.slave]
+NoOfReplicas=                  1
+ndbd=
+ndb_mgmd=
+mysqld=
+ndbapi=,,,,
+
+[mysqld]
+# Make all mysqlds use cluster
+ndbcluster
+ndb-wait-connected=20
+ndb-wait-setup=120
+ndb-cluster-connection-pool=3
+slave-allow-batching
+ndb-log-orig
+# Turn on bin logging
+log-bin=                       master-bin
+# for performance reasons
+ndb-table-temporary=1
+
+ndb-extra-logging=99
+# Cluster only supports row format
+binlog-format=                 row
+
+[mysqld.1.1]
+
+[mysqld.2.1]
+
+[mysqld.1.slave]
+
+# Append <testname>-slave.opt file to the list of argument used when
+# starting the mysqld
+#!use-slave-opt
+
+# Connect mysqld in the second cluster as slave to the first mysqld
+# Hardcode the host to 127.0.0.1 until running on more
+# than one host and it probably need to be masked anyway
+# master-host=                @mysqld.1.#host
+master-host=                  127.0.0.1
+master-port=                  @mysqld.1.1.port
+master-password=              @mysqld.1.1.#password
+master-user=                  @mysqld.1.1.#user
+master-connect-retry=         1
+
+log-bin=                      slave-bin
+relay-log=                    slave-relay-bin
+# Cluster only supports row format
+binlog-format=                 row
+
+init-rpl-role=                slave
+log-slave-updates
+master-retry-count=           10
+
+# Values reported by slave when it connect to master
+# and shows up in SHOW SLAVE STATUS;
+report-host=                  127.0.0.1
+report-port=                  @mysqld.1.slave.port
+report-user=                  root
+
+# Configure slave mysqld without innodb, and set myisam
+# as default storage engine (since innodb will be the default
+# otherwise starting from 5.5)
+loose-skip-innodb
+default-storage-engine=myisam
+
+skip-slave-start
+
+# Directory where slaves find the dumps generated by "load data"
+# on the server. The path needs to have constant length, otherwise
+# test results will vary, thus a relative path is used.
+slave-load-tmpdir=            ../../../tmp
+
+rpl-recovery-rank=            @mysqld.1.slave.server-id
+
+[ENV]
+NDB_CONNECTSTRING=            @mysql_cluster.1.ndb_connectstring
+MASTER_MYPORT=                @mysqld.1.1.port
+MASTER_MYPORT1=               @mysqld.2.1.port
+
+NDB_CONNECTSTRING_SLAVE=      @mysql_cluster.slave.ndb_connectstring
+SLAVE_MYPORT=                 @mysqld.1.slave.port
+SLAVE_MYSOCK=                 @mysqld.1.slave.socket
+
+NDB_BACKUP_DIR=               @cluster_config.ndbd.1.1.BackupDataDir

=== renamed file 'mysql-test/suite/rpl_ndb/ndb_master-slave.inc' => 'mysql-test/suite/ndb_rpl/ndb_master-slave.inc'
=== renamed file 'mysql-test/suite/rpl_ndb/ndb_master-slave_2ch.inc' => 'mysql-test/suite/ndb_rpl/ndb_master-slave_2ch.inc'
=== added directory 'mysql-test/suite/ndb_rpl/r'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_2innodb.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_2innodb.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_2myisam.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_2myisam.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_2ndb.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_2ndb.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_2other.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_2other.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_add_column.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_add_column.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_apply_status.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_apply_status.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_auto_inc.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_auto_inc.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_bank.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_bank.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_basic.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_basic.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_bitfield.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_bitfield.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_blob.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_blob.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_break_3_chain.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_break_3_chain.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_bug22045.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_bug22045.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_check_for_mixed.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_check_for_mixed.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_circular.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_circular.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_circular_2ch.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_circular_2ch.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_circular_simplex.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_circular_simplex.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_conflict.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_conflict_max.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict_max.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_conflict_max_delete_win.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict_max_delete_win.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_conflict_old.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_conflict_old.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_ctype_ucs2_def.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_ctype_ucs2_def.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_dd_advance.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_dd_advance.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_dd_basic.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_dd_basic.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_dd_partitions.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_dd_partitions.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_do_db.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_do_db.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_do_table.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_do_table.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_empty_epoch.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_empty_epoch.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_gap_event.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_gap_event.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_idempotent.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_idempotent.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_ignore_db.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_ignore_db.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_innodb2ndb.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_innodb2ndb.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_innodb_trans.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_innodb_trans.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_load.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_load.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_logging.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_logging.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_mix_eng_trans.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_mix_eng_trans.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_mix_innodb.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_mix_innodb.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_mixed_tables.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_mixed_tables.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_multi.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_multi.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_myisam2ndb.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_myisam2ndb.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndbapi_multi.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_ndbapi_multi.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_rep_error.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_rep_error.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_rep_ignore.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_rep_ignore.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_skip_gap_event.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_skip_gap_event.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_slave_lsu.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_slave_lsu.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_slave_lsu_anyval.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_slave_lsu_anyval.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_slave_restart.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_slave_restart.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_stm_innodb.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_stm_innodb.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_sync.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_sync.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_ui.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_ui.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_ui2.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_ui2.result'
--- a/mysql-test/suite/rpl_ndb/r/rpl_ndb_ui2.result	2011-04-28 07:47:53 +0000
+++ b/mysql-test/suite/ndb_rpl/r/ndb_rpl_ui2.result	2011-05-16 06:50:14 +0000
@@ -4,11 +4,15 @@ CREATE TABLE t1 (pk int primary key, uk
 STOP SLAVE;
 insert into t1 values (1,1);
 insert into t1 values (2,2);
+begin;
 insert into t1 values (1,2);
 insert into t1 values (2,1);
+commit;
 delete from t1;
+begin;
 insert into t1 values (1,1);
 insert into t1 values (2,2);
+commit;
 start slave;
 select * from t1;
 pk	uk

=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_ndb_ui3.result' => 'mysql-test/suite/ndb_rpl/r/ndb_rpl_ui3.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_truncate_7ndb.result' => 'mysql-test/suite/ndb_rpl/r/rpl_truncate_7ndb.result'
=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_truncate_7ndb_2.result' => 'mysql-test/suite/ndb_rpl/r/rpl_truncate_7ndb_2.result'
=== added directory 'mysql-test/suite/ndb_rpl/t'
=== added file 'mysql-test/suite/ndb_rpl/t/disabled.def'
--- a/mysql-test/suite/ndb_rpl/t/disabled.def	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_rpl/t/disabled.def	2011-05-13 07:40:50 +0000
@@ -0,0 +1,13 @@
+##############################################################################
+#
+#  List the test cases that are to be disabled temporarily.
+#
+#  Separate the test case name and the comment with ':'.
+#
+#    <testcasename> : BUG#<xxxx> <date disabled> <disabler> <comment>
+#
+#  Do not use any TAB characters for whitespace.
+#
+##############################################################################
+
+ndb_rpl_ctype_ucs2_def : bug #34661 rpl_ndb_ctype_ucs2_def fails in 6.2

=== renamed file 'mysql-test/suite/rpl_ndb/t/ndb_apply_status.frm' => 'mysql-test/suite/ndb_rpl/t/ndb_apply_status.frm'
=== renamed file 'mysql-test/suite/rpl_ndb/t/ndb_conflict_info.inc' => 'mysql-test/suite/ndb_rpl/t/ndb_conflict_info.inc'
=== renamed file 'mysql-test/suite/rpl_ndb/t/ndb_conflict_info_init.inc' => 'mysql-test/suite/ndb_rpl/t/ndb_conflict_info_init.inc'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_2innodb-master.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_2innodb-master.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_2innodb-slave.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_2innodb-slave.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_2innodb.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_2innodb.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_2innodb.test	2011-04-20 11:50:29 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_2innodb.test	2011-05-13 07:40:50 +0000
@@ -42,7 +42,7 @@ set new=on;
 set storage_engine=ndbcluster;
 --enable_query_log
 
---source suite/rpl_ndb/t/rpl_ndb_2multi_eng.inc
+--source suite/ndb_rpl/t/ndb_rpl_2multi_eng.inc
 
 --connection slave
 set @@global.slave_exec_mode= 'STRICT';

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_2multi_basic.inc' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_2multi_basic.inc'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_2multi_eng.inc' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_2multi_eng.inc'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_2myisam-master.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_2myisam-master.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_2myisam-slave.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_2myisam-slave.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_2myisam.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_2myisam.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_2myisam.test	2011-04-20 11:50:29 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_2myisam.test	2011-05-13 07:40:50 +0000
@@ -41,7 +41,7 @@ set new=on;
 set storage_engine=ndbcluster;
 --enable_query_log
 
---source suite/rpl_ndb/t/rpl_ndb_2multi_eng.inc
+--source suite/ndb_rpl/t/ndb_rpl_2multi_eng.inc
 
 --connection slave
 set @@global.slave_exec_mode= 'STRICT';

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_2ndb-slave.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_2ndb-slave.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_2ndb.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_2ndb.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_2ndb.test	2011-04-20 12:10:50 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_2ndb.test	2011-05-13 07:40:50 +0000
@@ -6,7 +6,7 @@
 ##############################################################
 --source include/have_binlog_format_mixed_or_row.inc
 --source include/have_ndb.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 # On master use NDB as storage engine.
 connection master;
@@ -16,5 +16,5 @@ SET storage_engine=ndb;
 --echo === NDB -> NDB ===
 --echo
 connection slave;
---source suite/rpl_ndb/t/rpl_ndb_2multi_basic.inc
+--source suite/ndb_rpl/t/ndb_rpl_2multi_basic.inc
 --source include/rpl_end.inc

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_2other-slave.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_2other-slave.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_2other.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_2other.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_2other.test	2011-04-20 14:36:44 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_2other.test	2011-05-13 07:40:50 +0000
@@ -41,7 +41,7 @@ SET @old_slave_storage_engine=@@global.s
 SET @@global.storage_engine=myisam;
 
 --source include/start_slave.inc
---source suite/rpl_ndb/t/rpl_ndb_2multi_basic.inc
+--source suite/ndb_rpl/t/ndb_rpl_2multi_basic.inc
 --source include/rpl_connection_slave.inc
 --source include/stop_slave.inc
 
@@ -56,7 +56,7 @@ alter table mysql.ndb_apply_status engin
 SET @@global.storage_engine=innodb;
 
 --source include/start_slave.inc
---source suite/rpl_ndb/t/rpl_ndb_2multi_basic.inc
+--source suite/ndb_rpl/t/ndb_rpl_2multi_basic.inc
 --source include/rpl_connection_slave.inc
 --source include/stop_slave.inc
 

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_add_column.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_add_column.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_add_column.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_add_column.test	2011-05-13 07:40:50 +0000
@@ -1,5 +1,5 @@
 --source include/have_ndb.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 #
 # Test add column

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_apply_status-master.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_apply_status-master.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_apply_status.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_apply_status.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_apply_status.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_apply_status.test	2011-05-13 07:40:50 +0000
@@ -7,7 +7,7 @@
 
 # The table might already have been created by mysqld -> ignore error
 --error 0,1
-copy_file $MYSQL_TEST_DIR/suite/rpl_ndb/t/ndb_apply_status.frm $MYSQLTEST_VARDIR/master-data/mysql/ndb_apply_status.frm;
+copy_file $MYSQL_TEST_DIR/suite/ndb_rpl/t/ndb_apply_status.frm $MYSQLTEST_VARDIR/master-data/mysql/ndb_apply_status.frm;
 --disable_result_log
 --disable_abort_on_error
 select * from mysql.ndb_apply_status;
@@ -17,7 +17,7 @@ select * from mysql.ndb_apply_status;
 
 -- source include/have_ndb.inc
 -- source include/have_binlog_format_mixed_or_row.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source suite/ndb_rpl/ndb_master-slave.inc
 
 #
 # Bug#28170 replicate-ignore-db=mysql should not ignore mysql.ndb_apply_status

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_auto_inc.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_auto_inc.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_auto_inc.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_auto_inc.test	2011-05-13 07:40:50 +0000
@@ -8,7 +8,7 @@
 #####################################
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 --echo ***************** Test 1 ************************
 --echo

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_bank.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_bank.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_bank.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_bank.test	2011-05-13 07:40:50 +0000
@@ -13,7 +13,7 @@
 --source include/have_ndb.inc
 --source include/have_ndb_extra.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 # kill any trailing processes
 --system killall lt-bankTransactionMaker lt-bankTimer lt-bankMakeGL || true

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_basic.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_basic.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_basic.test	2011-04-11 13:36:12 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_basic.test	2011-05-13 07:40:50 +0000
@@ -1,7 +1,7 @@
 --source include/have_ndb.inc
 #error message differs slightly with statement based replication
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 -- disable_query_log
 call mtr.add_suppression("Slave: Got error 146 during COMMIT Error_code: 1180");

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_bitfield.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_bitfield.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_bitfield.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_bitfield.test	2011-05-13 07:40:50 +0000
@@ -1,6 +1,6 @@
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 #
 # Verifies replication of bit fields

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_blob.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_blob.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_blob.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_blob.test	2011-05-13 07:40:50 +0000
@@ -1,5 +1,5 @@
 --source include/have_ndb.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 #
 # basic test of blob replication for NDB

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_break_3_chain.cnf' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_break_3_chain.cnf'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_break_3_chain.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_break_3_chain.test'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_bug22045.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_bug22045.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_bug22045.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_bug22045.test	2011-05-13 07:40:50 +0000
@@ -2,7 +2,7 @@
 # Test case currently fails for statement-based replication, as BUG#22045 is
 # not fixed in the replication slave thread.
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 # BUG#22045: Got error 839 'Illegal null attribute' from NDBCLUSTER when 'Replace Into'.
 # Check that REPLACE INTO replicates correctly with partial value list and with/without blobs.

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_check_for_mixed.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_check_for_mixed.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_check_for_mixed.test	2011-04-11 13:34:50 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_check_for_mixed.test	2011-05-13 07:40:50 +0000
@@ -10,7 +10,7 @@
 
 --source include/have_ndb.inc
 --source include/have_log_bin.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 disable_query_log;
 # so that both ROW and MIXED is output as MIXED

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_circular.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_circular.test'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_2ch.cnf' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch.cnf'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_2ch.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_2ch.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch.test	2011-05-13 07:40:50 +0000
@@ -5,7 +5,7 @@
 # independent channels between two clusters
 #############################################################
 --source include/have_ndb.inc
---source suite/rpl_ndb/ndb_master-slave_2ch.inc
+--source suite/ndb_rpl/ndb_master-slave_2ch.inc
 --source include/have_binlog_format_mixed_or_row.inc
 --echo
 

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_simplex.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_simplex.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_circular_simplex.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_simplex.test	2011-05-13 07:40:50 +0000
@@ -1,5 +1,5 @@
 --source include/have_ndb.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 connection master;
 CREATE TABLE t1 (a int key, b int) ENGINE=NDB;

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict.test	2011-05-13 07:40:50 +0000
@@ -4,7 +4,7 @@
 #
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 --disable_query_log
 --connection master

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict_1.inc' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_1.inc'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict_1.inc	2011-03-16 11:07:13 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_1.inc	2011-05-13 07:40:50 +0000
@@ -1,5 +1,5 @@
 --connection slave
---source suite/rpl_ndb/t/ndb_conflict_info_init.inc
+--source suite/ndb_rpl/t/ndb_conflict_info_init.inc
 
 --connection master
 
@@ -32,7 +32,7 @@ commit;
 --connection slave
 select * from t1 order by a, d;
 select * from t2 order by a, d;
---source suite/rpl_ndb/t/ndb_conflict_info.inc
+--source suite/ndb_rpl/t/ndb_conflict_info.inc
 
 --echo *** master - update 2 rows in different tables
 --connection master
@@ -43,7 +43,7 @@ update t2 set b="Master t2 a=3 at c=3",
 --connection slave
 select * from t1 order by a, d;
 select * from t2 order by a, d;
---source suite/rpl_ndb/t/ndb_conflict_info.inc
+--source suite/ndb_rpl/t/ndb_conflict_info.inc
 
 --echo *** master - delete all
 --connection master
@@ -54,8 +54,8 @@ delete from t2;
 --connection slave
 select * from t1 order by a, d;
 select * from t2 order by a, d;
---source suite/rpl_ndb/t/ndb_conflict_info.inc
---source suite/rpl_ndb/t/ndb_conflict_info_init.inc
+--source suite/ndb_rpl/t/ndb_conflict_info.inc
+--source suite/ndb_rpl/t/ndb_conflict_info_init.inc
 
 --echo ************************
 --echo * start conflict testing
@@ -76,7 +76,7 @@ commit;
 --echo *** slave - check conflict info, no conflicts yet
 --sync_slave_with_master
 --connection slave
---source suite/rpl_ndb/t/ndb_conflict_info.inc
+--source suite/ndb_rpl/t/ndb_conflict_info.inc
 
 --echo *** slave - check insert some data
 select * from t1 order by a, d;
@@ -110,7 +110,7 @@ commit;
 --echo *** slave - check conflict info, there should be some
 --sync_slave_with_master
 --connection slave
---source suite/rpl_ndb/t/ndb_conflict_info.inc
+--source suite/ndb_rpl/t/ndb_conflict_info.inc
 
 --echo *** slave - check update some data that causes conflicts
 select * from t1 order by a, d;
@@ -133,7 +133,7 @@ commit;
 --echo *** slave - check conflict info, change depends on calling test
 --sync_slave_with_master
 --connection slave
---source suite/rpl_ndb/t/ndb_conflict_info.inc
+--source suite/ndb_rpl/t/ndb_conflict_info.inc
 
 --echo *** slave - check higer timestamp
 select * from t1 order by a, d;

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict_max-master.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_max-master.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict_max.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_max.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict_max.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_max.test	2011-05-13 07:40:50 +0000
@@ -4,7 +4,7 @@
 #
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 --disable_query_log
 --connection master
@@ -33,7 +33,7 @@ create table `test`.`t1$EX`
 --connection master
 insert into mysql.ndb_replication values ("test", "t1", 0, NULL, "NDB$MAX(X)");
 --let col_type = int unsigned
---source suite/rpl_ndb/t/rpl_ndb_conflict_1.inc
+--source suite/ndb_rpl/t/ndb_rpl_conflict_1.inc
 
 --echo *** Test 2 ********************************************************
 --echo * test native NDB\$MAX() conflict resolution
@@ -44,7 +44,7 @@ insert into mysql.ndb_replication values
 set global ndb_log_update_as_write=1;
 update mysql.ndb_replication set binlog_type=4;
 --let col_type = bigint unsigned
---source suite/rpl_ndb/t/rpl_ndb_conflict_1.inc
+--source suite/ndb_rpl/t/ndb_rpl_conflict_1.inc
 
 --echo *** Test 3 ***********************************************************
 --echo * test that setting binlog type really also sets the "USE_UPDATE" flag
@@ -55,7 +55,7 @@ update mysql.ndb_replication set binlog_
 set global ndb_log_update_as_write=0;
 update mysql.ndb_replication set binlog_type=2;
 --let col_type = int unsigned
---source suite/rpl_ndb/t/rpl_ndb_conflict_1.inc
+--source suite/ndb_rpl/t/ndb_rpl_conflict_1.inc
 
 --echo *** Test 4 ***********************************************************
 --echo * test with FULL rows and "USE_UPDATE" flag
@@ -64,7 +64,7 @@ update mysql.ndb_replication set binlog_
 set global ndb_log_update_as_write=0;
 update mysql.ndb_replication set binlog_type=7;
 --let col_type = int unsigned
---source suite/rpl_ndb/t/rpl_ndb_conflict_1.inc
+--source suite/ndb_rpl/t/ndb_rpl_conflict_1.inc
 
 --echo *** test cleanup
 --disable_query_log

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict_max_delete_win-master.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_max_delete_win-master.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict_max_delete_win.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_max_delete_win.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict_max_delete_win.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_max_delete_win.test	2011-05-13 07:40:50 +0000
@@ -4,7 +4,7 @@
 #
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 --disable_query_log
 --connection master
@@ -33,7 +33,7 @@ create table `test`.`t1$EX`
 --connection master
 insert into mysql.ndb_replication values ("test", "t1", 0, NULL, "NDB$MAX_DELETE_WIN(X)");
 --let col_type = int unsigned
---source suite/rpl_ndb/t/rpl_ndb_conflict_1.inc
+--source suite/ndb_rpl/t/ndb_rpl_conflict_1.inc
 
 --echo *** Test 2 ********************************************************
 --echo * test native NDB\$MAX_DELETE_WIN() conflict resolution
@@ -44,7 +44,7 @@ insert into mysql.ndb_replication values
 set global ndb_log_update_as_write=1;
 update mysql.ndb_replication set binlog_type=4;
 --let col_type = bigint unsigned
---source suite/rpl_ndb/t/rpl_ndb_conflict_1.inc
+--source suite/ndb_rpl/t/ndb_rpl_conflict_1.inc
 
 --echo *** Test 3 ***********************************************************
 --echo * test that setting binlog type really also sets the "USE_UPDATE" flag
@@ -55,7 +55,7 @@ update mysql.ndb_replication set binlog_
 set global ndb_log_update_as_write=0;
 update mysql.ndb_replication set binlog_type=2;
 --let col_type = int unsigned
---source suite/rpl_ndb/t/rpl_ndb_conflict_1.inc
+--source suite/ndb_rpl/t/ndb_rpl_conflict_1.inc
 
 --echo *** Test 4 ***********************************************************
 --echo * test with FULL rows and "USE_UPDATE" flag
@@ -64,7 +64,7 @@ update mysql.ndb_replication set binlog_
 set global ndb_log_update_as_write=0;
 update mysql.ndb_replication set binlog_type=7;
 --let col_type = int unsigned
---source suite/rpl_ndb/t/rpl_ndb_conflict_1.inc
+--source suite/ndb_rpl/t/ndb_rpl_conflict_1.inc
 
 --echo *** test cleanup
 --disable_query_log

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict_old-master.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_old-master.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict_old.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_old.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_conflict_old.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_conflict_old.test	2011-05-13 07:40:50 +0000
@@ -4,7 +4,7 @@
 #
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 --disable_query_log
 --connection master
@@ -33,7 +33,7 @@ create table `test`.`t1$EX`
 --connection master
 insert into mysql.ndb_replication values ("test", "t1", 0, NULL, "NDB$OLD(X)");
 --let col_type = int unsigned
---source suite/rpl_ndb/t/rpl_ndb_conflict_1.inc
+--source suite/ndb_rpl/t/ndb_rpl_conflict_1.inc
 
 --echo *** Test 2 ********************************************************
 --echo * test native NDB\$OLD() conflict resolution
@@ -44,7 +44,7 @@ insert into mysql.ndb_replication values
 set global ndb_log_update_as_write=1;
 update mysql.ndb_replication set binlog_type=4;
 --let col_type = bigint unsigned
---source suite/rpl_ndb/t/rpl_ndb_conflict_1.inc
+--source suite/ndb_rpl/t/ndb_rpl_conflict_1.inc
 
 --echo *** Test 3 ***********************************************************
 --echo * test that setting binlog type really also sets the "USE_UPDATE" flag
@@ -55,7 +55,7 @@ update mysql.ndb_replication set binlog_
 set global ndb_log_update_as_write=0;
 update mysql.ndb_replication set binlog_type=2;
 --let col_type = int unsigned
---source suite/rpl_ndb/t/rpl_ndb_conflict_1.inc
+--source suite/ndb_rpl/t/ndb_rpl_conflict_1.inc
 
 --echo *** Test 4 ***********************************************************
 --echo * test with FULL rows and "USE_UPDATE" flag
@@ -64,7 +64,7 @@ update mysql.ndb_replication set binlog_
 set global ndb_log_update_as_write=0;
 update mysql.ndb_replication set binlog_type=7;
 --let col_type = int unsigned
---source suite/rpl_ndb/t/rpl_ndb_conflict_1.inc
+--source suite/ndb_rpl/t/ndb_rpl_conflict_1.inc
 
 --echo *** test cleanup
 --disable_query_log

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_ctype_ucs2_def-master.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_ctype_ucs2_def-master.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_ctype_ucs2_def.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_ctype_ucs2_def.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_ctype_ucs2_def.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_ctype_ucs2_def.test	2011-05-13 07:40:50 +0000
@@ -1,6 +1,6 @@
 --source include/have_ucs2.inc
 --source include/have_ndb.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 #
 # MySQL Bug#15276: MySQL ignores collation-server

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_dd_advance.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_dd_advance.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_dd_advance.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_dd_advance.test	2011-05-13 07:40:50 +0000
@@ -7,7 +7,7 @@
 --source include/have_binlog_format_mixed_or_row.inc
 --source include/ndb_default_cluster.inc
 --source include/not_embedded.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 ######################################################
 # Requirment: Cluster DD and replication must be able#

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_dd_basic.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_dd_basic.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_dd_basic.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_dd_basic.test	2011-05-13 07:40:50 +0000
@@ -1,6 +1,6 @@
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 --disable_warnings
 DROP TABLE IF EXISTS t1;

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_dd_partitions-master.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_dd_partitions-master.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_dd_partitions-slave.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_dd_partitions-slave.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_dd_partitions.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_dd_partitions.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_dd_partitions.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_dd_partitions.test	2011-05-13 07:40:50 +0000
@@ -7,7 +7,7 @@
 
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 --echo --- Doing pre test cleanup --- 
 

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_do_db-slave.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_do_db-slave.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_do_db.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_do_db.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_do_db.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_do_db.test	2011-05-13 07:40:50 +0000
@@ -6,7 +6,7 @@
 ##########################################################
 
 --source include/have_ndb.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 --disable_warnings
 DROP DATABASE IF EXISTS replica;

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_do_table-slave.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_do_table-slave.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_do_table.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_do_table.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_do_table.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_do_table.test	2011-05-13 07:40:50 +0000
@@ -6,7 +6,7 @@
 ##########################################################
 
 --source include/have_ndb.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 --disable_warnings
 DROP TABLE IF EXISTS t1, t2;

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_empty_epoch.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_empty_epoch.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_empty_epoch.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_empty_epoch.test	2011-05-13 07:40:50 +0000
@@ -1,6 +1,6 @@
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 CREATE TABLE t1 (
  id INT UNSIGNED NOT NULL AUTO_INCREMENT,

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_gap_event-master.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_gap_event-master.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_gap_event.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_gap_event.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_gap_event.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_gap_event.test	2011-05-16 06:50:14 +0000
@@ -1,6 +1,6 @@
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 --connection master
 create table t1(pk int not null primary key, i int) engine = ndb;
@@ -22,6 +22,13 @@ enable_query_log;
 --connection slave
 select count(*) from t1;
 
+# Add suppression for the LOST_EVENTS error message
+# which will occur on slave when master is restarted while
+# slave is running
+--disable_query_log
+call mtr.add_suppression("Slave.*: The incident LOST_EVENTS occured on the master");
+--enable_query_log
+
 --connection master
 
 --echo Restarting mysqld

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_get_binlog_events.inc' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_get_binlog_events.inc'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_idempotent.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_idempotent.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_idempotent.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_idempotent.test	2011-05-13 07:40:50 +0000
@@ -1,6 +1,6 @@
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 #
 # Currently test only works with ndb since it retrieves "old"

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_ignore_db-master.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_ignore_db-master.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_ignore_db.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_ignore_db.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_ignore_db.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_ignore_db.test	2011-05-13 07:40:50 +0000
@@ -1,6 +1,6 @@
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 # We are not binlogging changes to the mysql db.  Check
 # that changes to tables in the mysql db are not replicated

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_innodb2ndb-master.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_innodb2ndb-master.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_innodb2ndb-slave.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_innodb2ndb-slave.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_innodb2ndb.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_innodb2ndb.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_innodb2ndb.test	2011-04-20 11:50:29 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_innodb2ndb.test	2011-05-13 07:40:50 +0000
@@ -10,7 +10,7 @@
 -- source include/have_innodb.inc
 -- source include/have_ndb.inc
 -- source include/have_binlog_format_mixed_or_row.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source suite/ndb_rpl/ndb_master-slave.inc
 SET storage_engine=innodb;
---source suite/rpl_ndb/t/rpl_ndb_2multi_eng.inc
+--source suite/ndb_rpl/t/ndb_rpl_2multi_eng.inc
 --source include/rpl_end.inc

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_innodb_trans-slave.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_innodb_trans-slave.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_innodb_trans.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_innodb_trans.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_innodb_trans.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_innodb_trans.test	2011-05-13 07:40:50 +0000
@@ -2,7 +2,7 @@
 
 -- source include/have_ndb.inc
 -- source include/have_innodb.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source suite/ndb_rpl/ndb_master-slave.inc
 
 create table t1 (a int, unique(a)) engine=ndbcluster;
 create table t2 (a int, unique(a)) engine=innodb;

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_load.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_load.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_load.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_load.test	2011-05-13 07:40:50 +0000
@@ -7,7 +7,7 @@
 --source include/have_ndb.inc
 --source include/have_ndb_extra.inc
 --source include/have_binlog_format_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 --disable_warnings
 # reset master

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_logging.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_logging.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_logging.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_logging.test	2011-05-13 07:40:50 +0000
@@ -3,7 +3,7 @@
 #
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 --disable_warnings
 --disable_query_log

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_mix_eng_trans-master.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_mix_eng_trans-master.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_mix_eng_trans-slave.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_mix_eng_trans-slave.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_mix_eng_trans.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_mix_eng_trans.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_mix_eng_trans.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_mix_eng_trans.test	2011-05-13 07:40:50 +0000
@@ -23,7 +23,7 @@
 
 
 source include/have_ndb.inc;
-source suite/rpl_ndb/ndb_master-slave.inc;
+source suite/ndb_rpl/ndb_master-slave.inc;
 source include/have_innodb.inc;
 
 CREATE TABLE tmyisam (a int) ENGINE = MYISAM;

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_mix_innodb-master.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_mix_innodb-master.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_mix_innodb.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_mix_innodb.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_mix_innodb.test	2011-04-19 14:34:27 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_mix_innodb.test	2011-05-13 07:40:50 +0000
@@ -8,10 +8,10 @@
 --source include/have_ndb.inc
 --source include/have_innodb.inc
 --source include/have_binlog_format_mixed.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 --enable_query_log
 let $off_set = 9;
 let $rpl_format = 'MIX';
---source suite/rpl_ndb/t/rpl_ndb_xxx_innodb.inc
+--source suite/ndb_rpl/t/ndb_rpl_xxx_innodb.inc
 
 --source include/rpl_end.inc

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_mixed_tables-master.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_mixed_tables-master.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_mixed_tables-slave.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_mixed_tables-slave.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_mixed_tables.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_mixed_tables.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_mixed_tables.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_mixed_tables.test	2011-05-13 07:40:50 +0000
@@ -44,7 +44,7 @@
 
 --source include/have_ndb.inc
 --source include/have_innodb.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 --echo ---- setup master ----
 

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_multi.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_multi.test'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_binlog_update.cnf' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.cnf'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_binlog_update.inc' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.inc'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_binlog_update.inc	2011-04-27 06:29:23 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.inc	2011-05-13 07:40:50 +0000
@@ -145,7 +145,7 @@ show variables like 'log_bin';
 show variables like 'log_slave_updates';
 --source include/show_binlog_events2.inc
 let $BINLOG_FILENAME=master-bin;
---source suite/rpl_ndb/t/rpl_ndb_get_binlog_events.inc
+--source suite/ndb_rpl/t/ndb_rpl_get_binlog_events.inc
 
 
 connection srv_master1;
@@ -168,7 +168,7 @@ show variables like 'log_bin';
 show variables like 'log_slave_updates';
 --source include/show_binlog_events2.inc
 let $BINLOG_FILENAME=master-bin;
---source suite/rpl_ndb/t/rpl_ndb_get_binlog_events.inc
+--source suite/ndb_rpl/t/ndb_rpl_get_binlog_events.inc
 
 connection srv_slave;
 --echo 
@@ -190,7 +190,7 @@ show variables like 'log_bin';
 show variables like 'log_slave_updates';
 --source include/show_binlog_events2.inc
 let $BINLOG_FILENAME=slave-master-bin;
---source suite/rpl_ndb/t/rpl_ndb_get_binlog_events.inc
+--source suite/ndb_rpl/t/ndb_rpl_get_binlog_events.inc
 
 connection srv_slave2;
 --echo 
@@ -202,6 +202,6 @@ show variables like 'log_bin';
 show variables like 'log_slave_updates';
 --source include/show_binlog_events2.inc
 let $BINLOG_FILENAME=slave-master-bin;
---source suite/rpl_ndb/t/rpl_ndb_get_binlog_events.inc
+--source suite/ndb_rpl/t/ndb_rpl_get_binlog_events.inc
 
 connection $which_slave;
\ No newline at end of file

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_update2-slave.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_update2-slave.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_myisam2ndb-slave.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_myisam2ndb-slave.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_myisam2ndb.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_myisam2ndb.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_myisam2ndb.test	2011-04-20 11:50:29 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_myisam2ndb.test	2011-05-13 07:40:50 +0000
@@ -9,7 +9,7 @@
 ##############################################################
 -- source include/have_ndb.inc
 -- source include/have_binlog_format_mixed_or_row.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source suite/ndb_rpl/ndb_master-slave.inc
 SET storage_engine=myisam;
---source suite/rpl_ndb/t/rpl_ndb_2multi_eng.inc
+--source suite/ndb_rpl/t/ndb_rpl_2multi_eng.inc
 --source include/rpl_end.inc

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndbapi_multi.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_ndbapi_multi.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndbapi_multi.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_ndbapi_multi.test	2011-05-13 07:40:50 +0000
@@ -1,7 +1,7 @@
 -- source include/have_ndb.inc
 -- source include/have_binlog_format_mixed_or_row.inc
 -- source include/have_ndbapi_examples.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source suite/ndb_rpl/ndb_master-slave.inc
 --let MASTER_MYSOCK=`select @@socket`;
 
 --exec echo Running ndbapi_simple_dual

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_relayrotate-slave.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_relayrotate-slave.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_rep_error.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_rep_error.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_rep_error.test	2010-10-22 15:16:26 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_rep_error.test	2011-05-13 07:40:50 +0000
@@ -4,7 +4,7 @@
 #
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 #
 # wrong schema for the table

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_rep_ignore-slave.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_rep_ignore-slave.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_rep_ignore.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_rep_ignore.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_rep_ignore.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_rep_ignore.test	2011-05-13 07:40:50 +0000
@@ -7,7 +7,7 @@
 ##########################################################
 
 --source include/have_ndb.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 --disable_warnings
 DROP DATABASE IF EXISTS replica;

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_skip_gap_event-slave.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_skip_gap_event-slave.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_skip_gap_event.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_skip_gap_event.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_skip_gap_event.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_skip_gap_event.test	2011-05-13 07:40:50 +0000
@@ -1,6 +1,6 @@
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 --connection master
 create table t1(pk int not null primary key, i int) engine = ndb;

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_slave_lsu.cnf' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_lsu.cnf'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_slave_lsu.cnf	2009-09-16 16:04:14 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_lsu.cnf	2011-05-13 07:40:50 +0000
@@ -1 +1 @@
-!include suite/rpl_ndb/t/rpl_ndb_multi_binlog_update.cnf
+!include suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.cnf

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_slave_lsu.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_lsu.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_slave_lsu.test	2011-04-27 06:29:23 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_lsu.test	2011-05-13 07:40:50 +0000
@@ -18,7 +18,7 @@
 #  - Second replicate via srv_slave1 and check all nodes' Binlog contents
 #  - Third replicate via srv_slave2 and check all nodes' Binlog contents
 #
-# Makes use of suite/rpl_ndb/t/rpl_ndb_multi_binlog_update.inc
+# Makes use of suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.inc
 # Originally motivated by bug#45756
 ###############################################################################
 
@@ -66,36 +66,36 @@
 # Check schema op binlogging enabled between servers on cluster1
 --let $source_server=srv_master
 --let $dest_server=srv_master2
-source suite/rpl_ndb/t/rpl_ndb_wait_schema_logging.inc;
+source suite/ndb_rpl/t/wait_schema_logging.inc;
 
 --let $source_server=srv_master1
 --let $dest_server=srv_master
-source suite/rpl_ndb/t/rpl_ndb_wait_schema_logging.inc;
+source suite/ndb_rpl/t/wait_schema_logging.inc;
 
 --let $source_server=srv_master1
 --let $dest_server=srv_master2
-source suite/rpl_ndb/t/rpl_ndb_wait_schema_logging.inc;
+source suite/ndb_rpl/t/wait_schema_logging.inc;
 
 --let $source_server=srv_master2
 --let $dest_server=srv_master
-source suite/rpl_ndb/t/rpl_ndb_wait_schema_logging.inc;
+source suite/ndb_rpl/t/wait_schema_logging.inc;
 
 # Check schema op binlogging enabled between servers on cluster2
 --let $source_server=srv_slave
 --let $dest_server=srv_slave1
-source suite/rpl_ndb/t/rpl_ndb_wait_schema_logging.inc;
+source suite/ndb_rpl/t/wait_schema_logging.inc;
 
 --let $source_server=srv_slave
 --let $dest_server=srv_slave2
-source suite/rpl_ndb/t/rpl_ndb_wait_schema_logging.inc;
+source suite/ndb_rpl/t/wait_schema_logging.inc;
 
 --let $source_server=srv_slave1
 --let $dest_server=srv_slave2
-source suite/rpl_ndb/t/rpl_ndb_wait_schema_logging.inc;
+source suite/ndb_rpl/t/wait_schema_logging.inc;
 
 --let $source_server=srv_slave2
 --let $dest_server=srv_slave1
-source suite/rpl_ndb/t/rpl_ndb_wait_schema_logging.inc;
+source suite/ndb_rpl/t/wait_schema_logging.inc;
 
 # Reset state of all Binlogging nodes
 --disable_query_log
@@ -133,7 +133,7 @@ RESET MASTER;
 
 # Run the test script
 --let $which_slave=srv_slave
---source suite/rpl_ndb/t/rpl_ndb_multi_binlog_update.inc
+--source suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.inc
 
 --source include/stop_slave.inc
 
@@ -173,7 +173,7 @@ RESET MASTER;
 
 # Run the test script
 --let $which_slave=srv_slave1
---source suite/rpl_ndb/t/rpl_ndb_multi_binlog_update.inc
+--source suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.inc
 
 --source include/stop_slave.inc
 
@@ -213,7 +213,7 @@ RESET MASTER;
 
 # Run the test script
 --let $which_slave=srv_slave2
---source suite/rpl_ndb/t/rpl_ndb_multi_binlog_update.inc
+--source suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.inc
 
 --source include/stop_slave.inc
 

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_slave_lsu_anyval.cnf' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_lsu_anyval.cnf'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_slave_lsu_anyval.cnf	2010-07-02 11:01:48 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_lsu_anyval.cnf	2011-05-13 07:40:50 +0000
@@ -1,4 +1,4 @@
-!include suite/rpl_ndb/t/rpl_ndb_multi_binlog_update.cnf
+!include suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.cnf
 
 [mysqld]
 # Low ServerId bits and NDB_TEST_ANYVALUE_USERDATA below

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_slave_lsu_anyval.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_lsu_anyval.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_slave_lsu_anyval.test	2010-07-02 11:01:48 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_lsu_anyval.test	2011-05-13 07:40:50 +0000
@@ -36,4 +36,4 @@
 #        into the binlog of the slave running with --log-slave-updates on.
 ###############################################################################
 --source include/have_debug.inc
---source suite/rpl_ndb/t/rpl_ndb_slave_lsu.test
+--source suite/ndb_rpl/t/ndb_rpl_slave_lsu.test

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_slave_restart.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_restart.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_slave_restart.test	2011-04-08 12:52:26 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_slave_restart.test	2011-05-13 07:40:50 +0000
@@ -1,6 +1,6 @@
 --source include/have_multi_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 # note: server2 is another "master" connected to the master cluster
 

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_stm_innodb-master.opt' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_stm_innodb-master.opt'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_stm_innodb.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_stm_innodb.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_stm_innodb.test	2011-04-19 14:34:27 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_stm_innodb.test	2011-05-13 07:40:50 +0000
@@ -9,12 +9,12 @@
 --disable_query_log
 --source include/have_ndb.inc
 --source include/have_innodb.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 --enable_query_log
 
 # statement format is supported because master uses innodb
 SET binlog_format = STATEMENT;
 let $off_set = 6;
 let $rpl_format = 'SBR';
---source suite/rpl_ndb/t/rpl_ndb_xxx_innodb.inc
+--source suite/ndb_rpl/t/ndb_rpl_xxx_innodb.inc
 --source include/rpl_end.inc

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_sync.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_sync.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_sync.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_sync.test	2011-05-13 07:40:50 +0000
@@ -2,7 +2,7 @@
 --source include/ndb_default_cluster.inc
 --source include/not_embedded.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 #
 # Currently test only works with ndb since it retrieves "old"

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_ui.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_ui.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_ui.test	2011-04-28 07:47:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_ui.test	2011-05-13 07:40:50 +0000
@@ -1,6 +1,6 @@
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 #
 # Slave behaviour when replicating unique index operations

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_ui2.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_ui2.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_ui2.test	2011-04-28 07:47:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_ui2.test	2011-05-16 06:50:14 +0000
@@ -1,6 +1,6 @@
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 --connection master
 
@@ -12,13 +12,15 @@ STOP SLAVE;
 insert into t1 values (1,1);
 insert into t1 values (2,2);
 --connection master
+begin;
 insert into t1 values (1,2);
 insert into t1 values (2,1);
---sleep 1
+commit;
 delete from t1;
---sleep 1
+begin;
 insert into t1 values (1,1);
 insert into t1 values (2,2);
+commit;
 
 --connection slave
 start slave;

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_ui3.test' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_ui3.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_ui3.test	2011-04-28 07:47:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_ui3.test	2011-05-13 07:40:50 +0000
@@ -1,6 +1,6 @@
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 
 #
 # Slave behaviour when replicating unique index operations

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_xxx_innodb.inc' => 'mysql-test/suite/ndb_rpl/t/ndb_rpl_xxx_innodb.inc'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_xxx_innodb.inc	2011-04-19 14:34:27 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_xxx_innodb.inc	2011-05-13 07:40:50 +0000
@@ -45,7 +45,7 @@ insert into t1 values (1,2);
 --echo
 
 --sync_slave_with_master
---source suite/rpl_ndb/t/select_ndb_apply_status.inc
+--source suite/ndb_rpl/t/select_ndb_apply_status.inc
 
 --echo
 
@@ -85,7 +85,7 @@ commit;
 --echo
 
 --sync_slave_with_master
---source suite/rpl_ndb/t/select_ndb_apply_status.inc
+--source suite/ndb_rpl/t/select_ndb_apply_status.inc
 
 connection master;
 --let $binlog_start= $start_pos
@@ -158,7 +158,7 @@ SET AUTOCOMMIT=1;
 --enable_query_log
 
 --sync_slave_with_master
---source suite/rpl_ndb/t/select_ndb_apply_status.inc
+--source suite/ndb_rpl/t/select_ndb_apply_status.inc
 
 --echo
 
@@ -208,7 +208,7 @@ SET AUTOCOMMIT=1;
 --echo
 
 --sync_slave_with_master
---source suite/rpl_ndb/t/select_ndb_apply_status.inc
+--source suite/ndb_rpl/t/select_ndb_apply_status.inc
 
 --echo
 
@@ -256,7 +256,7 @@ SET AUTOCOMMIT=1;
 --enable_query_log
 
 --sync_slave_with_master
---source suite/rpl_ndb/t/select_ndb_apply_status.inc
+--source suite/ndb_rpl/t/select_ndb_apply_status.inc
 
 --echo
 
@@ -281,7 +281,7 @@ DROP DATABASE tpcb;
 
 ####### Commenting out until decision on Bug#27960 ###########
 
-#--source suite/rpl_ndb/t/select_ndb_apply_status.inc
+#--source suite/ndb_rpl/t/select_ndb_apply_status.inc
 
 #connection master;
 #--eval SHOW BINLOG EVENTS in '$log_name' from $start_pos

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_truncate_7ndb.test' => 'mysql-test/suite/ndb_rpl/t/rpl_truncate_7ndb.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_truncate_7ndb.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/ndb_rpl/t/rpl_truncate_7ndb.test	2011-05-13 07:40:50 +0000
@@ -1,6 +1,6 @@
 
 --source include/have_ndb.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
 --source include/have_binlog_format_mixed_or_row.inc
 
 --disable_query_log

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_truncate_7ndb_2.test' => 'mysql-test/suite/ndb_rpl/t/rpl_truncate_7ndb_2.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_truncate_7ndb_2.test	2011-03-29 14:09:05 +0000
+++ b/mysql-test/suite/ndb_rpl/t/rpl_truncate_7ndb_2.test	2011-05-13 07:40:50 +0000
@@ -7,4 +7,4 @@
 # Change:  Moved test to rpl_ndb suite, updated location of --source .test file
 
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/t/rpl_truncate_7ndb.test
+--source suite/ndb_rpl/t/rpl_truncate_7ndb.test

=== renamed file 'mysql-test/suite/rpl_ndb/t/select_ndb_apply_status.inc' => 'mysql-test/suite/ndb_rpl/t/select_ndb_apply_status.inc'
=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_wait_schema_logging.inc' => 'mysql-test/suite/ndb_rpl/t/wait_schema_logging.inc'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_wait_schema_logging.inc	2009-09-16 16:04:14 +0000
+++ b/mysql-test/suite/ndb_rpl/t/wait_schema_logging.inc	2011-05-13 07:40:50 +0000
@@ -1,4 +1,4 @@
-# suite/t/rpl_ndb/rpl_ndb_wait_schema_logging.inc
+# suite/ndb_rpl/t/wait_schema_logging.inc
 #
 # SUMMARY
 #   Waits until schema op performed on $source_server is
@@ -49,8 +49,8 @@ while (`SELECT INSTR("$_event","$wait_bi
 
   connection $source_server;
   --disable_query_log
-  create table rpl_ndb_wait_schema_logging (a int primary key) engine=ndb;
-  drop table rpl_ndb_wait_schema_logging;
+  create table wait_schema_logging (a int primary key) engine=ndb;
+  drop table wait_schema_logging;
   --enable_query_log
   connection $dest_server;
 

=== renamed file 'mysql-test/suite/rpl_ndb/r/rpl_row_basic_7ndb.result' => 'mysql-test/suite/rpl_ndb/r/rpl_ndb_row_basic.result'
=== modified file 'mysql-test/suite/rpl_ndb/t/disabled.def'
--- a/mysql-test/suite/rpl_ndb/t/disabled.def	2010-03-12 13:57:24 +0000
+++ b/mysql-test/suite/rpl_ndb/t/disabled.def	2011-05-13 07:40:50 +0000
@@ -10,5 +10,4 @@
 #
 ##############################################################################
 
-rpl_ndb_ctype_ucs2_def : bug #34661 rpl_ndb_ctype_ucs2_def fails in 6.2
 rpl_ndb_set_null       : bug #51100

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_UUID.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_UUID.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_UUID.test	2011-05-13 07:40:50 +0000
@@ -3,7 +3,7 @@
 ########################################################
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source include/master-slave.inc
 let $engine_type=NDB;
 --source extra/rpl_tests/rpl_row_UUID.test
 --source include/rpl_end.inc

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_blob2.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_blob2.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_blob2.test	2011-05-13 07:40:50 +0000
@@ -4,7 +4,7 @@
 # code between engine tests     #
 #################################
 -- source include/have_ndb.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source include/master-slave.inc
 let $engine_type=NDBCLUSTER;
 -- source extra/rpl_tests/rpl_row_blob.test
 

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_commit_afterflush.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_commit_afterflush.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_commit_afterflush.test	2011-05-13 07:40:50 +0000
@@ -5,7 +5,7 @@
 # By JBM 2004-02-15                 #
 #####################################
 -- source include/have_ndb.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source include/master-slave.inc
 let $engine_type=NDB;
 -- source extra/rpl_tests/rpl_commit_after_flush.test
 --source include/rpl_end.inc

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_ddl.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_ddl.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_ddl.test	2011-05-13 07:40:50 +0000
@@ -25,7 +25,7 @@
 
 --source include/have_ndb.inc
 --source include/have_binlog_format_mixed_or_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source include/master-slave.inc
 let $engine_type= NDB;
 let $temp_engine_type= MEMORY;
 let $show_binlog = 0;

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_delete_nowhere.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_delete_nowhere.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_delete_nowhere.test	2011-05-13 07:40:50 +0000
@@ -3,7 +3,7 @@
 # Share test code between engine tests  #
 #########################################
 --source include/have_ndb.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source include/master-slave.inc
 let $engine_type=NDB;
 -- source extra/rpl_tests/rpl_delete_no_where.test
 --source include/rpl_end.inc

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_extra_col_master.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_extra_col_master.test	2011-04-27 08:22:04 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_extra_col_master.test	2011-05-13 07:40:50 +0000
@@ -2,7 +2,7 @@
 # Purpose: To test having extra columns on the master WL#3915
 #############################################################
 -- source include/have_ndb.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source include/master-slave.inc
 -- source include/have_binlog_format_mixed_or_row.inc
 
 let $engine_type = 'NDB';

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_extra_col_slave.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_extra_col_slave.test	2011-04-27 08:14:37 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_extra_col_slave.test	2011-05-13 07:40:50 +0000
@@ -1,6 +1,6 @@
 -- source include/have_binlog_format_row.inc
 -- source include/have_ndb.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source include/master-slave.inc
 
 #
 # This is a disgrace...but slave randomly spits out these warnings

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_func003.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_func003.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_func003.test	2011-05-13 07:40:50 +0000
@@ -7,7 +7,7 @@
 # reduce test case code           #
 ###################################
 -- source include/have_ndb.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source include/master-slave.inc
 let $engine_type=NDB;
 -- source extra/rpl_tests/rpl_row_func003.test
 --source include/rpl_end.inc

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_insert_ignore.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_insert_ignore.test	2011-04-27 07:59:12 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_insert_ignore.test	2011-05-13 07:40:50 +0000
@@ -3,7 +3,7 @@
 #####################################
 -- source include/have_ndb.inc
 -- source include/have_binlog_format_row.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source include/master-slave.inc
 let $engine_type=NDB;
 -- source extra/rpl_tests/rpl_insert_ignore.test
 --source include/rpl_end.inc

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_update2.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_update2.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_update2.test	2011-05-13 07:40:50 +0000
@@ -7,7 +7,7 @@
 # Run this only for row based replication, as replication of
 # auto_increment values are not supported with NDB as storage engine
 -- source include/have_binlog_format_mixed_or_row.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source include/master-slave.inc
 
 let $engine_type=NDB;
 --source extra/rpl_tests/rpl_multi_update2.test

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_update3.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_update3.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_multi_update3.test	2011-05-13 07:40:50 +0000
@@ -3,7 +3,7 @@
 # to reuse test code between engine runs                   #
 ############################################################
 -- source include/have_ndb.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source include/master-slave.inc
 let $engine_type=NDB;
 -- source extra/rpl_tests/rpl_multi_update3.test
 --source include/rpl_end.inc

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_relayrotate.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_relayrotate.test	2011-04-15 12:45:47 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_relayrotate.test	2011-05-13 07:40:50 +0000
@@ -3,7 +3,7 @@
 # to reuse test code between engine runs                   #
 ############################################################
 -- source include/have_ndb.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source include/master-slave.inc
 let $engine_type=NDB;
 -- source extra/rpl_tests/rpl_relayrotate.test
 --source include/rpl_end.inc

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_row_001.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_row_001.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_row_001.test	2011-05-13 07:40:50 +0000
@@ -2,7 +2,7 @@
 # By JBM 2005-02-15 Wrapped to allow reuse of test code#
 ########################################################
 --source include/have_ndb.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source include/master-slave.inc
 let $engine_type=NDB;
 -- source extra/rpl_tests/rpl_row_001.test
 --source include/rpl_end.inc

=== renamed file 'mysql-test/suite/rpl_ndb/t/rpl_row_basic_7ndb.test' => 'mysql-test/suite/rpl_ndb/t/rpl_ndb_row_basic.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_row_basic_7ndb.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_row_basic.test	2011-05-13 07:40:50 +0000
@@ -1,6 +1,6 @@
 -- source include/have_ndb.inc
 -- source include/have_binlog_format_mixed_or_row.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source include/master-slave.inc
 
 let $type= 'NDB' ;
 let $extra_index= ;

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_set_null.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_set_null.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_set_null.test	2011-05-13 07:40:50 +0000
@@ -1,6 +1,6 @@
 -- source include/have_ndb.inc
 -- source include/have_binlog_format_mixed_or_row.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source include/master-slave.inc
 
 -- let $engine= NDB
 -- source extra/rpl_tests/rpl_set_null.test

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_sp003.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_sp003.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_sp003.test	2011-05-13 07:40:50 +0000
@@ -5,7 +5,7 @@
 # For different engines         #
 #################################
 -- source include/have_ndb.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source include/master-slave.inc
 let $engine_type=NDBCLUSTER;
 -- source extra/rpl_tests/rpl_row_sp003.test
 --source include/rpl_end.inc

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_sp006.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_sp006.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_sp006.test	2011-05-13 07:40:50 +0000
@@ -5,7 +5,7 @@
 # For different engines         #
 #################################
 -- source include/have_ndb.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source include/master-slave.inc
 let $engine_type=NDBCLUSTER;
 -- source extra/rpl_tests/rpl_row_sp006.test
 --source include/rpl_end.inc

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_trig004.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_trig004.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_trig004.test	2011-05-13 07:40:50 +0000
@@ -8,7 +8,7 @@
 
 # Includes
 -- source include/have_ndb.inc
--- source suite/rpl_ndb/ndb_master-slave.inc
+-- source include/master-slave.inc
 let $engine_type=NDB;
 -- source extra/rpl_tests/rpl_trig004.test
 

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_typeconv_all.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_typeconv_all.test	2011-04-21 12:36:25 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_typeconv_all.test	2011-05-13 07:40:50 +0000
@@ -1,6 +1,6 @@
 --source include/have_ndb.inc
 --source include/have_binlog_format_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source include/master-slave.inc
 let $engine_type=NDBCLUSTER;
 
 connection slave;

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_typeconv_lossy.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_typeconv_lossy.test	2011-04-21 12:36:25 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_typeconv_lossy.test	2011-05-13 07:40:50 +0000
@@ -1,6 +1,6 @@
 --source include/have_ndb.inc
 --source include/have_binlog_format_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source include/master-slave.inc
 let $engine_type=NDBCLUSTER;
 
 connection slave;

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_typeconv_nonlossy.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_typeconv_nonlossy.test	2011-04-21 12:36:25 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_typeconv_nonlossy.test	2011-05-13 07:40:50 +0000
@@ -1,6 +1,6 @@
 --source include/have_ndb.inc
 --source include/have_binlog_format_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source include/master-slave.inc
 let $engine_type=NDBCLUSTER;
 
 connection slave;

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_typeconv_strict.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_typeconv_strict.test	2011-04-21 12:36:25 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_typeconv_strict.test	2011-05-13 07:40:50 +0000
@@ -1,6 +1,6 @@
 --source include/have_ndb.inc
 --source include/have_binlog_format_row.inc
---source suite/rpl_ndb/ndb_master-slave.inc
+--source include/master-slave.inc
 let $engine_type=NDBCLUSTER;
 
 connection slave;

=== modified file 'storage/ndb/CMakeLists.txt'
--- a/storage/ndb/CMakeLists.txt	2011-03-28 08:11:37 +0000
+++ b/storage/ndb/CMakeLists.txt	2011-05-06 13:26:05 +0000
@@ -17,6 +17,41 @@
 SET(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}
     ${CMAKE_SOURCE_DIR}/cmake
     ${CMAKE_SOURCE_DIR}/storage/ndb/cmake)
+    
+# Check if this is a MySQL Cluster build, i.e. the MySQL Server
+# version string ends in -ndb-Y.Y.Y[-status]
+MACRO(NDB_CHECK_MYSQL_CLUSTER version_string)
+
+  IF(${version_string} MATCHES "(.*)-ndb-(.*)")
+    SET(mysql_version ${CMAKE_MATCH_1})
+    SET(cluster_version ${CMAKE_MATCH_2})
+    
+    MESSAGE(STATUS  "This is MySQL Cluster ${cluster_version}")
+    
+    # Sanity check that the mysql_version matches precalculated
+    # values from higher level scripts  
+    IF(NOT ${mysql_version} EQUAL "${MYSQL_NO_DASH_VERSION}")
+      MESSAGE(FATAL_ERROR "Sanity check of version_string failed!")
+    ENDIF()
+
+    # Split the cluster_version further into Y.Y.Y subcomponents
+    IF(${cluster_version} MATCHES "([0-9]+)\\.([0-9]+)\\.([0-9]+)")
+      SET(MYSQL_CLUSTER_VERSION_MAJOR ${CMAKE_MATCH_1} CACHE INTERNAL
+        "MySQL Cluster Major version calculated from MySQL version" FORCE)
+      SET(MYSQL_CLUSTER_VERSION_MINOR ${CMAKE_MATCH_2} CACHE INTERNAL
+        "MySQL Cluster Minor version calculated from MySQL version" FORCE)
+      SET(MYSQL_CLUSTER_VERSION_BUILD ${CMAKE_MATCH_3} CACHE INTERNAL
+        "MySQL Cluster Build version calculated from MySQL version" FORCE)
+    ENDIF()
+
+    # Finally set MYSQL_CLUSTER_VERSION to be used as an indicator
+    # that this is a MySQL Cluster build, yay!
+    SET(MYSQL_CLUSTER_VERSION ${cluster_version} CACHE INTERNAL
+      "This is MySQL Cluster" FORCE)
+
+  ENDIF()
+ENDMACRO()
+
 
 # Temporarily remove -Werror from compiler flags until
 # storage/ndb/ can be built with it
@@ -37,6 +72,9 @@ IF(SOURCE_SUBLIBS)
   # NDBCLUSTER_SOURCES and NDBCLUSTER_LIBS, don't configure
   # again
 ELSE()
+
+  NDB_CHECK_MYSQL_CLUSTER(${VERSION})
+
   INCLUDE(${CMAKE_CURRENT_SOURCE_DIR}/ndb_configure.cmake)
 
   INCLUDE_DIRECTORIES(
@@ -116,8 +154,16 @@ IF(EXISTS ${CMAKE_SOURCE_DIR}/storage/my
   MYSQL_STORAGE_ENGINE(NDBCLUSTER)
 ELSE()
   # New plugin support, cross-platform
+  
+  # NDB is DEFAULT plugin in MySQL Cluster
+  SET(is_default_plugin "")
+  IF(MYSQL_CLUSTER_VERSION)
+    SET(is_default_plugin "DEFAULT")
+  ENDIF()
+
   MYSQL_ADD_PLUGIN(ndbcluster ${NDBCLUSTER_SOURCES} STORAGE_ENGINE
-    DEFAULT STATIC_ONLY RECOMPILE_FOR_EMBEDDED LINK_LIBRARIES ndbclient)
+    ${is_default_plugin} STATIC_ONLY RECOMPILE_FOR_EMBEDDED
+    LINK_LIBRARIES ndbclient)
 
   IF (NOT MCP_BUG58158)
     IF(WITH_EMBEDDED_SERVER)

=== modified file 'storage/ndb/include/CMakeLists.txt'
--- a/storage/ndb/include/CMakeLists.txt	2011-02-03 14:20:36 +0000
+++ b/storage/ndb/include/CMakeLists.txt	2011-05-03 09:20:34 +0000
@@ -70,6 +70,18 @@ IF(NOT DEFINED NDB_VERSION_MAJOR OR
   MESSAGE(FATAL_ERROR "Couldn't parse version numbers from ndb_configure.m4")
 ENDIF()
 
+IF (DEFINED MYSQL_CLUSTER_VERSION)
+  # This is MySQL Cluster, the MySQL Cluster version must match NDB version
+  IF(NOT MYSQL_CLUSTER_VERSION_MAJOR EQUAL NDB_VERSION_MAJOR OR
+     NOT MYSQL_CLUSTER_VERSION_MINOR EQUAL NDB_VERSION_MINOR OR  
+     NOT MYSQL_CLUSTER_VERSION_BUILD EQUAL NDB_VERSION_BUILD)
+    MESSAGE(STATUS "MYSQL_CLUSTER_VERSION_MAJOR: ${MYSQL_CLUSTER_VERSION_MAJOR}")
+    MESSAGE(STATUS "MYSQL_CLUSTER_VERSION_MINOR: ${MYSQL_CLUSTER_VERSION_MINOR}")
+    MESSAGE(STATUS "MYSQL_CLUSTER_VERSION_BUILD: ${MYSQL_CLUSTER_VERSION_BUILD}")
+    MESSAGE(FATAL_ERROR "MySQL Cluster version does not match NDB version")
+  ENDIF()
+ENDIF()
+
 # Create ndb_version.h
 CONFIGURE_FILE(ndb_version.h.in
                ${CMAKE_CURRENT_SOURCE_DIR}/ndb_version.h

=== modified file 'storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp'
--- a/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/include/kernel/signaldata/DumpStateOrd.hpp	2011-05-07 06:17:02 +0000
@@ -170,6 +170,7 @@ public:
     DumpBackup = 13000,
     DumpBackupSetCompressed = 13001,
     DumpBackupSetCompressedLCP = 13002,
+    BackupErrorInsert = 13003,
 
     DumpDbinfo = 14000,
     DbinfoListTables = 14001,

=== modified file 'storage/ndb/include/kernel/signaldata/QueryTree.hpp'
--- a/storage/ndb/include/kernel/signaldata/QueryTree.hpp	2011-02-23 19:28:26 +0000
+++ b/storage/ndb/include/kernel/signaldata/QueryTree.hpp	2011-05-04 11:45:33 +0000
@@ -99,6 +99,17 @@ struct DABits
      */
     NI_LINKED_DISK    = 0x100,
 
+    /**
+     * If REPEAT_SCAN_RESULT is set, multiple star-joined (or bushy, or X)
+     * scan results are handled by repeating the other scans' results
+     * when we advance to the next batch chunk for the current 'active'
+     * result set.
+     * This removes the requirement for the API client to be able to
+     * buffer a (possibly huge) amount of scan results relating to
+     * the same parent scan.
+     */
+    NI_REPEAT_SCAN_RESULT = 0x200,
+
     NI_END = 0
   };
 

=== modified file 'storage/ndb/include/ndbapi/NdbReceiver.hpp'
--- a/storage/ndb/include/ndbapi/NdbReceiver.hpp	2011-02-09 14:18:53 +0000
+++ b/storage/ndb/include/ndbapi/NdbReceiver.hpp	2011-05-11 13:31:44 +0000
@@ -118,7 +118,7 @@ private:
   */
   void do_setup_ndbrecord(const NdbRecord *ndb_record, Uint32 batch_size,
                           Uint32 key_size, Uint32 read_range_no,
-                          Uint32 rowsize, char *buf, Uint32 column_count);
+                          Uint32 rowsize, char *buf);
 
   static
   Uint32 ndbrecord_rowsize(const NdbRecord *ndb_record,
@@ -137,33 +137,27 @@ private:
     new NdbRecord style operation.
   */
   bool m_using_ndb_record;
-  union {
-    /* members used for NdbRecAttr operation. */
-    struct {
-      Uint32 m_hidden_count;
-    } m_recattr;
-
-    /* members used for NdbRecord operation. */
-    struct {
-      Uint32 m_column_count;
-      const NdbRecord *m_ndb_record;
-      char *m_row;
-      /* Block of memory used to receive all rows in a batch during scan. */
-      char *m_row_buffer;
-      /*
-        Offsets between two rows in m_row_buffer.
-        This can be different from m_ndb_record->m_row_size, as we sometimes
-        store extra information after each row (range_no and keyinfo).
-        For non-scan operations, this is set to zero.
-      */
-      Uint32 m_row_offset;
-      /*
-        m_read_range_no is true if we are storing the range_no at the end of
-        each row during scans.
-      */
-      bool m_read_range_no;
-    } m_record;
-  };
+
+  /* members used for NdbRecord operation. */
+  struct {
+    const NdbRecord *m_ndb_record;
+    char *m_row;
+    /* Block of memory used to receive all rows in a batch during scan. */
+    char *m_row_buffer;
+    /*
+      Offsets between two rows in m_row_buffer.
+      This can be different from m_ndb_record->m_row_size, as we sometimes
+      store extra information after each row (range_no and keyinfo).
+      For non-scan operations, this is set to zero.
+    */
+    Uint32 m_row_offset;
+    /*
+      m_read_range_no is true if we are storing the range_no at the end of
+      each row during scans.
+    */
+    bool m_read_range_no;
+  } m_record;
+
   class NdbRecAttr* theFirstRecAttr;
   class NdbRecAttr* theCurrentRecAttr;
 
@@ -212,7 +206,6 @@ private:
 
   bool hasResults() const { return m_result_rows > 0; }
   bool nextResult() const { return m_current_row < m_result_rows; }
-  NdbRecAttr* copyout(NdbReceiver&);
   Uint32 receive_packed_recattr(NdbRecAttr**, Uint32 bmlen, 
                                 const Uint32* aDataPtr, Uint32 aLength);
   Uint32 receive_packed_ndbrecord(Uint32 bmlen,

=== modified file 'storage/ndb/ndb_configure.m4'
--- a/storage/ndb/ndb_configure.m4	2011-04-26 07:25:51 +0000
+++ b/storage/ndb/ndb_configure.m4	2011-05-16 11:44:52 +0000
@@ -48,6 +48,9 @@ AC_DEFUN([NDB_CHECK_NDBMTD], [
       AC_TRY_RUN(
         [
         #include "storage/ndb/src/kernel/vm/mt-asm.h"
+        #ifdef NDB_NO_ASM
+        #error "compiler/arch does not have asm needed for ndbmtd"
+        #endif
         int main()
         {
           unsigned int a = 0;

=== modified file 'storage/ndb/src/common/portlib/CMakeLists.txt'
--- a/storage/ndb/src/common/portlib/CMakeLists.txt	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/src/common/portlib/CMakeLists.txt	2011-05-11 12:23:24 +0000
@@ -37,4 +37,8 @@ SET_TARGET_PROPERTIES(NdbDir-t
                       PROPERTIES COMPILE_FLAGS "-DTEST_NDBDIR")
 TARGET_LINK_LIBRARIES(NdbDir-t ndbportlib)
 
+ADD_EXECUTABLE(NdbGetInAddr-t NdbTCP.cpp)
+SET_TARGET_PROPERTIES(NdbGetInAddr-t
+                      PROPERTIES COMPILE_FLAGS "-DTEST_NDBGETINADDR")
+
 

=== modified file 'storage/ndb/src/common/portlib/Makefile.am'
--- a/storage/ndb/src/common/portlib/Makefile.am	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/common/portlib/Makefile.am	2011-05-11 12:23:24 +0000
@@ -33,7 +33,7 @@ EXTRA_PROGRAMS = memtest PortLibTest mun
 PortLibTest_SOURCES = NdbPortLibTest.cpp
 munmaptest_SOURCES = munmaptest.cpp
 
-noinst_PROGRAMS = NdbDir-t NdbNuma-t
+noinst_PROGRAMS = NdbDir-t NdbNuma-t NdbGetInAddr-t
 
 NdbDir_t_SOURCES = NdbDir.cpp \
        $(top_srcdir)/storage/ndb/src/common/util/basestring_vsnprintf.c
@@ -46,3 +46,5 @@ NdbNuma_t_SOURCES = NdbNuma.cpp
 NdbNuma_t_CXXFLAGS = -DTEST_NDBNUMA
 NdbNuma_t_LDADD = $(top_builddir)/mysys/libmysyslt.la @LIBDL@
 
+NdbGetInAddr_t_SOURCES = NdbTCP.cpp
+NdbGetInAddr_t_CXXFLAGS = -DTEST_NDBGETINADDR

=== modified file 'storage/ndb/src/common/portlib/NdbMutex.c'
--- a/storage/ndb/src/common/portlib/NdbMutex.c	2011-02-03 14:20:36 +0000
+++ b/storage/ndb/src/common/portlib/NdbMutex.c	2011-05-17 07:57:55 +0000
@@ -88,12 +88,14 @@ int NdbMutex_InitWithName(NdbMutex* pNdb
   defined(HAVE_PTHREAD_MUTEXATTR_INIT) && \
   defined(HAVE_PTHREAD_MUTEXATTR_SETTYPE)
 
-  pthread_mutexattr_t t;
-  pthread_mutexattr_init(&t);
-  pthread_mutexattr_settype(&t, PTHREAD_MUTEX_ERRORCHECK);
-  result = pthread_mutex_init(p, &t);
-  assert(result == 0);
-  pthread_mutexattr_destroy(&t);
+  {
+    pthread_mutexattr_t t;
+    pthread_mutexattr_init(&t);
+    pthread_mutexattr_settype(&t, PTHREAD_MUTEX_ERRORCHECK);
+    result = pthread_mutex_init(p, &t);
+    assert(result == 0);
+    pthread_mutexattr_destroy(&t);
+  }
 #else
   result = pthread_mutex_init(p, 0);
 #endif

=== modified file 'storage/ndb/src/common/portlib/NdbTCP.cpp'
--- a/storage/ndb/src/common/portlib/NdbTCP.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/common/portlib/NdbTCP.cpp	2011-05-12 12:53:51 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -17,43 +17,203 @@
 
 
 #include <ndb_global.h>
-#include <my_net.h>
 #include <NdbTCP.h>
 
 
+/* On some operating systems (e.g. Solaris) INADDR_NONE is not defined */
+#ifndef INADDR_NONE
+#define INADDR_NONE -1                          /* Error value from inet_addr */
+#endif
+
 
 extern "C"
-int 
+int
 Ndb_getInAddr(struct in_addr * dst, const char *address)
 {
+  struct addrinfo hints;
+  memset(&hints, 0, sizeof(hints));
+  hints.ai_family = AF_INET; // Only IPv4 address
+  hints.ai_socktype = SOCK_STREAM;
+  hints.ai_protocol = IPPROTO_TCP;
+
+  struct addrinfo* ai_list;
+  if (getaddrinfo(address, NULL, &hints, &ai_list) != 0)
+  {
+    dst->s_addr = INADDR_NONE;
+    return -1;
+  }
+
+  /* Return sin_addr for the first address returned */
+  struct sockaddr_in* sin = (struct sockaddr_in*)ai_list->ai_addr;
+  memcpy(dst, &sin->sin_addr, sizeof(struct in_addr));
+
+  freeaddrinfo(ai_list);
+  return 0;
+}
+
+#ifdef TEST_NDBGETINADDR
+#include <NdbTap.hpp>
+
+static void
+CHECK(const char* address, int expected_res, bool is_numeric= false)
+{
+  struct in_addr addr;
+
+  fprintf(stderr, "Testing '%s'\n", address);
+
+  int res= Ndb_getInAddr(&addr, address);
+
+  if (res != expected_res)
+  {
+    fprintf(stderr, "> unexpected result: %d, expected: %d\n",
+            res, expected_res);
+    abort();
+  }
+
+  if (res != 0)
+  {
+    fprintf(stderr, "> returned -1, checking INADDR_NONE\n");
+
+    // Should return INADDR_NONE when lookup fails
+    struct in_addr none;
+    none.s_addr = INADDR_NONE;
+    if (memcmp(&addr, &none, sizeof(none)) != 0)
+    {
+      fprintf(stderr, "> didn't return INADDR_NONE after failure, "
+             "got: '%s', expected; '%s'\n",
+             inet_ntoa(addr), inet_ntoa(none));
+      abort();
+    }
+    fprintf(stderr, "> ok\n");
+    return;
+  }
+
+  fprintf(stderr, "> '%s' -> '%s'\n", address, inet_ntoa(addr));
+
+  if (is_numeric)
   {
-    int tmp_errno;
-    struct hostent tmp_hostent, *hp;
-    char buff[GETHOSTBYNAME_BUFF_SIZE];
-    hp = my_gethostbyname_r(address,&tmp_hostent,buff,sizeof(buff),
-			    &tmp_errno);
-    if (hp)
+    // Check that a numeric address always maps back to itself,
+    // i.e. compare to the value returned by 'inet_addr'
+    fprintf(stderr, "> Checking numeric address against inet_addr\n");
+    struct in_addr addr2;
+    addr2.s_addr = inet_addr(address);
+    fprintf(stderr, "> inet_addr(%s) -> '%s'\n", address, inet_ntoa(addr2));
+
+    if (memcmp(&addr, &addr2, sizeof(struct in_addr)) != 0)
     {
-      memcpy(dst, hp->h_addr, min(sizeof(*dst), (size_t) hp->h_length));
-      my_gethostbyname_r_free();
-      return 0;
+      fprintf(stderr, "> numeric address '%s' didn't map to same value as "
+              "inet_addr: '%s'", address, inet_ntoa(addr2));
+      abort();
     }
-    my_gethostbyname_r_free();
+    fprintf(stderr, "> ok\n");
+  }
+}
+
+
+/*
+  socket_library_init
+   - Normally done by ndb_init(), but to avoid
+     having to link with "everything", implement it locally
+*/
+
+static void
+socket_library_init(void)
+{
+#ifdef _WIN32
+  WORD requested_version = MAKEWORD( 2, 0 );
+  WSADATA wsa_data;
+  if (WSAStartup( requested_version, &wsa_data ))
+  {
+    fprintf(stderr, "failed to init Winsock\n");
+    abort();
+  }
+
+  // Confirm that the requested version of the library was loaded
+  if (wsa_data.wVersion != requested_version)
+  {
+    (void)WSACleanup();
+    fprintf(stderr, "Wrong version of Winsock loaded\n");
+    abort();
   }
-  /* Try it as aaa.bbb.ccc.ddd. */
-  dst->s_addr = inet_addr(address);
-  if (dst->s_addr != 
-#ifdef INADDR_NONE
-      INADDR_NONE
-#else
-      -1
 #endif
-      )
+}
+
+
+static void
+socket_library_end()
+{
+#ifdef _WIN32
+  (void)WSACleanup();
+#endif
+}
+
+static bool
+can_resolve_hostname(const char* name)
+{
+  fprintf(stderr, "Checking if '%s' can be used for testing\n", name);
+  struct addrinfo hints;
+  memset(&hints, 0, sizeof(hints));
+  hints.ai_family = AF_INET; // Only IPv4 address
+  hints.ai_socktype = SOCK_STREAM;
+  hints.ai_protocol = IPPROTO_TCP;
+
+  struct addrinfo* ai_list;
+  int err = getaddrinfo(name, NULL, &hints, &ai_list);
+
+  if (err)
+  {
+    fprintf(stderr, "> '%s' -> error: %d '%s'\n",
+             name, err, gai_strerror(err));
+
+    if (err == EAI_NODATA ||
+	err == EAI_NONAME)
+    {
+      // An OK error 
+      fprintf(stderr, ">  skipping tests with this name...\n");
+      return false;
+    }
+
+    // Another unhandled error
+    abort();
+  }
+
+  freeaddrinfo(ai_list);
+
+  return true;
+}
+
+
+TAPTEST(NdbGetInAddr)
+{
+  socket_library_init();
+
+  if (can_resolve_hostname("localhost"))
+    CHECK("localhost", 0);
+  CHECK("127.0.0.1", 0, true);
+
+  char hostname_buf[256];
+  if (gethostname(hostname_buf, sizeof(hostname_buf)) == 0 &&
+      can_resolve_hostname(hostname_buf))
   {
-    return 0;
+    // Check this machine's hostname
+    CHECK(hostname_buf, 0);
+
+    struct in_addr addr;
+    Ndb_getInAddr(&addr, hostname_buf);
+    // Convert hostname to dotted decimal string ip and check
+    CHECK(inet_ntoa(addr), 0, true);
   }
-  return -1;
+  CHECK("unknown_?host", -1); // Does not exist
+  CHECK("3ffe:1900:4545:3:200:f8ff:fe21:67cf", -1); // No IPv6
+  CHECK("fe80:0:0:0:200:f8ff:fe21:67cf", -1);
+  CHECK("fe80::200:f8ff:fe21:67cf", -1);
+  CHECK("::1", -1); // the loopback, but still No IPv6
+
+  socket_library_end();
+
+  return 1; // OK
 }
+#endif
 
 
 static inline

=== modified file 'storage/ndb/src/kernel/blocks/backup/Backup.cpp'
--- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp	2011-02-24 09:46:11 +0000
+++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp	2011-05-07 06:17:02 +0000
@@ -662,6 +662,16 @@ Backup::execDUMP_STATE_ORD(Signal* signa
     c_defaults.m_compressed_lcp= signal->theData[1];
     infoEvent("Compressed LCP: %d", c_defaults.m_compressed_lcp);
   }
+
+  if (signal->theData[0] == DumpStateOrd::BackupErrorInsert)
+  {
+    if (signal->getLength() == 1)
+      ndbout_c("BACKUP: setting error %u", signal->theData[1]);
+    else
+      ndbout_c("BACKUP: setting error %u, %u",
+               signal->theData[1], signal->theData[2]);
+    SET_ERROR_INSERT_VALUE2(signal->theData[1], signal->theData[2]);
+  }
 }
 
 void Backup::execDBINFO_SCANREQ(Signal *signal)
@@ -4578,6 +4588,13 @@ Backup::checkScan(Signal* signal, Backup
       sendSignal(ptr.p->masterRef, GSN_ABORT_BACKUP_ORD, signal, 
 		 AbortBackupOrd::SignalLength, JBB);
     }
+#ifdef ERROR_INSERT
+    else if (ERROR_INSERTED(10042) && filePtr.p->tableId ==c_error_insert_extra)
+    {
+      sendSignalWithDelay(lqhRef, GSN_SCAN_NEXTREQ, signal,
+			  10, ScanFragNextReq::SignalLength);
+    }
+#endif
     else
     {
       sendSignal(lqhRef, GSN_SCAN_NEXTREQ, signal, 

=== modified file 'storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp'
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp	2011-04-12 11:59:36 +0000
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp	2011-05-02 13:36:19 +0000
@@ -1043,7 +1043,14 @@ void Cmvmi::execCONNECT_REP(Signal *sign
    * Inform QMGR that client has connected
    */
   signal->theData[0] = hostId;
-  sendSignal(QMGR_REF, GSN_CONNECT_REP, signal, 1, JBA);
+  if (ERROR_INSERTED(9005))
+  {
+    sendSignalWithDelay(QMGR_REF, GSN_CONNECT_REP, signal, 50, 1);
+  }
+  else
+  {
+    sendSignal(QMGR_REF, GSN_CONNECT_REP, signal, 1, JBA);
+  }
 
   /* Automatically subscribe events for MGM nodes.
    */

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-04-11 13:36:12 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-05-17 13:09:45 +0000
@@ -3914,6 +3914,7 @@ Dbdict::checkPendingSchemaTrans(XSchemaF
         }
       }
 
+      transEntry->m_tableType = DictTabInfo::UndefTableType;
       transEntry->m_tableState = SchemaFile::SF_UNUSED;
       transEntry->m_transId = 0;
     }
@@ -8028,9 +8029,23 @@ Dbdict::alterTable_parse(Signal* signal,
 
     // the new temporary table record seized from pool
     newTablePtr = parseRecord.tablePtr;
+    alterTabPtr.p->m_newTable_realObjectId = newTablePtr.p->tableId;
     newTablePtr.p->tableId = impl_req->tableId; // set correct table id...(not the temporary)
   }
 
+
+  {
+    /**
+     * Mark SchemaObject as in-use so that it won't be found by other ops;
+     *   choose a state that will be automatically cleaned in case we crash
+     */
+    SchemaFile::TableEntry * objEntry =
+      objEntry = getTableEntry(alterTabPtr.p->m_newTable_realObjectId);
+    objEntry->m_tableType = DictTabInfo::SchemaTransaction;
+    objEntry->m_tableState = SchemaFile::SF_STARTED;
+    objEntry->m_transId = trans_ptr.p->m_transId + 1;
+  }
+
   // set the new version now
   impl_req->newTableVersion =
     newTablePtr.p->tableVersion =
@@ -9469,6 +9484,15 @@ Dbdict::alterTable_fromCommitComplete(Si
 	       JBB, ptr, 1);
   }
 
+  {
+    // Remark object as free
+    SchemaFile::TableEntry * objEntry =
+      objEntry = getTableEntry(alterTabPtr.p->m_newTable_realObjectId);
+    objEntry->m_tableType = DictTabInfo::UndefTableType;
+    objEntry->m_tableState = SchemaFile::SF_UNUSED;
+    objEntry->m_transId = 0;
+  }
+
   releaseTableObject(alterTabPtr.p->m_newTablePtr.i, false);
   sendTransConf(signal, op_ptr);
 }
@@ -9551,6 +9575,16 @@ Dbdict::alterTable_abortParse(Signal* si
   if (!newTablePtr.isNull()) {
     jam();
     // release the temporary work table
+
+    {
+      // Remark object as free
+      SchemaFile::TableEntry * objEntry =
+        objEntry = getTableEntry(alterTabPtr.p->m_newTable_realObjectId);
+      objEntry->m_tableType = DictTabInfo::UndefTableType;
+      objEntry->m_tableState = SchemaFile::SF_UNUSED;
+      objEntry->m_transId = 0;
+    }
+
     releaseTableObject(newTablePtr.i, false);
     newTablePtr.setNull();
   }

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp	2011-02-16 14:53:53 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp	2011-05-12 09:01:21 +0000
@@ -2365,6 +2365,7 @@ private:
     // current and new temporary work table
     TableRecordPtr m_tablePtr;
     TableRecordPtr m_newTablePtr;
+    Uint32 m_newTable_realObjectId;
 
     // before image
     RopeHandle m_oldTableName;

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp	2011-04-21 09:21:18 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/Dbdih.hpp	2011-05-17 11:41:50 +0000
@@ -28,6 +28,7 @@
 #include <signaldata/CopyGCIReq.hpp>
 #include <blocks/mutexes.hpp>
 #include <signaldata/LCP.hpp>
+#include <NdbSeqLock.hpp>
 
 #ifdef DBDIH_C
 
@@ -441,7 +442,17 @@ public:
    * TO LOCATE A FRAGMENT AND TO TRANSLATE A KEY OF A TUPLE TO THE FRAGMENT IT
    * BELONGS
    */
-  struct TabRecord {
+  struct TabRecord
+  {
+    TabRecord() { }
+
+    /**
+     * rw-lock that protects multiple parallel DIGETNODES (readers) from
+     *   updates to fragmentation changes (e.g. CREATE_FRAGREQ)...
+     *   search for DIH_TAB_WRITE_LOCK
+     */
+    NdbSeqLock m_lock;
+
     /**
      * State for copying table description into pages
      */

=== modified file 'storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-04-21 09:21:18 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2011-05-17 11:41:50 +0000
@@ -15325,6 +15325,7 @@ void Dbdih::initRestorableGciFiles()
 
 void Dbdih::initTable(TabRecordPtr tabPtr)
 {
+  new (tabPtr.p) TabRecord();
   tabPtr.p->noOfFragChunks = 0;
   tabPtr.p->method = TabRecord::NOTDEFINED;
   tabPtr.p->tabStatus = TabRecord::TS_IDLE;

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp	2011-02-23 19:28:26 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp	2011-05-04 11:45:33 +0000
@@ -27,6 +27,7 @@
 #include <SLList.hpp>
 #include <ArenaPool.hpp>
 #include <DataBuffer2.hpp>
+#include <Bitmask.hpp>
 #include <signaldata/DbspjErr.hpp>
 #include "../dbtup/tuppage.hpp"
 
@@ -104,6 +105,7 @@ public:
   typedef LocalDataBuffer2<14, LocalArenaPoolImpl> Local_dependency_map;
   typedef DataBuffer2<14, LocalArenaPoolImpl> PatternStore;
   typedef LocalDataBuffer2<14, LocalArenaPoolImpl> Local_pattern_store;
+  typedef Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32> TreeNodeBitMask;
 
   struct RowRef
   {
@@ -296,6 +298,8 @@ public:
     Signal* m_start_signal; // Argument to first node in tree
     SegmentedSectionPtr m_keyPtr;
 
+    TreeNodeBitMask m_scans; // TreeNodes doing scans
+
     // Used for resolving dependencies
     Ptr<TreeNode> m_node_list[NDB_SPJ_MAX_TREE_NODES];
   };
@@ -415,6 +419,18 @@ public:
     void (Dbspj::*m_parent_batch_complete)(Signal*,Ptr<Request>,Ptr<TreeNode>);
 
     /**
+     * This function is called on the *child* by the *parent* when this
+     *   child should prepare to resend results related to parents current batch
+     */
+    void (Dbspj::*m_parent_batch_repeat)(Signal*,Ptr<Request>,Ptr<TreeNode>);
+
+    /**
+     * This function is called on the *child* by the *parent* when
+     *   child should release buffers related to parents current batch
+     */
+    void (Dbspj::*m_parent_batch_cleanup)(Ptr<Request>,Ptr<TreeNode>);
+
+    /**
      * This function is called when getting a SCAN_NEXTREQ
      */
     void (Dbspj::*m_execSCAN_NEXTREQ)(Signal*, Ptr<Request>,Ptr<TreeNode>);
@@ -441,7 +457,7 @@ public:
      *  should only do local cleanup(s)
      */
     void (Dbspj::*m_cleanup)(Ptr<Request>, Ptr<TreeNode>);
-  };
+  };  //struct OpInfo
 
   struct LookupData
   {
@@ -520,6 +536,7 @@ public:
     Uint16 m_frags_outstanding;
     Uint32 m_rows_received;  // #execTRANSID_AI
     Uint32 m_rows_expecting; // Sum(ScanFragConf)
+    Uint32 m_batch_chunks;   // #SCAN_FRAGREQ + #SCAN_NEXTREQ to retrieve batch
     Uint32 m_scanCookie;
     Uint32 m_fragCount;
     ScanFragHandle_list::HeadPOD m_fragments; // ScanFrag states
@@ -547,7 +564,8 @@ public:
 
     TreeNode()
     : m_magic(MAGIC), m_state(TN_END),
-      m_parentPtrI(RNIL), m_requestPtrI(0)
+      m_parentPtrI(RNIL), m_requestPtrI(0),
+      m_ancestors()
     {
     }
 
@@ -555,6 +573,7 @@ public:
     : m_magic(MAGIC),
       m_info(0), m_bits(T_LEAF), m_state(TN_BUILDING),
       m_parentPtrI(RNIL), m_requestPtrI(request),
+      m_ancestors(),
       nextList(RNIL), prevList(RNIL)
     {
 //    m_send.m_ref = 0;
@@ -658,7 +677,7 @@ public:
       T_REPORT_BATCH_COMPLETE  = 0x200,
 
       /**
-       * Do I need to know when parent batch is cimpleted
+       * Do I need to know when parent batch is completed
        */
       T_NEED_REPORT_BATCH_COMPLETED = 0x400,
 
@@ -677,6 +696,11 @@ public:
        */
       T_SCAN_PARALLEL = 0x2000,
 
+      /**
+       * Possible requesting resultset for this index scan to be repeated
+       */
+      T_SCAN_REPEATABLE = 0x4000,
+
       // End marker...
       T_END = 0
     };
@@ -689,6 +713,7 @@ public:
     Uint32 m_batch_size;
     Uint32 m_parentPtrI;
     const Uint32 m_requestPtrI;
+    TreeNodeBitMask m_ancestors;
     Dependency_map::Head m_dependent_nodes;
     PatternStore::Head m_keyPattern;
     PatternStore::Head m_attrParamPattern;
@@ -725,7 +750,7 @@ public:
       Uint32 nextPool;
     };
     Uint32 prevList;
-  };
+  };  //struct TreeNode
 
   static const Ptr<TreeNode> NullTreeNodePtr;
 
@@ -745,12 +770,13 @@ public:
   {
     enum RequestBits
     {
-      RT_SCAN = 0x1            // unbounded result set, scan interface
-      ,RT_ROW_BUFFERS = 0x2    // Do any of the node use row-buffering
-      ,RT_MULTI_SCAN  = 0x4    // Is there several scans in request
-      ,RT_VAR_ALLOC   = 0x8    // Is var-allocation used for row-buffer
-      ,RT_NEED_PREPARE = 0x10  // Does any node need m_prepare hook
-      ,RT_NEED_COMPLETE = 0x20 // Does any node need m_complete hook
+      RT_SCAN                = 0x1  // unbounded result set, scan interface
+      ,RT_ROW_BUFFERS        = 0x2  // Do any of the node use row-buffering
+      ,RT_MULTI_SCAN         = 0x4  // Is there several scans in request
+      ,RT_VAR_ALLOC          = 0x8  // Is var-allocation used for row-buffer
+      ,RT_NEED_PREPARE       = 0x10 // Does any node need m_prepare hook
+      ,RT_NEED_COMPLETE      = 0x20 // Does any node need m_complete hook
+      ,RT_REPEAT_SCAN_RESULT = 0x40 // Repeat bushy scan result when required
     };
 
     enum RequestState
@@ -765,7 +791,7 @@ public:
 
       RS_ABORTED    = 0x2008, // Aborted and waiting for SCAN_NEXTREQ
       RS_END = 0
-    };
+    };  //struct Request
 
     Request() {}
     Request(const ArenaHead & arena) : m_arena(arena) {}
@@ -781,7 +807,8 @@ public:
     TreeNode_list::Head m_nodes;
     TreeNodeCursor_list::Head m_cursor_nodes;
     Uint32 m_cnt_active;       // No of "running" nodes
-    Bitmask<1> m_active_nodes; // Nodes which will return more data
+    TreeNodeBitMask
+           m_active_nodes;     // Nodes which will return more data in NEXTREQ
     Uint32 m_rows;             // Rows accumulated in current batch
     Uint32 m_outstanding;      // Outstanding signals, when 0, batch is done
     Uint16 m_lookup_node_data[MAX_NDB_NODES];
@@ -976,6 +1003,7 @@ private:
   void start(Signal*, Ptr<Request>);
   void checkBatchComplete(Signal*, Ptr<Request>, Uint32 cnt);
   void batchComplete(Signal*, Ptr<Request>);
+  void prepareNextBatch(Signal*, Ptr<Request>);
   void sendConf(Signal*, Ptr<Request>, bool is_complete);
   void complete(Signal*, Ptr<Request>);
   void cleanup(Ptr<Request>);
@@ -988,12 +1016,11 @@ private:
   void releaseRequestBuffers(Ptr<Request> requestPtr, bool reset);
   void releaseNodeRows(Ptr<Request> requestPtr, Ptr<TreeNode>);
   void releaseRow(Ptr<Request>, RowRef ref);
-  Uint32 releaseScanBuffers(Ptr<Request> requestPtr, Ptr<TreeNode>);
-  void registerCursor(Ptr<Request>, Ptr<TreeNode>);
+  void registerActiveCursor(Ptr<Request>, Ptr<TreeNode>);
   void nodeFail_checkRequests(Signal*);
 
+  void cleanupChildBranch(Ptr<Request>, Ptr<TreeNode>);
   void cleanup_common(Ptr<Request>, Ptr<TreeNode>);
-  void mark_active(Ptr<Request>, Ptr<TreeNode>, bool value);
 
   /**
    * Row buffering
@@ -1141,13 +1168,17 @@ private:
   Uint32 scanIndex_findFrag(Local_ScanFragHandle_list &, Ptr<ScanFragHandle>&,
                             Uint32 fragId);
   void scanIndex_parent_batch_complete(Signal*, Ptr<Request>, Ptr<TreeNode>);
+  void scanIndex_parent_batch_repeat(Signal*, Ptr<Request>, Ptr<TreeNode>);
   void scanIndex_execSCAN_NEXTREQ(Signal*, Ptr<Request>,Ptr<TreeNode>);
   void scanIndex_complete(Signal*, Ptr<Request>, Ptr<TreeNode>);
   void scanIndex_abort(Signal*, Ptr<Request>, Ptr<TreeNode>);
   Uint32 scanIndex_execNODE_FAILREP(Signal*signal, Ptr<Request>, Ptr<TreeNode>,
                                   NdbNodeBitmask);
+  void scanIndex_parent_batch_cleanup(Ptr<Request>, Ptr<TreeNode>);
   void scanIndex_cleanup(Ptr<Request>, Ptr<TreeNode>);
 
+  void scanIndex_release_rangekeys(Ptr<Request>, Ptr<TreeNode>);
+
   /**
    * Page manager
    */

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-04-29 09:11:12 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-05-13 08:38:01 +0000
@@ -973,19 +973,6 @@ Dbspj::build(Build_context& ctx,
       jam();
       requestPtr.p->m_bits |= Request::RT_VAR_ALLOC;
     }
-
-    {
-      /**
-       * If multi scan, then cursors are determined when one batch is complete
-       *   hence clear list here...
-       * But if it's single scan...the list will already contain the
-       *   only scan in the tree
-       */
-      Local_TreeNodeCursor_list list(m_treenode_pool,
-                                     requestPtr.p->m_cursor_nodes);
-      ndbassert(list.noOfElements() > 1);
-      list.remove();
-    }
   }
 
   return 0;
@@ -1119,6 +1106,8 @@ Dbspj::batchComplete(Signal* signal, Ptr
     {
       ndbassert(is_complete);
     }
+
+    prepareNextBatch(signal, requestPtr);
     sendConf(signal, requestPtr, is_complete);
   }
   else if (is_complete && need_complete_phase)
@@ -1158,6 +1147,132 @@ Dbspj::batchComplete(Signal* signal, Ptr
   }
 }
 
+/**
+ * Locate next TreeNode(s) to retrieve more rows from.
+ *
+ *   Calculate set of the 'm_active_nodes' we will receive from in NEXTREQ.
+ *   Add these TreeNodes to the cursor list to be iterated.
+ */
+void
+Dbspj::prepareNextBatch(Signal* signal, Ptr<Request> requestPtr)
+{
+  requestPtr.p->m_cursor_nodes.init();
+  requestPtr.p->m_active_nodes.clear();
+
+  if (requestPtr.p->m_cnt_active == 0)
+  {
+    jam();
+    return;
+  }
+
+  if (requestPtr.p->m_bits & Request::RT_REPEAT_SCAN_RESULT)
+  {
+    /**
+     * If REPEAT_SCAN_RESULT we handle bushy scans by returning more *new* rows
+     * from only one of the active child scans. If there are multiple 
+     * bushy scans not being able to return their current result set in 
+     * a single batch, result sets from the other child scans are repeated
+     * until all rows have been returned to the API client.
+     *
+     * Hence, the cross joined results from the bushy scans are partly
+     * produced within the SPJ block on a 'batchsize granularity', 
+     * and partly is the responsibility of the API-client by iterating
+     * the result rows within the current result batches.
+     * (Opposed to non-REPEAT_SCAN_RESULT, the client only has to care about
+     *  the current batched rows - no buffering is required)
+     */
+    jam();
+    Ptr<TreeNode> nodePtr;
+    Local_TreeNode_list list(m_treenode_pool, requestPtr.p->m_nodes);
+
+    /**
+     * Locate last 'TN_ACTIVE' TreeNode which is the only one chosen
+     * to return more *new* rows.
+     */
+    for (list.last(nodePtr); !nodePtr.isNull(); list.prev(nodePtr))
+    {
+      if (nodePtr.p->m_state == TreeNode::TN_ACTIVE)
+      {
+        jam();
+        DEBUG("Will fetch more from 'active' m_node_no: " << nodePtr.p->m_node_no);
+        /**
+         * A later NEXTREQ will request a *new* batch of rows from this TreeNode.
+         */
+        registerActiveCursor(requestPtr, nodePtr);
+        break;
+      }
+    }
+
+    /**
+     *  Restart/repeat other (index scan) child batches which:
+     *    - Being 'after' nodePtr located above.
+     *    - Not being an ancestor of (depends on) any 'active' TreeNode.
+     *      (As these scans are started when rows from these parent nodes
+     *      arrives.)
+     */
+    if (!nodePtr.isNull())
+    {
+      jam();
+      DEBUG("Calculate 'active', w/ cursor on m_node_no: " << nodePtr.p->m_node_no);
+
+      /* Restart any partial index-scans after this 'TN_ACTIVE' TreeNode */
+      for (list.next(nodePtr); !nodePtr.isNull(); list.next(nodePtr))
+      {
+        jam();
+        if (!nodePtr.p->m_ancestors.overlaps (requestPtr.p->m_active_nodes))
+        {
+          jam();
+          ndbrequire(nodePtr.p->m_state != TreeNode::TN_ACTIVE);
+          ndbrequire(nodePtr.p->m_info != 0);
+          if (nodePtr.p->m_info->m_parent_batch_repeat != 0)
+          {
+            jam();
+            (this->*(nodePtr.p->m_info->m_parent_batch_repeat))(signal,
+                                                                requestPtr,
+                                                                nodePtr);
+          }
+        }
+      }
+    } // if (!nodePtr.isNull()
+  }
+  else  // not 'RT_REPEAT_SCAN_RESULT'
+  {
+    /**
+     * If not REPEAT_SCAN_RESULT multiple active TreeNodes may return their 
+     * remaining result simultaneously. In case of bushy-scans, these
+     * concurrent result streams are cross joins of each other
+     * in SQL terms. In order to produce the cross joined result, it is
+     * the responsibility of the API-client to buffer these streams and
+     * iterate them to produce the cross join.
+     */
+    jam();
+    Ptr<TreeNode> nodePtr;
+    Local_TreeNode_list list(m_treenode_pool, requestPtr.p->m_nodes);
+    TreeNodeBitMask ancestors_of_active;
+
+    for (list.last(nodePtr); !nodePtr.isNull(); list.prev(nodePtr))
+    {
+      /**
+       * If we are active (i.e not consumed all rows originating
+       *   from parent rows) and we are not in the set of parents 
+       *   for any active child:
+       *
+       * Then, this is a position that execSCAN_NEXTREQ should continue
+       */
+      if (nodePtr.p->m_state == TreeNode::TN_ACTIVE &&
+         !ancestors_of_active.get (nodePtr.p->m_node_no))
+      {
+        jam();
+        DEBUG("Add 'active' m_node_no: " << nodePtr.p->m_node_no);
+        registerActiveCursor(requestPtr, nodePtr);
+        ancestors_of_active.bitOR(nodePtr.p->m_ancestors);
+      }
+    }
+  } // if (RT_REPEAT_SCAN_RESULT)
+
+  DEBUG("Calculated 'm_active_nodes': " << requestPtr.p->m_active_nodes.rep.data[0]);
+}
+
 void
 Dbspj::sendConf(Signal* signal, Ptr<Request> requestPtr, bool is_complete)
 {
@@ -1283,51 +1398,60 @@ void
 Dbspj::releaseScanBuffers(Ptr<Request> requestPtr)
 {
   Ptr<TreeNode> treeNodePtr;
-  {
-    Local_TreeNode_list list(m_treenode_pool, requestPtr.p->m_nodes);
-    list.first(treeNodePtr);
-  }
-
-  /**
-   * This is calling recursive function...buh!
-   *   but i can't figure out how to do it someother way...
-   */
+  Local_TreeNode_list list(m_treenode_pool, requestPtr.p->m_nodes);
+  TreeNodeBitMask ancestors_of_active;
 
-  /**
-   * This recursive function will register nodes to be notified
-   *   about SCAN_NEXTREQ.
-   *
-   * Clear it first, so that nodes won't end up in it several times...
-   */
-  requestPtr.p->m_cursor_nodes.init();
+  for (list.last(treeNodePtr); !treeNodePtr.isNull(); list.prev(treeNodePtr))
+  {
+    /**
+     * If there are no active children,
+     *   then we can cleanup in our sub-branch
+     */
+    if (!ancestors_of_active.get(treeNodePtr.p->m_node_no))
+    {
+      if (treeNodePtr.p->m_bits & TreeNode::T_ROW_BUFFER)
+      {
+        jam();
+        releaseNodeRows(requestPtr, treeNodePtr);
+      }
+      
+      /**
+       * Cleanup ACTIVE nodes fetching more rows in a NEXTREQ,
+       * or nodes being in 'm_active_nodes' as they will 'repeat'.
+       * (and then become active)
+       */
+      if (treeNodePtr.p->m_state == TreeNode::TN_ACTIVE ||
+          requestPtr.p->m_active_nodes.get(treeNodePtr.p->m_node_no))
+      {
+        jam();
+        cleanupChildBranch(requestPtr, treeNodePtr);
+      }
+    }
 
+    /**
+      * Collect ancestors of all nodes which are, or will
+      * become active in NEXTREQ (possibly repeated)
+      */
+    if (treeNodePtr.p->m_state == TreeNode::TN_ACTIVE ||
+        requestPtr.p->m_active_nodes.get(treeNodePtr.p->m_node_no))
+    {
+      ancestors_of_active.bitOR(treeNodePtr.p->m_ancestors);
+    }
+  }
   /**
    * Needs to be atleast 1 active otherwise we should have
    *   taken the cleanup "path" in batchComplete
    */
-  ndbrequire(releaseScanBuffers(requestPtr, treeNodePtr) > 0);
+  ndbrequire(requestPtr.p->m_cnt_active >= 1);
 }
 
 void
-Dbspj::mark_active(Ptr<Request> requestPtr,
-                   Ptr<TreeNode> treeNodePtr,
-                   bool value)
+Dbspj::registerActiveCursor(Ptr<Request> requestPtr, Ptr<TreeNode> treeNodePtr)
 {
   Uint32 bit = treeNodePtr.p->m_node_no;
-  if (value)
-  {
-    ndbassert(requestPtr.p->m_active_nodes.get(bit) == false);
-  }
-  else
-  {
-    ndbassert(requestPtr.p->m_active_nodes.get(bit) == true);
-  }
-  requestPtr.p->m_active_nodes.set(bit, value);
-}
+  ndbrequire(!requestPtr.p->m_active_nodes.get(bit));
+  requestPtr.p->m_active_nodes.set(bit);
 
-void
-Dbspj::registerCursor(Ptr<Request> requestPtr, Ptr<TreeNode> treeNodePtr)
-{
   Local_TreeNodeCursor_list list(m_treenode_pool, requestPtr.p->m_cursor_nodes);
 #ifdef VM_TRACE
   {
@@ -1341,12 +1465,9 @@ Dbspj::registerCursor(Ptr<Request> reque
   list.add(treeNodePtr);
 }
 
-Uint32
-Dbspj::releaseScanBuffers(Ptr<Request> requestPtr,
-                          Ptr<TreeNode> treeNodePtr)
+void
+Dbspj::cleanupChildBranch(Ptr<Request> requestPtr, Ptr<TreeNode> treeNodePtr)
 {
-  Uint32 active_child = 0;
-
   LocalArenaPoolImpl pool(requestPtr.p->m_arena, m_dependency_map_pool);
   Local_dependency_map list(pool, treeNodePtr.p->m_dependent_nodes);
   Dependency_map::ConstDataBufferIterator it;
@@ -1354,40 +1475,15 @@ Dbspj::releaseScanBuffers(Ptr<Request> r
   {
     jam();
     Ptr<TreeNode> childPtr;
-    m_treenode_pool.getPtr(childPtr, * it.data);
-    active_child += releaseScanBuffers(requestPtr, childPtr);
-  }
-
-  const bool active = treeNodePtr.p->m_state == TreeNode::TN_ACTIVE;
-  if (active_child == 0)
-  {
-    jam();
-
-    /**
-     * If there is no active children,
-     *   then we can release our own (optionally) buffered rows
-     */
-    if (treeNodePtr.p->m_bits & TreeNode::T_ROW_BUFFER)
-    {
-      jam();
-      releaseNodeRows(requestPtr, treeNodePtr);
-    }
-
-    /**
-     * If we have no active children,
-     *   and we ourself is active (i.e not consumed all rows originating
-     *   from parent rows)
-     *
-     * Then, this is a position that execSCAN_NEXTREQ should continue
-     */
-    if (active)
+    m_treenode_pool.getPtr(childPtr, *it.data);
+    if (childPtr.p->m_info->m_parent_batch_cleanup != 0)
     {
       jam();
-      registerCursor(requestPtr, treeNodePtr);
+      (this->*(childPtr.p->m_info->m_parent_batch_cleanup))(requestPtr,
+                                                            childPtr);
     }
+    cleanupChildBranch(requestPtr,childPtr);
   }
-
-  return active_child + (active ? 1 : 0);
 }
 
 void
@@ -1672,7 +1768,7 @@ Dbspj::complete(Signal* signal, Ptr<Requ
 void
 Dbspj::cleanup(Ptr<Request> requestPtr)
 {
-  ndbrequire(requestPtr.p->m_active_nodes.isclear());
+  ndbrequire(requestPtr.p->m_cnt_active == 0);
   {
     Ptr<TreeNode> nodePtr;
     Local_TreeNode_list list(m_treenode_pool, requestPtr.p->m_nodes);
@@ -1949,16 +2045,46 @@ Dbspj::execSCAN_NEXTREQ(Signal* signal)
     Ptr<TreeNode> treeNodePtr;
     Local_TreeNodeCursor_list list(m_treenode_pool,
                                    requestPtr.p->m_cursor_nodes);
+    Uint32 cnt_active = 0;
+
     for (list.first(treeNodePtr); !treeNodePtr.isNull(); list.next(treeNodePtr))
     {
-      jam();
-      ndbrequire(treeNodePtr.p->m_state == TreeNode::TN_ACTIVE);
-      ndbrequire(treeNodePtr.p->m_info != 0 &&
-                 treeNodePtr.p->m_info->m_execSCAN_NEXTREQ != 0);
-      (this->*(treeNodePtr.p->m_info->m_execSCAN_NEXTREQ))(signal,
-                                                           requestPtr,
-                                                           treeNodePtr);
+      if (treeNodePtr.p->m_state == TreeNode::TN_ACTIVE)
+      {
+        jam();
+        DEBUG("SCAN_NEXTREQ on TreeNode: " << treeNodePtr.i
+           << ",  m_node_no: " << treeNodePtr.p->m_node_no
+           << ", w/ m_parentPtrI: " << treeNodePtr.p->m_parentPtrI);
+
+        ndbrequire(treeNodePtr.p->m_info != 0 &&
+                   treeNodePtr.p->m_info->m_execSCAN_NEXTREQ != 0);
+        (this->*(treeNodePtr.p->m_info->m_execSCAN_NEXTREQ))(signal,
+                                                             requestPtr,
+                                                             treeNodePtr);
+        cnt_active++;
+      }
+      else
+      {
+        /**
+         * Restart any other scans not being 'TN_ACTIVE'
+         * (Only effective if 'RT_REPEAT_SCAN_RESULT')
+         */
+        jam();
+        ndbrequire(requestPtr.p->m_bits & Request::RT_REPEAT_SCAN_RESULT);
+        DEBUG("  Restart TreeNode: " << treeNodePtr.i
+           << ",  m_node_no: " << treeNodePtr.p->m_node_no
+           << ", w/ m_parentPtrI: " << treeNodePtr.p->m_parentPtrI);
+
+        ndbrequire(treeNodePtr.p->m_info != 0 &&
+                   treeNodePtr.p->m_info->m_parent_batch_complete !=0 );
+        (this->*(treeNodePtr.p->m_info->m_parent_batch_complete))(signal,
+                                                                  requestPtr,
+                                                                  treeNodePtr);
+      }
     }
+    /* Expected only a single ACTIVE TreeNode among the cursors */
+    ndbrequire(cnt_active == 1 ||
+               !(requestPtr.p->m_bits & Request::RT_REPEAT_SCAN_RESULT));
   }
 }
 
@@ -2559,6 +2685,8 @@ Dbspj::g_LookupOpInfo =
   0, // execSCAN_FRAGCONF
   &Dbspj::lookup_parent_row,
   &Dbspj::lookup_parent_batch_complete,
+  0, // Dbspj::lookup_parent_batch_repeat,
+  0, // Dbspj::lookup_parent_batch_cleanup,
   0, // Dbspj::lookup_execSCAN_NEXTREQ
   0, // Dbspj::lookup_complete
   &Dbspj::lookup_abort,
@@ -3613,6 +3741,8 @@ Dbspj::g_ScanFragOpInfo =
   &Dbspj::scanFrag_execSCAN_FRAGCONF,
   0, // parent row
   0, // parent batch complete
+  0, // parent batch repeat
+  0, // Dbspj::scanFrag_parent_batch_cleanup,
   &Dbspj::scanFrag_execSCAN_NEXTREQ,
   0, // Dbspj::scanFrag_complete
   &Dbspj::scanFrag_abort,
@@ -3716,13 +3846,7 @@ Dbspj::scanFrag_build(Build_context& ctx
     }
 
     ctx.m_scan_cnt++;
-    /**
-     * In the scenario with only 1 scan in tree,
-     *   register cursor here, so we don't need to search for in after build
-     * If m_scan_cnt > 1,
-     *   then this list will simply be cleared after build
-     */
-    registerCursor(requestPtr, treeNodePtr);
+    ctx.m_scans.set(treeNodePtr.p->m_node_no);
 
     if (ctx.m_start_signal)
     {
@@ -3833,7 +3957,6 @@ Dbspj::scanFrag_send(Signal* signal,
 
   requestPtr.p->m_outstanding++;
   requestPtr.p->m_cnt_active++;
-  mark_active(requestPtr, treeNodePtr, true);
   treeNodePtr.p->m_state = TreeNode::TN_ACTIVE;
   Ptr<ScanFragHandle> scanFragHandlePtr;
   m_scanfraghandle_pool.getPtr(scanFragHandlePtr, treeNodePtr.p->
@@ -3966,7 +4089,6 @@ Dbspj::scanFrag_execSCAN_FRAGREF(Signal*
   ndbrequire(requestPtr.p->m_outstanding);
   requestPtr.p->m_outstanding--;
   treeNodePtr.p->m_state = TreeNode::TN_INACTIVE;
-  mark_active(requestPtr, treeNodePtr, false);
 
   abort(signal, requestPtr, errCode);
 }
@@ -4014,7 +4136,6 @@ Dbspj::scanFrag_execSCAN_FRAGCONF(Signal
     ndbrequire(requestPtr.p->m_cnt_active);
     requestPtr.p->m_cnt_active--;
     treeNodePtr.p->m_state = TreeNode::TN_INACTIVE;
-    mark_active(requestPtr, treeNodePtr, false);
     scanFragHandlePtr.p->m_state = ScanFragHandle::SFH_COMPLETE;
   }
   else
@@ -4170,6 +4291,8 @@ Dbspj::g_ScanIndexOpInfo =
   &Dbspj::scanIndex_execSCAN_FRAGCONF,
   &Dbspj::scanIndex_parent_row,
   &Dbspj::scanIndex_parent_batch_complete,
+  &Dbspj::scanIndex_parent_batch_repeat,
+  &Dbspj::scanIndex_parent_batch_cleanup,
   &Dbspj::scanIndex_execSCAN_NEXTREQ,
   &Dbspj::scanIndex_complete,
   &Dbspj::scanIndex_abort,
@@ -4282,14 +4405,23 @@ Dbspj::scanIndex_build(Build_context& ct
       nodePtr.i = nodePtr.p->m_parentPtrI;
     }
 
-    ctx.m_scan_cnt++;
     /**
-     * In the scenario with only 1 scan in tree,
-     *   register cursor here, so we don't need to search for in after build
-     * If m_scan_cnt > 1,
-     *   then this list will simply be cleared after build
+     * If there exist other scan TreeNodes not being among 
+     * my ancestors, results from this scanIndex may be repeated 
+     * as part of an X-scan.
+     *
+     * NOTE: The scan nodes being along the left deep ancestor chain
+     *       are not 'repeatable' as they are driving the
+     *       repeated X-scan and are thus not repeated themselves.
      */
-    registerCursor(requestPtr, treeNodePtr);
+    if (requestPtr.p->m_bits & Request::RT_REPEAT_SCAN_RESULT &&
+       !treeNodePtr.p->m_ancestors.contains(ctx.m_scans))
+    {
+      nodePtr.p->m_bits |= TreeNode::T_SCAN_REPEATABLE;
+    }
+
+    ctx.m_scan_cnt++;
+    ctx.m_scans.set(treeNodePtr.p->m_node_no);
 
     return 0;
   } while (0);
@@ -4317,6 +4449,7 @@ Dbspj::parseScanIndex(Build_context& ctx
     data.m_fragments.init();
     data.m_frags_outstanding = 0;
     data.m_frags_not_complete = 0;
+    data.m_batch_chunks = 0;
 
     err = parseDA(ctx, requestPtr, treeNodePtr,
                   tree, treeBits, param, paramBits);
@@ -4860,6 +4993,32 @@ Dbspj::scanIndex_parent_batch_complete(S
 }
 
 void
+Dbspj::scanIndex_parent_batch_repeat(Signal* signal,
+                                      Ptr<Request> requestPtr,
+                                      Ptr<TreeNode> treeNodePtr)
+{
+  jam();
+  ScanIndexData& data = treeNodePtr.p->m_scanindex_data;
+
+  DEBUG("scanIndex_parent_batch_repeat(), m_node_no: " << treeNodePtr.p->m_node_no
+        << ", m_batch_chunks: " << data.m_batch_chunks);
+
+  /**
+   * Register index-scans to be restarted if we didn't get all
+   * previously fetched parent related child rows in a single batch.
+   */
+  if (data.m_batch_chunks > 1)
+  {
+    jam();
+    DEBUG("Register TreeNode for restart, m_node_no: " << treeNodePtr.p->m_node_no);
+    ndbrequire(treeNodePtr.p->m_state != TreeNode::TN_ACTIVE);
+    registerActiveCursor(requestPtr, treeNodePtr);
+    data.m_frags_not_complete = 1;
+    data.m_batch_chunks = 0;
+  }
+}
+
+void
 Dbspj::scanIndex_send(Signal* signal,
                       Ptr<Request> requestPtr,
                       Ptr<TreeNode> treeNodePtr)
@@ -4884,12 +5043,24 @@ Dbspj::scanIndex_send(Signal* signal,
   }
 
   /**
-   * keys,
-   * - sliced out to each ScanFragHandle => release = true
-   * - all kept on first ScanFragHandle => release = false
+   * if (m_bits & prunemask):
+   * - Range keys sliced out to each ScanFragHandle
+   * - Else, range keys kept on first (and only) ScanFragHandle
    */
   Uint32 prunemask = TreeNode::T_PRUNE_PATTERN | TreeNode::T_CONST_PRUNE;
-  bool release = (treeNodePtr.p->m_bits & prunemask) != 0;
+
+  /**
+   * Don't release keyInfo if it may be sent multiple times, either:
+   *   - Not pruned -> same keyInfo goes to all datanodes.
+   *   - Result handling is REPEAT_SCAN_RESULT and same batch may be 
+   *     repeated multiple times due to incomplete bushy X-scans.
+   *     (by ::scanIndex_parent_batch_repeat())
+   *
+   * When not released, ::scanIndex_parent_batch_cleanup() will 
+   * eventually release them when preparing arrival of a new parent batch.
+   */
+  const bool release = ((treeNodePtr.p->m_bits & prunemask) != 0 &&
+                        (treeNodePtr.p->m_bits & TreeNode::T_SCAN_REPEATABLE) == 0);
 
   ScanFragReq* req = reinterpret_cast<ScanFragReq*>(signal->getDataPtrSend());
   memcpy(req, org, sizeof(data.m_scanFragReq));
@@ -4901,18 +5072,16 @@ Dbspj::scanIndex_send(Signal* signal,
   Ptr<ScanFragHandle> fragPtr;
   Local_ScanFragHandle_list list(m_scanfraghandle_pool, data.m_fragments);
 
-  Uint32 keyInfoPtrI;
-  if (release == false)
+  Uint32 keyInfoPtrI = RNIL;
+  list.first(fragPtr);
+  if ((treeNodePtr.p->m_bits & prunemask) == 0)
   {
     jam();
-    list.first(fragPtr);
     keyInfoPtrI = fragPtr.p->m_rangePtrI;
     ndbrequire(keyInfoPtrI != RNIL);
-    fragPtr.p->m_rangePtrI = RNIL;
   }
 
   Uint32 batchRange = 0;
-  list.first(fragPtr);
   for (Uint32 i = 0; i < cnt && !fragPtr.isNull(); list.next(fragPtr))
   {
     jam();
@@ -4928,7 +5097,7 @@ Dbspj::scanIndex_send(Signal* signal,
     req->senderData = fragPtr.i;
     req->fragmentNoKeyLen = fragPtr.p->m_fragId;
 
-    if (release)
+    if ((treeNodePtr.p->m_bits & prunemask))
     {
       jam();
       keyInfoPtrI = fragPtr.p->m_rangePtrI;
@@ -4938,8 +5107,9 @@ Dbspj::scanIndex_send(Signal* signal,
         fragPtr.p->m_state = ScanFragHandle::SFH_COMPLETE;
         continue;
       }
-      fragPtr.p->m_rangePtrI = RNIL;
-
+    }
+    if (release)
+    {
       /**
        * If we'll use sendSignal() and we need to send the attrInfo several
        *   times, we need to copy them
@@ -4948,7 +5118,6 @@ Dbspj::scanIndex_send(Signal* signal,
       ndbrequire(dupSection(tmp, attrInfoPtrI)); // TODO handle error
       attrInfoPtrI = tmp;
     }
-    fragPtr.p->reset_ranges();
 
     req->variableData[0] = batchRange;
     getSection(handle.m_ptr[0], attrInfoPtrI);
@@ -4982,6 +5151,8 @@ Dbspj::scanIndex_send(Signal* signal,
       jam();
       sendSignal(ref, GSN_SCAN_FRAGREQ, signal,
                  NDB_ARRAY_SIZE(data.m_scanFragReq), JBB, &handle);
+      fragPtr.p->m_rangePtrI = RNIL;
+      fragPtr.p->reset_ranges();
     }
     else
     {
@@ -4997,14 +5168,6 @@ Dbspj::scanIndex_send(Signal* signal,
     batchRange += bs_rows;
   }
 
-  if (release == false)
-  {
-    jam();
-    // only supported for now...
-    ndbrequire(treeNodePtr.p->m_bits & TreeNode::T_SCAN_PARALLEL);
-    releaseSection(keyInfoPtrI);
-  }
-
   if (treeNodePtr.p->m_bits & TreeNode::T_SCAN_PARALLEL)
   {
     ndbrequire(data.m_frags_outstanding == data.m_frags_not_complete);
@@ -5014,9 +5177,9 @@ Dbspj::scanIndex_send(Signal* signal,
     ndbrequire(data.m_frags_outstanding == 1);
   }
 
+  data.m_batch_chunks = 1;
   requestPtr.p->m_cnt_active++;
   requestPtr.p->m_outstanding++;
-  mark_active(requestPtr, treeNodePtr, true);
   treeNodePtr.p->m_state = TreeNode::TN_ACTIVE;
 }
 
@@ -5117,7 +5280,6 @@ Dbspj::scanIndex_execSCAN_FRAGCONF(Signa
       ndbrequire(requestPtr.p->m_cnt_active);
       requestPtr.p->m_cnt_active--;
       treeNodePtr.p->m_state = TreeNode::TN_INACTIVE;
-      mark_active(requestPtr, treeNodePtr, false);
     }
   }
 
@@ -5180,7 +5342,6 @@ Dbspj::scanIndex_execSCAN_FRAGREF(Signal
     ndbrequire(requestPtr.p->m_cnt_active);
     requestPtr.p->m_cnt_active--;
     treeNodePtr.p->m_state = TreeNode::TN_INACTIVE;
-    mark_active(requestPtr, treeNodePtr, false);
   }
 
   if (data.m_frags_outstanding == 0)
@@ -5265,6 +5426,8 @@ Dbspj::scanIndex_execSCAN_NEXTREQ(Signal
    *   so require that we did actually send something
    */
   ndbrequire(data.m_frags_outstanding > 0);
+  ndbrequire(data.m_batch_chunks > 0);
+  data.m_batch_chunks++;
 
   requestPtr.p->m_outstanding++;
   ndbassert(treeNodePtr.p->m_state == TreeNode::TN_ACTIVE);
@@ -5452,26 +5615,25 @@ Dbspj::scanIndex_execNODE_FAILREP(Signal
     ndbrequire(requestPtr.p->m_cnt_active);
     requestPtr.p->m_cnt_active--;
     treeNodePtr.p->m_state = TreeNode::TN_INACTIVE;
-    mark_active(requestPtr, treeNodePtr, false);
   }
 
   return sum;
 }
 
 void
-Dbspj::scanIndex_cleanup(Ptr<Request> requestPtr,
-                         Ptr<TreeNode> treeNodePtr)
+Dbspj::scanIndex_release_rangekeys(Ptr<Request> requestPtr,
+                                   Ptr<TreeNode> treeNodePtr)
 {
+  jam();
+  DEBUG("scanIndex_release_rangekeys(), m_node_no: " << treeNodePtr.p->m_node_no);
+
   ScanIndexData& data = treeNodePtr.p->m_scanindex_data;
   Local_ScanFragHandle_list list(m_scanfraghandle_pool, data.m_fragments);
-  if (requestPtr.p->m_state & Request::RS_ABORTING)
+  Ptr<ScanFragHandle> fragPtr;
+
+  if (treeNodePtr.p->m_bits & TreeNode::T_PRUNE_PATTERN)
   {
-    /**
-     * If we're aborting...there can be keys attached...that has not
-     *   (and will not) be sent...release them to avoid memleak
-     */
     jam();
-    Ptr<ScanFragHandle> fragPtr;
     for (list.first(fragPtr); !fragPtr.isNull(); list.next(fragPtr))
     {
       if (fragPtr.p->m_rangePtrI != RNIL)
@@ -5479,20 +5641,52 @@ Dbspj::scanIndex_cleanup(Ptr<Request> re
         releaseSection(fragPtr.p->m_rangePtrI);
         fragPtr.p->m_rangePtrI = RNIL;
       }
+      fragPtr.p->reset_ranges();
     }
   }
   else
   {
-#ifdef VM_TRACE
-    Ptr<ScanFragHandle> fragPtr;
-    for (list.first(fragPtr); !fragPtr.isNull(); list.next(fragPtr))
+    jam();
+    list.first(fragPtr);
+    if (fragPtr.p->m_rangePtrI != RNIL)
     {
-      ndbrequire(fragPtr.p->m_rangePtrI == RNIL);
+      releaseSection(fragPtr.p->m_rangePtrI);
+      fragPtr.p->m_rangePtrI = RNIL;
     }
-#endif
+    fragPtr.p->reset_ranges();
   }
-  list.remove();
+}
+
+/**
+ * Parent batch has completed, and will not refetch (X-joined) results
+ * from its children. Release & reset range keys which are unsent or we
+ * have kept for possible resubmits.
+ */
+void
+Dbspj::scanIndex_parent_batch_cleanup(Ptr<Request> requestPtr,
+                                      Ptr<TreeNode> treeNodePtr)
+{
+  DEBUG("scanIndex_parent_batch_cleanup");
+  scanIndex_release_rangekeys(requestPtr,treeNodePtr);
+}
 
+void
+Dbspj::scanIndex_cleanup(Ptr<Request> requestPtr,
+                         Ptr<TreeNode> treeNodePtr)
+{
+  ScanIndexData& data = treeNodePtr.p->m_scanindex_data;
+  DEBUG("scanIndex_cleanup");
+
+  /**
+   * Range keys have been collected wherever there are uncompleted
+   * parent batches...release them to avoid memleak.
+   */
+  scanIndex_release_rangekeys(requestPtr,treeNodePtr);
+
+  {
+    Local_ScanFragHandle_list list(m_scanfraghandle_pool, data.m_fragments);
+    list.remove();
+  }
   if (treeNodePtr.p->m_bits & TreeNode::T_PRUNE_PATTERN)
   {
     jam();
@@ -6085,7 +6279,7 @@ Dbspj::expandS(Uint32 & _dst, Local_patt
     case QueryPattern::P_PARENT:
       jam();
       // P_PARENT is a prefix to another pattern token
-      // that permits code to access rows from earlier than imediate parent.
+      // that permits code to access rows from earlier than immediate parent.
       // val is no of levels to move up the tree
       err = appendFromParent(dst, pattern, it, val, row, hasNull);
       break;
@@ -6150,7 +6344,7 @@ Dbspj::expandL(Uint32 & _dst, Local_patt
     case QueryPattern::P_PARENT:
       jam();
       // P_PARENT is a prefix to another pattern token
-      // that permits code to access rows from earlier than imediate parent
+      // that permits code to access rows from earlier than immediate parent
       // val is no of levels to move up the tree
       err = appendFromParent(dst, pattern, it, val, row, hasNull);
       break;
@@ -6360,6 +6554,13 @@ Dbspj::parseDA(Build_context& ctx,
 
   do
   {
+    if (treeBits & DABits::NI_REPEAT_SCAN_RESULT)
+    {
+      jam();
+      DEBUG("use REPEAT_SCAN_RESULT when returning results");
+      requestPtr.p->m_bits |= Request::RT_REPEAT_SCAN_RESULT;
+    } // DABits::NI_REPEAT_SCAN_RESULT
+
     if (treeBits & DABits::NI_HAS_PARENT)
     {
       jam();
@@ -6405,6 +6606,10 @@ Dbspj::parseDA(Build_context& ctx,
         }
         parentPtr.p->m_bits &= ~(Uint32)TreeNode::T_LEAF;
         treeNodePtr.p->m_parentPtrI = parentPtr.i;
+
+        // Build Bitmask of all ancestors to treeNode
+        treeNodePtr.p->m_ancestors = parentPtr.p->m_ancestors;
+        treeNodePtr.p->m_ancestors.set(parentPtr.p->m_node_no);
       }
 
       if (unlikely(err != 0))

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2011-05-03 13:48:54 +0000
@@ -8407,7 +8407,7 @@ void Dbtc::execNODE_FAILREP(Signal* sign
     Uint32 ok = 1;
     for(Uint32 n = c_alive_nodes.find_first();
         n != c_alive_nodes.NotFound;
-        n = c_alive_nodes.find_next(n))
+        n = c_alive_nodes.find_next(n+1))
     {
       if (!ndbd_deferred_unique_constraints(getNodeInfo(n).m_version))
       {

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-05-04 11:58:38 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-05-17 12:47:21 +0000
@@ -723,9 +723,10 @@ struct Fragrecord {
   Uint32 m_free_page_id_list;
   DynArr256::Head m_page_map;
   DLFifoList<Page>::Head thFreeFirst;   // pages with atleast 1 free record
-  
+
   Uint32 m_lcp_scan_op;
-  Uint32 m_lcp_keep_list;
+  Local_key m_lcp_keep_list_head;
+  Local_key m_lcp_keep_list_tail;
 
   enum FragState
   { FS_FREE
@@ -1439,9 +1440,8 @@ typedef Ptr<HostBuffer> HostBufferPtr;
     STATIC_CONST( MM_SHRINK   = 0x00200000 ); // Has MM part shrunk
     STATIC_CONST( MM_GROWN    = 0x00400000 ); // Has MM part grown
     STATIC_CONST( FREED       = 0x00800000 ); // Is freed
+    STATIC_CONST( FREE        = 0x00800000 ); // alias
     STATIC_CONST( LCP_SKIP    = 0x01000000 ); // Should not be returned in LCP
-    STATIC_CONST( LCP_KEEP    = 0x02000000 ); // Should be returned in LCP
-    STATIC_CONST( FREE        = 0x02800000 ); // Is free
     STATIC_CONST( VAR_PART    = 0x04000000 ); // Is there a varpart
     STATIC_CONST( REORG_MOVE  = 0x08000000 );
 
@@ -3216,6 +3216,8 @@ private:
   Uint32* get_default_ptr(const Tablerec*, Uint32&);
   Uint32 get_len(Ptr<Page>* pagePtr, Var_part_ref ref);
 
+  STATIC_CONST( COPY_TUPLE_HEADER32 = 4 );
+
   Tuple_header* alloc_copy_tuple(const Tablerec* tabPtrP, Local_key* ptr){
     Uint32 * dst = c_undo_buffer.alloc_copy_tuple(ptr,
                                                   tabPtrP->total_rec_size);
@@ -3225,7 +3227,7 @@ private:
     bzero(dst, tabPtrP->total_rec_size);
 #endif
     Uint32 count = tabPtrP->m_no_of_attributes;
-    ChangeMask * mask = (ChangeMask*)(dst);
+    ChangeMask * mask = (ChangeMask*)(dst + COPY_TUPLE_HEADER32);
     mask->m_cols = count;
     return (Tuple_header*)(mask->end_of_mask(count));
   }
@@ -3235,11 +3237,12 @@ private:
   }
 
   Tuple_header * get_copy_tuple(Uint32 * rawptr) {
-    return (Tuple_header*)(get_change_mask_ptr(rawptr)->end_of_mask());
+    return (Tuple_header*)
+      (get_change_mask_ptr(rawptr)->end_of_mask());
   }
 
   ChangeMask * get_change_mask_ptr(Uint32 * rawptr) {
-    return (ChangeMask*)(rawptr);
+    return (ChangeMask*)(rawptr + COPY_TUPLE_HEADER32);
   }
 
   Tuple_header* get_copy_tuple(const Local_key* ptr){
@@ -3251,7 +3254,7 @@ private:
     Uint32 * tmp = raw - (1 + ((tabP->m_no_of_attributes + 31) >> 5));
     ChangeMask* mask = (ChangeMask*)tmp;
     assert(mask->end_of_mask() == raw);
-    assert(get_copy_tuple(tmp) == copytuple);
+    assert(get_copy_tuple(tmp - COPY_TUPLE_HEADER32) == copytuple);
     return mask;
   }
 
@@ -3383,10 +3386,10 @@ private:
                          Page_cache_client::Request,
                          OperationrecPtr);
   int retrieve_log_page(Signal*, FragrecordPtr, OperationrecPtr);
-  
-  void dealloc_tuple(Signal* signal, Uint32, Page*, Tuple_header*, 
-		     Operationrec*, Fragrecord*, Tablerec*);
-  
+
+  void dealloc_tuple(Signal* signal, Uint32, Page*, Tuple_header*,
+		     KeyReqStruct*, Operationrec*, Fragrecord*, Tablerec*);
+
   int handle_size_change_after_update(KeyReqStruct* req_struct,
 				      Tuple_header* org,
 				      Operationrec*,
@@ -3412,7 +3415,31 @@ private:
   void check_page_map(Fragrecord*);
   bool find_page_id_in_list(Fragrecord*, Uint32 pid);
 #endif
-  void handle_lcp_keep(Signal*, Fragrecord*, ScanOp*, Uint32 rowid);
+  void handle_lcp_keep(Signal*, Fragrecord*, ScanOp*);
+  void handle_lcp_keep_commit(const Local_key*,
+                              KeyReqStruct *,
+                              Operationrec*, Fragrecord*, Tablerec*);
+
+  void setup_lcp_read_copy_tuple( KeyReqStruct *,
+                                  Operationrec*,
+                                  Fragrecord*,
+                                  Tablerec*);
+
+  bool isCopyTuple(Uint32 pageid, Uint32 pageidx) const {
+    return (pageidx & (Uint16(1) << 15)) != 0;
+  }
+
+  void setCopyTuple(Uint32& pageid, Uint16& pageidx) const {
+    assert(!isCopyTuple(pageid, pageidx));
+    pageidx |= (Uint16(1) << 15);
+    assert(isCopyTuple(pageid, pageidx));
+  }
+
+  void clearCopyTuple(Uint32& pageid, Uint16& pageidx) const {
+    assert(isCopyTuple(pageid, pageidx));
+    pageidx &= ~(Uint16(1) << 15);
+    assert(!isCopyTuple(pageid, pageidx));
+  }
 };
 
 #if 0

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupCommit.cpp	2011-05-07 06:17:02 +0000
@@ -51,15 +51,8 @@ void Dbtup::execTUP_DEALLOCREQ(Signal* s
     PagePtr pagePtr;
     Tuple_header* ptr= (Tuple_header*)get_ptr(&pagePtr, &tmp, regTabPtr.p);
 
-    ndbassert(ptr->m_header_bits & Tuple_header::FREE);
+    ndbassert(ptr->m_header_bits & Tuple_header::FREED);
 
-    if (ptr->m_header_bits & Tuple_header::LCP_KEEP)
-    {
-      ndbassert(! (ptr->m_header_bits & Tuple_header::FREED));
-      ptr->m_header_bits |= Tuple_header::FREED;
-      return;
-    }
-    
     if (regTabPtr.p->m_attributes[MM].m_no_of_varsize +
         regTabPtr.p->m_attributes[MM].m_no_of_dynamic)
     {
@@ -157,12 +150,12 @@ Dbtup::dealloc_tuple(Signal* signal,
 		     Uint32 gci,
 		     Page* page,
 		     Tuple_header* ptr, 
+                     KeyReqStruct * req_struct,
 		     Operationrec* regOperPtr, 
 		     Fragrecord* regFragPtr, 
 		     Tablerec* regTabPtr)
 {
   Uint32 lcpScan_ptr_i= regFragPtr->m_lcp_scan_op;
-  Uint32 lcp_keep_list = regFragPtr->m_lcp_keep_list;
 
   Uint32 bits = ptr->m_header_bits;
   Uint32 extra_bits = Tuple_header::FREED;
@@ -189,9 +182,15 @@ Dbtup::dealloc_tuple(Signal* signal,
     if (!is_rowid_lcp_scanned(rowid, *scanOp.p))
     {
       jam();
-      extra_bits = Tuple_header::LCP_KEEP; // Note REMOVE FREE
-      ptr->m_operation_ptr_i = lcp_keep_list;
-      regFragPtr->m_lcp_keep_list = rowid.ref();
+
+      /**
+       * We're committing a delete, on a row that should
+       *   be part of LCP. Copy original row into copy-tuple
+       *   and add this copy-tuple to lcp-keep-list
+       *
+       */
+      handle_lcp_keep_commit(&rowid,
+                             req_struct, regOperPtr, regFragPtr, regTabPtr);
     }
   }
   
@@ -204,6 +203,69 @@ Dbtup::dealloc_tuple(Signal* signal,
   }
 }
 
+void
+Dbtup::handle_lcp_keep_commit(const Local_key* rowid,
+                              KeyReqStruct * req_struct,
+                              Operationrec * opPtrP,
+                              Fragrecord * regFragPtr,
+                              Tablerec * regTabPtr)
+{
+  bool disk = false;
+  Uint32 sizes[4];
+  Uint32 * copytuple = get_copy_tuple_raw(&opPtrP->m_copy_tuple_location);
+  Tuple_header * dst = get_copy_tuple(copytuple);
+  Tuple_header * org = req_struct->m_tuple_ptr;
+  if (regTabPtr->need_expand(disk))
+  {
+    setup_fixed_part(req_struct, opPtrP, regTabPtr);
+    req_struct->m_tuple_ptr = dst;
+    expand_tuple(req_struct, sizes, org, regTabPtr, disk);
+    shrink_tuple(req_struct, sizes+2, regTabPtr, disk);
+  }
+  else
+  {
+    memcpy(dst, org, 4*regTabPtr->m_offsets[MM].m_fix_header_size);
+  }
+  dst->m_header_bits |= Tuple_header::COPY_TUPLE;
+
+  /**
+   * Store original row-id in copytuple[0,1]
+   * Store next-ptr in copytuple[2,3] (set to RNIL/RNIL)
+   *
+   */
+  assert(sizeof(Local_key) == 8);
+  memcpy(copytuple+0, rowid, sizeof(Local_key));
+
+  Local_key nil;
+  nil.setNull();
+  memcpy(copytuple+2, &nil, sizeof(nil));
+
+  /**
+   * Link it to list
+   */
+  if (regFragPtr->m_lcp_keep_list_tail.isNull())
+  {
+    jam();
+    regFragPtr->m_lcp_keep_list_head = opPtrP->m_copy_tuple_location;
+  }
+  else
+  {
+    jam();
+    Uint32 * tail = get_copy_tuple_raw(&regFragPtr->m_lcp_keep_list_tail);
+    Local_key nextptr;
+    memcpy(&nextptr, tail+2, sizeof(Local_key));
+    ndbassert(nextptr.isNull());
+    nextptr = opPtrP->m_copy_tuple_location;
+    memcpy(tail+2, &nextptr, sizeof(Local_key));
+  }
+  regFragPtr->m_lcp_keep_list_tail = opPtrP->m_copy_tuple_location;
+
+  /**
+   * And finally clear m_copy_tuple_location so that it won't be freed
+   */
+  opPtrP->m_copy_tuple_location.setNull();
+}
+
 #if 0
 static void dump_buf_hex(unsigned char *p, Uint32 bytes)
 {
@@ -786,7 +848,7 @@ skip_disk:
 	ndbassert(tuple_ptr->m_header_bits & Tuple_header::DISK_PART);
       }
       dealloc_tuple(signal, gci_hi, page.p, tuple_ptr,
-		    regOperPtr.p, regFragPtr.p, regTabPtr.p); 
+		    &req_struct, regOperPtr.p, regFragPtr.p, regTabPtr.p);
     }
   } 
 

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-05-04 14:45:46 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2011-05-17 12:47:21 +0000
@@ -635,6 +635,17 @@ void Dbtup::execTUPKEYREQ(Signal* signal
      goto do_insert;
    }
 
+   if (unlikely(isCopyTuple(pageid, pageidx)))
+   {
+     /**
+      * Only LCP reads a copy-tuple "directly"
+      */
+     ndbassert(Roptype == ZREAD);
+     ndbassert(disk_page == RNIL);
+     setup_lcp_read_copy_tuple(&req_struct, regOperPtr, regFragPtr, regTabPtr);
+     goto do_read;
+   }
+
    /**
     * Get pointer to tuple
     */
@@ -652,6 +663,7 @@ void Dbtup::execTUPKEYREQ(Signal* signal
      if (setup_read(&req_struct, regOperPtr, regFragPtr, regTabPtr, 
 		    disk_page != RNIL))
      {
+   do_read:
        if(handleReadReq(signal, regOperPtr, regTabPtr, &req_struct) != -1) 
        {
 	 req_struct.log_size= 0;
@@ -847,6 +859,44 @@ Dbtup::setup_fixed_part(KeyReqStruct* re
   req_struct->attr_descr= tab_descr; 
 }
 
+void
+Dbtup::setup_lcp_read_copy_tuple(KeyReqStruct* req_struct,
+                                 Operationrec* regOperPtr,
+                                 Fragrecord* regFragPtr,
+                                 Tablerec* regTabPtr)
+{
+  Local_key tmp;
+  tmp.m_page_no = req_struct->frag_page_id;
+  tmp.m_page_idx = regOperPtr->m_tuple_location.m_page_idx;
+  clearCopyTuple(tmp.m_page_no, tmp.m_page_idx);
+
+  Uint32 * copytuple = get_copy_tuple_raw(&tmp);
+  Local_key rowid;
+  memcpy(&rowid, copytuple+0, sizeof(Local_key));
+
+  req_struct->frag_page_id = rowid.m_page_no;
+  regOperPtr->m_tuple_location.m_page_idx = rowid.m_page_idx;
+
+  Tuple_header * th = get_copy_tuple(copytuple);
+  req_struct->m_page_ptr.setNull();
+  req_struct->m_tuple_ptr = (Tuple_header*)th;
+  th->m_operation_ptr_i = RNIL;
+  ndbassert((th->m_header_bits & Tuple_header::COPY_TUPLE) != 0);
+
+  Uint32 num_attr= regTabPtr->m_no_of_attributes;
+  Uint32 descr_start= regTabPtr->tabDescriptor;
+  TableDescriptor *tab_descr= &tableDescriptor[descr_start];
+  ndbrequire(descr_start + (num_attr << ZAD_LOG_SIZE) <= cnoOfTabDescrRec);
+  req_struct->attr_descr= tab_descr;
+
+  bool disk = false;
+  if (regTabPtr->need_expand(disk))
+  {
+    jam();
+    prepare_read(req_struct, regTabPtr, disk);
+  }
+}
+
  /* ---------------------------------------------------------------- */
  /* ------------------------ CONFIRM REQUEST ----------------------- */
  /* ---------------------------------------------------------------- */
@@ -1904,6 +1954,13 @@ int Dbtup::handleDeleteReq(Signal* signa
                            KeyReqStruct *req_struct,
 			   bool disk)
 {
+  Tuple_header* dst = alloc_copy_tuple(regTabPtr,
+                                       &regOperPtr->m_copy_tuple_location);
+  if (dst == 0) {
+    terrorCode = ZMEM_NOMEM_ERROR;
+    goto error;
+  }
+
   // delete must set but not increment tupVersion
   if (!regOperPtr->is_first_operation())
   {
@@ -1911,24 +1968,25 @@ int Dbtup::handleDeleteReq(Signal* signa
     regOperPtr->tupVersion= prevOp->tupVersion;
     // make copy since previous op is committed before this one
     const Tuple_header* org = get_copy_tuple(&prevOp->m_copy_tuple_location);
-    Tuple_header* dst = alloc_copy_tuple(regTabPtr,
-                                         &regOperPtr->m_copy_tuple_location);
-    if (dst == 0) {
-      terrorCode = ZMEM_NOMEM_ERROR;
-      goto error;
-    }
-    Uint32 len = regTabPtr->total_rec_size - 
-      Uint32(((Uint32*)dst) - 
+    Uint32 len = regTabPtr->total_rec_size -
+      Uint32(((Uint32*)dst) -
              get_copy_tuple_raw(&regOperPtr->m_copy_tuple_location));
     memcpy(dst, org, 4 * len);
     req_struct->m_tuple_ptr = dst;
-    set_change_mask_info(regTabPtr, get_change_mask_ptr(regTabPtr, dst));
   }
-  else 
+  else
   {
     regOperPtr->tupVersion= req_struct->m_tuple_ptr->get_tuple_version();
+    if (regTabPtr->m_no_of_disk_attributes)
+    {
+      dst->m_header_bits = req_struct->m_tuple_ptr->m_header_bits;
+      memcpy(dst->get_disk_ref_ptr(regTabPtr),
+	     req_struct->m_tuple_ptr->get_disk_ref_ptr(regTabPtr),
+             sizeof(Local_key));
+    }
   }
   req_struct->changeMask.set();
+  set_change_mask_info(regTabPtr, get_change_mask_ptr(regTabPtr, dst));
 
   if(disk && regOperPtr->m_undo_buffer_space == 0)
   {

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp	2011-05-07 06:17:02 +0000
@@ -670,7 +670,6 @@ void Dbtup::initializeDefaultValuesFrag(
   DefaultValuesFragment.p->fragStatus = Fragrecord::FS_ONLINE;
   DefaultValuesFragment.p->m_undo_complete= false;
   DefaultValuesFragment.p->m_lcp_scan_op = RNIL;
-  DefaultValuesFragment.p->m_lcp_keep_list = RNIL;
   DefaultValuesFragment.p->noOfPages = 0;
   DefaultValuesFragment.p->noOfVarPages = 0;
   DefaultValuesFragment.p->m_max_page_no = 0;

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp	2011-04-18 15:36:25 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp	2011-05-07 06:17:02 +0000
@@ -703,7 +703,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signa
   regFragPtr.p->m_tablespace_id= tablespace_id;
   regFragPtr.p->m_undo_complete= false;
   regFragPtr.p->m_lcp_scan_op = RNIL;
-  regFragPtr.p->m_lcp_keep_list = RNIL;
+  regFragPtr.p->m_lcp_keep_list_head.setNull();
+  regFragPtr.p->m_lcp_keep_list_tail.setNull();
   regFragPtr.p->noOfPages = 0;
   regFragPtr.p->noOfVarPages = 0;
   regFragPtr.p->m_max_page_no = 0;
@@ -1573,6 +1574,8 @@ Dbtup::computeTableMetaData(Tablerec *re
   /* Room for changemask */
   total_rec_size += 1 + ((regTabPtr->m_no_of_attributes + 31) >> 5);
 
+  total_rec_size += COPY_TUPLE_HEADER32;
+
   regTabPtr->total_rec_size= total_rec_size;
 
   setUpQueryRoutines(regTabPtr);

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp	2011-04-19 09:01:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp	2011-05-07 06:17:02 +0000
@@ -287,9 +287,8 @@ Dbtup::execACC_CHECK_SCAN(Signal* signal
   }
 
   const bool lcp = (scan.m_bits & ScanOp::SCAN_LCP);
-  Uint32 lcp_list = fragPtr.p->m_lcp_keep_list;
 
-  if (lcp && lcp_list != RNIL)
+  if (lcp && ! fragPtr.p->m_lcp_keep_list_head.isNull())
   {
     jam();
     /**
@@ -297,7 +296,7 @@ Dbtup::execACC_CHECK_SCAN(Signal* signal
      *   So that scan state is not alterer
      *   if lcp_keep rows are found in ScanOp::First
      */
-    handle_lcp_keep(signal, fragPtr.p, scanPtr.p, lcp_list);
+    handle_lcp_keep(signal, fragPtr.p, scanPtr.p);
     return;
   }
 
@@ -692,19 +691,18 @@ Dbtup::scanNext(Signal* signal, ScanOpPt
  
   const bool mm = (bits & ScanOp::SCAN_DD);
   const bool lcp = (bits & ScanOp::SCAN_LCP);
-  
-  Uint32 lcp_list = fragPtr.p->m_lcp_keep_list;
+
   const Uint32 size = ((bits & ScanOp::SCAN_VS) == 0) ?
     table.m_offsets[mm].m_fix_header_size : 1;
   const Uint32 first = ((bits & ScanOp::SCAN_VS) == 0) ? 0 : 1;
 
-  if (lcp && lcp_list != RNIL)
+  if (lcp && ! fragPtr.p->m_lcp_keep_list_head.isNull())
   {
     jam();
     /**
      * Handle lcp keep list here to, due to scanCont
      */
-    handle_lcp_keep(signal, fragPtr.p, scanPtr.p, lcp_list);
+    handle_lcp_keep(signal, fragPtr.p, scanPtr.p);
     return false;
   }
 
@@ -1130,57 +1128,40 @@ Dbtup::scanNext(Signal* signal, ScanOpPt
 void
 Dbtup::handle_lcp_keep(Signal* signal,
                        Fragrecord* fragPtrP,
-                       ScanOp* scanPtrP,
-                       Uint32 lcp_list)
+                       ScanOp* scanPtrP)
 {
   TablerecPtr tablePtr;
   tablePtr.i = scanPtrP->m_tableId;
   ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
 
-  Local_key tmp;
-  tmp.assref(lcp_list);
-  tmp.m_page_no = getRealpid(fragPtrP, tmp.m_page_no);
-  
-  Ptr<Page> pagePtr;
-  c_page_pool.getPtr(pagePtr, tmp.m_page_no);
-  Tuple_header* ptr = (Tuple_header*)
-    ((Fix_page*)pagePtr.p)->get_ptr(tmp.m_page_idx, 0);
-  Uint32 headerbits = ptr->m_header_bits;
-  ndbrequire(headerbits & Tuple_header::LCP_KEEP);
-  
-  Uint32 next = ptr->m_operation_ptr_i;
-  ptr->m_operation_ptr_i = RNIL;
-  ptr->m_header_bits = headerbits & ~(Uint32)Tuple_header::FREE;
-  
-  if (tablePtr.p->m_bits & Tablerec::TR_Checksum) {
+  ndbassert(!fragPtrP->m_lcp_keep_list_head.isNull());
+  Local_key tmp = fragPtrP->m_lcp_keep_list_head;
+  Uint32 * copytuple = get_copy_tuple_raw(&tmp);
+  memcpy(&fragPtrP->m_lcp_keep_list_head,
+         copytuple+2,
+         sizeof(Local_key));
+
+  if (fragPtrP->m_lcp_keep_list_head.isNull())
+  {
     jam();
-    setChecksum(ptr, tablePtr.p);
+    ndbassert(tmp.m_page_no == fragPtrP->m_lcp_keep_list_tail.m_page_no);
+    ndbassert(tmp.m_page_idx == fragPtrP->m_lcp_keep_list_tail.m_page_idx);
+    fragPtrP->m_lcp_keep_list_tail.setNull();
   }
 
+  Local_key save = tmp;
+  setCopyTuple(tmp.m_page_no, tmp.m_page_idx);
   NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
   conf->scanPtr = scanPtrP->m_userPtr;
   conf->accOperationPtr = (Uint32)-1;
   conf->fragId = fragPtrP->fragmentId;
-  conf->localKey[0] = Local_key::ref2page_id(lcp_list);
-  conf->localKey[1] = Local_key::ref2page_idx(lcp_list);
+  conf->localKey[0] = tmp.m_page_no;
+  conf->localKey[1] = tmp.m_page_idx;
   conf->gci = 0;
   Uint32 blockNo = refToMain(scanPtrP->m_userRef);
   EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 6);
-  
-  fragPtrP->m_lcp_keep_list = next;
-  ptr->m_header_bits |= Tuple_header::FREED; // RESTORE free flag
-  if (headerbits & Tuple_header::FREED)
-  {
-    if (tablePtr.p->m_attributes[MM].m_no_of_varsize +
-        tablePtr.p->m_attributes[MM].m_no_of_dynamic)
-    {
-      jam();
-      free_var_rec(fragPtrP, tablePtr.p, &tmp, pagePtr);
-    } else {
-      jam();
-      free_fix_rec(fragPtrP, tablePtr.p, &tmp, (Fix_page*)pagePtr.p);
-    }
-  }
+
+  c_undo_buffer.free_copy_tuple(&save);
 }
 
 void
@@ -1320,4 +1301,7 @@ Dbtup::execLCP_FRAG_ORD(Signal* signal)
   new (scanPtr.p) ScanOp;
   scanPtr.p->m_fragPtrI = fragPtr.i;
   scanPtr.p->m_state = ScanOp::First;
+
+  ndbassert(frag.m_lcp_keep_list_head.isNull());
+  ndbassert(frag.m_lcp_keep_list_tail.isNull());
 }

=== modified file 'storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-04-15 13:52:53 +0000
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-05-02 13:36:19 +0000
@@ -42,6 +42,9 @@
 #include <signaldata/DihRestart.hpp>
 #include <ndb_version.h>
 
+#include <EventLogger.hpp>
+extern EventLogger * g_eventLogger;
+
 //#define DEBUG_QMGR_START
 #ifdef DEBUG_QMGR_START
 #include <DebuggerNames.hpp>
@@ -745,6 +748,27 @@ void Qmgr::execCM_REGREQ(Signal* signal)
   Uint32 start_type = ~0;
   NdbNodeBitmask skip_nodes;
 
+  if (!c_connectedNodes.get(cmRegReq->nodeId))
+  {
+    jam();
+
+    /**
+     * With ndbmtd, there is a race condition such that
+     *   CM_REGREQ can arrive prior to CONNECT_REP
+     *   since CONNECT_REP is sent from CMVMI
+     *
+     * In such cases, ignore the CM_REGREQ which is safe
+     *   as it will anyway be resent by starting node
+     */
+    g_eventLogger->info("discarding CM_REGREQ from %u "
+                        "as we're not yet connected (isNdbMt: %u)",
+                        cmRegReq->nodeId,
+                        (unsigned)isNdbMt());
+
+    ndbrequire(isNdbMt());
+    return;
+  }
+
   if (signal->getLength() == CmRegReq::SignalLength)
   {
     jam();
@@ -2351,8 +2375,6 @@ void Qmgr::execCM_ACKADD(Signal* signal)
  * WE HAVE BEEN INCLUDED INTO THE CLUSTER. IT IS NOW TIME TO CALCULATE WHICH 
  * ARE OUR LEFT AND RIGHT NEIGHBOURS FOR THE HEARTBEAT PROTOCOL. 
  *--------------------------------------------------------------------------*/
-#include <EventLogger.hpp>
-extern EventLogger * g_eventLogger;
 void Qmgr::findNeighbours(Signal* signal, Uint32 from) 
 {
   UintR toldLeftNeighbour;

=== added file 'storage/ndb/src/kernel/vm/NdbSeqLock.hpp'
--- a/storage/ndb/src/kernel/vm/NdbSeqLock.hpp	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/src/kernel/vm/NdbSeqLock.hpp	2011-05-17 07:06:30 +0000
@@ -0,0 +1,95 @@
+/* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
+
+#ifndef NDB_SEQLOCK_HPP
+#define NDB_SEQLOCK_HPP
+
+#include <ndb_types.h>
+#include "mt-asm.h"
+
+#if defined (NDB_HAVE_RMB) && defined(NDB_HAVE_WMB)
+struct NdbSeqLock
+{
+  NdbSeqLock() { m_seq = 0;}
+  volatile Uint32 m_seq;               // even: unlocked, odd: writer active
+
+  void write_lock();
+  void write_unlock();
+
+  Uint32 read_lock();
+  bool read_unlock(Uint32 val) const;  // val: value obtained from read_lock()
+};
+
+inline
+void
+NdbSeqLock::write_lock()
+{
+  assert((m_seq & 1) == 0);            // no nested or concurrent writer
+  m_seq++;                             // make m_seq odd: readers will spin/retry
+  wmb();                               // order seq-bump before protected writes
+}
+
+inline
+void
+NdbSeqLock::write_unlock()
+{
+  assert((m_seq & 1) == 1);            // must hold the write lock
+  wmb();                               // order protected writes before seq-bump
+  m_seq++;                             // make m_seq even: readers may proceed
+}
+
+inline
+Uint32
+NdbSeqLock::read_lock()
+{
+loop:
+  Uint32 val = m_seq;
+  rmb();                               // read m_seq before the protected data
+  if (unlikely(val & 1))               // odd: writer in progress, wait
+  {
+#ifdef NDB_HAVE_CPU_PAUSE
+    cpu_pause();
+#endif
+    goto loop;
+  }
+  return val;                          // caller passes this to read_unlock()
+}
+
+inline
+bool
+NdbSeqLock::read_unlock(Uint32 val) const
+{
+  rmb();                               // read protected data before re-check
+  return val == m_seq;                 // unchanged seq => read was consistent
+}
+#else /** ! rmb() or wmb() */
+/**
+ * Only for ndbd...
+ */
+
+struct NdbSeqLock
+{
+  NdbSeqLock() { }
+
+  void write_lock() {}   // no-op: build without rmb()/wmb() is single-threaded
+  void write_unlock() {} // no-op: build without rmb()/wmb() is single-threaded
+
+  Uint32 read_lock() { return 0; } // was {}: flowing off a value-returning fn is UB
+  bool read_unlock(Uint32 val) const { (void)val; return true;}
+};
+
+#endif
+
+#endif

=== modified file 'storage/ndb/src/kernel/vm/SimulatedBlock.cpp'
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.cpp	2011-05-16 12:24:55 +0000
@@ -4397,3 +4397,13 @@ SimulatedBlock::ndbinfo_send_scan_conf(S
              signal_length, JBB);
 }
 
+#ifdef VM_TRACE
+void
+SimulatedBlock::assertOwnThread()
+{
+#ifdef NDBD_MULTITHREADED
+  mt_assert_own_thread(this);
+#endif
+}
+
+#endif

=== modified file 'storage/ndb/src/kernel/vm/SimulatedBlock.hpp'
--- a/storage/ndb/src/kernel/vm/SimulatedBlock.hpp	2011-01-30 20:56:00 +0000
+++ b/storage/ndb/src/kernel/vm/SimulatedBlock.hpp	2011-05-16 12:24:55 +0000
@@ -188,6 +188,15 @@ public:
   static bool isNdbMtLqh() { return globalData.isNdbMtLqh; }
   static Uint32 getLqhWorkers() { return globalData.ndbMtLqhWorkers; }
 
+  /**
+   * Assert that thread calling this function is "owner" of block instance
+   */
+#ifdef VM_TRACE
+  void assertOwnThread();
+#else
+  void assertOwnThread(){ }
+#endif
+
   /*
    * Instance key (1-4) is used only when sending a signal.  Receiver
    * maps it to actual instance (0, if receiver is not MT LQH).

=== modified file 'storage/ndb/src/kernel/vm/mt-asm.h'
--- a/storage/ndb/src/kernel/vm/mt-asm.h	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/src/kernel/vm/mt-asm.h	2011-05-16 11:44:52 +0000
@@ -25,22 +25,31 @@
  * GCC
  *******************/
 #if defined(__x86_64__) || defined (__i386__)
+
+#define NDB_HAVE_MB
+#define NDB_HAVE_RMB
+#define NDB_HAVE_WMB
+#define NDB_HAVE_READ_BARRIER_DEPENDS
+#define NDB_HAVE_XCNG
+#define NDB_HAVE_CPU_PAUSE
+
 /* Memory barriers, these definitions are for x64_64. */
 #define mb()    asm volatile("mfence":::"memory")
 /* According to Intel docs, it does not reorder loads. */
-/* #define rmb() asm volatile("lfence":::"memory") */                      
+/* #define rmb() asm volatile("lfence":::"memory") */
 #define rmb()   asm volatile("" ::: "memory")
 #define wmb()   asm volatile("" ::: "memory")
 #define read_barrier_depends()  do {} while(0)
 
-#define NDB_HAVE_XCNG
-static inline
+static
+inline
 int
 xcng(volatile unsigned * addr, int val)
 {
   asm volatile ("xchg %0, %1;" : "+r" (val) , "+m" (*addr));
   return val;
 }
+
 static
 inline
 void
@@ -50,6 +59,12 @@ cpu_pause()
 }
 
 #elif defined(__sparc__)
+
+#define NDB_HAVE_MB
+#define NDB_HAVE_RMB
+#define NDB_HAVE_WMB
+#define NDB_HAVE_READ_BARRIER_DEPENDS
+
 #define mb()    asm volatile("membar #LoadLoad | #LoadStore | #StoreLoad | #StoreStore":::"memory")
 #define rmb()   asm volatile("membar #LoadLoad" ::: "memory")
 #define wmb()   asm volatile("membar #StoreStore" ::: "memory")
@@ -71,6 +86,7 @@ xcng(volatile unsigned * addr, int val)
 }
 #define cpu_pause()
 #define NDB_HAVE_XCNG
+#define NDB_HAVE_CPU_PAUSE
 #else
 /* link error if used incorrectly (i.e wo/ having NDB_HAVE_XCNG) */
 extern  int xcng(volatile unsigned * addr, int val);
@@ -78,7 +94,7 @@ extern void cpu_pause();
 #endif
 
 #else
-#error "Unsupported architecture (gcc)"
+#define NDB_NO_ASM "Unsupported architecture (gcc)"
 #endif
 
 #elif defined(__sun)
@@ -91,20 +107,32 @@ extern void cpu_pause();
  *      i.e that it clobbers memory
  */
 #if defined(__x86_64__)
+#define NDB_HAVE_MB
+#define NDB_HAVE_RMB
+#define NDB_HAVE_WMB
+#define NDB_HAVE_READ_BARRIER_DEPENDS
+
 #define mb()    asm ("mfence")
 /* According to Intel docs, it does not reorder loads. */
 /* #define rmb() asm ("lfence") */
 #define rmb()   asm ("")
 #define wmb()   asm ("")
 #define read_barrier_depends()  do {} while(0)
+
 #elif defined(__sparc)
+#define NDB_HAVE_MB
+#define NDB_HAVE_RMB
+#define NDB_HAVE_WMB
+#define NDB_HAVE_READ_BARRIER_DEPENDS
+
 #define mb() asm ("membar #LoadLoad | #LoadStore | #StoreLoad | #StoreStore")
 #define rmb() asm ("membar #LoadLoad")
 #define wmb() asm ("membar #StoreStore")
 #define read_barrier_depends()  do {} while(0)
 #else
-#error "Unsupported architecture (sun studio)"
+#define NDB_NO_ASM "Unsupported architecture (sun studio)"
 #endif
+
 #if defined(__x86_64__) || defined(__sparc)
 /**
  * we should probably use assembler for x86 aswell...
@@ -116,6 +144,7 @@ extern void cpu_pause();
 
 #ifdef HAVE_ATOMIC_SWAP_32
 #define NDB_HAVE_XCNG
+#define NDB_HAVE_CPU_PAUSE
 #if defined(__sparc)
 static inline
 int
@@ -154,6 +183,12 @@ extern void cpu_pause();
 #endif
 #endif
 #elif defined (_MSC_VER)
+
+#define NDB_HAVE_MB
+#define NDB_HAVE_RMB
+#define NDB_HAVE_WMB
+#define NDB_HAVE_READ_BARRIER_DEPENDS
+
 #include <windows.h>
 #define mb()    MemoryBarrier()
 #define read_barrier_depends()  do {} while(0)
@@ -171,6 +206,8 @@ extern void cpu_pause();
 #endif
 
 #define NDB_HAVE_XCNG
+#define NDB_HAVE_CPU_PAUSE
+
 static inline
 int
 xcng(volatile unsigned * addr, int val)
@@ -186,7 +223,7 @@ cpu_pause()
   YieldProcessor();
 }
 #else
-#error "Unsupported compiler"
+#define NDB_NO_ASM "Unsupported compiler"
 #endif
 
 #endif

=== modified file 'storage/ndb/src/kernel/vm/mt.cpp'
--- a/storage/ndb/src/kernel/vm/mt.cpp	2011-04-20 05:46:35 +0000
+++ b/storage/ndb/src/kernel/vm/mt.cpp	2011-05-16 12:24:55 +0000
@@ -4170,6 +4170,22 @@ mt_wakeup(class SimulatedBlock* block)
   wakeup(&thrptr->m_waiter);
 }
 
+#ifdef VM_TRACE
+void
+mt_assert_own_thread(SimulatedBlock* block)
+{
+  Uint32 thr_no = block->getThreadId();
+  thr_data *thrptr = g_thr_repository.m_thread + thr_no;  // thread owning this block instance
+
+  if (unlikely(pthread_equal(thrptr->m_thr_id, pthread_self()) == 0))
+  {
+    // Caller is not the owning thread: report and crash (VM_TRACE builds only)
+    fprintf(stderr, "mt_assert_own_thread() - assertion-failure\n");
+    fflush(stderr);
+    abort();
+  }
+}
+#endif
+
 /**
  * Global data
  */

=== modified file 'storage/ndb/src/kernel/vm/mt.hpp'
--- a/storage/ndb/src/kernel/vm/mt.hpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/src/kernel/vm/mt.hpp	2011-05-16 12:24:55 +0000
@@ -74,4 +74,11 @@ Uint32 mt_get_thread_references_for_bloc
  */
 void mt_wakeup(class SimulatedBlock*);
 
+#ifdef VM_TRACE
+/**
+ * Assert that thread calling this function is "owner" of block instance
+ */
+void mt_assert_own_thread(class SimulatedBlock*);
+#endif
+
 #endif

=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.cpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-04-15 08:09:04 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-05-12 09:26:38 +0000
@@ -1345,7 +1345,7 @@ int MgmtSrvr::sendSTOP_REQ(const Vector<
     ndb_nodes_to_stop.copyto(NdbNodeBitmask::Size, stopReq->nodes);
     StopReq::setStopNodes(stopReq->requestInfo, 1);
   }
-  else
+  else if (ndb_nodes_to_stop.count() == 1)
   {
     Uint32 nodeId = ndb_nodes_to_stop.find(0);
     if (okToSendTo(nodeId, true) == 0)

=== modified file 'storage/ndb/src/ndbapi/DictCache.cpp'
--- a/storage/ndb/src/ndbapi/DictCache.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/ndbapi/DictCache.cpp	2011-05-12 09:01:21 +0000
@@ -457,7 +457,6 @@ GlobalDictCache::alter_table_rep(const c
 				 bool altered)
 {
   DBUG_ENTER("GlobalDictCache::alter_table_rep");
-  assert(! is_ndb_blob_table(name));
   const Uint32 len = (Uint32)strlen(name);
   Vector<TableVersion> * vers = 
     m_tableHash.getData(name, len);
@@ -467,6 +466,7 @@ GlobalDictCache::alter_table_rep(const c
     DBUG_VOID_RETURN;
   }
 
+  assert(! is_ndb_blob_table(name));
   const Uint32 sz = vers->size();
   if(sz == 0)
   {

=== modified file 'storage/ndb/src/ndbapi/Ndb.cpp'
--- a/storage/ndb/src/ndbapi/Ndb.cpp	2011-04-27 10:48:16 +0000
+++ b/storage/ndb/src/ndbapi/Ndb.cpp	2011-05-17 07:23:31 +0000
@@ -201,6 +201,7 @@ Ndb::NDB_connect(Uint32 tNode, Uint32 in
   tSignal->setData(theMyRef, 2);	// Set my block reference
   tSignal->setData(instance, 3);        // Set requested instance
   tNdbCon->Status(NdbTransaction::Connecting); // Set status to connecting
+  tNdbCon->theDBnode = tNode;
   Uint32 nodeSequence;
   tReturnCode= sendRecSignal(tNode, WAIT_TC_SEIZE, tSignal,
                              0, &nodeSequence);

=== modified file 'storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2011-02-23 12:15:04 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2011-05-12 09:01:21 +0000
@@ -938,6 +938,72 @@ NdbTableImpl::getName() const
   return m_externalName.c_str();
 }
 
+int
+NdbTableImpl::getDbName(char buf[], size_t len) const
+{
+  if (len == 0)
+    return -1;                        // no room even for the terminating NUL
+
+  // db/schema/table
+  const char *ptr = m_internalName.c_str();
+
+  size_t pos = 0;
+  while (ptr[pos] && ptr[pos] != table_name_separator)  // copy leading "db" part
+  {
+    buf[pos] = ptr[pos];
+    pos++;
+
+    if (pos == len)
+      return -1;                      // buffer too small (need pos < len for NUL)
+  }
+  buf[pos] = 0;                       // NUL-terminate; pos < len guaranteed here
+  return 0;
+}
+
+int
+NdbTableImpl::getSchemaName(char buf[], size_t len) const
+{
+  if (len == 0)
+    return -1;                        // no room even for the terminating NUL
+
+  // db/schema/table
+  const char *ptr = m_internalName.c_str();
+
+  // skip over "db"
+  while (*ptr && *ptr != table_name_separator)
+    ptr++;
+
+  buf[0] = 0;                         // default: empty schema if no separator found
+  if (*ptr == table_name_separator)
+  {
+    ptr++;                            // step past separator onto "schema"
+    size_t pos = 0;
+    while (ptr[pos] && ptr[pos] != table_name_separator)
+    {
+      buf[pos] = ptr[pos];
+      pos++;
+
+      if (pos == len)
+        return -1;                    // buffer too small (need pos < len for NUL)
+    }
+    buf[pos] = 0;                     // NUL-terminate; pos < len guaranteed here
+  }
+
+  return 0;
+}
+
+void
+NdbTableImpl::setDbSchema(const char * db, const char * schema)
+{
+  // Rebuild the internal "db/schema/table" name around the unchanged table name
+  m_internalName.assfmt("%s%c%s%c%s",
+                        db,
+                        table_name_separator,
+                        schema,
+                        table_name_separator,
+                        m_externalName.c_str());
+  updateMysqlName();                  // keep the cached "db/table" name in sync
+}
+
 void
 NdbTableImpl::computeAggregates()
 {
@@ -3090,7 +3156,8 @@ int NdbDictionaryImpl::alterTableGlobal(
 {
   DBUG_ENTER("NdbDictionaryImpl::alterTableGlobal");
   // Alter the table
-  int ret = m_receiver.alterTable(m_ndb, old_impl, impl);
+  Uint32 changeMask = 0;
+  int ret = m_receiver.alterTable(m_ndb, old_impl, impl, changeMask);
 #if ndb_bug41905
   old_impl.m_status = NdbDictionary::Object::Invalid;
 #endif
@@ -3107,18 +3174,93 @@ int NdbDictionaryImpl::alterTableGlobal(
     m_globalHash->unlock();
     if (ret != 0)
       m_error.code = 723;
+
+    if (ret == 0 && AlterTableReq::getNameFlag(changeMask) != 0)
+    {
+      char db0[MAX_TAB_NAME_SIZE];
+      char db1[MAX_TAB_NAME_SIZE];
+      if (old_impl.getDbName(db0, sizeof(db0)) != 0)
+      {
+        m_error.code = 705;
+        DBUG_RETURN(-1);
+      }
+      if (impl.getDbName(db1, sizeof(db0)) != 0)
+      {
+        m_error.code = 705;
+        DBUG_RETURN(-1);
+      }
+
+      bool db_change = strcmp(db0, db1) != 0;
+      if (old_impl.getSchemaName(db0, sizeof(db0)) != 0)
+      {
+        m_error.code = 705;
+        DBUG_RETURN(-1);
+      }
+      if (impl.getSchemaName(db1, sizeof(db0)) != 0)
+      {
+        m_error.code = 705;
+        DBUG_RETURN(-1);
+      }
+
+      bool schema_change = strcmp(db0, db1) != 0;
+      if (db_change || schema_change)
+      {
+        if (renameBlobTables(old_impl, impl) != 0)
+        {
+          DBUG_RETURN(-1);
+        }
+      }
+    }
     DBUG_RETURN(ret);
   }
   ERR_RETURN(getNdbError(), ret);
 }
 
 int
+NdbDictionaryImpl::renameBlobTables(const NdbTableImpl & old_tab,
+                                    const NdbTableImpl & new_tab)
+{
+  if (old_tab.m_noOfBlobs == 0)
+    return 0;                         // no blob columns => no part tables to rename
+
+  char db[MAX_TAB_NAME_SIZE];
+  char schema[MAX_TAB_NAME_SIZE];
+  new_tab.getDbName(db, sizeof(db));            // NOTE(review): return value ignored -- confirm failure is impossible here
+  new_tab.getSchemaName(schema, sizeof(schema)); // NOTE(review): return value ignored as well
+
+  for (unsigned i = 0; i < old_tab.m_columns.size(); i++)
+  {
+    NdbColumnImpl & c = *old_tab.m_columns[i];
+    if (! c.getBlobType() || c.getPartSize() == 0)
+      continue;                       // column has no separate blob part table
+    NdbTableImpl* _bt = c.m_blobTable;
+    if (_bt == NULL)
+    {
+      continue; // "force" mode on
+    }
+
+    NdbDictionary::Table& bt = * _bt->m_facade;
+    NdbDictionary::Table new_bt(bt);  // copy part table, then move it to new db/schema
+    new_bt.m_impl.setDbSchema(db, schema);
+
+    Uint32 changeMask = 0;
+    int ret = m_receiver.alterTable(m_ndb, bt.m_impl, new_bt.m_impl,changeMask);
+    if (ret != 0)
+    {
+      return ret;                     // propagate alter error for this part table
+    }
+    assert(AlterTableReq::getNameFlag(changeMask) != 0);
+  }
+  return 0;
+}
+
+int
 NdbDictInterface::alterTable(Ndb & ndb,
                              const NdbTableImpl &old_impl,
-                             NdbTableImpl &impl)
+                             NdbTableImpl &impl,
+                             Uint32 & change_mask)
 {
   int ret;
-  Uint32 change_mask;
 
   DBUG_ENTER("NdbDictInterface::alterTable");
 
@@ -3168,8 +3310,9 @@ NdbDictInterface::compChangeMask(const N
                       impl.m_internalName.c_str()));
   if(impl.m_internalName != old_impl.m_internalName)
   {
-    if (unlikely(is_ndb_blob_table(old_impl.m_externalName.c_str()) ||
-                 is_ndb_blob_table(impl.m_externalName.c_str())))
+    bool old_blob = is_ndb_blob_table(old_impl.m_externalName.c_str());
+    bool new_blob = is_ndb_blob_table(impl.m_externalName.c_str());
+    if (unlikely(old_blob != new_blob))
     {
       /* Attempt to alter to/from Blob part table name */
       DBUG_PRINT("info", ("Attempt to alter to/from Blob part table name"));
@@ -3260,7 +3403,9 @@ NdbDictInterface::compChangeMask(const N
          col->m_autoIncrement ||                   // ToDo: allow this?
 	 (col->getBlobType() && col->getPartSize())
          )
+      {
         goto invalid_alter_table;
+      }
     }
     AlterTableReq::setAddAttrFlag(change_mask, true);
   }

=== modified file 'storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp	2011-02-23 12:15:04 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp	2011-05-12 09:01:21 +0000
@@ -185,14 +185,18 @@ public:
   int validate(NdbError& error);
 
   Uint32 m_primaryTableId;
-  BaseString m_internalName;
-  BaseString m_externalName;
-  BaseString m_mysqlName;
+  BaseString m_internalName; // db/schema/table
+  BaseString m_externalName; //           table
+  BaseString m_mysqlName;    //        db/table
   UtilBuffer m_frm; 
   Vector<Uint32> m_fd;
   Vector<Int32> m_range;
   NdbDictionary::Object::FragmentType m_fragmentType;
 
+  int getDbName(char * buf, size_t len) const;
+  int getSchemaName(char * buf, size_t len) const;
+  void setDbSchema(const char * db, const char * schema);
+
   /**
    * 
    */
@@ -617,7 +621,7 @@ public:
   int createTable(class Ndb & ndb, NdbTableImpl &);
   bool supportedAlterTable(const NdbTableImpl &,
 			   NdbTableImpl &);
-  int alterTable(class Ndb & ndb, const NdbTableImpl &, NdbTableImpl &);
+  int alterTable(class Ndb & ndb, const NdbTableImpl &, NdbTableImpl&, Uint32&);
   void syncInternalName(Ndb & ndb, NdbTableImpl &impl);
   int compChangeMask(const NdbTableImpl &old_impl,
                      const NdbTableImpl &impl,
@@ -828,6 +832,7 @@ public:
   int dropTable(const char * name);
   int dropTable(NdbTableImpl &);
   int dropBlobTables(NdbTableImpl &);
+  int renameBlobTables(const NdbTableImpl &old_impl, const NdbTableImpl &impl);
   int invalidateObject(NdbTableImpl &);
   int removeCachedObject(NdbTableImpl &);
 

=== modified file 'storage/ndb/src/ndbapi/NdbOperationExec.cpp'
--- a/storage/ndb/src/ndbapi/NdbOperationExec.cpp	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/src/ndbapi/NdbOperationExec.cpp	2011-05-11 13:31:44 +0000
@@ -1032,7 +1032,6 @@ NdbOperation::buildSignalsNdbRecord(Uint
       readMask.set(attrId);
       requestedCols++;
     }
-    theReceiver.m_record.m_column_count= requestedCols;
 
     /* Are there any columns to read via NdbRecord? */
     if (requestedCols > 0)

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilder.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-05-04 14:45:46 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-05-17 12:47:21 +0000
@@ -857,13 +857,9 @@ NdbQueryBuilder::readTuple(const NdbDict
                                        ident,
                                        m_impl.m_operations.size(),
                                        error);
-  returnErrIf(op==0, Err_MemoryAlloc);
-  if (unlikely(error != 0))
-  {
-    m_impl.setErrorCode(error);
-    delete op;
-    return NULL;
-  }
+
+  returnErrIf(m_impl.takeOwnership(op)!=0, Err_MemoryAlloc);
+  returnErrIf(error!=0, error); // C'tor returned error, bailout
 
   Uint32 keyindex = 0;
   for (i=0; i<colcount; ++i)
@@ -873,11 +869,7 @@ NdbQueryBuilder::readTuple(const NdbDict
     {
       assert (keyindex==col->m_keyInfoPos);
       int error = op->m_keys[col->m_keyInfoPos]->bindOperand(*col,*op);
-      if (unlikely(error))
-      { m_impl.setErrorCode(error);
-        delete op;
-        return NULL;
-      }
+      returnErrIf(error!=0, error);
 
       keyindex++;
       if (keyindex >= static_cast<Uint32>(keyfields))
@@ -885,17 +877,7 @@ NdbQueryBuilder::readTuple(const NdbDict
     }
   }
   
-  if (likely(m_impl.m_operations.push_back(op) == 0))
-  {
-    return &op->m_interface;
-  }
-  else
-  {
-    assert(errno == ENOMEM);
-    delete op;
-    m_impl.setErrorCode(Err_MemoryAlloc);
-    return NULL;
-  }
+  return &op->m_interface;
 }
 
 
@@ -944,13 +926,9 @@ NdbQueryBuilder::readTuple(const NdbDict
                                        ident,
                                        m_impl.m_operations.size(),
                                        error);
-  returnErrIf(op==0, Err_MemoryAlloc);
-  if (unlikely(error != 0))
-  {
-    m_impl.setErrorCode(error);
-    delete op;
-    return NULL;
-  }
+
+  returnErrIf(m_impl.takeOwnership(op)!=0, Err_MemoryAlloc);
+  returnErrIf(error!=0, error); // C'tor returned error, bailout
 
   // Bind to Column and check type compatibility
   for (i=0; i<inxfields; ++i)
@@ -959,24 +937,10 @@ NdbQueryBuilder::readTuple(const NdbDict
     assert (col.getColumnNo() == i);
 
     error = keys[i]->getImpl().bindOperand(col,*op);
-    if (unlikely(error))
-    { m_impl.setErrorCode(error);
-      delete op;
-      return NULL;
-    }
+    returnErrIf(error!=0, error);
   }
 
-  if (likely(m_impl.m_operations.push_back(op) == 0))
-  {
-    return &op->m_interface;
-  }
-  else
-  {
-    assert(errno == ENOMEM);
-    delete op;
-    m_impl.setErrorCode(Err_MemoryAlloc);
-    return NULL;
-  }
+  return &op->m_interface;
 }
 
 
@@ -996,24 +960,9 @@ NdbQueryBuilder::scanTable(const NdbDict
                                           ident,
                                           m_impl.m_operations.size(),
                                           error);
-  returnErrIf(op==0, Err_MemoryAlloc);
-  if (unlikely(error != 0))
-  {
-    m_impl.setErrorCode(error);
-    delete op;
-    return NULL;
-  }
-
-  if (unlikely(m_impl.m_operations.push_back(op) != 0))
-  {
-    assert(errno == ENOMEM);
-    delete op;
-    m_impl.setErrorCode(Err_MemoryAlloc);
-    return NULL;
-  }
 
-  error = op->markScanAncestors();
-  returnErrIf(error!=0, error);
+  returnErrIf(m_impl.takeOwnership(op)!=0, Err_MemoryAlloc);
+  returnErrIf(error!=0, error); // C'tor returned error, bailout
 
   return &op->m_interface;
 }
@@ -1053,20 +1002,13 @@ NdbQueryBuilder::scanIndex(const NdbDict
                                           ident,
                                           m_impl.m_operations.size(),
                                           error);
-  returnErrIf(op==0, Err_MemoryAlloc);
-  if (unlikely(error != 0))
-  {
-    m_impl.setErrorCode(error);
-    delete op;
-    return NULL;
-  }
 
-  if (unlikely(op->m_bound.lowKeys  > indexImpl.getNoOfColumns() ||
-               op->m_bound.highKeys > indexImpl.getNoOfColumns()))
-  { m_impl.setErrorCode(QRY_TOO_MANY_KEY_VALUES);
-    delete op;
-    return NULL;
-  }
+  returnErrIf(m_impl.takeOwnership(op)!=0, Err_MemoryAlloc);
+  returnErrIf(error!=0, error); // C'tor returned error, bailout
+
+  returnErrIf(op->m_bound.lowKeys  > indexImpl.getNoOfColumns() ||
+              op->m_bound.highKeys > indexImpl.getNoOfColumns(),
+              QRY_TOO_MANY_KEY_VALUES);
 
   // Bind lowKeys, and if applicable, highKeys to the column being refered
   Uint32 i;
@@ -1078,11 +1020,7 @@ NdbQueryBuilder::scanIndex(const NdbDict
        ? op->m_bound.low[i]->bindOperand(col,*op) || op->m_bound.high[i]->bindOperand(col,*op)
        : op->m_bound.low[i]->bindOperand(col,*op);
 
-    if (unlikely(error))
-    { m_impl.setErrorCode(error);
-      delete op;
-      return NULL;
-    }
+    returnErrIf(error!=0, error);
   }
 
   // Bind any remaining highKeys past '#lowKeys'
@@ -1090,31 +1028,10 @@ NdbQueryBuilder::scanIndex(const NdbDict
   {
     const NdbColumnImpl& col = NdbColumnImpl::getImpl(*indexImpl.getColumn(i));
     error = op->m_bound.high[i]->bindOperand(col,*op);
-    if (unlikely(error))
-    { m_impl.setErrorCode(error);
-      delete op;
-      return NULL;
-    }
-  }
-
-  error = op->markScanAncestors();
-  if (unlikely(error))
-  { m_impl.setErrorCode(error);
-    delete op;
-    return NULL;
+    returnErrIf(error!=0, error);
   }
 
-  if (likely(m_impl.m_operations.push_back(op) == 0))
-  {
-    return &op->m_interface;
-  }
-  else
-  {
-    assert(errno == ENOMEM);
-    delete op;
-    m_impl.setErrorCode(Err_MemoryAlloc);
-    return NULL;
-  }
+  return &op->m_interface;
 }
 
 const NdbQueryDef*
@@ -1220,27 +1137,43 @@ NdbQueryBuilderImpl::prepare()
   return def;
 }
 
-
-NdbQueryOperand* 
-NdbQueryBuilderImpl::addOperand(NdbQueryOperandImpl* operand)
+inline int 
+NdbQueryBuilderImpl::takeOwnership(NdbQueryOperandImpl* operand)
 {
   if (unlikely(operand == NULL))
   {
-    setErrorCode(Err_MemoryAlloc);
-    return NULL;
+    return Err_MemoryAlloc;
+  }
+  else if (unlikely(m_operands.push_back(operand) != 0))
+  {
+    assert(errno == ENOMEM);
+    delete operand;
+    return Err_MemoryAlloc;
   }
+  return 0;
+}
 
-  if (likely(m_operands.push_back(operand) == 0))
+inline int 
+NdbQueryBuilderImpl::takeOwnership(NdbQueryOperationDefImpl* operation)
+{
+  if (unlikely(operation == NULL))
   {
-    return &operand->getInterface();
+    return Err_MemoryAlloc;
   }
-  else
+  else if (unlikely(m_operations.push_back(operation) != 0))
   {
     assert(errno == ENOMEM);
-    delete operand;
-    setErrorCode(Err_MemoryAlloc);
-    return NULL;
+    delete operation;
+    return Err_MemoryAlloc;
   }
+  return 0;
+}
+
+NdbQueryOperand* 
+NdbQueryBuilderImpl::addOperand(NdbQueryOperandImpl* operand)
+{
+  returnErrIf(takeOwnership(operand)!=0, Err_MemoryAlloc);
+  return &operand->getInterface();
 }
 
 ///////////////////////////////////
@@ -1896,7 +1829,6 @@ NdbQueryOperationDefImpl::NdbQueryOperat
                                      int& error)
   :m_isPrepared(false), 
    m_diskInChildProjection(false), 
-   m_hasScanDescendant(false),
    m_table(table), 
    m_ident(ident), 
    m_ix(ix), m_id(ix),
@@ -2049,33 +1981,6 @@ int NdbQueryOperationDefImpl::addParamRe
   return 0;
 }
 
-
-int NdbQueryOperationDefImpl::markScanAncestors()
-{
-  // Verify that parent links have been established.
-  assert(m_ix == 0 || m_parent != NULL);
-  assert(isScanOperation());
-  NdbQueryOperationDefImpl* operation = getParentOperation();
-  while (operation != NULL)
-  {
-    if (operation->m_hasScanDescendant)
-    {
-      /* Remove this line if you want to allow bushy scans. Result sets will
-       * probably be wrong, but 'explain' output etc. may be useful for
-       * debugging.
-       */
-      return QRY_MULTIPLE_SCAN_BRANCHES;
-    }
-    operation->m_hasScanDescendant = true;
-    if (operation->isScanOperation())
-    {
-      break;
-    }
-    operation = operation->getParentOperation();
-  }
-  return 0;
-}
-
 /** This class is used for serializing sequences of 16 bit integers,
  * where the first 16 bit integer specifies the length of the sequence.
  */
@@ -2827,7 +2732,8 @@ NdbQueryScanOperationDefImpl::serialize(
     }
     node->tableId = tableOrIndex.getObjectId();
     node->tableVersion = tableOrIndex.getObjectVersion();
-    node->requestInfo = requestInfo;
+    // Need NI_REPEAT_SCAN_RESULT if there are star-joined scans 
+    node->requestInfo = requestInfo | DABits::NI_REPEAT_SCAN_RESULT;
     QueryNode::setOpLen(node->len, QueryNode::QN_SCAN_INDEX, length);
   }
 

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-04-06 14:16:13 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-05-05 11:06:08 +0000
@@ -42,9 +42,8 @@
 #define QRY_SCAN_ORDER_ALREADY_SET 4821
 #define QRY_PARAMETER_HAS_WRONG_TYPE 4822
 #define QRY_CHAR_PARAMETER_TRUNCATED 4823
-#define QRY_MULTIPLE_SCAN_BRANCHES 4824
-#define QRY_MULTIPLE_SCAN_SORTED 4825
-#define QRY_BATCH_SIZE_TOO_SMALL 4826
+#define QRY_MULTIPLE_SCAN_SORTED 4824
+#define QRY_BATCH_SIZE_TOO_SMALL 4825
 
 #ifdef __cplusplus
 #include <Vector.hpp>
@@ -389,15 +388,6 @@ public:
   // Return 'true' is query type is a multi-row scan
   virtual bool isScanOperation() const = 0;
 
-  /** Return true if this operation or any of its descendants is a scan.*/
-  bool hasScanDescendant() const
-  { return m_hasScanDescendant; }
-
-  /** Mark lookup ancestors of this operation as having a scan decendant.
-   * @return Possible error code.
-   */
-  int markScanAncestors();
-
   virtual const NdbQueryOperationDef& getInterface() const = 0; 
 
   /** Make a serialized representation of this operation, corresponding to
@@ -468,9 +458,6 @@ protected:
    */
   bool m_diskInChildProjection;
 
-  /** True if this operation or any of its descendants is a scan.*/
-  bool m_hasScanDescendant;
-
 private:
   bool isChildOf(const NdbQueryOperationDefImpl* parentOp) const;
 
@@ -664,6 +651,17 @@ private:
    */
   NdbQueryOperand* addOperand(NdbQueryOperandImpl* operand);
 
+  /**
+   * Take ownership of specified object: From now on it is the
+   * responsibility of this NdbQueryBuilderImpl to manage the
+   * lifetime of the object. If takeOwnership() fails, the 
+   * specified object is deleted before it returns.
+   * @param[in] operand to take ownership for (may be NULL).
+   * @return 0 if ok, else there has been an 'Err_MemoryAlloc'
+   */
+  int takeOwnership(NdbQueryOperandImpl*);
+  int takeOwnership(NdbQueryOperationDefImpl*);
+
   bool contains(const NdbQueryOperationDefImpl*);
 
   NdbQueryBuilder m_interface;

=== modified file 'storage/ndb/src/ndbapi/NdbQueryOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-04-27 10:48:16 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-05-11 13:31:44 +0000
@@ -47,7 +47,7 @@
  */
 #define UNUSED(x) ((void)(x))
 
-//#define TEST_SCANREQ
+//#define TEST_NEXTREQ
 
 /* Various error codes that are not specific to NdbQuery. */
 static const int Err_TupleNotFound = 626;
@@ -333,32 +333,35 @@ public:
   { return m_iterState == Iter_finished; }
 
   /** 
-   * This method is only used for result streams of scan operations. It is
+   * This method is
    * used for marking a stream as holding the last batch of a sub scan. 
    * This means that it is the last batch of the scan that was instantiated 
    * from the current batch of its parent operation.
    */
-  void setSubScanComplete(bool complete)
+  void setSubScanCompletion(bool complete)
   { 
-    assert(m_operation.getQueryOperationDef().isScanOperation());
+    // Lookups should always be 'complete'
+    assert(complete || m_operation.getQueryOperationDef().isScanOperation());
     m_subScanComplete = complete; 
   }
 
   /** 
-   * This method is only relevant for result streams of scan operations. It 
+   * This method 
    * returns true if this result stream holds the last batch of a sub scan
    * This means that it is the last batch of the scan that was instantiated 
    * from the current batch of its parent operation.
    */
   bool isSubScanComplete() const
   { 
-    assert(m_operation.getQueryOperationDef().isScanOperation());
+    // Lookups should always be 'complete'
+    assert(m_subScanComplete || m_operation.getQueryOperationDef().isScanOperation());
     return m_subScanComplete; 
   }
 
   /** Variant of isSubScanComplete() above which checks that this resultstream
    * and all its descendants have consumed all batches of rows instantiated 
-   * from their parent operation(s). */
+   * from their parent operation(s).
+   */
   bool isAllSubScansComplete() const;
 
   /** For debugging.*/
@@ -530,7 +533,7 @@ NdbResultStream::NdbResultStream(NdbQuer
   m_operation(operation),
   m_iterState(Iter_notStarted),
   m_currentRow(tupleNotFound),
-  m_subScanComplete(false),
+  m_subScanComplete(true),
   m_tupleSet(NULL)
 {};
 
@@ -578,8 +581,10 @@ NdbResultStream::reset()
 
   clearTupleSet();
   m_receiver.prepareSend();
-  /* If this stream will get new rows in the next batch, then so will
-   * all of its descendants.*/
+  /**
+   * If this stream will get new rows in the next batch, then so will
+   * all of its descendants.
+   */
   for (Uint32 childNo = 0; childNo < m_operation.getNoOfChildOperations();
        childNo++)
   {
@@ -605,8 +610,10 @@ NdbResultStream::clearTupleSet()
 bool
 NdbResultStream::isAllSubScansComplete() const
 { 
-  if (m_operation.getQueryOperationDef().isScanOperation() && 
-      !m_subScanComplete)
+  // Lookups should always be 'complete'
+  assert(m_subScanComplete || m_operation.getQueryOperationDef().isScanOperation());
+
+  if (!m_subScanComplete)
     return false;
 
   for (Uint32 childNo = 0; childNo < m_operation.getNoOfChildOperations(); 
@@ -2814,31 +2821,21 @@ NdbQueryImpl::sendFetchMore(NdbRootFragm
   assert(!emptyFrag.finalBatchReceived());
   assert(m_queryDef.isScanQuery());
 
+  const Uint32 fragNo = emptyFrag.getFragNo();
   emptyFrag.reset();
 
   for (unsigned opNo=0; opNo<m_countOperations; opNo++) 
   {
-    const NdbQueryOperationImpl& op = getQueryOperation(opNo);
-    // Check if this is a leaf scan.
-    if (!op.getQueryOperationDef().hasScanDescendant() &&
-        op.getQueryOperationDef().isScanOperation())
-    {
-      // Find first scan ancestor that is not finished.
-      const NdbQueryOperationImpl* ancestor = &op;
-      while (ancestor != NULL && 
-             (!ancestor->getQueryOperationDef().isScanOperation() ||
-              ancestor->getResultStream(emptyFrag.getFragNo())
-              .isSubScanComplete())
-              )
-      {
-        ancestor = ancestor->getParentOperation();
-      }
-      if (ancestor!=NULL)
-      {
-        /* Reset ancestor and all its descendants, since all these
-         * streams will get a new set of rows in the next batch. */ 
-        ancestor->getResultStream(emptyFrag.getFragNo()).reset();
-      }
+    NdbResultStream& resultStream = 
+       getQueryOperation(opNo).getResultStream(fragNo);
+
+    if (!resultStream.isSubScanComplete())
+    {
+      /**
+       * Reset resultstream and all its descendants, since all these
+       * streams will get a new set of rows in the next batch.
+       */ 
+      resultStream.reset();
     }
   }
 
@@ -3946,7 +3943,7 @@ NdbQueryOperationImpl
   if (myClosestScan != NULL)
   {
 
-#ifdef TEST_SCANREQ
+#ifdef TEST_NEXTREQ
     // To force usage of SCAN_NEXTREQ even for small scans resultsets
     if (this == &getRoot())
     {
@@ -4072,8 +4069,7 @@ NdbQueryOperationImpl::prepareReceiver()
                           0 /*key_size*/, 
                           0 /*read_range_no*/, 
                           getRowSize(),
-                          rowBuf,
-                          0);
+                          rowBuf);
     m_resultStreams[i]->getReceiver().prepareSend();
   }
   // So that we can test for for buffer overrun.
@@ -4703,21 +4699,18 @@ NdbQueryOperationImpl::execSCAN_TABCONF(
   for (Uint32 opNo = 0; opNo < queryDef.getNoOfOperations(); opNo++)
   {
     const NdbQueryOperationImpl& op = m_queryImpl.getQueryOperation(opNo);
-    /* Find the node number seen by the SPJ block. Since a unique index
+    /**
+     * Find the node number seen by the SPJ block. Since a unique index
      * operation will have two distincts nodes in the tree used by the
-     * SPJ block, this number may be different from 'opNo'.*/
+     * SPJ block, this number may be different from 'opNo'.
+     */
     const Uint32 internalOpNo = op.getQueryOperationDef().getQueryOperationId();
     assert(internalOpNo >= opNo);
-    const bool maskSet = ((nodeMask >> internalOpNo) & 1) == 1;
+    const bool complete = ((nodeMask >> internalOpNo) & 1) == 0;
 
-    if (op.getQueryOperationDef().isScanOperation())
-    {
-      rootFrag->getResultStream(opNo).setSubScanComplete(!maskSet);
-    }
-    else
-    {
-      assert(!maskSet);
-    }
+    // Lookups should always be 'complete'
+    assert(complete ||  op.getQueryOperationDef().isScanOperation());
+    rootFrag->getResultStream(opNo).setSubScanCompletion(complete);
   }
   // Check that nodeMask does not have more bits than we have operations. 
   assert(nodeMask >> 

=== modified file 'storage/ndb/src/ndbapi/NdbReceiver.cpp'
--- a/storage/ndb/src/ndbapi/NdbReceiver.cpp	2011-04-06 14:16:13 +0000
+++ b/storage/ndb/src/ndbapi/NdbReceiver.cpp	2011-05-11 13:31:44 +0000
@@ -60,7 +60,6 @@ NdbReceiver::init(ReceiverType type, boo
     m_record.m_row_buffer= NULL;
     m_record.m_row_offset= 0;
     m_record.m_read_range_no= false;
-    m_record.m_column_count= 0;
   }
   theFirstRecAttr = NULL;
   theCurrentRecAttr = NULL;
@@ -216,8 +215,7 @@ NdbReceiver::calculate_batch_size(Uint32
 void
 NdbReceiver::do_setup_ndbrecord(const NdbRecord *ndb_record, Uint32 batch_size,
                                 Uint32 key_size, Uint32 read_range_no,
-                                Uint32 rowsize, char *row_buffer,
-                                Uint32 column_count)
+                                Uint32 rowsize, char *row_buffer)
 {
   m_using_ndb_record= true;
   m_record.m_ndb_record= ndb_record;
@@ -225,7 +223,6 @@ NdbReceiver::do_setup_ndbrecord(const Nd
   m_record.m_row_buffer= row_buffer;
   m_record.m_row_offset= rowsize;
   m_record.m_read_range_no= read_range_no;
-  m_record.m_column_count= column_count;
 }
 
 //static
@@ -261,26 +258,6 @@ NdbReceiver::ndbrecord_rowsize(const Ndb
   return rowsize;
 }
 
-NdbRecAttr*
-NdbReceiver::copyout(NdbReceiver & dstRec){
-  assert(!m_using_ndb_record);
-  NdbRecAttr *src = m_rows[m_current_row++];
-  NdbRecAttr *dst = dstRec.theFirstRecAttr;
-  NdbRecAttr *start = src;
-  Uint32 tmp = m_recattr.m_hidden_count;
-  while(tmp--)
-    src = src->next();
-  
-  while(dst){
-    Uint32 len = src->get_size_in_bytes();
-    dst->receive_data((Uint32*)src->aRef(), len);
-    src = src->next();
-    dst = dst->next();
-  }
-
-  return start;
-}
-
 /**
  * pad
  * This function determines how much 'padding' should be applied

=== modified file 'storage/ndb/src/ndbapi/NdbScanOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbScanOperation.cpp	2011-05-04 14:45:46 +0000
+++ b/storage/ndb/src/ndbapi/NdbScanOperation.cpp	2011-05-17 12:47:21 +0000
@@ -395,8 +395,6 @@ NdbScanOperation::generatePackedReadAIs(
     columnCount++;
   }
 
-  theReceiver.m_record.m_column_count= columnCount;
-
   int result= 0;
 
   /* Are there any columns to read via NdbRecord? 
@@ -2334,8 +2332,7 @@ int NdbScanOperation::prepareSendScan(Ui
   {
     m_receivers[i]->do_setup_ndbrecord(m_attribute_record, batch_size,
                                        key_size, m_read_range_no,
-                                       rowsize, buf,
-                                       theReceiver.m_record.m_column_count);
+                                       rowsize, buf);
     buf+= bufsize;
   }
 

=== modified file 'storage/ndb/src/ndbapi/Ndbinit.cpp'
--- a/storage/ndb/src/ndbapi/Ndbinit.cpp	2011-04-15 06:29:59 +0000
+++ b/storage/ndb/src/ndbapi/Ndbinit.cpp	2011-05-04 09:35:08 +0000
@@ -201,8 +201,7 @@ NdbImpl::NdbImpl(Ndb_cluster_connection
     m_transporter_facade(ndb_cluster_connection->m_impl.m_transporter_facade),
     m_dictionary(ndb),
     theCurrentConnectIndex(0),
-    theNdbObjectIdMap(m_transporter_facade->theMutexPtr,
-		      1024,1024),
+    theNdbObjectIdMap(1024,1024),
     theNoOfDBnodes(0),
     theWaiter(this),
     m_ev_op(0),

=== modified file 'storage/ndb/src/ndbapi/ObjectMap.cpp'
--- a/storage/ndb/src/ndbapi/ObjectMap.cpp	2011-04-07 14:02:50 +0000
+++ b/storage/ndb/src/ndbapi/ObjectMap.cpp	2011-05-04 09:35:08 +0000
@@ -18,13 +18,13 @@
 
 #include "ObjectMap.hpp"
 
-NdbObjectIdMap::NdbObjectIdMap(NdbMutex* mutex, Uint32 sz, Uint32 eSz)
+NdbObjectIdMap::NdbObjectIdMap(Uint32 sz, Uint32 eSz):
+  m_expandSize(eSz),
+  m_size(0),
+  m_firstFree(InvalidId),
+  m_lastFree(InvalidId),
+  m_map(0)
 {
-  m_size = 0;
-  m_firstFree = InvalidId;
-  m_map = 0;
-  m_mutex = mutex;
-  m_expandSize = eSz;
   expand(sz);
 #ifdef DEBUG_OBJECTMAP
   ndbout_c("NdbObjectIdMap:::NdbObjectIdMap(%u)", sz);
@@ -33,12 +33,14 @@ NdbObjectIdMap::NdbObjectIdMap(NdbMutex*
 
 NdbObjectIdMap::~NdbObjectIdMap()
 {
+  assert(checkConsistency());
   free(m_map);
+  m_map = NULL;
 }
 
 int NdbObjectIdMap::expand(Uint32 incSize)
 {
-  NdbMutex_Lock(m_mutex);
+  assert(checkConsistency());
   Uint32 newSize = m_size + incSize;
   MapEntry * tmp = (MapEntry*)realloc(m_map, newSize * sizeof(MapEntry));
 
@@ -46,20 +48,45 @@ int NdbObjectIdMap::expand(Uint32 incSiz
   {
     m_map = tmp;
     
-    for(Uint32 i = m_size; i < newSize; i++){
-      m_map[i].m_next = 2 * (i + 1) + 1;
+    for(Uint32 i = m_size; i < newSize-1; i++)
+    {
+      m_map[i].setNext(i+1);
     }
-    m_firstFree = (2 * m_size) + 1;
-    m_map[newSize-1].m_next = Uint32(InvalidId);
+    m_firstFree = m_size;
+    m_lastFree = newSize - 1;
+    m_map[newSize-1].setNext(InvalidId);
     m_size = newSize;
+    assert(checkConsistency());
   }
   else
   {
-    NdbMutex_Unlock(m_mutex);
     g_eventLogger->error("NdbObjectIdMap::expand: realloc(%u*%lu) failed",
                          newSize, sizeof(MapEntry));
     return -1;
   }
-  NdbMutex_Unlock(m_mutex);
   return 0;
 }
+
+bool NdbObjectIdMap::checkConsistency()
+{
+  if (m_firstFree == InvalidId)
+  {
+    for (Uint32 i = 0; i<m_size; i++)
+    {
+      if (m_map[i].isFree())
+      {
+        assert(false);
+        return false;
+      }
+    }
+    return true;
+  }
+
+  Uint32 i = m_firstFree;
+  while (m_map[i].getNext() != InvalidId)
+  {
+    i = m_map[i].getNext();
+  }
+  assert(i == m_lastFree);
+  return i == m_lastFree;
+}

=== modified file 'storage/ndb/src/ndbapi/ObjectMap.hpp'
--- a/storage/ndb/src/ndbapi/ObjectMap.hpp	2011-04-07 14:02:50 +0000
+++ b/storage/ndb/src/ndbapi/ObjectMap.hpp	2011-05-04 09:35:08 +0000
@@ -20,7 +20,6 @@
 #define NDB_OBJECT_ID_MAP_HPP
 
 #include <ndb_global.h>
-//#include <NdbMutex.h>
 #include <NdbOut.hpp>
 
 #include <EventLogger.hpp>
@@ -31,11 +30,11 @@ extern EventLogger * g_eventLogger;
 /**
   * Global ObjectMap
   */
-class NdbObjectIdMap //: NdbLockable
+class NdbObjectIdMap
 {
 public:
-  STATIC_CONST( InvalidId = ~(Uint32)0 );
-  NdbObjectIdMap(NdbMutex*, Uint32 initalSize = 128, Uint32 expandSize = 10);
+  STATIC_CONST( InvalidId = 0x7fffffff );
+  NdbObjectIdMap(Uint32 initalSize, Uint32 expandSize);
   ~NdbObjectIdMap();
 
   Uint32 map(void * object);
@@ -43,34 +42,75 @@ public:
   
   void * getObject(Uint32 id);
 private:
+  const Uint32 m_expandSize;
   Uint32 m_size;
-  Uint32 m_expandSize;
   Uint32 m_firstFree;
-  union MapEntry {
-     UintPtr m_next;
-     void * m_obj;
-  } * m_map;
+  /**
+   * We put released entries at the end of the free list. That way, we delay
+   * re-use of an object id as long as possible. This minimizes the chance
+   * of sending an incoming message to the wrong object because the recipient
+   * object id was reused. 
+   */
+  Uint32 m_lastFree;
+
+  class MapEntry
+  {
+  public:
+    bool isFree() const
+    { 
+      return (m_val & 1) == 1; 
+    }
+
+    Uint32 getNext() const
+    {
+      assert(isFree());
+      return static_cast<Uint32>(m_val >> 1);
+    }
+
+    void setNext(Uint32 next)
+    { 
+      m_val = (next << 1) | 1; 
+    }
+
+    void* getObj() const
+    {
+      assert((m_val & 3) == 0);
+      return reinterpret_cast<void*>(m_val);
+    }
+    
+    void setObj(void* obj)
+    { 
+      m_val = reinterpret_cast<UintPtr>(obj); 
+      assert((m_val & 3) == 0);
+    }
+    
+  private:
+    /**
+     * This holds either a pointer to a mapped object *or* the index of the
+     * next entry in the free list. If it is a pointer, then the two least
+     * significant bits should be zero (requiring all mapped objects to be
+     * four-byte aligned). If it is an index, then bit 0 should be set.
+     */ 
+    UintPtr m_val;
+  };
+
+  MapEntry* m_map;
 
-  NdbMutex * m_mutex;
   int expand(Uint32 newSize);
+  // For debugging purposes.
+  bool checkConsistency();
 };
 
 inline
 Uint32
-NdbObjectIdMap::map(void * object){
-  
-  //  lock();
-  assert((UintPtr(object) & 3) == 0);
-  
-  if(m_firstFree == Uint32(InvalidId) && expand(m_expandSize))
+NdbObjectIdMap::map(void * object)
+{
+  if(m_firstFree == InvalidId && expand(m_expandSize))
     return InvalidId;
   
-  Uint32 ff = m_firstFree >> 1;
-  assert(UintPtr(m_map[ff].m_next) == Uint32(m_map[ff].m_next));
-  m_firstFree = Uint32(m_map[ff].m_next);
-  m_map[ff].m_obj = object;
-  
-  //  unlock();
+  const Uint32 ff = m_firstFree;
+  m_firstFree = m_map[ff].getNext();
+  m_map[ff].setObj(object);
   
   DBUG_PRINT("info",("NdbObjectIdMap::map(0x%lx) %u", (long) object, ff<<2));
 
@@ -79,26 +119,37 @@ NdbObjectIdMap::map(void * object){
 
 inline
 void *
-NdbObjectIdMap::unmap(Uint32 id, void *object){
-
-  Uint32 i = id>>2;
+NdbObjectIdMap::unmap(Uint32 id, void *object)
+{
+  const Uint32 i = id>>2;
 
-  //  lock();
-  if(i < m_size){
-    void * obj = m_map[i].m_obj;
-    if (object == obj) {
-      m_map[i].m_next = m_firstFree;
-      m_firstFree = (2 * i) + 1;
-    } else {
+  assert(i < m_size);
+  if(i < m_size)
+  {
+    void * const obj = m_map[i].getObj();
+    if (object == obj) 
+    {
+      m_map[i].setNext(InvalidId);
+      if (m_firstFree == InvalidId)
+      {
+        m_firstFree = i;
+      }
+      else
+      {
+        m_map[m_lastFree].setNext(i);
+      }
+      m_lastFree = i;
+    } 
+    else 
+    {
       g_eventLogger->error("NdbObjectIdMap::unmap(%u, 0x%lx) obj=0x%lx",
                            id, (long) object, (long) obj);
       DBUG_PRINT("error",("NdbObjectIdMap::unmap(%u, 0x%lx) obj=0x%lx",
                           id, (long) object, (long) obj));
+      assert(false);
       return 0;
     }
     
-    //  unlock();
-    
     DBUG_PRINT("info",("NdbObjectIdMap::unmap(%u) obj=0x%lx", id, (long) obj));
     
     return obj;
@@ -107,12 +158,21 @@ NdbObjectIdMap::unmap(Uint32 id, void *o
 }
 
 inline void *
-NdbObjectIdMap::getObject(Uint32 id){
+NdbObjectIdMap::getObject(Uint32 id)
+{
   // DBUG_PRINT("info",("NdbObjectIdMap::getObject(%u) obj=0x%x", id,  m_map[id>>2].m_obj));
   id >>= 2;
-  if(id < m_size){
-    if ((m_map[id].m_next & 3) == 0)
-      return m_map[id].m_obj;
+  assert(id < m_size);
+  if(id < m_size)
+  {
+    if(m_map[id].isFree())
+    {
+      return 0;
+    }
+    else
+    {
+      return m_map[id].getObj();
+    }
   }
   return 0;
 }

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2011-04-28 12:07:13 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2011-05-07 06:17:02 +0000
@@ -187,7 +187,7 @@ ErrorBundle ErrorCodes[] = {
   { 805,  DMEC, TR, "Out of attrinfo records in tuple manager" },
   { 830,  DMEC, TR, "Out of add fragment operation records" },
   { 873,  DMEC, TR, "Out of attrinfo records for scan in tuple manager" },
-  { 899,  DMEC, TR, "Rowid already allocated" },
+  { 899,  DMEC, IE, "Internal error: rowid already allocated" },
   { 1217, DMEC, TR, "Out of operation records in local data manager (increase MaxNoOfLocalOperations)" },
   { 1218, DMEC, TR, "Send Buffers overloaded in NDB kernel" },
   { 1220, DMEC, TR, "REDO log files overloaded (increase FragmentLogFileSize)" },
@@ -797,8 +797,6 @@ ErrorBundle ErrorCodes[] = {
     "Parameter value has an incompatible datatype" },
   { QRY_CHAR_PARAMETER_TRUNCATED, DMEC, AE, 
     "Character Parameter was right truncated" },
-  { QRY_MULTIPLE_SCAN_BRANCHES, DMEC, AE, 
-    "Query has scans that are not descendants/ancestors of each other." },
   { QRY_MULTIPLE_SCAN_SORTED, DMEC, AE, 
     "Query with multiple scans may not be sorted." },
   { QRY_SEQUENTIAL_SCAN_SORTED, DMEC, AE, 

=== modified file 'storage/ndb/test/ndbapi/testBasic.cpp'
--- a/storage/ndb/test/ndbapi/testBasic.cpp	2011-04-07 07:22:49 +0000
+++ b/storage/ndb/test/ndbapi/testBasic.cpp	2011-05-07 06:17:02 +0000
@@ -2267,6 +2267,154 @@ runBug59496_case2(NDBT_Context* ctx, NDB
   return NDBT_OK;
 }
 
+#define CHK_RET_FAILED(x) if (!(x)) { ndbout_c("Failed on line: %u", __LINE__); return NDBT_FAILED; }
+
+int
+runTest899(NDBT_Context* ctx, NDBT_Step* step)
+{
+  Ndb* pNdb = GETNDB(step);
+  const NdbDictionary::Table* pTab = ctx->getTab();
+
+  const int rows = ctx->getNumRecords();
+  const int loops = ctx->getNumLoops();
+  const int batch = ctx->getProperty("Batch", Uint32(50));
+  const int until_stopped = ctx->getProperty("UntilStopped");
+
+  const NdbRecord * pRowRecord = pTab->getDefaultRecord();
+  CHK_RET_FAILED(pRowRecord != 0);
+
+  const Uint32 len = NdbDictionary::getRecordRowLength(pRowRecord);
+  Uint8 * pRow = new Uint8[len];
+
+  int count_ok = 0;
+  int count_failed = 0;
+  int count_899 = 0;
+  for (int i = 0; i < loops || (until_stopped && !ctx->isTestStopped()); i++)
+  {
+    ndbout_c("loop: %d",i);
+    int result = 0;
+    for (int rowNo = 0; rowNo < rows;)
+    {
+      NdbTransaction* pTrans = pNdb->startTransaction();
+      CHK_RET_FAILED(pTrans != 0);
+
+      for (int b = 0; rowNo < rows && b < batch; rowNo++, b++)
+      {
+        bzero(pRow, len);
+
+        HugoCalculator calc(* pTab);
+
+        NdbOperation::OperationOptions opts;
+        bzero(&opts, sizeof(opts));
+
+        const NdbOperation* pOp = 0;
+        switch(i % 2){
+        case 0:
+          calc.setValues(pRow, pRowRecord, rowNo, rand());
+          pOp = pTrans->writeTuple(pRowRecord, (char*)pRow,
+                                   pRowRecord, (char*)pRow,
+                                   0,
+                                   &opts,
+                                   sizeof(opts));
+          result = pTrans->execute(NoCommit);
+          break;
+        case 1:
+          calc.setValues(pRow, pRowRecord, rowNo, rand());
+          pOp = pTrans->deleteTuple(pRowRecord, (char*)pRow,
+                                    pRowRecord, (char*)pRow,
+                                    0,
+                                    &opts,
+                                    sizeof(opts));
+          result = pTrans->execute(NoCommit, AO_IgnoreError);
+          break;
+        }
+
+        CHK_RET_FAILED(pOp != 0);
+
+        if (result != 0)
+        {
+          goto found_error;
+        }
+      }
+      result = pTrans->execute(Commit);
+
+      if (result != 0)
+      {
+    found_error:
+        count_failed++;
+        NdbError err = pTrans->getNdbError();
+        if (! (err.status == NdbError::TemporaryError ||
+               err.classification == NdbError::NoDataFound ||
+               err.classification == NdbError::ConstraintViolation))
+        {
+          ndbout << err << endl;
+        }
+        CHK_RET_FAILED(err.status == NdbError::TemporaryError ||
+                       err.classification == NdbError::NoDataFound ||
+                       err.classification == NdbError::ConstraintViolation);
+        if (err.code == 899)
+        {
+          count_899++;
+          ndbout << err << endl;
+        }
+      }
+      else
+      {
+        count_ok++;
+      }
+      pTrans->close();
+    }
+  }
+
+  ndbout_c("count_ok: %d count_failed: %d (899: %d)",
+           count_ok, count_failed, count_899);
+  delete [] pRow;
+
+  return count_899 == 0 ? NDBT_OK : NDBT_FAILED;
+}
+
+int
+runInit899(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NdbRestarter restarter;
+  int val = DumpStateOrd::DihMinTimeBetweenLCP;
+  restarter.dumpStateAllNodes(&val, 1);
+
+  Ndb* pNdb = GETNDB(step);
+  const NdbDictionary::Table* pTab = ctx->getTab();
+  const NdbDictionary::Table * pTab2 = pNdb->getDictionary()->
+    getTable(pTab->getName());
+
+  int tableId = pTab2->getObjectId();
+  int val2[] = { DumpStateOrd::BackupErrorInsert, 10042, tableId };
+
+  for (int i = 0; i < restarter.getNumDbNodes(); i++)
+  {
+    if (i & 1)
+    {
+      int nodeId = restarter.getDbNodeId(i);
+      ndbout_c("Setting slow LCP of table %d on node %d",
+               tableId, nodeId);
+      restarter.dumpStateOneNode(nodeId, val2, 3);
+    }
+  }
+
+  return NDBT_OK;
+}
+
+int
+runEnd899(NDBT_Context* ctx, NDBT_Step* step)
+{
+  // reset LCP speed
+  NdbRestarter restarter;
+  int val[] = { DumpStateOrd::DihMinTimeBetweenLCP, 0 };
+  restarter.dumpStateAllNodes(val, 2);
+
+  restarter.insertErrorInAllNodes(0);
+  return NDBT_OK;
+}
+
+
 NDBT_TESTSUITE(testBasic);
 TESTCASE("PkInsert", 
 	 "Verify that we can insert and delete from this table using PK"
@@ -2618,6 +2766,13 @@ TESTCASE("Bug59496_case2", "")
   STEP(runBug59496_case2);
   STEPS(runBug59496_scan, 10);
 }
+TESTCASE("899", "")
+{
+  INITIALIZER(runLoadTable);
+  INITIALIZER(runInit899);
+  STEP(runTest899);
+  FINALIZER(runEnd899);
+}
 NDBT_TESTSUITE_END(testBasic);
 
 #if 0

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2011-04-28 07:47:53 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2011-05-07 06:17:02 +0000
@@ -20,7 +20,7 @@ max-time: 900
 cmd: testIndex
 args: -n DeferredMixedLoad T1 T6 T13
 
-max-time: 900
+max-time: 1800
 cmd: testIndex
 args: -n DeferredMixedLoadError T1 T6 T13
 
@@ -32,6 +32,10 @@ max-time: 900
 cmd: testIndex
 args: -n NF_Mixed T1 T6 T13
 
+max-time: 900
+cmd: testBasic
+args: -r 5000 -n 899 T15 D1 D2
+
 max-time: 600
 cmd: atrt-testBackup
 args: -n NFMaster T1

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-5.1-telco-7.0-wl4163 branch (pekka:4371 to 4373) Pekka Nousiainen19 May