List: Commits — « Previous Message | Next Message »
From: Pekka Nousiainen    Date: July 1, 2011 7:14pm
Subject: bzr push into mysql-5.1-telco-7.0-wl4124-new1 branch (pekka.nousiainen:4405 to 4406)
View as plain text  
 4406 Pekka Nousiainen	2011-07-01 [merge]
      merge telco-7.0 to wl4124-new1

    added:
      mysql-test/suite/ndb/bug12712109.ini
      mysql-test/suite/ndb_rpl/r/ndb_rpl_circular_2ch_rep_status.result
      mysql-test/suite/ndb_rpl/r/ndb_rpl_init_rep_status.result
      mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.cnf
      mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_init_rep_status.test
      sql/ndb_mi.cc
      sql/ndb_mi.h
      storage/ndb/compile-cluster
      storage/ndb/include/kernel/signaldata/GetConfig.hpp
      storage/ndb/src/common/debugger/signaldata/GetConfig.cpp
    modified:
      configure.in
      mysql-test/suite/funcs_1/r/ndb_views.result
      mysql-test/suite/ndb/r/ndb_basic.result
      mysql-test/suite/ndb/r/ndb_config.result
      mysql-test/suite/ndb/t/ndb_config.test
      sql/Makefile.am
      sql/ha_ndb_index_stat.cc
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster.h
      sql/ha_ndbcluster_binlog.cc
      storage/ndb/CMakeLists.txt
      storage/ndb/Makefile.am
      storage/ndb/include/kernel/GlobalSignalNumbers.h
      storage/ndb/include/kernel/signaldata/SignalData.hpp
      storage/ndb/include/mgmapi/mgmapi.h
      storage/ndb/include/ndb_version.h.in
      storage/ndb/include/ndbapi/NdbIndexStat.hpp
      storage/ndb/ndb_configure.m4
      storage/ndb/src/common/debugger/signaldata/CMakeLists.txt
      storage/ndb/src/common/debugger/signaldata/Makefile.am
      storage/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp
      storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
      storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp
      storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
      storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp
      storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
      storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp
      storage/ndb/src/kernel/blocks/suma/Suma.cpp
      storage/ndb/src/kernel/vm/Configuration.cpp
      storage/ndb/src/kernel/vm/Configuration.hpp
      storage/ndb/src/mgmapi/mgmapi.cpp
      storage/ndb/src/mgmapi/mgmapi_internal.h
      storage/ndb/src/mgmsrv/ConfigInfo.cpp
      storage/ndb/src/mgmsrv/Defragger.hpp
      storage/ndb/src/mgmsrv/MgmtSrvr.cpp
      storage/ndb/src/mgmsrv/MgmtSrvr.hpp
      storage/ndb/src/mgmsrv/Services.cpp
      storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp
      storage/ndb/src/ndbapi/NdbIndexStatImpl.hpp
      storage/ndb/src/ndbapi/NdbQueryBuilder.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilder.hpp
      storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
      storage/ndb/src/ndbapi/NdbTransaction.cpp
      storage/ndb/test/ndbapi/testMgm.cpp
      storage/ndb/test/ndbapi/testMgmd.cpp
      storage/ndb/test/ndbapi/testScan.cpp
      storage/ndb/test/run-test/daily-basic-tests.txt
      storage/ndb/tools/ndb_config.cpp
 4405 Pekka Nousiainen	2011-06-24 [merge]
      merge telco-7.0 to wl4124-new1

    added:
      mysql-test/suite/ndb/r/ndb_row_count.result
      mysql-test/suite/ndb/t/ndb_row_count.test
    modified:
      mysql-test/suite/ndb/r/ndb_condition_pushdown.result
      mysql-test/suite/ndb/r/ndb_index_unique.result
      mysql-test/t/ctype_cp932_binlog_stm.test
      sql/ha_ndb_index_stat.cc
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster.h
      storage/ndb/include/mgmapi/mgmapi.h
      storage/ndb/include/mgmapi/ndb_logevent.h
      storage/ndb/include/ndb_types.h.in
      storage/ndb/src/CMakeLists.txt
      storage/ndb/src/common/debugger/CMakeLists.txt
      storage/ndb/src/common/debugger/signaldata/CMakeLists.txt
      storage/ndb/src/common/logger/CMakeLists.txt
      storage/ndb/src/common/mgmcommon/CMakeLists.txt
      storage/ndb/src/common/portlib/CMakeLists.txt
      storage/ndb/src/common/transporter/CMakeLists.txt
      storage/ndb/src/common/transporter/TransporterRegistry.cpp
      storage/ndb/src/common/util/CMakeLists.txt
      storage/ndb/src/kernel/CMakeLists.txt
      storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
      storage/ndb/src/mgmapi/CMakeLists.txt
      storage/ndb/src/mgmapi/mgmapi.cpp
      storage/ndb/src/mgmsrv/CMakeLists.txt
      storage/ndb/src/mgmsrv/MgmtSrvr.cpp
      storage/ndb/src/mgmsrv/MgmtSrvr.hpp
      storage/ndb/src/ndbapi/CMakeLists.txt
      storage/ndb/src/ndbapi/ClusterMgr.cpp
      storage/ndb/test/ndbapi/testBasic.cpp
      storage/ndb/test/ndbapi/testMgmd.cpp
      storage/ndb/tools/ndb_dump_frm_data.cpp
=== modified file 'configure.in'
--- a/configure.in	2011-05-24 08:44:31 +0000
+++ b/configure.in	2011-06-29 08:14:18 +0000
@@ -12,7 +12,7 @@ dnl
 dnl When changing the major version number please also check the switch
 dnl statement in mysqlbinlog::check_master_version().  You may also need
 dnl to update version.c in ndb.
-AC_INIT([MySQL Server], [5.1.56-ndb-7.0.26], [], [mysql])
+AC_INIT([MySQL Server], [5.1.56-ndb-7.0.27], [], [mysql])
 
 AC_CONFIG_SRCDIR([sql/mysqld.cc])
 AC_CANONICAL_SYSTEM

=== modified file 'mysql-test/suite/funcs_1/r/ndb_views.result'
--- a/mysql-test/suite/funcs_1/r/ndb_views.result	2009-10-27 16:44:30 +0000
+++ b/mysql-test/suite/funcs_1/r/ndb_views.result	2011-06-25 14:19:31 +0000
@@ -21439,7 +21439,7 @@ f1
 1.000
 EXPLAIN SELECT * FROM test3.v32;
 id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
-1	SIMPLE	t1	ALL	NULL	NULL	NULL	NULL	1	
+1	SIMPLE	t1	ALL	NULL	NULL	NULL	NULL	2	
 DROP VIEW test3.v0;
 SHOW CREATE VIEW test3.v32;
 View	Create View	character_set_client	collation_connection

=== added file 'mysql-test/suite/ndb/bug12712109.ini'
--- a/mysql-test/suite/ndb/bug12712109.ini	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb/bug12712109.ini	2011-07-01 09:16:46 +0000
@@ -0,0 +1,8 @@
+[TCP DEFAULT]
+OverloadLimit=10M
+
+[ndb_mgmd]
+Hostname=localhost
+[ndbd]
+[ndbd]
+[mysqld]

=== modified file 'mysql-test/suite/ndb/r/ndb_basic.result'
--- a/mysql-test/suite/ndb/r/ndb_basic.result	2011-06-16 18:16:01 +0000
+++ b/mysql-test/suite/ndb/r/ndb_basic.result	2011-06-29 23:28:01 +0000
@@ -76,6 +76,7 @@ Ndb_number_of_data_nodes	#
 Ndb_number_of_ready_data_nodes	#
 Ndb_pruned_scan_count	#
 Ndb_scan_count	#
+Ndb_slave_max_replicated_epoch	#
 SHOW GLOBAL VARIABLES LIKE 'ndb\_%';
 Variable_name	Value
 ndb_autoincrement_prefetch_sz	#

=== modified file 'mysql-test/suite/ndb/r/ndb_config.result'
--- a/mysql-test/suite/ndb/r/ndb_config.result	2010-10-13 13:56:13 +0000
+++ b/mysql-test/suite/ndb/r/ndb_config.result	2011-07-01 17:15:00 +0000
@@ -37,3 +37,7 @@ tcp,3,4,55,3 tcp,3,5,55,3 tcp,3,6,55,3 t
 == 18 == bug56185
 0,0-65535
 2,37-48 1,1-2
+== 19 == bug12712109
+10485760 10485760 10485760 10485760 10485760
+1
+1

=== modified file 'mysql-test/suite/ndb/t/ndb_config.test'
--- a/mysql-test/suite/ndb/t/ndb_config.test	2010-08-27 12:12:51 +0000
+++ b/mysql-test/suite/ndb/t/ndb_config.test	2011-07-01 17:15:00 +0000
@@ -61,3 +61,11 @@ echo == 18 == bug56185;
 # Read bitmask value and enum from config.ini
 --exec $NDB_CONFIG --config-file=$MYSQL_TEST_DIR/suite/ndb/bug56185.ini --query=Arbitration,LockExecuteThreadToCPU --type=ndbd
 
+echo == 19 == bug12712109;
+--exec $NDB_CONFIG --config-file=$MYSQL_TEST_DIR/suite/ndb/bug12712109.ini --query=OverloadLimit --connections 2>&1
+
+# Read config generation number from nodes
+# From management server
+--exec $NDB_CONFIG --system --query=ConfigGenerationNumber
+# From a data node
+--exec $NDB_CONFIG --system --config-from-node=2 --query=ConfigGenerationNumber

=== added file 'mysql-test/suite/ndb_rpl/r/ndb_rpl_circular_2ch_rep_status.result'
--- a/mysql-test/suite/ndb_rpl/r/ndb_rpl_circular_2ch_rep_status.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_rpl/r/ndb_rpl_circular_2ch_rep_status.result	2011-06-29 23:28:01 +0000
@@ -0,0 +1,169 @@
+include/rpl_init.inc [topology=1->2,4->3]
+include/rpl_connect.inc [creating master]
+include/rpl_connect.inc [creating master1]
+include/rpl_connect.inc [creating slave]
+include/rpl_connect.inc [creating slave1]
+include/rpl_start_slaves.inc
+Cluster A servers have no epoch replication info
+select count(1) from mysql.ndb_apply_status;
+count(1)
+0
+Cluster A servers have no max replicated epoch value
+Master(1)
+select variable_name, variable_value from information_schema.global_status
+where variable_name='Ndb_slave_max_replicated_epoch';
+variable_name	variable_value
+NDB_SLAVE_MAX_REPLICATED_EPOCH	0
+Master1(3)
+select variable_name, variable_value from information_schema.global_status
+where variable_name='Ndb_slave_max_replicated_epoch';
+variable_name	variable_value
+NDB_SLAVE_MAX_REPLICATED_EPOCH	0
+Make a change originating at Cluster A
+Master(1)
+use test;
+create table t1 (a int primary key, b varchar(100)) engine=ndb;
+insert into t1 values (1, "Venice");
+Allow it to propagate to Cluster B
+Originate a second unrelated change at Cluster B, to allow us to wait for
+reverse propagation in the testcase
+Slave1 (4)
+insert into t1 values (2, "Death");
+Allow it to propagate to Cluster A
+Observe new entry in ndb_apply_status on Cluster A
+Master (1)
+select server_id from mysql.ndb_apply_status order by server_id;
+server_id
+1
+4
+Non-slave server on Cluster A will have no value for Max Replicated Epoch
+select variable_name, variable_value from information_schema.global_status
+where variable_name='Ndb_slave_max_replicated_epoch';
+variable_name	variable_value
+NDB_SLAVE_MAX_REPLICATED_EPOCH	0
+Slave server on Cluster A has current value for Max Replicated Epoch
+Master1 (3)
+Expect count 1
+select
+count(1)
+from
+information_schema.global_status,
+mysql.ndb_apply_status
+where
+server_id = 1
+and
+variable_name='Ndb_slave_max_replicated_epoch'
+    and
+variable_value = epoch;
+count(1)
+1
+Now wait for all replication to quiesce
+Now swap replication channels around
+include/rpl_stop_slaves.inc
+include/rpl_change_topology.inc [new topology=2->1,3->4]
+Get current master status on Cluster A new master (next pos in Binlog)
+Master1 (3)
+Flush logs to ensure any pending update (e.g. reflected apply_status write row)
+is skipped over.
+flush logs;
+Setup slave on Cluster B to use it
+Slave1 (4)
+Get current master status on Cluster B new master (next pos in Binlog)
+Slave (2)
+Flush logs to ensure any pending update (e.g. reflected apply_status write row)
+is skipped over.
+flush logs;
+Setup slave on Cluster A to use it
+Master (1)
+Master (1)
+Show that Cluster A Slave server (old master) has no Max replicated epoch before receiving data
+select variable_name, variable_value from information_schema.global_status
+where variable_name='Ndb_slave_max_replicated_epoch';
+variable_name	variable_value
+NDB_SLAVE_MAX_REPLICATED_EPOCH	0
+Master1 (3)
+Cluster A Master server (old slave) has old Max replicated epoch
+select
+count(1)
+from
+information_schema.global_status,
+mysql.ndb_apply_status
+where
+server_id = 1
+and
+variable_name='Ndb_slave_max_replicated_epoch'
+    and
+variable_value = epoch;
+count(1)
+1
+Now start slaves up
+include/rpl_start_slaves.inc
+Show that applying something from Cluster B causes the
+old Max Rep Epoch to be loaded from ndb_apply_status
+There is no new Max Rep Epoch from Cluster A as it has not changed
+anything yet
+Slave (2)
+insert into test.t1 values (3, "From the Sea");
+Allow to propagate to Cluster A
+Master (1)
+New Slave server on Cluster A has loaded old Max-Replicated-Epoch
+select server_id from mysql.ndb_apply_status order by server_id;
+server_id
+1
+2
+4
+select
+count(1)
+from
+information_schema.global_status,
+mysql.ndb_apply_status
+where
+server_id = 1
+and
+variable_name='Ndb_slave_max_replicated_epoch'
+    and
+variable_value = epoch;
+count(1)
+1
+Now make a new Cluster A change and see that the Max Replicated Epoch advances
+once it has propagated
+Master1 (3)
+insert into test.t1 values (4, "Brooke");
+Propagate to Cluster B
+Make change on Cluster B to allow waiting for reverse propagation
+Slave (2)
+insert into test.t1 values (5, "Rupert");
+Wait for propagation back to Cluster A
+Master (1)
+Show that Cluster A now has 2 different server_id entries in ndb_apply_status
+Those from the new master (server_id 3) are highest.
+select server_id from mysql.ndb_apply_status order by server_id;
+server_id
+1
+2
+3
+4
+select
+count(1)
+from
+information_schema.global_status,
+mysql.ndb_apply_status
+where
+server_id = 3
+and
+variable_name='Ndb_slave_max_replicated_epoch'
+    and
+variable_value = epoch;
+count(1)
+1
+local_server_with_max_epoch
+3
+Done
+drop table t1;
+include/rpl_stop_slaves.inc
+CHANGE MASTER TO IGNORE_SERVER_IDS= ();
+CHANGE MASTER TO IGNORE_SERVER_IDS= ();
+CHANGE MASTER TO IGNORE_SERVER_IDS= ();
+CHANGE MASTER TO IGNORE_SERVER_IDS= ();
+include/rpl_start_slaves.inc
+include/rpl_end.inc

=== added file 'mysql-test/suite/ndb_rpl/r/ndb_rpl_init_rep_status.result'
--- a/mysql-test/suite/ndb_rpl/r/ndb_rpl_init_rep_status.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_rpl/r/ndb_rpl_init_rep_status.result	2011-06-29 23:28:01 +0000
@@ -0,0 +1,78 @@
+include/master-slave.inc
+[connection master]
+reset master;
+stop slave;
+Generate something in the Masters Binlog
+use test;
+create table t1 (a int primary key, b int) engine=ndb;
+insert into t1 values (1,1);
+Initial state
+select * from mysql.ndb_apply_status;
+server_id	epoch	log_name	start_pos	end_pos
+select variable_value from information_schema.global_status
+where variable_name like '%Ndb_slave_max_replicated_epoch%';
+variable_value
+0
+select @slave_server_id:=(variable_value+0) from information_schema.global_variables
+where variable_name like 'server_id';
+@slave_server_id:=(variable_value+0)
+2
+Default, no data, max replicated epoch will be 0.
+reset slave;
+start slave;
+select server_id from mysql.ndb_apply_status order by server_id;
+server_id
+1
+select variable_value from information_schema.global_status
+where variable_name like 'Ndb_slave_max_replicated_epoch';
+variable_value
+0
+Default, load of own serverid from ndb_apply_status, should be 111
+drop table test.t1;
+stop slave;
+reset slave;
+insert into mysql.ndb_apply_status values (@slave_server_id, 111, 'Fictional log', 222, 333);
+start slave;
+select server_id from mysql.ndb_apply_status order by server_id;
+server_id
+1
+2
+select variable_value from information_schema.global_status
+where variable_name like 'Ndb_slave_max_replicated_epoch';
+variable_value
+111
+drop table test.t1;
+Check that reset slave resets Ndb_slave_max_replicated_epoch
+stop slave;
+select variable_value from information_schema.global_status
+where variable_name like 'Ndb_slave_max_replicated_epoch';
+variable_value
+111
+reset slave;
+select variable_value from information_schema.global_status
+where variable_name like 'Ndb_slave_max_replicated_epoch';
+variable_value
+0
+Multiple-channel, load highest of configured serverids, should be 222
+set @other_local_server_id=@slave_server_id+1;
+set @other_remote_server_id=@slave_server_id+2;
+insert into mysql.ndb_apply_status values (@slave_server_id, 111, 'Fictional log', 222, 333);
+insert into mysql.ndb_apply_status values (@other_local_server_id, 222, 'Fictional log', 222, 333);
+insert into mysql.ndb_apply_status values (@other_remote_server_id, 444, 'Fictional log', 222, 333);
+CHANGE MASTER TO IGNORE_SERVER_IDS=(3);;
+start slave;
+select server_id from mysql.ndb_apply_status order by server_id;
+server_id
+1
+2
+3
+4
+select variable_value from information_schema.global_status
+where variable_name like 'Ndb_slave_max_replicated_epoch';
+variable_value
+222
+stop slave;
+CHANGE MASTER TO IGNORE_SERVER_IDS= ();
+start slave;
+drop table test.t1;
+include/rpl_end.inc

=== added file 'mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.cnf'
--- a/mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.cnf	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.cnf	2011-06-29 23:28:01 +0000
@@ -0,0 +1,14 @@
+!include ndb_rpl_circular_2ch.cnf
+
+[mysqld.1.1]
+ndb-log-apply-status
+
+[mysqld.2.1]
+ndb-log-apply-status
+
+[mysqld.1.slave]
+ndb-log-apply-status
+
+[mysqld.2.slave]
+ndb-log-apply-status
+

=== added file 'mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.test'
--- a/mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.test	2011-06-29 23:28:01 +0000
@@ -0,0 +1,247 @@
+--source include/have_ndb.inc
+--source suite/ndb_rpl/ndb_master-slave_2ch.inc
+--source include/have_binlog_format_mixed_or_row.inc
+
+#
+# Test that the Maximum replicated epoch is maintained
+# as expected in a circular, 2 channel configuration.
+# The channels are swapped, and replication is restarted
+# The MaxReplicatedEpoch is reloaded from ndb_apply_status
+# for the Servers considered local (IGNORE_SERVER_IDS)
+#
+--connection master
+--echo Cluster A servers have no epoch replication info
+select count(1) from mysql.ndb_apply_status;
+
+--echo Cluster A servers have no max replicated epoch value
+--echo Master(1)
+select variable_name, variable_value from information_schema.global_status
+  where variable_name='Ndb_slave_max_replicated_epoch';
+--connection master1
+--echo Master1(3)
+select variable_name, variable_value from information_schema.global_status
+  where variable_name='Ndb_slave_max_replicated_epoch';
+
+--echo Make a change originating at Cluster A
+--connection master
+--echo Master(1)
+use test;
+create table t1 (a int primary key, b varchar(100)) engine=ndb;
+insert into t1 values (1, "Venice");
+
+--echo Allow it to propagate to Cluster B
+--sync_slave_with_master slave
+
+--echo Originate a second unrelated change at Cluster B, to allow us to wait for
+--echo reverse propagation in the testcase
+--connection slave1
+--echo Slave1 (4)
+insert into t1 values (2, "Death");
+
+--echo Allow it to propagate to Cluster A
+--sync_slave_with_master master1
+
+--echo Observe new entry in ndb_apply_status on Cluster A
+--connection master
+--echo Master (1)
+select server_id from mysql.ndb_apply_status order by server_id;
+
+--echo Non-slave server on Cluster A will have no value for Max Replicated Epoch
+select variable_name, variable_value from information_schema.global_status
+  where variable_name='Ndb_slave_max_replicated_epoch';
+
+--echo Slave server on Cluster A has current value for Max Replicated Epoch
+--connection master1
+--echo Master1 (3)
+--echo Expect count 1
+# Here we join the max rep epoch with ndb_apply_status for server id 1
+# (Our site's current master server)
+select
+    count(1)
+  from
+    information_schema.global_status,
+    mysql.ndb_apply_status
+  where
+    server_id = 1
+    and
+    variable_name='Ndb_slave_max_replicated_epoch'
+    and
+    variable_value = epoch;
+
+--echo Now wait for all replication to quiesce
+
+--echo Now swap replication channels around
+--source include/rpl_stop_slaves.inc
+--let $rpl_topology= 2->1,3->4
+--source include/rpl_change_topology.inc
+
+# We've changed the direction, but need to set binlog filenames
+# and positions
+
+#
+# 'Normally' we should use the ndb_apply_status max applied epoch,
+# then lookup ndb_binlog_index etc.
+# However, in this case (and probably in lots of real cases), no changes
+# have been made after the last applied epoch, so there is no epoch
+# after the current one, and therefore no entry in ndb_binlog_index
+# to get the correct position from.
+# We could just re-apply the last epoch applied, but that's imprecise,
+# and causes us to create an ndb_apply_status entry for Server 3 when
+# it has not really been master for those changes.
+# So we just look at the Master status instead.
+#
+#--echo Get max applied epochs from a server on each cluster
+#--connection slave
+#let $max_applied_cluster_a_epoch = query_get_value("SELECT MAX(epoch) AS epoch FROM mysql.ndb_apply_status WHERE server_id IN (1,3)", epoch, 1);
+#--connection master
+#let $max_applied_cluster_b_epoch = query_get_value("SELECT MAX(epoch) AS epoch FROM mysql.ndb_apply_status WHERE server_id IN (2,4)", epoch, 1);
+#
+#--echo Get corresponding Binlog filename + pos from new Master servers
+#--connection master1
+#eval select * from mysql.ndb_binlog_index where epoch > $max_applied_cluster_a_epoch ;
+#let $cluster_a_master_log_file = query_get_value("SELECT SUBSTRING_INDEX(File, '/', -1) as File from mysql.ndb_binlog_index WHERE epoch >= $max_applied_cluster_a_epoch", File, 1);
+#let $cluster_a_master_log_pos = query_get_value("SELECT Position from mysql.ndb_binlog_index WHERE epoch >= $max_applied_cluster_a_epoch", Position, 1);
+#--connection slave
+#eval select * from mysql.ndb_binlog_index where epoch > $max_applied_cluster_b_epoch;
+#let $cluster_b_master_log_file = query_get_value("SELECT SUBSTRING_INDEX(File, '/', -1) as File from mysql.ndb_binlog_index WHERE epoch >= $max_applied_cluster_b_epoch", File, 1);
+#let $cluster_b_master_log_pos = query_get_value("SELECT Position from mysql.ndb_binlog_index WHERE epoch >= $max_applied_cluster_b_epoch", Position, 1);
+#--echo Now change new Slave servers to new Master file + pos
+#--connection master
+#--echo Changing master to $cluster_b_master_log_file, $cluster_b_master_log_pos
+#eval CHANGE MASTER TO MASTER_LOG_FILE="$cluster_b_master_log_file", MASTER_LOG_POS=$cluster_b_master_log_pos;
+#--connection slave1
+#--echo Changing master to $cluster_a_master_log_file, $cluster_a_master_log_pos
+#eval CHANGE MASTER TO MASTER_LOG_FILE="$cluster_a_master_log_file", MASTER_LOG_POS=$cluster_a_master_log_pos;
+
+--echo Get current master status on Cluster A new master (next pos in Binlog)
+--connection master1
+--echo Master1 (3)
+--echo Flush logs to ensure any pending update (e.g. reflected apply_status write row)
+--echo is skipped over.
+flush logs;
+let $cluster_a_master_log_file = query_get_value("SHOW MASTER STATUS", "File", 1);
+let $cluster_a_master_log_pos = query_get_value("SHOW MASTER STATUS", "Position", 1);
+--echo Setup slave on Cluster B to use it
+--connection slave1
+--echo Slave1 (4)
+--disable_query_log
+eval CHANGE MASTER TO MASTER_LOG_FILE="$cluster_a_master_log_file", MASTER_LOG_POS=$cluster_a_master_log_pos;
+--enable_query_log
+
+--echo Get current master status on Cluster B new master (next pos in Binlog)
+--connection slave
+--echo Slave (2)
+--echo  Flush logs to ensure any pending update (e.g. reflected apply_status write row)
+--echo is skipped over.
+flush logs;
+let $cluster_b_master_log_file = query_get_value("SHOW MASTER STATUS", "File", 1);
+let $cluster_b_master_log_pos = query_get_value("SHOW MASTER STATUS", "Position", 1);
+--echo Setup slave on Cluster A to use it
+--connection master
+--echo Master (1)
+--disable_query_log
+eval CHANGE MASTER TO MASTER_LOG_FILE="$cluster_b_master_log_file", MASTER_LOG_POS=$cluster_b_master_log_pos;
+--enable_query_log
+
+--connection master
+--echo Master (1)
+--echo Show that Cluster A Slave server (old master) has no Max replicated epoch before receiving data
+select variable_name, variable_value from information_schema.global_status
+  where variable_name='Ndb_slave_max_replicated_epoch';
+
+--connection master1
+--echo Master1 (3)
+--echo Cluster A Master server (old slave) has old Max replicated epoch
+select
+    count(1)
+  from
+    information_schema.global_status,
+    mysql.ndb_apply_status
+  where
+    server_id = 1
+    and
+    variable_name='Ndb_slave_max_replicated_epoch'
+    and
+    variable_value = epoch;
+
+--echo Now start slaves up
+--source include/rpl_start_slaves.inc
+
+--echo Show that applying something from Cluster B causes the
+--echo old Max Rep Epoch to be loaded from ndb_apply_status
+--echo There is no new Max Rep Epoch from Cluster A as it has not changed
+--echo anything yet
+
+--connection slave
+--echo Slave (2)
+insert into test.t1 values (3, "From the Sea");
+
+--echo Allow to propagate to Cluster A
+--sync_slave_with_master master
+
+--connection master
+--echo Master (1)
+--echo New Slave server on Cluster A has loaded old Max-Replicated-Epoch
+select server_id from mysql.ndb_apply_status order by server_id;
+select
+    count(1)
+  from
+    information_schema.global_status,
+    mysql.ndb_apply_status
+  where
+    server_id = 1
+    and
+    variable_name='Ndb_slave_max_replicated_epoch'
+    and
+    variable_value = epoch;
+
+--echo Now make a new Cluster A change and see that the Max Replicated Epoch advances
+--echo once it has propagated
+
+--connection master1
+--echo Master1 (3)
+insert into test.t1 values (4, "Brooke");
+
+--echo Propagate to Cluster B
+--sync_slave_with_master slave1
+
+--echo Make change on Cluster B to allow waiting for reverse propagation
+--connection slave
+--echo Slave (2)
+insert into test.t1 values (5, "Rupert");
+
+--echo Wait for propagation back to Cluster A
+--sync_slave_with_master master
+
+--connection master
+--echo Master (1)
+--echo Show that Cluster A now has 2 different server_id entries in ndb_apply_status
+--echo Those from the new master (server_id 3) are highest.
+select server_id from mysql.ndb_apply_status order by server_id;
+select
+    count(1)
+  from
+    information_schema.global_status,
+    mysql.ndb_apply_status
+  where
+    server_id = 3
+    and
+    variable_name='Ndb_slave_max_replicated_epoch'
+    and
+    variable_value = epoch;
+
+let $max_epoch = query_get_value("select max(epoch) as epoch from mysql.ndb_apply_status where server_id in (1,3)","epoch", 1);
+--disable_query_log
+# We have to constrain the search to master server ids 1,3 in case the
+# Slave happens to have similar epoch values
+eval select server_id as local_server_with_max_epoch from mysql.ndb_apply_status where epoch=$max_epoch and server_id in (1,3);
+--enable_query_log
+
+--echo Done
+
+--connection master1
+drop table t1;
+--sync_slave_with_master slave1
+
+--source suite/ndb_rpl/ndb_master-slave_2ch_end.inc
+

=== added file 'mysql-test/suite/ndb_rpl/t/ndb_rpl_init_rep_status.test'
--- a/mysql-test/suite/ndb_rpl/t/ndb_rpl_init_rep_status.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_init_rep_status.test	2011-06-29 23:28:01 +0000
@@ -0,0 +1,89 @@
+--source include/have_ndb.inc
+--source include/have_binlog_format_mixed_or_row.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
+
+# Test Slave initialisation of Ndb_slave_max_replicated_epoch status var
+
+--connection slave
+reset master;
+stop slave;
+
+--connection master
+--echo Generate something in the Masters Binlog
+use test;
+create table t1 (a int primary key, b int) engine=ndb;
+
+insert into t1 values (1,1);
+
+--connection slave
+--echo Initial state
+select * from mysql.ndb_apply_status;
+select variable_value from information_schema.global_status
+  where variable_name like '%Ndb_slave_max_replicated_epoch%';
+select @slave_server_id:=(variable_value+0) from information_schema.global_variables
+  where variable_name like 'server_id';
+
+--echo Default, no data, max replicated epoch will be 0.
+reset slave;
+start slave;
+--connection master
+--sync_slave_with_master
+--connection slave
+--replace_column 3 # 4 # 5 #
+select server_id from mysql.ndb_apply_status order by server_id;
+select variable_value from information_schema.global_status
+  where variable_name like 'Ndb_slave_max_replicated_epoch';
+
+--echo Default, load of own serverid from ndb_apply_status, should be 111
+drop table test.t1;
+stop slave;
+reset slave;
+insert into mysql.ndb_apply_status values (@slave_server_id, 111, 'Fictional log', 222, 333);
+start slave;
+--connection master
+--sync_slave_with_master
+--connection slave
+--replace_column 3 # 4 # 5 #
+select server_id from mysql.ndb_apply_status order by server_id;
+select variable_value from information_schema.global_status
+  where variable_name like 'Ndb_slave_max_replicated_epoch';
+
+drop table test.t1;
+
+--echo Check that reset slave resets Ndb_slave_max_replicated_epoch
+stop slave;
+select variable_value from information_schema.global_status
+  where variable_name like 'Ndb_slave_max_replicated_epoch';
+reset slave;
+select variable_value from information_schema.global_status
+  where variable_name like 'Ndb_slave_max_replicated_epoch';
+
+--echo Multiple-channel, load highest of configured serverids, should be 222
+set @other_local_server_id=@slave_server_id+1;
+set @other_remote_server_id=@slave_server_id+2;
+insert into mysql.ndb_apply_status values (@slave_server_id, 111, 'Fictional log', 222, 333);
+insert into mysql.ndb_apply_status values (@other_local_server_id, 222, 'Fictional log', 222, 333);
+insert into mysql.ndb_apply_status values (@other_remote_server_id, 444, 'Fictional log', 222, 333);
+
+let $local_server_ids = `select @other_local_server_id`;
+
+--eval CHANGE MASTER TO IGNORE_SERVER_IDS=($local_server_ids);
+start slave;
+--connection master
+--sync_slave_with_master
+--connection slave
+--replace_column 3 # 4 # 5 #
+select server_id from mysql.ndb_apply_status order by server_id;
+select variable_value from information_schema.global_status
+  where variable_name like 'Ndb_slave_max_replicated_epoch';
+
+# Clean up
+stop slave;
+CHANGE MASTER TO IGNORE_SERVER_IDS= ();
+start slave;
+--connection master
+drop table test.t1;
+sync_slave_with_master;
+
+--source include/rpl_end.inc
+

=== modified file 'sql/Makefile.am'
--- a/sql/Makefile.am	2011-06-15 10:37:56 +0000
+++ b/sql/Makefile.am	2011-07-01 09:00:54 +0000
@@ -63,6 +63,7 @@ noinst_HEADERS =	item.h item_func.h item
 			ha_ndbcluster_lock_ext.h ha_ndbinfo.h \
 			ha_ndbcluster_glue.h \
 			ha_ndb_index_stat.h \
+                        ndb_mi.h \
 			ha_partition.h rpl_constants.h \
 			debug_sync.h \
 			opt_range.h protocol.h rpl_tblmap.h rpl_utility.h \
@@ -132,13 +133,14 @@ mysqld_SOURCES =	sql_lex.cc sql_handler.
 
 nodist_mysqld_SOURCES =	mini_client_errors.c pack.c client.c my_time.c my_user.c 
 
-libndb_la_CPPFLAGS=	@ndbcluster_includes@
+libndb_la_CPPFLAGS=	@ndbcluster_includes@ @ndbcluster_sql_defines@
 libndb_la_SOURCES=	ha_ndbcluster.cc \
 			ha_ndbcluster_binlog.cc \
 			ha_ndbcluster_connection.cc \
 			ha_ndbcluster_cond.cc \
 			ha_ndb_index_stat.cc \
-			ha_ndbinfo.cc
+			ha_ndbinfo.cc \
+			ndb_mi.cc
 
 gen_lex_hash_SOURCES =	gen_lex_hash.cc
 gen_lex_hash_LDFLAGS =  @NOINST_LDFLAGS@

=== modified file 'sql/ha_ndb_index_stat.cc'
--- a/sql/ha_ndb_index_stat.cc	2011-06-22 07:37:51 +0000
+++ b/sql/ha_ndb_index_stat.cc	2011-07-01 09:00:54 +0000
@@ -486,7 +486,7 @@ ndb_index_stat_option_update(MYSQL_THD,
   DBUG_PRINT("index_stat", ("str: %s", str));
   Ndb_index_stat_opt& opt= ndb_index_stat_opt;
   int ret= ndb_index_stat_str2opt(str, opt);
-  assert(ret == 0);
+  assert(ret == 0); NDB_IGNORE_VALUE(ret);
   *(const char**)var_ptr= ndb_index_stat_opt.option;
   DBUG_VOID_RETURN;
 }
@@ -701,7 +701,7 @@ ndb_index_stat_list_to_error(Ndb_index_s
   Ndb_index_stat_glob &glob= ndb_index_stat_glob;
 
   assert(st != 0);
-  const int lt= st->lt;
+  const int lt= st->lt; NDB_IGNORE_VALUE(lt);
   assert(1 <= lt && lt < Ndb_index_stat::LT_Count);
   assert(lt != Ndb_index_stat::LT_Error);
 
@@ -1084,10 +1084,10 @@ ndb_index_stat_proc_idle(Ndb_index_stat_
   const time_t check_wait=
     st->check_time == 0 ? 0 : st->check_time + check_delay - pr.now;
 
-  DBUG_PRINT("index_stat", ("st %s check wait:%ds force update:%u"
-                            " clean wait:%ds cache clean:%d",
-                            st->id, check_wait, st->force_update,
-                            clean_wait, st->cache_clean));
+  DBUG_PRINT("index_stat", ("st %s check wait:%lds force update:%u"
+                            " clean wait:%lds cache clean:%d",
+                            st->id, (long)check_wait, st->force_update,
+                            (long)clean_wait, st->cache_clean));
 
   if (!st->cache_clean && clean_wait <= 0)
   {

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-06-23 12:19:32 +0000
+++ b/sql/ha_ndbcluster.cc	2011-07-01 09:31:55 +0000
@@ -30,8 +30,6 @@
 
 #include "ha_ndbcluster_glue.h"
 
-#include "rpl_mi.h"
-
 #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
 #include "ha_ndbcluster.h"
 #include <ndbapi/NdbApi.hpp>
@@ -46,6 +44,7 @@
 
 #include <mysql/plugin.h>
 #include <ndb_version.h>
+#include "ndb_mi.h"
 
 #ifdef ndb_dynamite
 #undef assert
@@ -463,7 +462,11 @@ update_slave_api_stats(Ndb* ndb)
 st_ndb_slave_state g_ndb_slave_state;
 
 st_ndb_slave_state::st_ndb_slave_state()
-  : current_conflict_defined_op_count(0)
+  : current_conflict_defined_op_count(0),
+    current_master_server_epoch(0),
+    current_max_rep_epoch(0),
+    max_rep_epoch(0),
+    sql_run_id(~Uint32(0))
 {
   memset(current_violation_count, 0, sizeof(current_violation_count));
   memset(total_violation_count, 0, sizeof(total_violation_count));
@@ -475,6 +478,7 @@ st_ndb_slave_state::atTransactionAbort()
   /* Reset current-transaction counters + state */
   memset(current_violation_count, 0, sizeof(current_violation_count));
   current_conflict_defined_op_count = 0;
+  current_max_rep_epoch = 0;
 }
 
 void
@@ -489,8 +493,195 @@ st_ndb_slave_state::atTransactionCommit(
     current_violation_count[i] = 0;
   }
   current_conflict_defined_op_count = 0;
+  if (current_max_rep_epoch > max_rep_epoch)
+  {
+    DBUG_PRINT("info", ("Max replicated epoch increases from %llu to %llu",
+                        max_rep_epoch,
+                        current_max_rep_epoch));
+
+    max_rep_epoch = current_max_rep_epoch;
+  }
+  current_max_rep_epoch = 0;
+}
+
+void
+st_ndb_slave_state::atApplyStatusWrite(Uint32 master_server_id,
+                                       Uint32 row_server_id,
+                                       Uint64 row_epoch,
+                                       bool is_row_server_id_local)
+{
+  if (row_server_id == master_server_id)
+  {
+    /*
+       WRITE_ROW to ndb_apply_status injected by MySQLD
+       immediately upstream of us.
+       Record epoch
+    */
+    current_master_server_epoch = row_epoch;
+    assert(! is_row_server_id_local);
+  }
+  else if (is_row_server_id_local)
+  {
+    DBUG_PRINT("info", ("Recording application of local server %u epoch %llu "
+                        " which is %s.",
+                        row_server_id, row_epoch,
+                        (row_epoch > g_ndb_slave_state.current_max_rep_epoch)?
+                        " new highest." : " older than previously applied"));
+    if (row_epoch > current_max_rep_epoch)
+    {
+      /*
+        Store new highest epoch in thdvar.  If we commit successfully
+        then this can become the new global max
+      */
+      current_max_rep_epoch = row_epoch;
+    }
+  }
+}
+
+void
+st_ndb_slave_state::atResetSlave()
+{
+  /* Reset the Maximum replicated epoch vars
+   * on slave reset
+   * No need to touch the sql_run_id as that
+   * will increment if the slave is started
+   * again.
+   */
+  current_max_rep_epoch = 0;
+  max_rep_epoch = 0;
+}
+
+static int check_slave_state(THD* thd)
+{
+  DBUG_ENTER("check_slave_state");
+
+#ifdef HAVE_NDB_BINLOG
+  if (!thd->slave_thread)
+    DBUG_RETURN(0);
+
+  const Uint32 runId = ndb_mi_get_slave_run_id();
+  DBUG_PRINT("info", ("Slave SQL thread run id is %u",
+                      runId));
+  if (unlikely(runId != g_ndb_slave_state.sql_run_id))
+  {
+    DBUG_PRINT("info", ("Slave run id changed from %u, "
+                        "treating as Slave restart",
+                        g_ndb_slave_state.sql_run_id));
+    g_ndb_slave_state.sql_run_id = runId;
+
+    /* Always try to load the Max Replicated Epoch info
+     * first.
+     * Could be made optional if it's a problem
+     */
+    {
+      /*
+         Load highest replicated epoch from a local
+         MySQLD from the cluster.
+      */
+      DBUG_PRINT("info", ("Loading applied epoch information from %s",
+                          NDB_APPLY_TABLE));
+      NdbError ndb_error;
+      Uint64 highestAppliedEpoch = 0;
+      do
+      {
+        Ndb* ndb= check_ndb_in_thd(thd);
+        NDBDICT* dict= ndb->getDictionary();
+        NdbTransaction* trans= NULL;
+        ndb->setDatabaseName(NDB_REP_DB);
+        Ndb_table_guard ndbtab_g(dict, NDB_APPLY_TABLE);
+
+        const NDBTAB* ndbtab= ndbtab_g.get_table();
+        if (unlikely(ndbtab == NULL))
+        {
+          ndb_error = dict->getNdbError();
+          break;
+        }
+
+        trans= ndb->startTransaction();
+        if (unlikely(trans == NULL))
+        {
+          ndb_error = ndb->getNdbError();
+          break;
+        }
+
+        do
+        {
+          NdbScanOperation* sop = trans->getNdbScanOperation(ndbtab);
+          if (unlikely(sop == NULL))
+          {
+            ndb_error = trans->getNdbError();
+            break;
+          }
+
+          const Uint32 server_id_col_num = 0;
+          const Uint32 epoch_col_num = 1;
+          NdbRecAttr* server_id_ra = 0;
+          NdbRecAttr* epoch_ra = 0;
+
+          if (unlikely((sop->readTuples(NdbOperation::LM_CommittedRead) != 0)   ||
+                       ((server_id_ra = sop->getValue(server_id_col_num)) == NULL)  ||
+                       ((epoch_ra = sop->getValue(epoch_col_num)) == NULL)))
+          {
+            ndb_error = sop->getNdbError();
+            break;
+          }
+
+          if (trans->execute(NdbTransaction::Commit))
+          {
+            ndb_error = trans->getNdbError();
+            break;
+          }
+
+          int rc = 0;
+          while (0 == (rc= sop->nextResult(true)))
+          {
+            Uint32 serverid = server_id_ra->u_32_value();
+            Uint64 epoch = epoch_ra->u_64_value();
+
+            if ((serverid == ::server_id) ||
+                (ndb_mi_get_ignore_server_id(serverid)))
+            {
+              highestAppliedEpoch = MAX(epoch, highestAppliedEpoch);
+            }
+          }
+
+          if (rc != 1)
+          {
+            ndb_error = sop->getNdbError();
+            break;
+          }
+        } while (0);
+
+        trans->close();
+      } while(0);
+
+      if (ndb_error.code != 0)
+      {
+        sql_print_warning("NDB Slave : Could not determine maximum replicated epoch from %s.%s "
+                          "at Slave start, error %u %s",
+                          NDB_REP_DB,
+                          NDB_APPLY_TABLE,
+                          ndb_error.code, ndb_error.message);
+      }
+
+      /*
+        Set Global status variable to the Highest Applied Epoch from
+        the Cluster DB.
+        If none was found, this will be zero.
+      */
+      g_ndb_slave_state.max_rep_epoch = highestAppliedEpoch;
+      sql_print_information("NDB Slave : MaxReplicatedEpoch set to %llu (%u/%u) at Slave start",
+                            g_ndb_slave_state.max_rep_epoch,
+                            (Uint32)(g_ndb_slave_state.max_rep_epoch >> 32),
+                            (Uint32)(g_ndb_slave_state.max_rep_epoch & 0xffffffff));
+    } // Load highest replicated epoch
+  } // New Slave SQL thread run id
+#endif
+
+  DBUG_RETURN(0);
 }
 
+
 static int update_status_variables(Thd_ndb *thd_ndb,
                                    st_ndb_status *ns,
                                    Ndb_cluster_connection *c)
@@ -620,6 +811,7 @@ SHOW_VAR ndb_status_injector_variables[]
 
 SHOW_VAR ndb_status_slave_variables[]= {
   NDBAPI_COUNTERS("_slave", &g_slave_api_client_stats),
+  {"slave_max_replicated_epoch", (char*) &g_ndb_slave_state.max_rep_epoch, SHOW_LONGLONG},
   {NullS, NullS, SHOW_LONG}
 };
 
@@ -712,7 +904,7 @@ static int ndb_to_mysql_error(const NdbE
 }
 
 #ifdef HAVE_NDB_BINLOG
-extern Master_info *active_mi;
+
 /* Write conflicting row to exceptions table. */
 static int write_conflict_row(NDB_SHARE *share,
                               NdbTransaction *trans,
@@ -740,8 +932,8 @@ static int write_conflict_row(NDB_SHARE
   }
   {
     uint32 server_id= (uint32)::server_id;
-    uint32 master_server_id= (uint32)active_mi->master_id;
-    uint64 master_epoch= (uint64)active_mi->master_epoch;
+    uint32 master_server_id= (uint32) ndb_mi_get_master_server_id();
+    uint64 master_epoch= (uint64) g_ndb_slave_state.current_master_server_epoch;
     uint32 count= (uint32)++(cfn_share->m_count);
     if (ex_op->setValue((Uint32)0, (const char *)&(server_id)) ||
         ex_op->setValue((Uint32)1, (const char *)&(master_server_id)) ||
@@ -3914,7 +4106,7 @@ bool ha_ndbcluster::isManualBinlogExec(T
 #ifndef EMBEDDED_LIBRARY
   return thd ? 
     ( thd->rli_fake? 
-      thd->rli_fake->get_flag(Relay_log_info::IN_STMT) : false)
+      ndb_mi_get_in_relay_log_statement(thd->rli_fake) : false)
     : false;
 #else
   /* For Embedded library, we can't determine if we're
@@ -4170,21 +4362,38 @@ handle_conflict_op_error(Thd_ndb* thd_nd
 #endif /* HAVE_NDB_BINLOG */
 
 
+#ifdef HAVE_NDB_BINLOG
+/*
+  is_serverid_local
+*/
+static bool is_serverid_local(Uint32 serverid)
+{
+  /*
+     If it's not our serverid, check the
+     IGNORE_SERVER_IDS setting to check if
+     it's local.
+  */
+  return ((serverid == ::server_id) ||
+          ndb_mi_get_ignore_server_id(serverid));
+}
+#endif
+
 int ha_ndbcluster::write_row(uchar *record)
 {
   DBUG_ENTER("ha_ndbcluster::write_row");
 #ifdef HAVE_NDB_BINLOG
   if (m_share == ndb_apply_status_share && table->in_use->slave_thread)
   {
-    uint32 sid, master_server_id= active_mi->master_id;
-    memcpy(&sid, table->field[0]->ptr + (record - table->record[0]), sizeof(sid));
-    if (sid == master_server_id)
-    {
-      uint64 master_epoch;
-      memcpy(&master_epoch, table->field[1]->ptr + (record - table->record[0]),
-             sizeof(master_epoch));
-      active_mi->master_epoch= master_epoch;
-    }
+    uint32 row_server_id, master_server_id= ndb_mi_get_master_server_id();
+    uint64 row_epoch;
+    memcpy(&row_server_id, table->field[0]->ptr + (record - table->record[0]),
+           sizeof(row_server_id));
+    memcpy(&row_epoch, table->field[1]->ptr + (record - table->record[0]),
+           sizeof(row_epoch));
+    g_ndb_slave_state.atApplyStatusWrite(master_server_id,
+                                         row_server_id,
+                                         row_epoch,
+                                         is_serverid_local(row_server_id));
   }
 #endif /* HAVE_NDB_BINLOG */
   DBUG_RETURN(ndb_write_row(record, FALSE, FALSE));
@@ -4208,6 +4417,10 @@ int ha_ndbcluster::ndb_write_row(uchar *
   Uint32 num_sets= 0;
   DBUG_ENTER("ha_ndbcluster::ndb_write_row");
 
+  error = check_slave_state(thd);
+  if (unlikely(error))
+    DBUG_RETURN(error);
+
   has_auto_increment= (table->next_number_field && record == table->record[0]);
 
   if (has_auto_increment && table_share->primary_key != MAX_KEY) 
@@ -4752,6 +4965,11 @@ int ha_ndbcluster::ndb_update_row(const
 
   DBUG_ENTER("ndb_update_row");
   DBUG_ASSERT(trans);
+
+  error = check_slave_state(thd);
+  if (unlikely(error))
+    DBUG_RETURN(error);
+
   /*
    * If IGNORE the ignore constraint violations on primary and unique keys,
    * but check that it is not part of INSERT ... ON DUPLICATE KEY UPDATE
@@ -5086,6 +5304,10 @@ int ha_ndbcluster::ndb_delete_row(const
   DBUG_ENTER("ndb_delete_row");
   DBUG_ASSERT(trans);
 
+  error = check_slave_state(thd);
+  if (unlikely(error))
+    DBUG_RETURN(error);
+
   ha_statistic_increment(&SSV::ha_delete_count);
   m_rows_changed++;
 
@@ -5298,7 +5520,7 @@ void ha_ndbcluster::unpack_record(uchar
           my_bitmap_map *old_map=
             dbug_tmp_use_all_columns(table, table->write_set);
           int res = field_bit->store(value, true);
-          assert(res == 0);
+          assert(res == 0); NDB_IGNORE_VALUE(res);
           dbug_tmp_restore_column_map(table->write_set, old_map);
           field->move_field_offset(-dst_offset);
         }
@@ -6570,30 +6792,15 @@ static int ndbcluster_update_apply_statu
     r|= op->setValue(1u, (Uint64)0);
     DBUG_ASSERT(r == 0);
   }
-#if MYSQL_VERSION_ID < 50600
-  const char* group_master_log_name =
-    active_mi->rli.group_master_log_name;
-  const Uint64 group_master_log_pos =
-    (Uint64)active_mi->rli.group_master_log_pos;
-  const Uint64 future_event_relay_log_pos =
-    (Uint64)active_mi->rli.future_event_relay_log_pos;
-  const Uint64 group_relay_log_pos =
-    (Uint64)active_mi->rli.group_relay_log_pos;
-#else
-  /*
-    - Master_info's rli member returns Relay_log_info*
-    - Relay_log_info members are protected and must be accessed
-      using accessor functions
-  */
   const char* group_master_log_name =
-    active_mi->rli->get_group_master_log_name();
+    ndb_mi_get_group_master_log_name();
   const Uint64 group_master_log_pos =
-    (Uint64)active_mi->rli->get_group_master_log_pos();
+    ndb_mi_get_group_master_log_pos();
   const Uint64 future_event_relay_log_pos =
-    (Uint64)active_mi->rli->get_future_event_relay_log_pos();
+    ndb_mi_get_future_event_relay_log_pos();
   const Uint64 group_relay_log_pos =
-    (Uint64)active_mi->rli->get_group_relay_log_pos();
-#endif
+    ndb_mi_get_group_relay_log_pos();
+
   // log_name
   char tmp_buf[FN_REFLEN];
   ndb_pack_varchar(ndbtab->getColumn(2u), tmp_buf,
@@ -9196,7 +9403,7 @@ int ha_ndbcluster::rename_table(const ch
                              share->key, share->use_count));
     ndbcluster_prepare_rename_share(share, to);
     int ret = ndbcluster_rename_share(thd, share);
-    assert(ret == 0);
+    assert(ret == 0); NDB_IGNORE_VALUE(ret);
   }
 
   NdbDictionary::Table new_tab= *orig_tab;
@@ -9207,7 +9414,7 @@ int ha_ndbcluster::rename_table(const ch
     if (share)
     {
       int ret = ndbcluster_undo_rename_share(thd, share);
-      assert(ret == 0);
+      assert(ret == 0); NDB_IGNORE_VALUE(ret);
       /* ndb_share reference temporary free */
       DBUG_PRINT("NDB_SHARE", ("%s temporary free  use_count: %u",
                                share->key, share->use_count));

=== modified file 'sql/ha_ndbcluster.h'
--- a/sql/ha_ndbcluster.h	2011-06-23 12:19:32 +0000
+++ b/sql/ha_ndbcluster.h	2011-07-01 09:00:54 +0000
@@ -34,6 +34,8 @@
 #include <ndbapi/ndbapi_limits.h>
 #include <kernel/ndb_limits.h>
 
+#define NDB_IGNORE_VALUE(x) (void)x
+
 #define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8
 
 class Ndb;             // Forward declaration
@@ -344,13 +346,23 @@ struct st_ndb_slave_state
   /* Counter values for current slave transaction */
   Uint32 current_conflict_defined_op_count;
   Uint32 current_violation_count[CFT_NUMBER_OF_CFTS];
+  Uint64 current_master_server_epoch;
+  Uint64 current_max_rep_epoch;
 
   /* Cumulative counter values */
   Uint64 total_violation_count[CFT_NUMBER_OF_CFTS];
+  Uint64 max_rep_epoch;
+  Uint32 sql_run_id;
 
   /* Methods */
   void atTransactionCommit();
   void atTransactionAbort();
+  void atResetSlave();
+
+  void atApplyStatusWrite(Uint32 master_server_id,
+                          Uint32 row_server_id,
+                          Uint64 row_epoch,
+                          bool is_row_server_id_local);
 
   st_ndb_slave_state();
 };

=== modified file 'sql/ha_ndbcluster_binlog.cc'
--- a/sql/ha_ndbcluster_binlog.cc	2011-06-16 18:16:01 +0000
+++ b/sql/ha_ndbcluster_binlog.cc	2011-07-01 09:00:54 +0000
@@ -47,6 +47,7 @@ extern my_bool opt_ndb_log_updated_only;
 extern my_bool opt_ndb_log_binlog_index;
 extern my_bool opt_ndb_log_apply_status;
 extern ulong opt_ndb_extra_logging;
+extern st_ndb_slave_state g_ndb_slave_state;
 
 bool ndb_log_empty_epochs(void);
 
@@ -892,6 +893,8 @@ static void ndbcluster_reset_slave(THD *
     thd_stmt_da(thd)->reset_diagnostics_area();
   }
 
+  g_ndb_slave_state.atResetSlave();
+
   DBUG_VOID_RETURN;
 }
 
@@ -5360,7 +5363,7 @@ ndbcluster_create_event_ops(THD *thd, ND
     {
       // set injector_ndb database/schema from table internal name
       int ret= ndb->setDatabaseAndSchemaName(ndbtab);
-      assert(ret == 0);
+      assert(ret == 0); NDB_IGNORE_VALUE(ret);
       op= ndb->createEventOperation(event_name);
       // reset to catch errors
       ndb->setDatabaseName("");
@@ -6669,7 +6672,7 @@ restart_cluster_failure:
       };
     int ret = inj->record_incident(thd, INCIDENT_LOST_EVENTS,
                                    msg[incident_id]);
-    assert(ret == 0);
+    assert(ret == 0); NDB_IGNORE_VALUE(ret);
     do_incident = false; // Don't report incident again, unless we get started
     break;
   }
@@ -7112,7 +7115,7 @@ restart_cluster_failure:
                                 table->s->fields));
             injector::transaction::table tbl(table, true);
             int ret = trans.use_table(::server_id, tbl);
-            assert(ret == 0);
+            assert(ret == 0); NDB_IGNORE_VALUE(ret);
           }
         }
         if (trans.good())
@@ -7126,7 +7129,7 @@ restart_cluster_failure:
 #endif
             injector::transaction::table tbl(apply_status_table, true);
             int ret = trans.use_table(::server_id, tbl);
-            assert(ret == 0);
+            assert(ret == 0); NDB_IGNORE_VALUE(ret);
 
             /* add the gci to the record */
             Field *field= apply_status_table->field[1];

=== added file 'sql/ndb_mi.cc'
--- a/sql/ndb_mi.cc	1970-01-01 00:00:00 +0000
+++ b/sql/ndb_mi.cc	2011-06-29 23:28:01 +0000
@@ -0,0 +1,86 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#include "ndb_mi.h"
+#include "my_sys.h"
+#include "hash.h"
+#include "rpl_mi.h"
+
+#ifdef HAVE_NDB_BINLOG
+
+extern Master_info *active_mi;
+
+
+uint32 ndb_mi_get_master_server_id()
+{
+  return (uint32) active_mi->master_id;
+}
+
+const char* ndb_mi_get_group_master_log_name()
+{
+#if MYSQL_VERSION_ID < 50600
+  return active_mi->rli.group_master_log_name;
+#else
+  return active_mi->rli->get_group_master_log_name();
+#endif
+}
+
+uint64 ndb_mi_get_group_master_log_pos()
+{
+#if MYSQL_VERSION_ID < 50600
+  return (uint64) active_mi->rli.group_master_log_pos;
+#else
+  return (uint64) active_mi->rli->get_group_master_log_pos();
+#endif
+}
+
+uint64 ndb_mi_get_future_event_relay_log_pos()
+{
+#if MYSQL_VERSION_ID < 50600
+  return (uint64) active_mi->rli.future_event_relay_log_pos;
+#else
+  return (uint64) active_mi->rli->get_future_event_relay_log_pos();
+#endif
+}
+
+uint64 ndb_mi_get_group_relay_log_pos()
+{
+#if MYSQL_VERSION_ID < 50600
+  return (uint64) active_mi->rli.group_relay_log_pos;
+#else
+  return (uint64) active_mi->rli->get_group_relay_log_pos();
+#endif
+}
+
+bool ndb_mi_get_ignore_server_id(uint32 server_id)
+{
+  return (active_mi->shall_ignore_server_id(server_id) != 0);
+}
+
+uint32 ndb_mi_get_slave_run_id()
+{
+  return active_mi->rli.slave_run_id;
+}
+
+bool ndb_mi_get_in_relay_log_statement(Relay_log_info* rli)
+{
+  return (rli->get_flag(Relay_log_info::IN_STMT) != 0);
+}
+
+/* #ifdef HAVE_NDB_BINLOG */
+
+#endif

=== added file 'sql/ndb_mi.h'
--- a/sql/ndb_mi.h	1970-01-01 00:00:00 +0000
+++ b/sql/ndb_mi.h	2011-06-29 23:28:01 +0000
@@ -0,0 +1,47 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#ifndef NDB_MI_H
+#define NDB_MI_H
+
+#include <my_global.h>
+
+/*
+   This file defines methods for interacting with the
+   Master Info structure on a Slave MySQLD.
+   These methods are only valid when running in an
+   active slave thread.
+*/
+
+/*
+  Accessors
+*/
+uint32 ndb_mi_get_master_server_id();
+const char* ndb_mi_get_group_master_log_name();
+uint64 ndb_mi_get_group_master_log_pos();
+uint64 ndb_mi_get_future_event_relay_log_pos();
+uint64 ndb_mi_get_group_relay_log_pos();
+bool ndb_mi_get_ignore_server_id(uint32 server_id);
+uint32 ndb_mi_get_slave_run_id();
+
+/*
+   Relay log info related functions
+*/
+bool ndb_mi_get_in_relay_log_statement(class Relay_log_info* rli);
+
+// #ifndef NDB_MI_H
+#endif

=== modified file 'storage/ndb/CMakeLists.txt'
--- a/storage/ndb/CMakeLists.txt	2011-06-15 10:37:56 +0000
+++ b/storage/ndb/CMakeLists.txt	2011-06-29 23:28:01 +0000
@@ -147,7 +147,8 @@ SET(NDBCLUSTER_SOURCES
   ../../sql/ha_ndbcluster_connection.cc
   ../../sql/ha_ndbcluster_binlog.cc
   ../../sql/ha_ndb_index_stat.cc
-  ../../sql/ha_ndbinfo.cc)
+  ../../sql/ha_ndbinfo.cc
+  ../../sql/ndb_mi.cc)
 INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/storage/ndb/include)
 
 IF(EXISTS ${CMAKE_SOURCE_DIR}/storage/mysql_storage_engine.cmake)

=== modified file 'storage/ndb/Makefile.am'
--- a/storage/ndb/Makefile.am	2011-04-08 11:06:53 +0000
+++ b/storage/ndb/Makefile.am	2011-06-30 11:33:45 +0000
@@ -15,7 +15,7 @@
 
 SUBDIRS = include src tools . @ndb_opt_subdirs@
 DIST_SUBDIRS = src tools include test docs
-EXTRA_DIST = config cmake ndbapi-examples plug.in CMakeLists.txt ndb_configure.cmake
+EXTRA_DIST = config cmake ndbapi-examples plug.in CMakeLists.txt ndb_configure.cmake compile-cluster
 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
 
 include $(top_srcdir)/storage/ndb/config/common.mk.am

=== added file 'storage/ndb/compile-cluster'
--- a/storage/ndb/compile-cluster	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/compile-cluster	2011-06-30 13:28:29 +0000
@@ -0,0 +1,173 @@
+#!/usr/bin/perl
+
+# Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+# -*- cperl -*-
+#
+# MySQL Cluster compile script to bridge the gap between
+# different build systems in different versions of MySQL Server
+#
+# This script is intended for internal use
+#
+use strict;
+use Cwd 'abs_path';
+use File::Basename;
+use Getopt::Long;
+
+# Only add the command line options handled by this script, 
+# thus acting like a filter and passing all other arguments
+# straight through
+my $opt_debug;
+Getopt::Long::Configure("pass_through");
+GetOptions(
+
+  # Build MySQL Server and NDB with debug
+  'debug' => \$opt_debug,
+
+
+) or exit(1);
+
+# Find source root directory, assume this script is
+# in <srcroot>/storage/ndb/
+my $srcdir = dirname(dirname(dirname(abs_path($0))));
+die unless -d $srcdir; # Sanity check that the srcdir exist
+
+# Windows build is special case...
+if ($^O eq "cygwin" or $^O eq "MSWin32")
+{
+  if ($^O eq "cygwin") {
+    # Convert posix path to Windows mixed path since cmake
+    # is most likely a windows binary
+    $srcdir= `cygpath -m $srcdir`;
+    chomp $srcdir;
+  }
+
+  # Check that cmake exists and figure out its version
+  my $cmake_version_id;
+  {
+    my $version_text = `cmake --version`;
+    print $version_text;
+    die "Could not find cmake" if ($?);
+    if ( $version_text =~ /^cmake version ([0-9]*)\.([0-9]*)\.*([^\s]*)/ )
+    {
+      #print "1: $1 2: $2 3: $3\n";
+      $cmake_version_id= $1*10000 + $2*100 + $3;
+      print "cmake_version_id: $cmake_version_id\n";
+    }
+    die "Could not parse cmake version" unless ($cmake_version_id);
+  }
+
+  die "You need to install cmake with version > 2.8"
+      if ($cmake_version_id < 20800);
+
+  # Configure
+  {
+    my @args;
+    push(@args, "$srcdir/win/configure.js");
+
+    # NDB options
+    push(@args, "WITH_NDBCLUSTER_STORAGE_ENGINE");
+    push(@args, "WITH_NDB_TEST");
+
+
+    foreach my $arg (@ARGV)
+    {
+      # Convert args from --arg to ARG format 
+      $arg =~ s/^--//; # Remove leading -- 
+      $arg = uc($arg); # Uppercase
+      $arg =~ s/-/_/g; # Convert - to _ 
+      push(@args, $arg);
+    }
+
+    cmd("cscript", @args);
+  }
+
+  # cmake
+  {
+    my @args;
+    push(@args, "$srcdir");
+    cmd("cmake", @args);
+  }
+ 
+  # Build
+  {
+    # Use universal "cmake --build <srcdir>"
+    my @args;
+    push(@args, "--build");
+    push(@args, "$srcdir");
+
+    if ($opt_debug)
+    {
+      push(@args, "--config");
+      push(@args, "Debug");
+    }
+    else
+    {
+      # Legacy default
+      push(@args, "--config");
+      push(@args, "RelWithDebInfo");
+    }
+    cmd("cmake", @args);
+  }
+
+  exit(0);
+}
+
+#
+# Build MySQL autotools
+#
+{
+  cmd("$srcdir/BUILD/autorun.sh"); 
+}
+
+#
+# Configure
+#
+{
+  my @args;
+  push(@args, "$srcdir/configure");
+
+  # MySQL Server options
+  push(@args, "--with-ssl");
+ 
+  if ($opt_debug)
+  {
+    push(@args, "--with-debug");
+  }
+
+  # NDB options
+  push(@args, "--with-plugin-ndbcluster");
+  push(@args, "--with-ndb-test");
+
+  cmd($^X, @args, @ARGV);
+}
+
+#
+# Build
+#
+{
+    cmd("make -C $srcdir");
+}
+
+sub cmd {
+  my ($cmd, @a)= @_;
+  print "compile-cluster: calling '$cmd ", join(' ', @a), "'\n";
+  system($cmd, @a)
+    and print("command failed: $!\n")
+      and exit(1);
+}
+
+exit(0);

=== modified file 'storage/ndb/include/kernel/GlobalSignalNumbers.h'
--- a/storage/ndb/include/kernel/GlobalSignalNumbers.h	2011-05-19 09:16:32 +0000
+++ b/storage/ndb/include/kernel/GlobalSignalNumbers.h	2011-07-01 17:15:00 +0000
@@ -111,9 +111,10 @@ extern const GlobalSignalNumber NO_OF_SI
 #define GSN_CONFIG_CHECK_REF            52
 #define GSN_CONFIG_CHECK_CONF           53
 
-/* 54 unused */
-/* 55 unused */
-/* 56 unused */
+#define GSN_GET_CONFIG_REQ        54
+#define GSN_GET_CONFIG_REF        55
+#define GSN_GET_CONFIG_CONF       56
+
 /* 57 unused */
 /* 58 unused */
 /* 59 unused */

=== added file 'storage/ndb/include/kernel/signaldata/GetConfig.hpp'
--- a/storage/ndb/include/kernel/signaldata/GetConfig.hpp	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/include/kernel/signaldata/GetConfig.hpp	2011-07-01 17:15:00 +0000
@@ -0,0 +1,75 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#ifndef GET_CONFIG_HPP
+#define GET_CONFIG_HPP
+
+#include "SignalData.hpp"
+
+/**
+ * GetConfig - Get the node's current configuration
+ *
+ * Successful return = GET_CONFIG_CONF -  a long signal
+ */
+class GetConfigReq {
+  /**
+   * Sender(s) / Receiver(s)
+   */
+  // Blocks
+  friend class Cmvmi;
+  friend class MgmtSrvr;
+  friend bool printGET_CONFIG_REQ(FILE *, const Uint32 *, Uint32, Uint16);
+
+  STATIC_CONST( SignalLength = 2 );
+
+  Uint32 nodeId; // Node id of the receiver node
+  Uint32 senderRef;
+};
+
+class GetConfigRef {
+  /**
+   * Sender/Receiver
+   */
+  friend class Cmvmi;
+  friend class MgmtSrvr;
+  friend bool printGET_CONFIG_REF(FILE *, const Uint32 *, Uint32, Uint16);
+
+  STATIC_CONST( SignalLength = 1 );
+
+  Uint32 error;
+
+  enum ErrorCode {
+    WrongSender = 1,
+    WrongNodeId = 2,
+    NoConfig = 3
+  };
+};
+
+class GetConfigConf {
+  /**
+   * Sender(s) / Receiver(s)
+   */
+  // Blocks
+  friend class Cmvmi;
+  friend class MgmtSrvr;
+  friend bool printGET_CONFIG_CONF(FILE *, const Uint32 *, Uint32, Uint16);
+
+  STATIC_CONST( SignalLength = 1 );
+
+  Uint32 configLength; // config blob size
+};
+#endif

=== modified file 'storage/ndb/include/kernel/signaldata/SignalData.hpp'
--- a/storage/ndb/include/kernel/signaldata/SignalData.hpp	2011-05-19 09:16:32 +0000
+++ b/storage/ndb/include/kernel/signaldata/SignalData.hpp	2011-07-01 17:15:00 +0000
@@ -317,4 +317,8 @@ GSN_PRINT_SIGNATURE(printINDEX_STAT_IMPL
 GSN_PRINT_SIGNATURE(printINDEX_STAT_IMPL_REF);
 GSN_PRINT_SIGNATURE(printINDEX_STAT_REP);
 
+GSN_PRINT_SIGNATURE(printGET_CONFIG_REQ);
+GSN_PRINT_SIGNATURE(printGET_CONFIG_REF);
+GSN_PRINT_SIGNATURE(printGET_CONFIG_CONF);
+
 #endif

=== modified file 'storage/ndb/include/mgmapi/mgmapi.h'
--- a/storage/ndb/include/mgmapi/mgmapi.h	2011-06-21 13:10:37 +0000
+++ b/storage/ndb/include/mgmapi/mgmapi.h	2011-07-01 17:15:00 +0000
@@ -710,6 +710,18 @@ extern "C" {
 			 int num_args,
 			 struct ndb_mgm_reply* reply);
 
+  /**
+   * Get the current configuration from a node.
+   *
+   * @param handle the NDB management handle.
+   * @param nodeId of the node for which the configuration is requested.
+   * @return the current configuration from the requested node.
+   */
+  struct ndb_mgm_configuration *
+  ndb_mgm_get_configuration_from_node(NdbMgmHandle handle,
+                                      int nodeid);
+
+
   /** @} *********************************************************************/
   /**
    * @name Functions: Start/stop nodes

=== modified file 'storage/ndb/include/ndb_version.h.in'
--- a/storage/ndb/include/ndb_version.h.in	2011-05-25 13:19:02 +0000
+++ b/storage/ndb/include/ndb_version.h.in	2011-07-01 17:15:00 +0000
@@ -676,4 +676,21 @@ ndb_refresh_tuple(Uint32 x)
   }
 }
 
+#define NDBD_GET_CONFIG_SUPPORT_70 NDB_MAKE_VERSION(7,0,27)
+#define NDBD_GET_CONFIG_SUPPORT_71 NDB_MAKE_VERSION(7,1,16)
+
+static
+inline
+int
+ndbd_get_config_supported(Uint32 x)
+{
+  const Uint32 major = (x >> 16) & 0xFF;
+  const Uint32 minor = (x >>  8) & 0xFF;
+
+  if (major == 7 && minor == 0)
+    return x >= NDBD_GET_CONFIG_SUPPORT_70;
+
+  return x >= NDBD_GET_CONFIG_SUPPORT_71;
+}
+
 #endif

=== modified file 'storage/ndb/include/ndbapi/NdbIndexStat.hpp'
--- a/storage/ndb/include/ndbapi/NdbIndexStat.hpp	2011-06-07 10:03:02 +0000
+++ b/storage/ndb/include/ndbapi/NdbIndexStat.hpp	2011-06-28 16:13:49 +0000
@@ -334,9 +334,9 @@ public:
   struct Mem {
     Mem();
     virtual ~Mem();
-    virtual void* mem_alloc(size_t size) = 0;
+    virtual void* mem_alloc(UintPtr size) = 0;
     virtual void mem_free(void* ptr) = 0;
-    virtual size_t mem_used() const = 0;
+    virtual UintPtr mem_used() const = 0;
   };
 
   /*

=== modified file 'storage/ndb/ndb_configure.m4'
--- a/storage/ndb/ndb_configure.m4	2011-05-24 08:38:04 +0000
+++ b/storage/ndb/ndb_configure.m4	2011-07-01 09:00:54 +0000
@@ -2,7 +2,7 @@
 # Should be updated when creating a new NDB version
 NDB_VERSION_MAJOR=7
 NDB_VERSION_MINOR=0
-NDB_VERSION_BUILD=26
+NDB_VERSION_BUILD=27
 NDB_VERSION_STATUS=""
 
 dnl for build ndb docs
@@ -403,6 +403,7 @@ AC_DEFUN([MYSQL_SETUP_NDBCLUSTER], [
   ndbcluster_includes="-I\$(top_builddir)/storage/ndb/include -I\$(top_srcdir)/storage/ndb/include -I\$(top_srcdir)/storage/ndb/include/ndbapi -I\$(top_srcdir)/storage/ndb/include/mgmapi"
   ndbcluster_libs="\$(top_builddir)/storage/ndb/src/.libs/libndbclient.a"
   ndbcluster_system_libs=""
+  ndbcluster_sql_defines=""
 
   MYSQL_CHECK_NDB_OPTIONS
   NDB_CHECK_NDBMTD
@@ -498,6 +499,7 @@ AC_DEFUN([MYSQL_SETUP_NDBCLUSTER], [
     then
       NDB_DEFS=""
     else
+      ndbcluster_sql_defines="-DNDEBUG"
       NDB_DEFS="-DNDEBUG"
     fi
   fi
@@ -600,6 +602,7 @@ AC_DEFUN([MYSQL_SETUP_NDBCLUSTER], [
   AC_SUBST(ndbcluster_libs)
   AC_SUBST(ndbcluster_system_libs)
   AC_SUBST(NDB_SCI_LIBS)
+  AC_SUBST(ndbcluster_sql_defines)
 
   AC_SUBST(ndb_transporter_opt_objs)
   AC_SUBST(ndb_bin_am_ldflags)

=== modified file 'storage/ndb/src/common/debugger/signaldata/CMakeLists.txt'
--- a/storage/ndb/src/common/debugger/signaldata/CMakeLists.txt	2011-06-21 13:58:00 +0000
+++ b/storage/ndb/src/common/debugger/signaldata/CMakeLists.txt	2011-07-01 17:15:00 +0000
@@ -45,5 +45,5 @@ ADD_CONVENIENCE_LIBRARY(ndbsignaldata
         ScanFrag.cpp ApiVersion.cpp
         LocalRouteOrd.cpp
 	DbinfoScan.cpp NodePing.cpp
-	IndexStatSignal.cpp)
+	IndexStatSignal.cpp GetConfig.cpp)
 

=== added file 'storage/ndb/src/common/debugger/signaldata/GetConfig.cpp'
--- a/storage/ndb/src/common/debugger/signaldata/GetConfig.cpp	1970-01-01 00:00:00 +0000
+++ b/storage/ndb/src/common/debugger/signaldata/GetConfig.cpp	2011-07-01 17:15:00 +0000
@@ -0,0 +1,48 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#include <signaldata/GetConfig.hpp>
+
+bool
+printGET_CONFIG_REQ(FILE * output, const Uint32 * theData,
+                   Uint32 len, Uint16 receiverBlockNo)
+{
+  const GetConfigReq* sig = (const GetConfigReq*)theData;
+  fprintf(output, " nodeId : %u senderRef : %x\n",
+          sig->nodeId,
+          sig->senderRef);
+  return true;
+}
+
+bool
+printGET_CONFIG_REF(FILE * output, const Uint32 * theData,
+                   Uint32 len, Uint16 receiverBlockNo) {
+  const GetConfigRef* sig = (const GetConfigRef*)theData;
+  fprintf(output, " error : %u\n",
+          sig->error);
+  return true;
+}
+
+
+bool
+printGET_CONFIG_CONF(FILE * output, const Uint32 * theData,
+                   Uint32 len, Uint16 receiverBlockNo) {
+  const GetConfigConf* sig = (const GetConfigConf*)theData;
+  fprintf(output, " Config size : %u\n",
+          sig->configLength);
+  return true;
+}

=== modified file 'storage/ndb/src/common/debugger/signaldata/Makefile.am'
--- a/storage/ndb/src/common/debugger/signaldata/Makefile.am	2011-05-19 09:16:32 +0000
+++ b/storage/ndb/src/common/debugger/signaldata/Makefile.am	2011-07-01 17:15:00 +0000
@@ -47,7 +47,7 @@ libsignaldataprint_la_SOURCES = \
  	  CreateIndxImpl.cpp DropIndxImpl.cpp AlterIndxImpl.cpp \
  	  BuildIndx.cpp BuildIndxImpl.cpp ApiVersion.cpp \
           LocalRouteOrd.cpp DbinfoScan.cpp NodePing.cpp \
-	  IndexStatSignal.cpp
+	  IndexStatSignal.cpp GetConfig.cpp
 
 include $(top_srcdir)/storage/ndb/config/common.mk.am
 include $(top_srcdir)/storage/ndb/config/type_ndbapi.mk.am

=== modified file 'storage/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp'
--- a/storage/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp	2011-05-19 09:16:32 +0000
+++ b/storage/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp	2011-07-01 17:15:00 +0000
@@ -268,6 +268,10 @@ SignalDataPrintFunctions[] = {
   ,{ GSN_INDEX_STAT_IMPL_REF, printINDEX_STAT_IMPL_REF }
   ,{ GSN_INDEX_STAT_REP, printINDEX_STAT_REP }
 
+  ,{ GSN_GET_CONFIG_REQ, printGET_CONFIG_REQ }
+  ,{ GSN_GET_CONFIG_REF, printGET_CONFIG_REF }
+  ,{ GSN_GET_CONFIG_CONF, printGET_CONFIG_CONF }
+
   ,{ 0, 0 }
 };
 

=== modified file 'storage/ndb/src/common/debugger/signaldata/SignalNames.cpp'
--- a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp	2011-05-19 09:16:32 +0000
+++ b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp	2011-07-01 17:15:00 +0000
@@ -771,5 +771,9 @@ const GsnName SignalNames [] = {
   ,{ GSN_INDEX_STAT_IMPL_CONF, "INDEX_STAT_IMPL_CONF" }
   ,{ GSN_INDEX_STAT_IMPL_REF, "INDEX_STAT_IMPL_REF" }
   ,{ GSN_INDEX_STAT_REP, "INDEX_STAT_REP" }
+
+  ,{ GSN_GET_CONFIG_REQ, "GET_CONFIG_REQ" }
+  ,{ GSN_GET_CONFIG_REF, "GET_CONFIG_REF" }
+  ,{ GSN_GET_CONFIG_CONF, "GET_CONFIG_CONF" }
 };
 const unsigned short NO_OF_SIGNAL_NAMES = sizeof(SignalNames)/sizeof(GsnName);

=== modified file 'storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp'
--- a/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/common/mgmcommon/ConfigRetriever.cpp	2011-07-01 17:15:00 +0000
@@ -170,10 +170,12 @@ ConfigRetriever::getConfig(Uint32 nodeid
 ndb_mgm_configuration *
 ConfigRetriever::getConfig(NdbMgmHandle mgm_handle)
 {
+  const int from_node = 0;
   ndb_mgm_configuration * conf =
     ndb_mgm_get_configuration2(mgm_handle,
                                m_version,
-                               m_node_type);
+                               m_node_type,
+                               from_node);
   if(conf == 0)
   {
     BaseString tmp(ndb_mgm_get_latest_error_msg(mgm_handle));

=== modified file 'storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp'
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp	2011-05-25 15:03:11 +0000
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp	2011-07-01 17:15:00 +0000
@@ -44,6 +44,7 @@
 #include <signaldata/Sync.hpp>
 #include <signaldata/AllocMem.hpp>
 #include <signaldata/NodeStateSignalData.hpp>
+#include <signaldata/GetConfig.hpp>
 
 #include <EventLogger.hpp>
 #include <TimeQueue.hpp>
@@ -124,6 +125,8 @@ Cmvmi::Cmvmi(Block_context& ctx) :
   addRecSignal(GSN_ALLOC_MEM_REF, &Cmvmi::execALLOC_MEM_REF);
   addRecSignal(GSN_ALLOC_MEM_CONF, &Cmvmi::execALLOC_MEM_CONF);
 
+  addRecSignal(GSN_GET_CONFIG_REQ, &Cmvmi::execGET_CONFIG_REQ);
+
   subscriberPool.setSize(5);
   c_syncReqPool.setSize(5);
 
@@ -3171,3 +3174,57 @@ Cmvmi::execROUTE_ORD(Signal* signal)
   warningEvent("Unable to route GSN: %d from %x to %x",
 	       gsn, srcRef, dstRef);
 }
+
+
+void Cmvmi::execGET_CONFIG_REQ(Signal *signal)
+{
+  jamEntry();
+  const GetConfigReq* const req = (const GetConfigReq *)signal->getDataPtr();
+
+  Uint32 error = 0;
+  Uint32 retRef = req->senderRef; // mgm servers ref
+
+  if (retRef != signal->header.theSendersBlockRef)
+  {
+    error = GetConfigRef::WrongSender;
+  }
+
+  if (req->nodeId != getOwnNodeId())
+  {
+    error = GetConfigRef::WrongNodeId;
+  }
+
+  const Uint32 config_length = m_ctx.m_config.m_clusterConfigPacked.length();
+  if (config_length == 0)
+  {
+    error = GetConfigRef::NoConfig;
+  }
+
+  if (error)
+  {
+    warningEvent("execGET_CONFIG_REQ: failed %u", error);
+    GetConfigRef *ref = (GetConfigRef *)signal->getDataPtrSend();
+    ref->error = error;
+    sendSignal(retRef, GSN_GET_CONFIG_REF, signal,
+               GetConfigRef::SignalLength, JBB);
+    return;
+  }
+
+  const Uint32 nSections= 1;
+  LinearSectionPtr ptr[3];
+  ptr[0].p = (Uint32*)(m_ctx.m_config.m_clusterConfigPacked.get_data());
+  ptr[0].sz = (config_length + 3) / 4;
+
+  GetConfigConf *conf = (GetConfigConf *)signal->getDataPtrSend();
+
+  conf->configLength = config_length;
+
+  sendFragmentedSignal(retRef,
+                       GSN_GET_CONFIG_CONF,
+                       signal,
+                       GetConfigConf::SignalLength,
+                       JBB,
+                       ptr,
+                       nSections,
+                       TheEmptyCallback);
+}

=== modified file 'storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp'
--- a/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/kernel/blocks/cmvmi/Cmvmi.hpp	2011-07-01 17:15:00 +0000
@@ -78,6 +78,8 @@ private:
   void execALLOC_MEM_REF(Signal*);
   void execALLOC_MEM_CONF(Signal*);
 
+  void execGET_CONFIG_REQ(Signal*);
+
   char theErrorMessage[256];
   void sendSTTORRY(Signal* signal);
 

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-05-25 13:19:02 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-06-29 07:56:14 +0000
@@ -978,10 +978,10 @@ ArrayPool<TupTriggerData> c_triggerPool;
       subscriptionDeleteTriggers(triggerPool),
       subscriptionUpdateTriggers(triggerPool),
       constraintUpdateTriggers(triggerPool),
-      tuxCustomTriggers(triggerPool),
       deferredInsertTriggers(triggerPool),
+      deferredUpdateTriggers(triggerPool),
       deferredDeleteTriggers(triggerPool),
-      deferredUpdateTriggers(triggerPool)
+      tuxCustomTriggers(triggerPool)
       {}
     
     Bitmask<MAXNROFATTRIBUTESINWORDS> notNullAttributeMask;

=== modified file 'storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp	2011-05-24 17:54:07 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtux/DbtuxStat.cpp	2011-07-01 06:08:38 +0000
@@ -23,18 +23,6 @@
 // debug note: uses new-style debug macro "D" unlike rest of DBTUX
 // there is no filtering feature (yet) like "DebugStat"
 
-#ifdef VM_TRACE
-inline NdbOut&
-NdbOut::operator<<(double x)
-{
-  NdbOut& out = *this;
-  char buf[100];
-  snprintf(buf, sizeof(buf), "%.02f", x);
-  out << buf;
-  return out;
-}
-#endif
-
 void
 Dbtux::execREAD_PSEUDO_REQ(Signal* signal)
 {

=== modified file 'storage/ndb/src/kernel/blocks/suma/Suma.cpp'
--- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp	2011-05-31 12:28:59 +0000
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp	2011-06-29 07:47:26 +0000
@@ -132,9 +132,9 @@ Suma::execREAD_CONFIG_REQ(Signal* signal
 
   // SumaParticipant
   Uint32 noTables, noAttrs, maxBufferedEpochs;
-  ndb_mgm_get_int_parameter(p, CFG_DB_NO_TABLES,  
+  ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE,
 			    &noTables);
-  ndb_mgm_get_int_parameter(p, CFG_DB_NO_ATTRIBUTES,  
+  ndb_mgm_get_int_parameter(p, CFG_DICT_ATTRIBUTE,
 			    &noAttrs);
   ndb_mgm_get_int_parameter(p, CFG_DB_MAX_BUFFERED_EPOCHS,
                             &maxBufferedEpochs);

=== modified file 'storage/ndb/src/kernel/vm/Configuration.cpp'
--- a/storage/ndb/src/kernel/vm/Configuration.cpp	2011-04-12 11:59:36 +0000
+++ b/storage/ndb/src/kernel/vm/Configuration.cpp	2011-07-01 17:15:00 +0000
@@ -32,6 +32,8 @@
 #include <mgmapi_configuration.hpp>
 #include <kernel_config_parameters.h>
 
+#include <util/ConfigValues.hpp>
+
 #include <ndbapi_limits.h>
 
 #include <EventLogger.hpp>
@@ -191,6 +193,9 @@ Configuration::fetch_configuration(const
   
   m_clusterConfig = p;
 
+  const ConfigValues * cfg = (ConfigValues*)m_clusterConfig;
+  cfg->pack(m_clusterConfigPacked);
+
   {
     Uint32 generation;
     ndb_mgm_configuration_iterator sys_iter(*p, CFG_SECTION_SYSTEM);

=== modified file 'storage/ndb/src/kernel/vm/Configuration.hpp'
--- a/storage/ndb/src/kernel/vm/Configuration.hpp	2011-04-12 11:59:36 +0000
+++ b/storage/ndb/src/kernel/vm/Configuration.hpp	2011-07-01 17:15:00 +0000
@@ -26,6 +26,7 @@
 #include <NdbMutex.h>
 #include <NdbThread.h>
 #include <util/SparseBitmask.hpp>
+#include <util/UtilBuffer.hpp>
 
 enum ThreadTypes
 {
@@ -147,6 +148,7 @@ private:
 
   ndb_mgm_configuration * m_ownConfig;
   ndb_mgm_configuration * m_clusterConfig;
+  UtilBuffer m_clusterConfigPacked;
 
   ndb_mgm_configuration_iterator * m_clusterConfigIter;
   ndb_mgm_configuration_iterator * m_ownConfigIterator;

=== modified file 'storage/ndb/src/mgmapi/mgmapi.cpp'
--- a/storage/ndb/src/mgmapi/mgmapi.cpp	2011-06-21 13:10:37 +0000
+++ b/storage/ndb/src/mgmapi/mgmapi.cpp	2011-07-01 17:15:00 +0000
@@ -2068,6 +2068,15 @@ ndb_mgm_dump_state(NdbMgmHandle handle,
 }
 
 extern "C"
+struct ndb_mgm_configuration *
+ndb_mgm_get_configuration_from_node(NdbMgmHandle handle,
+                                    int nodeid)
+{
+  return ndb_mgm_get_configuration2(handle, 0,
+                                    NDB_MGM_NODE_TYPE_UNKNOWN, nodeid);
+}
+
+extern "C"
 int 
 ndb_mgm_start_signallog(NdbMgmHandle handle, int nodeId, 
 			struct ndb_mgm_reply* reply) 
@@ -2466,7 +2475,7 @@ ndb_mgm_abort_backup(NdbMgmHandle handle
 extern "C"
 struct ndb_mgm_configuration *
 ndb_mgm_get_configuration2(NdbMgmHandle handle, unsigned int version,
-                           enum ndb_mgm_node_type nodetype)
+                           enum ndb_mgm_node_type nodetype, int from_node)
 {
   DBUG_ENTER("ndb_mgm_get_configuration2");
 
@@ -2487,6 +2496,20 @@ ndb_mgm_get_configuration2(NdbMgmHandle
     args.put("nodetype", nodetype);
   }
 
+  if (from_node != 0)
+  {
+    if (!check_version_ge(handle->mgmd_version(),
+                          NDB_MAKE_VERSION(7,1,16),
+                          NDB_MAKE_VERSION(7,0,27),
+                          0))
+    {
+      SET_ERROR(handle, NDB_MGM_GET_CONFIG_FAILED,
+                "The mgm server does not support getting config from_node");
+      DBUG_RETURN(0);
+    }
+    args.put("from_node", from_node);
+  }
+
   const ParserRow<ParserDummy> reply[] = {
     MGM_CMD("get config reply", NULL, ""),
     MGM_ARG("result", String, Mandatory, "Error message"),    

=== modified file 'storage/ndb/src/mgmapi/mgmapi_internal.h'
--- a/storage/ndb/src/mgmapi/mgmapi_internal.h	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/mgmapi/mgmapi_internal.h	2011-07-01 17:15:00 +0000
@@ -94,7 +94,8 @@ extern "C" {
   struct ndb_mgm_configuration *
   ndb_mgm_get_configuration2(NdbMgmHandle handle,
                              unsigned version,
-                             enum ndb_mgm_node_type nodetype);
+                             enum ndb_mgm_node_type nodetype,
+                             int from_node = 0);
 
 
 #ifdef __cplusplus

=== modified file 'storage/ndb/src/mgmsrv/ConfigInfo.cpp'
--- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp	2011-06-15 10:55:06 +0000
+++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp	2011-07-01 09:16:46 +0000
@@ -2747,7 +2747,7 @@ const ConfigInfo::ParamInfo ConfigInfo::
     ConfigInfo::CI_INT,
     "0",
     "0",
-    "0"
+    STR_VALUE(MAX_INT_RNIL)
   },
 
   /****************************************************************************

=== modified file 'storage/ndb/src/mgmsrv/Defragger.hpp'
--- a/storage/ndb/src/mgmsrv/Defragger.hpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/src/mgmsrv/Defragger.hpp	2011-07-01 17:15:00 +0000
@@ -61,7 +61,14 @@ class Defragger {
 
 public:
   Defragger() {};
-  ~Defragger() {};
+  ~Defragger()
+  {
+    for (size_t i = m_buffers.size(); i > 0; --i)
+    {
+      delete m_buffers[i-1]; // free the memory of the fragment
+    }
+    // m_buffers will be freed by ~Vector
+  };
 
   /*
     return true when complete signal received
@@ -113,13 +120,12 @@ public:
     clear any unassembled signal buffers from node
   */
   void node_failed(NodeId nodeId) {
-    for (size_t i = 0; i < m_buffers.size(); i++)
+    for (size_t i = m_buffers.size(); i > 0; --i)
     {
-      DefragBuffer* dbuf = m_buffers[i];
-      if (dbuf->m_node_id == nodeId)
+      if (m_buffers[i-1]->m_node_id == nodeId)
       {
-        delete dbuf; // MASV ?
-        m_buffers.erase(i);
+        delete m_buffers[i-1]; // free the memory of the signal fragment
+        m_buffers.erase(i-1); // remove the reference from the vector.
       }
     }
   }

=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.cpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-06-22 08:57:03 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-07-01 17:15:00 +0000
@@ -21,6 +21,7 @@
 #include "ndb_mgmd_error.h"
 #include "Services.hpp"
 #include "ConfigManager.hpp"
+#include "Defragger.hpp"
 
 #include <NdbOut.hpp>
 #include <NdbApiSignal.hpp>
@@ -43,6 +44,7 @@
 #include <signaldata/CreateNodegroup.hpp>
 #include <signaldata/DropNodegroup.hpp>
 #include <signaldata/Sync.hpp>
+#include <signaldata/GetConfig.hpp>
 #include <NdbSleep.h>
 #include <portlib/NdbDir.hpp>
 #include <EventLogger.hpp>
@@ -737,6 +739,159 @@ MgmtSrvr::get_packed_config(ndb_mgm_node
   return m_config_manager->get_packed_config(node_type, &buf64, error);
 }
 
+bool
+MgmtSrvr::get_packed_config_from_node(NodeId nodeId,
+                            BaseString& buf64, BaseString& error)
+{
+  DBUG_ENTER("get_packed_config_from_node");
+
+  if (nodeId >= MAX_NODES_ID)
+  {
+    error.assfmt("Nodeid %d is greater than max nodeid %d. ",
+                 nodeId, MAX_NODES_ID);
+    DBUG_RETURN(false);
+  }
+
+  if (getNodeType(nodeId) == NDB_MGM_NODE_TYPE_UNKNOWN)
+  {
+    error.assfmt("Nodeid %d does not exist. ", nodeId);
+    DBUG_RETURN(false);
+  }
+
+  if (getNodeType(nodeId) != NDB_MGM_NODE_TYPE_NDB)
+  {
+    error.assfmt("Node %d is not an NDB node. ", nodeId);
+    DBUG_RETURN(false);
+  }
+
+  trp_node node = getNodeInfo(nodeId);
+
+  if (!node.m_alive)
+  {
+    error.assfmt("Data node %d is not alive. ", nodeId);
+    DBUG_RETURN(false);
+  }
+
+  const Uint32 version = node.m_info.m_version;
+
+  if (!ndbd_get_config_supported(version))
+  {
+    error.assfmt("Data node %d (version %d.%d.%d) does not support getting config. ",
+                 nodeId, ndbGetMajor(version),
+                 ndbGetMinor(version), ndbGetBuild(version));
+    DBUG_RETURN(false);
+  }
+
+  INIT_SIGNAL_SENDER(ss,nodeId);
+
+  SimpleSignal ssig;
+  GetConfigReq* req = CAST_PTR(GetConfigReq, ssig.getDataPtrSend());
+  req->senderRef = ss.getOwnRef();
+  req->nodeId = nodeId;
+
+  g_eventLogger->debug("Sending GET_CONFIG_REQ to %d", nodeId);
+
+  ssig.set(ss, TestOrd::TraceAPI, CMVMI, GSN_GET_CONFIG_REQ,
+           GetConfigReq::SignalLength);
+  if ((ss.sendSignal(nodeId, &ssig)) != SEND_OK)
+  {
+    DBUG_RETURN(false);
+  }
+
+  Defragger defragger;
+  while (true)
+  {
+    SimpleSignal *signal = ss.waitFor();
+    int gsn = signal->readSignalNumber();
+
+    switch (gsn)
+    {
+    case GSN_GET_CONFIG_CONF:
+    {
+      if (refToNode(signal->header.theSendersBlockRef) != nodeId)
+      {
+        error.assfmt("Internal Error: Reply from wrong node %d, expected from %d. ",
+                     refToNode(signal->header.theSendersBlockRef),
+                     nodeId);
+        DBUG_RETURN(false);
+      }
+
+      const GetConfigConf * const conf =
+	CAST_CONSTPTR(GetConfigConf, signal->getDataPtr());
+
+      if (signal->header.m_noOfSections != 1)
+      {
+        error.assfmt("Internal Error: Wrong number of sections %d received, expected %d. ",
+                     signal->header.m_noOfSections, 1);
+        DBUG_RETURN(false);
+      }
+
+      if (defragger.defragment(signal))
+      {
+        ConfigValuesFactory cf;
+        require(cf.unpack(signal->ptr[0].p, conf->configLength));
+
+        Config received_config(cf.getConfigValues());
+        if (!received_config.pack64(buf64))
+        {
+          error.assign("Failed to pack64");
+          DBUG_RETURN(false);
+        }
+        DBUG_RETURN(true);
+      }
+      // wait until all fragments are received
+      continue;
+    }
+
+    case GSN_GET_CONFIG_REF:
+    {
+      if (refToNode(signal->header.theSendersBlockRef) != nodeId)
+      {
+        error.assfmt("Internal Error: Reply from wrong node %d, expected from %d. ",
+                     refToNode(signal->header.theSendersBlockRef),
+                     nodeId);
+        DBUG_RETURN(false);
+      }
+      const GetConfigRef * const ref =
+	CAST_CONSTPTR(GetConfigRef, signal->getDataPtr());
+      error.assfmt("Error in retrieving config from node %d: Internal error: %d",
+                   nodeId, ref->error);
+
+      DBUG_RETURN(false);
+    }
+
+    case GSN_NF_COMPLETEREP:
+    {
+      const NFCompleteRep * rep = CAST_CONSTPTR(NFCompleteRep,
+                                                signal->getDataPtr());
+      if (rep->failedNodeId == nodeId)
+      {
+        error.assfmt("Node %d is not available", nodeId);
+        DBUG_RETURN(false);
+      }
+      continue;
+    }
+
+    case GSN_NODE_FAILREP:
+    {
+      // Wait until GSN_NODE_COMPLETEREP is received.
+      continue;
+    }
+
+    case GSN_API_REGCONF:
+    case GSN_TAKE_OVERTCCONF:
+    case GSN_CONNECT_REP:
+      // Ignore
+      continue;
+
+    default:
+      report_unknown_signal(signal);
+      DBUG_RETURN(false);
+    }
+  }
+  // Should never come here
+  require(false);
+}
 
 MgmtSrvr::~MgmtSrvr()
 {
@@ -1081,10 +1236,6 @@ MgmtSrvr::sendall_STOP_REQ(NodeBitmask &
         else
           failed++;
       }
-      else
-      {
-        failed++;
-      }
     }
   }
 
@@ -3961,7 +4112,7 @@ MgmtSrvr::change_config(Config& new_conf
     case GSN_NF_COMPLETEREP:
     {
       NodeId nodeId = refToNode(signal->header.theSendersBlockRef);
-      msg.assign("Node %d failed uring configuration change", nodeId);
+      msg.assign("Node %d failed during configuration change", nodeId);
       return false;
       break;
     }

=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.hpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.hpp	2011-06-21 13:10:37 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.hpp	2011-07-01 17:15:00 +0000
@@ -488,6 +488,10 @@ public:
   bool get_packed_config(ndb_mgm_node_type nodetype,
                          BaseString& buf64, BaseString& error);
 
+  /* Get copy of configuration packed with base64 from node nodeid */
+  bool get_packed_config_from_node(NodeId nodeid,
+                         BaseString& buf64, BaseString& error);
+
   void print_config(const char* section_filter = NULL,
                     NodeId nodeid_filter = 0,
                     const char* param_filter = NULL,

=== modified file 'storage/ndb/src/mgmsrv/Services.cpp'
--- a/storage/ndb/src/mgmsrv/Services.cpp	2011-06-01 07:40:49 +0000
+++ b/storage/ndb/src/mgmsrv/Services.cpp	2011-07-01 17:15:00 +0000
@@ -126,6 +126,7 @@ ParserRow<MgmApiSession> commands[] = {
     MGM_ARG("version", Int, Mandatory, "Configuration version number"),
     MGM_ARG("node", Int, Optional, "Node ID"),
     MGM_ARG("nodetype", Int, Optional, "Type of requesting node"),
+    MGM_ARG("from_node", Int, Optional, "Node to get config from"),
 
   MGM_CMD("get nodeid", &MgmApiSession::get_nodeid, ""),
     MGM_ARG("version", Int, Mandatory, "Configuration version number"),
@@ -608,10 +609,12 @@ MgmApiSession::getConfig(Parser_t::Conte
                          const class Properties &args)
 {
   Uint32 nodetype = NDB_MGM_NODE_TYPE_UNKNOWN;
+  Uint32 from_node = 0;
 
   // Ignoring mandatory parameter "version"
   // Ignoring optional parameter "node"
   args.get("nodetype", &nodetype);
+  args.get("from_node", &from_node);
 
   SLEEP_ERROR_INSERTED(1);
   m_output->println("get config reply");
@@ -619,8 +622,14 @@ MgmApiSession::getConfig(Parser_t::Conte
   BaseString pack64, error;
 
   UtilBuffer packed;
-  if (!m_mgmsrv.get_packed_config((ndb_mgm_node_type)nodetype,
-                                  pack64, error))
+
+  bool success =
+   (from_node == 0 || from_node == m_mgmsrv.getOwnNodeId()) ?
+                m_mgmsrv.get_packed_config((ndb_mgm_node_type)nodetype,
+                                           pack64, error) :
+                m_mgmsrv.get_packed_config_from_node(from_node,
+                                                     pack64, error);
+  if (!success)
   {
     m_output->println("result: %s", error.c_str());
     m_output->print("\n");

=== modified file 'storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp	2011-06-16 18:16:01 +0000
+++ b/storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp	2011-06-28 16:13:49 +0000
@@ -2257,7 +2257,7 @@ NdbIndexStatImpl::MemDefault::~MemDefaul
 }
 
 void*
-NdbIndexStatImpl::MemDefault::mem_alloc(size_t size)
+NdbIndexStatImpl::MemDefault::mem_alloc(UintPtr size)
 {
   if (size == 0 || size % 4 != 0)
   {
@@ -2290,7 +2290,7 @@ NdbIndexStatImpl::MemDefault::mem_free(v
   }
 }
 
-size_t
+UintPtr
 NdbIndexStatImpl::MemDefault::mem_used() const
 {
   return m_used;

=== modified file 'storage/ndb/src/ndbapi/NdbIndexStatImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbIndexStatImpl.hpp	2011-06-12 16:54:32 +0000
+++ b/storage/ndb/src/ndbapi/NdbIndexStatImpl.hpp	2011-06-28 16:13:49 +0000
@@ -281,9 +281,9 @@ public:
 
   // default memory allocator
   struct MemDefault : public Mem {
-    virtual void* mem_alloc(size_t bytes);
+    virtual void* mem_alloc(UintPtr bytes);
     virtual void mem_free(void* p);
-    virtual size_t mem_used() const;
+    virtual UintPtr mem_used() const;
     MemDefault();
     virtual ~MemDefault();
   private:

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilder.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-06-16 09:32:43 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-06-30 08:27:52 +0000
@@ -318,6 +318,11 @@ NdbQueryDef::isScanQuery() const
 { return m_impl.isScanQuery();
 }
 
+NdbQueryDef::QueryType
+NdbQueryDef::getQueryType() const
+{ return m_impl.getQueryType();
+}
+
 NdbQueryDefImpl& 
 NdbQueryDef::getImpl() const{
   return m_impl;
@@ -1095,24 +1100,10 @@ NdbQueryBuilderImpl::contains(const NdbQ
 const NdbQueryDefImpl*
 NdbQueryBuilderImpl::prepare()
 {
-  /* Check if query is sorted and has multiple scan operations. This 
-   * combination is not implemented.
-   */
-  if (m_operations.size() > 0 && 
-      m_operations[0]->isScanOperation() &&
-      m_operations[0]->getOrdering() 
-        != NdbQueryOptions::ScanOrdering_unordered &&
-      m_operations[0]->getOrdering() != NdbQueryOptions::ScanOrdering_void)
-  {
-    for (Uint32 i = 1; i<m_operations.size(); i++)
-    {
-      if (m_operations[i]->isScanOperation())
-      {
-        setErrorCode(QRY_MULTIPLE_SCAN_SORTED);
-        return NULL;
-      } 
-    }
-  }
+  const bool sorted =
+    m_operations.size() > 0 &&
+    m_operations[0]->getOrdering() != NdbQueryOptions::ScanOrdering_unordered &&
+    m_operations[0]->getOrdering() != NdbQueryOptions::ScanOrdering_void;
 
   int error;
   NdbQueryDefImpl* def = new NdbQueryDefImpl(m_operations, m_operands, error);
@@ -1127,6 +1118,16 @@ NdbQueryBuilderImpl::prepare()
     return NULL;
   }
 
+  /* Check if query is sorted and has multiple scan operations. This 
+   * combination is not implemented.
+   */
+  if (sorted && def->getQueryType() == NdbQueryDef::MultiScanQuery)
+  {
+    delete def;
+    setErrorCode(QRY_MULTIPLE_SCAN_SORTED);
+    return NULL;
+  }
+
   if (doPrintQueryTree)
   {
     ndbout << "Query tree:" << endl;
@@ -1256,6 +1257,20 @@ NdbQueryDefImpl::getQueryOperation(const
   return NULL;
 }
 
+NdbQueryDef::QueryType
+NdbQueryDefImpl::getQueryType() const
+{
+  if (!m_operations[0]->isScanOperation())
+    return NdbQueryDef::LookupQuery;
+
+  for (Uint32 i=1; i<m_operations.size(); ++i)
+  {
+    if (m_operations[i]->isScanOperation())
+      return NdbQueryDef::MultiScanQuery;
+  }
+  return NdbQueryDef::SingleScanQuery;
+}
+
 ////////////////////////////////////////////////////////////////
 // The (hidden) Impl of NdbQueryOperand (w/ various subclasses)
 ////////////////////////////////////////////////////////////////

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilder.hpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilder.hpp	2011-06-20 13:25:48 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilder.hpp	2011-06-30 08:27:52 +0000
@@ -493,6 +493,15 @@ class NdbQueryDef
 
 public:
 
+  /**
+   * The different types of query types supported
+   */
+  enum QueryType {
+    LookupQuery,     ///< All operations are PrimaryKey- or UniqueIndexAccess
+    SingleScanQuery, ///< Root is Table- or OrderedIndexScan, childs are 'lookup'
+    MultiScanQuery   ///< Root, and some childs are scans
+  };
+
   Uint32 getNoOfOperations() const;
 
   // Get a specific NdbQueryOperationDef by ident specified
@@ -504,6 +513,9 @@ public:
   // the client has completed access to it.
   bool isScanQuery() const;
 
+  // Return the 'enum QueryType' as defined above.
+  QueryType getQueryType() const;
+
   // Remove this NdbQueryDef including operation and operands it contains
   void destroy() const;
 

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-06-16 09:32:43 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-06-30 08:27:52 +0000
@@ -595,6 +595,8 @@ public:
   bool isScanQuery() const
   { return m_operations[0]->isScanOperation(); }
 
+  NdbQueryDef::QueryType getQueryType() const;
+
   Uint32 getNoOfOperations() const
   { return m_operations.size(); }
 

=== modified file 'storage/ndb/src/ndbapi/NdbQueryOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-06-20 13:25:48 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-07-01 10:02:15 +0000
@@ -507,7 +507,7 @@ void NdbBulkAllocator::reset(){
   // Overwrite with 0xff bytes to detect accidental use of released memory.
   assert(m_buffer == NULL || 
          memset(m_buffer, 0xff, m_maxObjs * m_objSize) != NULL);
-  delete m_buffer;
+  delete [] m_buffer;
   m_buffer = NULL;
   m_nextObjNo = 0;
   m_maxObjs = 0;
@@ -4771,17 +4771,11 @@ NdbQueryOperationImpl::setOrdering(NdbQu
   /* Check if query is sorted and has multiple scan operations. This 
    * combination is not implemented.
    */
-  if (ordering != NdbQueryOptions::ScanOrdering_unordered)
+  if (ordering != NdbQueryOptions::ScanOrdering_unordered &&
+      getQueryDef().getQueryType() == NdbQueryDef::MultiScanQuery)
   {
-    for (Uint32 i = 1; i < getQuery().getNoOfOperations(); i++)
-    {
-      if (getQuery().getQueryOperation(i).getQueryOperationDef()
-          .isScanOperation())
-      {
-        getQuery().setErrorCode(QRY_MULTIPLE_SCAN_SORTED);
-        return -1;
-      }
-    }
+    getQuery().setErrorCode(QRY_MULTIPLE_SCAN_SORTED);
+    return -1;
   }
   
   m_ordering = ordering;

=== modified file 'storage/ndb/src/ndbapi/NdbTransaction.cpp'
--- a/storage/ndb/src/ndbapi/NdbTransaction.cpp	2011-05-25 13:19:02 +0000
+++ b/storage/ndb/src/ndbapi/NdbTransaction.cpp	2011-06-28 08:47:18 +0000
@@ -937,7 +937,10 @@ int NdbTransaction::refresh()
       scan_op != 0; scan_op = (NdbIndexScanOperation *) scan_op->theNext)
   {
     NdbTransaction* scan_trans = scan_op->theNdbCon;
-    scan_trans->sendTC_HBREP();
+    if (scan_trans)
+    {
+      scan_trans->sendTC_HBREP();
+    }
   }
   return sendTC_HBREP();
 }

=== modified file 'storage/ndb/test/ndbapi/testMgm.cpp'
--- a/storage/ndb/test/ndbapi/testMgm.cpp	2011-02-22 08:40:01 +0000
+++ b/storage/ndb/test/ndbapi/testMgm.cpp	2011-07-01 17:15:00 +0000
@@ -23,6 +23,7 @@
 #include <InputStream.hpp>
 #include <signaldata/EventReport.hpp>
 #include <NdbRestarter.hpp>
+#include <random.h>
 
 /*
   Tests that only need the mgmd(s) started
@@ -714,6 +715,174 @@ int runGetConfigUntilStopped(NDBT_Contex
 }
 
 
+// Find a random node of a given type.
+
+static bool
+get_nodeid_of_type(NdbMgmd& mgmd, ndb_mgm_node_type type, int *nodeId)
+{
+  ndb_mgm_node_type
+    node_types[2] = { type,
+                      NDB_MGM_NODE_TYPE_UNKNOWN };
+
+  ndb_mgm_cluster_state *cs = ndb_mgm_get_status2(mgmd.handle(), node_types);
+  if (cs == NULL)
+  {
+    g_err << "ndb_mgm_get_status2 failed, error: "
+            << ndb_mgm_get_latest_error(mgmd.handle()) << " "
+            << ndb_mgm_get_latest_error_msg(mgmd.handle()) << endl;
+    return false;
+  }
+
+  int noOfNodes = cs->no_of_nodes;
+  int randomnode = myRandom48(noOfNodes);
+  ndb_mgm_node_state *ns = cs->node_states + randomnode;
+  assert(ns->node_type == (Uint32)type);
+  assert(ns->node_id);
+
+  *nodeId = ns->node_id;
+  g_info << "Got node id " << *nodeId << " of type " << type << endl;
+
+  free(cs);
+  return true;
+}
+
+
+// Ensure getting config from an illegal node fails.
+// Return true in that case.
+
+static bool
+get_config_from_illegal_node(NdbMgmd& mgmd, int nodeId)
+{
+  struct ndb_mgm_configuration* conf=
+      ndb_mgm_get_configuration_from_node(mgmd.handle(), nodeId);
+
+  // Get conf from an illegal node should fail.
+  if (ndb_mgm_get_latest_error(mgmd.handle()) != NDB_MGM_GET_CONFIG_FAILED)
+  {
+      g_err << "ndb_mgm_get_configuration from illegal node "
+            << nodeId << " not failed, error: "
+            << ndb_mgm_get_latest_error(mgmd.handle()) << " "
+            << ndb_mgm_get_latest_error_msg(mgmd.handle()) << endl;
+      return false;
+  }
+
+  if (conf)
+  {
+    // Should not get a conf from an illegal node.
+    g_err << "ndb_mgm_get_configuration from illegal node: "
+          << nodeId << ", error: "
+          << ndb_mgm_get_latest_error(mgmd.handle()) << " "
+          << ndb_mgm_get_latest_error_msg(mgmd.handle()) << endl;
+    free(conf);
+    return false;
+  }
+  return true;
+}
+
+
+// Check that get_config from a non-existing node fails.
+
+static bool
+check_get_config_illegal_node(NdbMgmd& mgmd)
+{
+  // Find a node that does not exist
+  Config conf;
+  if (!mgmd.get_config(conf))
+    return false;
+
+  int nodeId = 0;
+  for(Uint32 i= 1; i < MAX_NODES; i++){
+    ConfigIter iter(&conf, CFG_SECTION_NODE);
+    if (iter.find(CFG_NODE_ID, i) != 0){
+      nodeId = i;
+      break;
+    }
+  }
+  if (nodeId == 0)
+    return true; // All nodes probably defined
+
+  return get_config_from_illegal_node(mgmd, nodeId);
+}
+
+
+
+// Check that get_config from a non-NDB/MGM node type fails.
+
+static bool
+check_get_config_wrong_type(NdbMgmd& mgmd)
+{
+  int nodeId = 0;
+
+  if (get_nodeid_of_type(mgmd, NDB_MGM_NODE_TYPE_API, &nodeId))
+  {
+    return get_config_from_illegal_node(mgmd, nodeId);
+  }
+  // No API nodes found.
+  return true;
+}
+
+/* Find management node or a random data node, and get config from it.
+ * Also ensure failure when getting config from
+ * an illegal node (a non-NDB/MGM type, nodeid not defined,
+ * or nodeid > MAX_NODES).
+ */
+int runGetConfigFromNode(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NdbMgmd mgmd;
+  if (!mgmd.connect())
+    return NDBT_FAILED;
+
+  if (!check_get_config_wrong_type(mgmd) ||
+      !check_get_config_illegal_node(mgmd) ||
+      !get_config_from_illegal_node(mgmd, MAX_NODES + 2))
+  {
+    return NDBT_FAILED;
+  }
+
+  int loops= ctx->getNumLoops();
+  for (int l= 0; l < loops; l++)
+  {
+    /* Get config from a node of type:
+     * NDB_MGM_NODE_TYPE_NDB or NDB_MGM_NODE_TYPE_MGM
+     */
+    int myChoice = myRandom48(2);
+    ndb_mgm_node_type randomAllowedType = (myChoice) ?
+                                          NDB_MGM_NODE_TYPE_NDB :
+                                          NDB_MGM_NODE_TYPE_MGM;
+    int nodeId = 0;
+    if (get_nodeid_of_type(mgmd, randomAllowedType, &nodeId))
+    {
+      struct ndb_mgm_configuration* conf =
+        ndb_mgm_get_configuration_from_node(mgmd.handle(), nodeId);
+      if (!conf)
+      {
+        g_err << "ndb_mgm_get_configuration_from_node "
+              << nodeId << " failed, error: "
+              << ndb_mgm_get_latest_error(mgmd.handle()) << " "
+              << ndb_mgm_get_latest_error_msg(mgmd.handle()) << endl;
+        return NDBT_FAILED;
+      }
+      free(conf);
+    }
+    else
+    {
+      // ignore
+    }
+  }
+  return NDBT_OK;
+}
+
+
+int runGetConfigFromNodeUntilStopped(NDBT_Context* ctx, NDBT_Step* step)
+{
+  int result= NDBT_OK;
+  while(!ctx->isTestStopped() &&
+        (result= runGetConfigFromNode(ctx, step)) == NDBT_OK)
+    ;
+  return result;
+}
+
+
 int runTestStatus(NDBT_Context* ctx, NDBT_Step* step)
 {
   ndb_mgm_node_type types[2] = {
@@ -2659,6 +2828,7 @@ TESTCASE("Stress",
   STEP(runTestGetNodeIdUntilStopped);
   STEP(runSetConfigUntilStopped);
   STEPS(runGetConfigUntilStopped, 10);
+  STEPS(runGetConfigFromNodeUntilStopped, 10);
   STEPS(runTestStatusUntilStopped, 10);
   STEPS(runTestGetVersionUntilStopped, 5);
   STEP(runSleepAndStop);
@@ -2668,6 +2838,7 @@ TESTCASE("Stress2",
   STEP(runTestGetNodeIdUntilStopped);
   STEPS(runTestSetConfigParallelUntilStopped, 5);
   STEPS(runGetConfigUntilStopped, 10);
+  STEPS(runGetConfigFromNodeUntilStopped, 10);
   STEPS(runTestStatusUntilStopped, 10);
   STEPS(runTestGetVersionUntilStopped, 5);
   STEP(runSleepAndStop);

=== modified file 'storage/ndb/test/ndbapi/testMgmd.cpp'
--- a/storage/ndb/test/ndbapi/testMgmd.cpp	2011-06-21 13:10:37 +0000
+++ b/storage/ndb/test/ndbapi/testMgmd.cpp	2011-06-27 06:26:01 +0000
@@ -280,6 +280,8 @@ public:
 
   }
 
+  NdbMgmHandle handle() { return m_mgmd_client.handle(); }
+
 private:
 
   bool get_section_string(const Properties& config,
@@ -1099,6 +1101,55 @@ int runTestBug12352191(NDBT_Context* ctx
 
 }
 
+int
+runBug61607(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NDBT_Workingdir wd("test_mgmd"); // temporary working directory
+
+  // Create config.ini
+  const int cnt_mgmd = 1;
+  Properties config = ConfigFactory::create(cnt_mgmd);
+  CHECK(ConfigFactory::write_config_ini(config,
+                                        path(wd.path(),
+                                             "config.ini",
+                                             NULL).c_str()));
+  // Start ndb_mgmd(s)
+  MgmdProcessList mgmds;
+  for (int i = 1; i <= cnt_mgmd; i++)
+  {
+    Mgmd* mgmd = new Mgmd(i);
+    mgmds.push_back(mgmd);
+    CHECK(mgmd->start_from_config_ini(wd.path()));
+  }
+
+  // Connect the ndb_mgmd(s)
+  for (unsigned i = 0; i < mgmds.size(); i++)
+    CHECK(mgmds[i]->connect(config));
+
+  // wait for confirmed config
+  for (unsigned i = 0; i < mgmds.size(); i++)
+    CHECK(mgmds[i]->wait_confirmed_config());
+
+  // Check binary config files created
+  CHECK(file_exists(path(wd.path(),
+                         "ndb_1_config.bin.1",
+                         NULL).c_str()));
+
+  int no_of_nodes = 0;
+  int * node_ids = 0;
+  int initialstart = 0;
+  int nostart = 0;
+  int abort = 0;
+  int force = 0;
+  int need_disconnect = 0;
+  int res = ndb_mgm_restart4(mgmds[0]->handle(), no_of_nodes, node_ids,
+                             initialstart, nostart, abort, force,
+                             &need_disconnect);
+
+
+  return res == 0 ? NDBT_OK : NDBT_FAILED;
+}
+
 NDBT_TESTSUITE(testMgmd);
 DRIVER(DummyDriver); /* turn off use of NdbApi */
 
@@ -1151,6 +1202,10 @@ TESTCASE("Bug12352191",
 {
   INITIALIZER(runTestBug12352191);
 }
+TESTCASE("Bug61607", "")
+{
+  INITIALIZER(runBug61607);
+}
 
 NDBT_TESTSUITE_END(testMgmd);
 

=== modified file 'storage/ndb/test/ndbapi/testScan.cpp'
--- a/storage/ndb/test/ndbapi/testScan.cpp	2011-04-07 07:22:49 +0000
+++ b/storage/ndb/test/ndbapi/testScan.cpp	2011-06-28 08:47:18 +0000
@@ -1424,6 +1424,86 @@ runBug54945(NDBT_Context* ctx, NDBT_Step
   return NDBT_OK;
 }
 
+int
+runCloseRefresh(NDBT_Context* ctx, NDBT_Step* step)
+{
+  Ndb * pNdb = GETNDB(step);
+
+  const Uint32 codeWords= 1;
+  Uint32 codeSpace[ codeWords ];
+  NdbInterpretedCode code(NULL, // Table is irrelevant
+                          &codeSpace[0],
+                          codeWords);
+  if ((code.interpret_exit_last_row() != 0) ||
+      (code.finalise() != 0))
+  {
+    ERR(code.getNdbError());
+    return NDBT_FAILED;
+  }
+
+  const NdbDictionary::Table*  pTab = ctx->getTab();
+  NdbTransaction* pTrans = pNdb->startTransaction();
+  NdbScanOperation* pOp = pTrans->getNdbScanOperation(pTab->getName());
+  if (pOp == NULL)
+  {
+    ERR(pTrans->getNdbError());
+    return NDBT_FAILED;
+  }
+
+  if (pOp->readTuples(NdbOperation::LM_CommittedRead) != 0)
+  {
+    ERR(pTrans->getNdbError());
+    return NDBT_FAILED;
+  }
+
+  if (pOp->setInterpretedCode(&code) == -1 )
+  {
+    ERR(pTrans->getNdbError());
+    pNdb->closeTransaction(pTrans);
+    return NDBT_FAILED;
+  }
+
+  if (pOp->getValue(NdbDictionary::Column::ROW_COUNT) == 0)
+  {
+    ERR(pTrans->getNdbError());
+    return NDBT_FAILED;
+  }
+
+  pTrans->execute(NdbTransaction::NoCommit);
+  pOp->close(); // close this
+
+  pOp = pTrans->getNdbScanOperation(pTab->getName());
+  if (pOp == NULL)
+  {
+    ERR(pTrans->getNdbError());
+    return NDBT_FAILED;
+  }
+
+  if (pOp->readTuples(NdbOperation::LM_CommittedRead) != 0)
+  {
+    ERR(pTrans->getNdbError());
+    return NDBT_FAILED;
+  }
+
+  if (pOp->setInterpretedCode(&code) == -1 )
+  {
+    ERR(pTrans->getNdbError());
+    pNdb->closeTransaction(pTrans);
+    return NDBT_FAILED;
+  }
+
+  if (pOp->getValue(NdbDictionary::Column::ROW_COUNT) == 0)
+  {
+    ERR(pTrans->getNdbError());
+    return NDBT_FAILED;
+  }
+
+  pTrans->execute(NdbTransaction::NoCommit);
+  pTrans->refresh();
+  pTrans->close();
+  return NDBT_OK;
+}
+
 #define CHK_RET_FAILED(x) if (!(x)) { ndbout_c("Failed on line: %u", __LINE__); return NDBT_FAILED; }
 
 int
@@ -2066,6 +2146,10 @@ TESTCASE("Bug42559", "")
   FINALIZER(finalizeBug42559);
   FINALIZER(runClearTable);
 }
+TESTCASE("CloseRefresh", "")
+{
+  INITIALIZER(runCloseRefresh);
+}
 TESTCASE("Bug54945", "")
 {
   INITIALIZER(runBug54945);

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2011-06-07 10:03:02 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2011-06-28 08:47:18 +0000
@@ -517,6 +517,10 @@ args: -n NoCloseTransaction T6 D1 D2
 
 max-time: 500
 cmd: testScan
+args: -n CloseRefresh T1
+
+max-time: 500
+cmd: testScan
 args: -n CheckInactivityTimeOut T6 D1 D2 
 
 max-time: 500

=== modified file 'storage/ndb/tools/ndb_config.cpp'
--- a/storage/ndb/tools/ndb_config.cpp	2011-05-24 11:51:39 +0000
+++ b/storage/ndb/tools/ndb_config.cpp	2011-07-01 17:15:00 +0000
@@ -16,7 +16,45 @@
 */
 
 /**
- * ndb_config --nodes --query=nodeid --type=ndbd --host=local1
+ * Description of config variables, including their min, max, default
+ * values can be printed (--configinfo). This can also be printed
+ * in xml format (--xml).
+ *
+ * Config can be retrieved from only one of the following sources:
+ ** config stored at mgmd (default. The options --config_from_node=0,
+ ** or --config_from_node=1 also give the same results.)
+ ** config stored at a data node (--config_from_node)
+ ** my.cnf (--mycnf=<fullPath/mycnfFileName>)
+ ** config.file (--config_file=<fullPath/configFileName>)
+ *
+ * Config variables are displayed from only one of the following
+ * sections of the retrieved config:
+ ** CFG_SECTION_NODE (default, or --nodes)
+ ** CFG_SECTION_CONNECTION (--connections)
+ ** CFG_SECTION_SYSTEM (--system)
+ */
+
+/**
+ * Examples:
+ * Get config from mgmd (default):
+ ** Display results from section CFG_SECTION_NODE (default)
+ *** ndb_config --nodes --query=nodeid --type=ndbd --host=local1
+ *** ndb_config  --query=nodeid,host
+ *
+ ** Display results from section CFG_SECTION_SYSTEM
+ *** ndb_config --system --query=ConfigGenerationNumber
+ *
+ ** Display results from section CFG_SECTION_CONNECTION
+ *** ndb_config --connections --query=type
+ *
+ * Get config from eg. node 2, which is a data node:
+ *
+ ** ndb_config --config_from_node=2 --system --query=ConfigGenerationNumber
+ ** ndb_config --config_from_node=2 --connections --query=type
+ ** ndb_config --config_from_node=2 --query=id,NoOfFragmentLogFiles
+ *
+ ** Display results for only node 2:
+ *** ndb_config --config_from_node=2 --query=id,NoOfFragmentLogFiles --nodeid=2
  */
 
 #include <ndb_global.h>
@@ -36,7 +74,7 @@
 static int g_verbose = 0;
 static int try_reconnect = 3;
 
-static int g_nodes, g_connections, g_section;
+static int g_nodes, g_connections, g_system, g_section;
 static const char * g_query = 0;
 
 static int g_nodeid = 0;
@@ -48,6 +86,7 @@ static const char * g_config_file = 0;
 static int g_mycnf = 0;
 static int g_configinfo = 0;
 static int g_xml = 0;
+static int g_config_from_node = 0;
 
 const char *load_default_groups[]= { "mysql_cluster",0 };
 
@@ -62,6 +101,9 @@ static struct my_option my_long_options[
   { "connections", NDB_OPT_NOSHORT, "Print connections",
     (uchar**) &g_connections, (uchar**) &g_connections,
     0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+  { "system", NDB_OPT_NOSHORT, "Print system",
+    (uchar**) &g_system, (uchar**) &g_system,
+    0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
   { "query", 'q', "Query option(s)",
     (uchar**) &g_query, (uchar**) &g_query,
     0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
@@ -95,6 +137,9 @@ static struct my_option my_long_options[
   { "xml", NDB_OPT_NOSHORT, "Print configinfo in xml format",
     (uchar**) &g_xml, (uchar**) &g_xml,
     0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
+  { "config_from_node", NDB_OPT_NOSHORT, "Use current config from node with given nodeid",
+    (uchar**) &g_config_from_node, (uchar**) &g_config_from_node,
+    0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
   { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
 };
 
@@ -154,7 +199,7 @@ static int parse_query(Vector<Apply*>&,
 static int parse_where(Vector<Match*>&, int &argc, char**& argv);
 static int eval(const Iter&, const Vector<Match*>&);
 static int apply(const Iter&, const Vector<Apply*>&);
-static ndb_mgm_configuration* fetch_configuration();
+static ndb_mgm_configuration* fetch_configuration(int from_node);
 static ndb_mgm_configuration* load_configuration();
 
 
@@ -178,23 +223,47 @@ main(int argc, char** argv){
     exit(0);
   }
 
-  if (g_nodes && g_connections)
+  if ((g_nodes && g_connections) ||
+       g_system && (g_nodes || g_connections))
   {
     fprintf(stderr,
-	    "Only one option of --nodes and --connections allowed\n");
+	    "Error: Only one of the section-options: --nodes, --connections, --system is allowed.\n");
+    exit(255);
+  }
+
+  /* There is no explicit option for the user to set
+   * 'retrieving config from mgmd', but this is the default.
+   * Therefore will not contradict with other sources.
+   */
+
+  if ((g_config_file && g_mycnf) ||
+       g_config_from_node && (g_config_file || g_mycnf))
+  {
+    fprintf(stderr,
+	    "Error: Config should be retrieved from only one of the following sources:\n");
+    fprintf(stderr,
+            "\tconfig stored at mgmd (default),\n");
+    fprintf(stderr,
+            "\tconfig stored at a data node (--config_from_node=<nodeid>), \n");
+    fprintf(stderr,
+            "\tmy.cnf(--mycnf=<my.cnf file>),\n");
+    fprintf(stderr,
+             "\tconfig.file (--config_file=<config file>).\n");
     exit(255);
   }
 
   g_section = CFG_SECTION_NODE; //default
   if (g_connections)
     g_section = CFG_SECTION_CONNECTION;
+  else if (g_system)
+    g_section = CFG_SECTION_SYSTEM;
 
   ndb_mgm_configuration * conf = 0;
 
   if (g_config_file || g_mycnf)
     conf = load_configuration();
   else
-    conf = fetch_configuration();
+    conf = fetch_configuration(g_config_from_node);
 
   if (conf == 0)
   {
@@ -289,7 +358,9 @@ parse_query(Vector<Apply*>& select, int
 	     (g_section == CFG_SECTION_NODE &&
               (strcmp(ConfigInfo::m_ParamInfo[p]._section, "DB") == 0 ||
                strcmp(ConfigInfo::m_ParamInfo[p]._section, "API") == 0 ||
-               strcmp(ConfigInfo::m_ParamInfo[p]._section, "MGM") == 0)))
+               strcmp(ConfigInfo::m_ParamInfo[p]._section, "MGM") == 0))
+             ||
+	     (g_section == CFG_SECTION_SYSTEM))
 	  {
 	    if(strcasecmp(ConfigInfo::m_ParamInfo[p]._fname, str) == 0)
 	    {
@@ -496,8 +567,8 @@ ConnectionTypeApply::apply(const Iter& i
 }
 
 static ndb_mgm_configuration*
-fetch_configuration()
-{  
+fetch_configuration(int from_node)
+{
   ndb_mgm_configuration* conf = 0;
   NdbMgmHandle mgm = ndb_mgm_create_handle();
   if(mgm == NULL) {
@@ -532,11 +603,17 @@ fetch_configuration()
 	    ndb_mgm_get_connected_port(mgm));
   }
 	  
-  conf = ndb_mgm_get_configuration(mgm, 0);
+  if (from_node > 1)
+  {
+    conf = ndb_mgm_get_configuration_from_node(mgm, from_node);
+  }
+  else
+     conf = ndb_mgm_get_configuration(mgm, 0);
+
   if(conf == 0)
   {
-    fprintf(stderr, "Could not get configuration");
-    fprintf(stderr, "code: %d, msg: %s\n",
+    fprintf(stderr, "Could not get configuration, ");
+    fprintf(stderr, "error code: %d, error msg: %s\n",
 	    ndb_mgm_get_latest_error(mgm),
 	    ndb_mgm_get_latest_error_msg(mgm));
   }

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-5.1-telco-7.0-wl4124-new1 branch (pekka.nousiainen:4405to 4406) Pekka Nousiainen4 Jul