List: Commits
From: Ole John Aske
Date: June 30 2011 8:50am
Subject: bzr push into mysql-5.1-telco-7.0-spj-scan-vs-scan branch (ole.john.aske:3520 to 3521)
 3521 Ole John Aske	2011-06-30 [merge]
      merge mysql-5.1-telco-7.0 -> spj-scan-scan

    added:
      mysql-test/suite/ndb_rpl/r/ndb_rpl_circular_2ch_rep_status.result
      mysql-test/suite/ndb_rpl/r/ndb_rpl_init_rep_status.result
      mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.cnf
      mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_init_rep_status.test
      sql/ndb_mi.cc
      sql/ndb_mi.h
    modified:
      configure.in
      mysql-test/suite/ndb/r/ndb_basic.result
      sql/Makefile.am
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster.h
      sql/ha_ndbcluster_binlog.cc
      storage/ndb/CMakeLists.txt
      storage/ndb/include/ndbapi/NdbIndexStat.hpp
      storage/ndb/ndb_configure.m4
      storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
      storage/ndb/src/kernel/blocks/suma/Suma.cpp
      storage/ndb/src/mgmsrv/MgmtSrvr.cpp
      storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp
      storage/ndb/src/ndbapi/NdbIndexStatImpl.hpp
      storage/ndb/src/ndbapi/NdbQueryBuilder.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilder.hpp
      storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp
      storage/ndb/src/ndbapi/NdbQueryOperation.cpp
      storage/ndb/src/ndbapi/NdbTransaction.cpp
      storage/ndb/test/ndbapi/testMgmd.cpp
      storage/ndb/test/ndbapi/testScan.cpp
      storage/ndb/test/run-test/daily-basic-tests.txt
 3520 Ole John Aske	2011-06-28
      Removed explicit 'analyze table...' from ndb_join_pushdown.test as 
      it is now obsolete after the 'maintained statistics' patch.

    modified:
      mysql-test/suite/ndb/r/ndb_join_pushdown.result
      mysql-test/suite/ndb/t/ndb_join_pushdown.test
=== modified file 'configure.in'
--- a/configure.in	2011-05-24 08:44:31 +0000
+++ b/configure.in	2011-06-29 08:14:18 +0000
@@ -12,7 +12,7 @@ dnl
 dnl When changing the major version number please also check the switch
 dnl statement in mysqlbinlog::check_master_version().  You may also need
 dnl to update version.c in ndb.
-AC_INIT([MySQL Server], [5.1.56-ndb-7.0.26], [], [mysql])
+AC_INIT([MySQL Server], [5.1.56-ndb-7.0.27], [], [mysql])
 
 AC_CONFIG_SRCDIR([sql/mysqld.cc])
 AC_CANONICAL_SYSTEM

=== modified file 'mysql-test/suite/ndb/r/ndb_basic.result'
--- a/mysql-test/suite/ndb/r/ndb_basic.result	2011-06-17 12:41:11 +0000
+++ b/mysql-test/suite/ndb/r/ndb_basic.result	2011-06-30 08:49:22 +0000
@@ -80,6 +80,7 @@ Ndb_pushed_queries_dropped	#
 Ndb_pushed_queries_executed	#
 Ndb_pushed_reads	#
 Ndb_scan_count	#
+Ndb_slave_max_replicated_epoch	#
 Ndb_sorted_scan_count	#
 SHOW GLOBAL VARIABLES LIKE 'ndb\_%';
 Variable_name	Value

=== added file 'mysql-test/suite/ndb_rpl/r/ndb_rpl_circular_2ch_rep_status.result'
--- a/mysql-test/suite/ndb_rpl/r/ndb_rpl_circular_2ch_rep_status.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_rpl/r/ndb_rpl_circular_2ch_rep_status.result	2011-06-29 23:28:01 +0000
@@ -0,0 +1,169 @@
+include/rpl_init.inc [topology=1->2,4->3]
+include/rpl_connect.inc [creating master]
+include/rpl_connect.inc [creating master1]
+include/rpl_connect.inc [creating slave]
+include/rpl_connect.inc [creating slave1]
+include/rpl_start_slaves.inc
+Cluster A servers have no epoch replication info
+select count(1) from mysql.ndb_apply_status;
+count(1)
+0
+Cluster A servers have no max replicated epoch value
+Master(1)
+select variable_name, variable_value from information_schema.global_status
+where variable_name='Ndb_slave_max_replicated_epoch';
+variable_name	variable_value
+NDB_SLAVE_MAX_REPLICATED_EPOCH	0
+Master1(3)
+select variable_name, variable_value from information_schema.global_status
+where variable_name='Ndb_slave_max_replicated_epoch';
+variable_name	variable_value
+NDB_SLAVE_MAX_REPLICATED_EPOCH	0
+Make a change originating at Cluster A
+Master(1)
+use test;
+create table t1 (a int primary key, b varchar(100)) engine=ndb;
+insert into t1 values (1, "Venice");
+Allow it to propagate to Cluster B
+Originate a second unrelated change at Cluster B, to allow us to wait for
+reverse propagation in the testcase
+Slave1 (4)
+insert into t1 values (2, "Death");
+Allow it to propagate to Cluster A
+Observe new entry in ndb_apply_status on Cluster A
+Master (1)
+select server_id from mysql.ndb_apply_status order by server_id;
+server_id
+1
+4
+Non-slave server on Cluster A will have no value for Max Replicated Epoch
+select variable_name, variable_value from information_schema.global_status
+where variable_name='Ndb_slave_max_replicated_epoch';
+variable_name	variable_value
+NDB_SLAVE_MAX_REPLICATED_EPOCH	0
+Slave server on Cluster A has current value for Max Replicated Epoch
+Master1 (3)
+Expect count 1
+select
+count(1)
+from
+information_schema.global_status,
+mysql.ndb_apply_status
+where
+server_id = 1
+and
+variable_name='Ndb_slave_max_replicated_epoch'
+    and
+variable_value = epoch;
+count(1)
+1
+Now wait for all replication to quiesce
+Now swap replication channels around
+include/rpl_stop_slaves.inc
+include/rpl_change_topology.inc [new topology=2->1,3->4]
+Get current master status on Cluster A new master (next pos in Binlog)
+Master1 (3)
+Flush logs to ensure any pending update (e.g. reflected apply_status write row)
+is skipped over.
+flush logs;
+Setup slave on Cluster B to use it
+Slave1 (4)
+Get current master status on Cluster B new master (next pos in Binlog)
+Slave (2)
+Flush logs to ensure any pending update (e.g. reflected apply_status write row)
+is skipped over.
+flush logs;
+Setup slave on Cluster A to use it
+Master (1)
+Master (1)
+Show that Cluster A Slave server (old master) has no Max replicated epoch before receiving data
+select variable_name, variable_value from information_schema.global_status
+where variable_name='Ndb_slave_max_replicated_epoch';
+variable_name	variable_value
+NDB_SLAVE_MAX_REPLICATED_EPOCH	0
+Master1 (3)
+Cluster A Master server (old slave) has old Max replicated epoch
+select
+count(1)
+from
+information_schema.global_status,
+mysql.ndb_apply_status
+where
+server_id = 1
+and
+variable_name='Ndb_slave_max_replicated_epoch'
+    and
+variable_value = epoch;
+count(1)
+1
+Now start slaves up
+include/rpl_start_slaves.inc
+Show that applying something from Cluster B causes the
+old Max Rep Epoch to be loaded from ndb_apply_status
+There is no new Max Rep Epoch from Cluster A as it has not changed
+anything yet
+Slave (2)
+insert into test.t1 values (3, "From the Sea");
+Allow to propagate to Cluster A
+Master (1)
+New Slave server on Cluster A has loaded old Max-Replicated-Epoch
+select server_id from mysql.ndb_apply_status order by server_id;
+server_id
+1
+2
+4
+select
+count(1)
+from
+information_schema.global_status,
+mysql.ndb_apply_status
+where
+server_id = 1
+and
+variable_name='Ndb_slave_max_replicated_epoch'
+    and
+variable_value = epoch;
+count(1)
+1
+Now make a new Cluster A change and see that the Max Replicated Epoch advances
+once it has propagated
+Master1 (3)
+insert into test.t1 values (4, "Brooke");
+Propagate to Cluster B
+Make change on Cluster B to allow waiting for reverse propagation
+Slave (2)
+insert into test.t1 values (5, "Rupert");
+Wait for propagation back to Cluster A
+Master (1)
+Show that Cluster A now has 2 different server_id entries in ndb_apply_status
+Those from the new master (server_id 3) are highest.
+select server_id from mysql.ndb_apply_status order by server_id;
+server_id
+1
+2
+3
+4
+select
+count(1)
+from
+information_schema.global_status,
+mysql.ndb_apply_status
+where
+server_id = 3
+and
+variable_name='Ndb_slave_max_replicated_epoch'
+    and
+variable_value = epoch;
+count(1)
+1
+local_server_with_max_epoch
+3
+Done
+drop table t1;
+include/rpl_stop_slaves.inc
+CHANGE MASTER TO IGNORE_SERVER_IDS= ();
+CHANGE MASTER TO IGNORE_SERVER_IDS= ();
+CHANGE MASTER TO IGNORE_SERVER_IDS= ();
+CHANGE MASTER TO IGNORE_SERVER_IDS= ();
+include/rpl_start_slaves.inc
+include/rpl_end.inc

=== added file 'mysql-test/suite/ndb_rpl/r/ndb_rpl_init_rep_status.result'
--- a/mysql-test/suite/ndb_rpl/r/ndb_rpl_init_rep_status.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_rpl/r/ndb_rpl_init_rep_status.result	2011-06-29 23:28:01 +0000
@@ -0,0 +1,78 @@
+include/master-slave.inc
+[connection master]
+reset master;
+stop slave;
+Generate something in the Masters Binlog
+use test;
+create table t1 (a int primary key, b int) engine=ndb;
+insert into t1 values (1,1);
+Initial state
+select * from mysql.ndb_apply_status;
+server_id	epoch	log_name	start_pos	end_pos
+select variable_value from information_schema.global_status
+where variable_name like '%Ndb_slave_max_replicated_epoch%';
+variable_value
+0
+select @slave_server_id:=(variable_value+0) from information_schema.global_variables
+where variable_name like 'server_id';
+@slave_server_id:=(variable_value+0)
+2
+Default, no data, max replicated epoch will be 0.
+reset slave;
+start slave;
+select server_id from mysql.ndb_apply_status order by server_id;
+server_id
+1
+select variable_value from information_schema.global_status
+where variable_name like 'Ndb_slave_max_replicated_epoch';
+variable_value
+0
+Default, load of own serverid from ndb_apply_status, should be 111
+drop table test.t1;
+stop slave;
+reset slave;
+insert into mysql.ndb_apply_status values (@slave_server_id, 111, 'Fictional log', 222, 333);
+start slave;
+select server_id from mysql.ndb_apply_status order by server_id;
+server_id
+1
+2
+select variable_value from information_schema.global_status
+where variable_name like 'Ndb_slave_max_replicated_epoch';
+variable_value
+111
+drop table test.t1;
+Check that reset slave resets Ndb_slave_max_replicated_epoch
+stop slave;
+select variable_value from information_schema.global_status
+where variable_name like 'Ndb_slave_max_replicated_epoch';
+variable_value
+111
+reset slave;
+select variable_value from information_schema.global_status
+where variable_name like 'Ndb_slave_max_replicated_epoch';
+variable_value
+0
+Multiple-channel, load highest of configured serverids, should be 222
+set @other_local_server_id=@slave_server_id+1;
+set @other_remote_server_id=@slave_server_id+2;
+insert into mysql.ndb_apply_status values (@slave_server_id, 111, 'Fictional log', 222, 333);
+insert into mysql.ndb_apply_status values (@other_local_server_id, 222, 'Fictional log', 222, 333);
+insert into mysql.ndb_apply_status values (@other_remote_server_id, 444, 'Fictional log', 222, 333);
+CHANGE MASTER TO IGNORE_SERVER_IDS=(3);;
+start slave;
+select server_id from mysql.ndb_apply_status order by server_id;
+server_id
+1
+2
+3
+4
+select variable_value from information_schema.global_status
+where variable_name like 'Ndb_slave_max_replicated_epoch';
+variable_value
+222
+stop slave;
+CHANGE MASTER TO IGNORE_SERVER_IDS= ();
+start slave;
+drop table test.t1;
+include/rpl_end.inc

=== added file 'mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.cnf'
--- a/mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.cnf	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.cnf	2011-06-29 23:28:01 +0000
@@ -0,0 +1,14 @@
+!include ndb_rpl_circular_2ch.cnf
+
+[mysqld.1.1]
+ndb-log-apply-status
+
+[mysqld.2.1]
+ndb-log-apply-status
+
+[mysqld.1.slave]
+ndb-log-apply-status
+
+[mysqld.2.slave]
+ndb-log-apply-status
+

=== added file 'mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.test'
--- a/mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.test	2011-06-29 23:28:01 +0000
@@ -0,0 +1,247 @@
+--source include/have_ndb.inc
+--source suite/ndb_rpl/ndb_master-slave_2ch.inc
+--source include/have_binlog_format_mixed_or_row.inc
+
+#
+# Test that the Maximum replicated epoch is maintained
+# as expected in a circular, 2 channel configuration.
+# The channels are swapped, and replication is restarted
+# The MaxReplicatedEpoch is reloaded from ndb_apply_status
+# for the Servers considered local (IGNORE_SERVER_IDS)
+#
+--connection master
+--echo Cluster A servers have no epoch replication info
+select count(1) from mysql.ndb_apply_status;
+
+--echo Cluster A servers have no max replicated epoch value
+--echo Master(1)
+select variable_name, variable_value from information_schema.global_status
+  where variable_name='Ndb_slave_max_replicated_epoch';
+--connection master1
+--echo Master1(3)
+select variable_name, variable_value from information_schema.global_status
+  where variable_name='Ndb_slave_max_replicated_epoch';
+
+--echo Make a change originating at Cluster A
+--connection master
+--echo Master(1)
+use test;
+create table t1 (a int primary key, b varchar(100)) engine=ndb;
+insert into t1 values (1, "Venice");
+
+--echo Allow it to propagate to Cluster B
+--sync_slave_with_master slave
+
+--echo Originate a second unrelated change at Cluster B, to allow us to wait for
+--echo reverse propagation in the testcase
+--connection slave1
+--echo Slave1 (4)
+insert into t1 values (2, "Death");
+
+--echo Allow it to propagate to Cluster A
+--sync_slave_with_master master1
+
+--echo Observe new entry in ndb_apply_status on Cluster A
+--connection master
+--echo Master (1)
+select server_id from mysql.ndb_apply_status order by server_id;
+
+--echo Non-slave server on Cluster A will have no value for Max Replicated Epoch
+select variable_name, variable_value from information_schema.global_status
+  where variable_name='Ndb_slave_max_replicated_epoch';
+
+--echo Slave server on Cluster A has current value for Max Replicated Epoch
+--connection master1
+--echo Master1 (3)
+--echo Expect count 1
+# Here we join the max rep epoch with ndb_apply_status for server id 1
+# (Our site's current master server)
+select
+    count(1)
+  from
+    information_schema.global_status,
+    mysql.ndb_apply_status
+  where
+    server_id = 1
+    and
+    variable_name='Ndb_slave_max_replicated_epoch'
+    and
+    variable_value = epoch;
+
+--echo Now wait for all replication to quiesce
+
+--echo Now swap replication channels around
+--source include/rpl_stop_slaves.inc
+--let $rpl_topology= 2->1,3->4
+--source include/rpl_change_topology.inc
+
+# We've changed the direction, but need to set binlog filenames
+# and positions
+
+#
+# 'Normally' we should use the ndb_apply_status max applied epoch,
+# then lookup ndb_binlog_index etc.
+# However, in this case (and probably in lots of real cases), no changes
+# have been made after the last applied epoch, so there is no epoch
+# after the current one, and therefore no entry in ndb_binlog_index
+# to get the correct position from.
+# We could just re-apply the last epoch applied, but that's imprecise,
+# and causes us to create an ndb_apply_status entry for Server 3 when
+# it has not really been master for those changes.
+# So we just look at the Master status instead.
+#
+#--echo Get max applied epochs from a server on each cluster
+#--connection slave
+#let $max_applied_cluster_a_epoch = query_get_value("SELECT MAX(epoch) AS epoch FROM mysql.ndb_apply_status WHERE server_id IN (1,3)", epoch, 1);
+#--connection master
+#let $max_applied_cluster_b_epoch = query_get_value("SELECT MAX(epoch) AS epoch FROM mysql.ndb_apply_status WHERE server_id IN (2,4)", epoch, 1);
+#
+#--echo Get corresponding Binlog filename + pos from new Master servers
+#--connection master1
+#eval select * from mysql.ndb_binlog_index where epoch > $max_applied_cluster_a_epoch ;
+#let $cluster_a_master_log_file = query_get_value("SELECT SUBSTRING_INDEX(File, '/', -1) as File from mysql.ndb_binlog_index WHERE epoch >= $max_applied_cluster_a_epoch", File, 1);
+#let $cluster_a_master_log_pos = query_get_value("SELECT Position from mysql.ndb_binlog_index WHERE epoch >= $max_applied_cluster_a_epoch", Position, 1);
+#--connection slave
+#eval select * from mysql.ndb_binlog_index where epoch > $max_applied_cluster_b_epoch;
+#let $cluster_b_master_log_file = query_get_value("SELECT SUBSTRING_INDEX(File, '/', -1) as File from mysql.ndb_binlog_index WHERE epoch >= $max_applied_cluster_b_epoch", File, 1);
+#let $cluster_b_master_log_pos = query_get_value("SELECT Position from mysql.ndb_binlog_index WHERE epoch >= $max_applied_cluster_b_epoch", Position, 1);
+#--echo Now change new Slave servers to new Master file + pos
+#--connection master
+#--echo Changing master to $cluster_b_master_log_file, $cluster_b_master_log_pos
+#eval CHANGE MASTER TO MASTER_LOG_FILE="$cluster_b_master_log_file", MASTER_LOG_POS=$cluster_b_master_log_pos;
+#--connection slave1
+#--echo Changing master to $cluster_a_master_log_file, $cluster_a_master_log_pos
+#eval CHANGE MASTER TO MASTER_LOG_FILE="$cluster_a_master_log_file", MASTER_LOG_POS=$cluster_a_master_log_pos;
+
+--echo Get current master status on Cluster A new master (next pos in Binlog)
+--connection master1
+--echo Master1 (3)
+--echo Flush logs to ensure any pending update (e.g. reflected apply_status write row)
+--echo is skipped over.
+flush logs;
+let $cluster_a_master_log_file = query_get_value("SHOW MASTER STATUS", "File", 1);
+let $cluster_a_master_log_pos = query_get_value("SHOW MASTER STATUS", "Position", 1);
+--echo Setup slave on Cluster B to use it
+--connection slave1
+--echo Slave1 (4)
+--disable_query_log
+eval CHANGE MASTER TO MASTER_LOG_FILE="$cluster_a_master_log_file", MASTER_LOG_POS=$cluster_a_master_log_pos;
+--enable_query_log
+
+--echo Get current master status on Cluster B new master (next pos in Binlog)
+--connection slave
+--echo Slave (2)
+--echo  Flush logs to ensure any pending update (e.g. reflected apply_status write row)
+--echo is skipped over.
+flush logs;
+let $cluster_b_master_log_file = query_get_value("SHOW MASTER STATUS", "File", 1);
+let $cluster_b_master_log_pos = query_get_value("SHOW MASTER STATUS", "Position", 1);
+--echo Setup slave on Cluster A to use it
+--connection master
+--echo Master (1)
+--disable_query_log
+eval CHANGE MASTER TO MASTER_LOG_FILE="$cluster_b_master_log_file", MASTER_LOG_POS=$cluster_b_master_log_pos;
+--enable_query_log
+
+--connection master
+--echo Master (1)
+--echo Show that Cluster A Slave server (old master) has no Max replicated epoch before receiving data
+select variable_name, variable_value from information_schema.global_status
+  where variable_name='Ndb_slave_max_replicated_epoch';
+
+--connection master1
+--echo Master1 (3)
+--echo Cluster A Master server (old slave) has old Max replicated epoch
+select
+    count(1)
+  from
+    information_schema.global_status,
+    mysql.ndb_apply_status
+  where
+    server_id = 1
+    and
+    variable_name='Ndb_slave_max_replicated_epoch'
+    and
+    variable_value = epoch;
+
+--echo Now start slaves up
+--source include/rpl_start_slaves.inc
+
+--echo Show that applying something from Cluster B causes the
+--echo old Max Rep Epoch to be loaded from ndb_apply_status
+--echo There is no new Max Rep Epoch from Cluster A as it has not changed
+--echo anything yet
+
+--connection slave
+--echo Slave (2)
+insert into test.t1 values (3, "From the Sea");
+
+--echo Allow to propagate to Cluster A
+--sync_slave_with_master master
+
+--connection master
+--echo Master (1)
+--echo New Slave server on Cluster A has loaded old Max-Replicated-Epoch
+select server_id from mysql.ndb_apply_status order by server_id;
+select
+    count(1)
+  from
+    information_schema.global_status,
+    mysql.ndb_apply_status
+  where
+    server_id = 1
+    and
+    variable_name='Ndb_slave_max_replicated_epoch'
+    and
+    variable_value = epoch;
+
+--echo Now make a new Cluster A change and see that the Max Replicated Epoch advances
+--echo once it has propagated
+
+--connection master1
+--echo Master1 (3)
+insert into test.t1 values (4, "Brooke");
+
+--echo Propagate to Cluster B
+--sync_slave_with_master slave1
+
+--echo Make change on Cluster B to allow waiting for reverse propagation
+--connection slave
+--echo Slave (2)
+insert into test.t1 values (5, "Rupert");
+
+--echo Wait for propagation back to Cluster A
+--sync_slave_with_master master
+
+--connection master
+--echo Master (1)
+--echo Show that Cluster A now has 2 different server_id entries in ndb_apply_status
+--echo Those from the new master (server_id 3) are highest.
+select server_id from mysql.ndb_apply_status order by server_id;
+select
+    count(1)
+  from
+    information_schema.global_status,
+    mysql.ndb_apply_status
+  where
+    server_id = 3
+    and
+    variable_name='Ndb_slave_max_replicated_epoch'
+    and
+    variable_value = epoch;
+
+let $max_epoch = query_get_value("select max(epoch) as epoch from mysql.ndb_apply_status where server_id in (1,3)","epoch", 1);
+--disable_query_log
+# We have to constrain the search to master server ids 1,3 in case the
+# Slave happens to have similar epoch values
+eval select server_id as local_server_with_max_epoch from mysql.ndb_apply_status where epoch=$max_epoch and server_id in (1,3);
+--enable_query_log
+
+--echo Done
+
+--connection master1
+drop table t1;
+--sync_slave_with_master slave1
+
+--source suite/ndb_rpl/ndb_master-slave_2ch_end.inc
+

=== added file 'mysql-test/suite/ndb_rpl/t/ndb_rpl_init_rep_status.test'
--- a/mysql-test/suite/ndb_rpl/t/ndb_rpl_init_rep_status.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_init_rep_status.test	2011-06-29 23:28:01 +0000
@@ -0,0 +1,89 @@
+--source include/have_ndb.inc
+--source include/have_binlog_format_mixed_or_row.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
+
+# Test Slave initialisation of Ndb_slave_max_replicated_epoch status var
+
+--connection slave
+reset master;
+stop slave;
+
+--connection master
+--echo Generate something in the Masters Binlog
+use test;
+create table t1 (a int primary key, b int) engine=ndb;
+
+insert into t1 values (1,1);
+
+--connection slave
+--echo Initial state
+select * from mysql.ndb_apply_status;
+select variable_value from information_schema.global_status
+  where variable_name like '%Ndb_slave_max_replicated_epoch%';
+select @slave_server_id:=(variable_value+0) from information_schema.global_variables
+  where variable_name like 'server_id';
+
+--echo Default, no data, max replicated epoch will be 0.
+reset slave;
+start slave;
+--connection master
+--sync_slave_with_master
+--connection slave
+--replace_column 3 # 4 # 5 #
+select server_id from mysql.ndb_apply_status order by server_id;
+select variable_value from information_schema.global_status
+  where variable_name like 'Ndb_slave_max_replicated_epoch';
+
+--echo Default, load of own serverid from ndb_apply_status, should be 111
+drop table test.t1;
+stop slave;
+reset slave;
+insert into mysql.ndb_apply_status values (@slave_server_id, 111, 'Fictional log', 222, 333);
+start slave;
+--connection master
+--sync_slave_with_master
+--connection slave
+--replace_column 3 # 4 # 5 #
+select server_id from mysql.ndb_apply_status order by server_id;
+select variable_value from information_schema.global_status
+  where variable_name like 'Ndb_slave_max_replicated_epoch';
+
+drop table test.t1;
+
+--echo Check that reset slave resets Ndb_slave_max_replicated_epoch
+stop slave;
+select variable_value from information_schema.global_status
+  where variable_name like 'Ndb_slave_max_replicated_epoch';
+reset slave;
+select variable_value from information_schema.global_status
+  where variable_name like 'Ndb_slave_max_replicated_epoch';
+
+--echo Multiple-channel, load highest of configured serverids, should be 222
+set @other_local_server_id=@slave_server_id+1;
+set @other_remote_server_id=@slave_server_id+2;
+insert into mysql.ndb_apply_status values (@slave_server_id, 111, 'Fictional log', 222, 333);
+insert into mysql.ndb_apply_status values (@other_local_server_id, 222, 'Fictional log', 222, 333);
+insert into mysql.ndb_apply_status values (@other_remote_server_id, 444, 'Fictional log', 222, 333);
+
+let $local_server_ids = `select @other_local_server_id`;
+
+--eval CHANGE MASTER TO IGNORE_SERVER_IDS=($local_server_ids);
+start slave;
+--connection master
+--sync_slave_with_master
+--connection slave
+--replace_column 3 # 4 # 5 #
+select server_id from mysql.ndb_apply_status order by server_id;
+select variable_value from information_schema.global_status
+  where variable_name like 'Ndb_slave_max_replicated_epoch';
+
+# Clean up
+stop slave;
+CHANGE MASTER TO IGNORE_SERVER_IDS= ();
+start slave;
+--connection master
+drop table test.t1;
+sync_slave_with_master;
+
+--source include/rpl_end.inc
+

=== modified file 'sql/Makefile.am'
--- a/sql/Makefile.am	2011-06-17 12:41:11 +0000
+++ b/sql/Makefile.am	2011-06-30 08:49:22 +0000
@@ -64,6 +64,7 @@ noinst_HEADERS =	item.h item_func.h item
 			ha_ndbcluster_lock_ext.h ha_ndbinfo.h \
 			ha_ndbcluster_glue.h \
 			ha_ndb_index_stat.h \
+                        ndb_mi.h \
 			ha_partition.h rpl_constants.h \
 			debug_sync.h \
 			opt_range.h protocol.h rpl_tblmap.h rpl_utility.h \
@@ -141,7 +142,8 @@ libndb_la_SOURCES=	ha_ndbcluster.cc \
 			ha_ndbcluster_cond.cc \
 			ha_ndbcluster_push.cc \
 			ha_ndb_index_stat.cc \
-			ha_ndbinfo.cc
+			ha_ndbinfo.cc \
+			ndb_mi.cc
 
 gen_lex_hash_SOURCES =	gen_lex_hash.cc
 gen_lex_hash_LDFLAGS =  @NOINST_LDFLAGS@

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-06-24 12:32:05 +0000
+++ b/sql/ha_ndbcluster.cc	2011-06-30 08:49:22 +0000
@@ -30,8 +30,6 @@
 
 #include "ha_ndbcluster_glue.h"
 
-#include "rpl_mi.h"
-
 #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
 #include "ha_ndbcluster.h"
 #include <ndbapi/NdbApi.hpp>
@@ -50,6 +48,7 @@
 
 #include <mysql/plugin.h>
 #include <ndb_version.h>
+#include "ndb_mi.h"
 
 #ifdef ndb_dynamite
 #undef assert
@@ -483,7 +482,11 @@ update_slave_api_stats(Ndb* ndb)
 st_ndb_slave_state g_ndb_slave_state;
 
 st_ndb_slave_state::st_ndb_slave_state()
-  : current_conflict_defined_op_count(0)
+  : current_conflict_defined_op_count(0),
+    current_master_server_epoch(0),
+    current_max_rep_epoch(0),
+    max_rep_epoch(0),
+    sql_run_id(~Uint32(0))
 {
   memset(current_violation_count, 0, sizeof(current_violation_count));
   memset(total_violation_count, 0, sizeof(total_violation_count));
@@ -495,6 +498,7 @@ st_ndb_slave_state::atTransactionAbort()
   /* Reset current-transaction counters + state */
   memset(current_violation_count, 0, sizeof(current_violation_count));
   current_conflict_defined_op_count = 0;
+  current_max_rep_epoch = 0;
 }
 
 void
@@ -509,8 +513,195 @@ st_ndb_slave_state::atTransactionCommit(
     current_violation_count[i] = 0;
   }
   current_conflict_defined_op_count = 0;
+  if (current_max_rep_epoch > max_rep_epoch)
+  {
+    DBUG_PRINT("info", ("Max replicated epoch increases from %llu to %llu",
+                        max_rep_epoch,
+                        current_max_rep_epoch));
+
+    max_rep_epoch = current_max_rep_epoch;
+  }
+  current_max_rep_epoch = 0;
+}
+
+void
+st_ndb_slave_state::atApplyStatusWrite(Uint32 master_server_id,
+                                       Uint32 row_server_id,
+                                       Uint64 row_epoch,
+                                       bool is_row_server_id_local)
+{
+  if (row_server_id == master_server_id)
+  {
+    /*
+       WRITE_ROW to ndb_apply_status injected by MySQLD
+       immediately upstream of us.
+       Record epoch
+    */
+    current_master_server_epoch = row_epoch;
+    assert(! is_row_server_id_local);
+  }
+  else if (is_row_server_id_local)
+  {
+    DBUG_PRINT("info", ("Recording application of local server %u epoch %llu "
+                        " which is %s.",
+                        row_server_id, row_epoch,
+                        (row_epoch > g_ndb_slave_state.current_max_rep_epoch)?
+                        " new highest." : " older than previously applied"));
+    if (row_epoch > current_max_rep_epoch)
+    {
+      /*
+        Store new highest epoch in thdvar.  If we commit successfully
+        then this can become the new global max
+      */
+      current_max_rep_epoch = row_epoch;
+    }
+  }
+}
+
+void
+st_ndb_slave_state::atResetSlave()
+{
+  /* Reset the Maximum replicated epoch vars
+   * on slave reset
+   * No need to touch the sql_run_id as that
+   * will increment if the slave is started
+   * again.
+   */
+  current_max_rep_epoch = 0;
+  max_rep_epoch = 0;
 }
 
+static int check_slave_state(THD* thd)
+{
+  DBUG_ENTER("check_slave_state");
+
+#ifdef HAVE_NDB_BINLOG
+  if (!thd->slave_thread)
+    DBUG_RETURN(0);
+
+  const Uint32 runId = ndb_mi_get_slave_run_id();
+  DBUG_PRINT("info", ("Slave SQL thread run id is %u",
+                      runId));
+  if (unlikely(runId != g_ndb_slave_state.sql_run_id))
+  {
+    DBUG_PRINT("info", ("Slave run id changed from %u, "
+                        "treating as Slave restart",
+                        g_ndb_slave_state.sql_run_id));
+    g_ndb_slave_state.sql_run_id = runId;
+
+    /* Always try to load the Max Replicated Epoch info
+     * first.
+     * Could be made optional if it's a problem
+     */
+    {
+      /*
+         Load highest replicated epoch from a local
+         MySQLD from the cluster.
+      */
+      DBUG_PRINT("info", ("Loading applied epoch information from %s",
+                          NDB_APPLY_TABLE));
+      NdbError ndb_error;
+      Uint64 highestAppliedEpoch = 0;
+      do
+      {
+        Ndb* ndb= check_ndb_in_thd(thd);
+        NDBDICT* dict= ndb->getDictionary();
+        NdbTransaction* trans= NULL;
+        ndb->setDatabaseName(NDB_REP_DB);
+        Ndb_table_guard ndbtab_g(dict, NDB_APPLY_TABLE);
+
+        const NDBTAB* ndbtab= ndbtab_g.get_table();
+        if (unlikely(ndbtab == NULL))
+        {
+          ndb_error = dict->getNdbError();
+          break;
+        }
+
+        trans= ndb->startTransaction();
+        if (unlikely(trans == NULL))
+        {
+          ndb_error = ndb->getNdbError();
+          break;
+        }
+
+        do
+        {
+          NdbScanOperation* sop = trans->getNdbScanOperation(ndbtab);
+          if (unlikely(sop == NULL))
+          {
+            ndb_error = trans->getNdbError();
+            break;
+          }
+
+          const Uint32 server_id_col_num = 0;
+          const Uint32 epoch_col_num = 1;
+          NdbRecAttr* server_id_ra;
+          NdbRecAttr* epoch_ra;
+
+          if (unlikely((sop->readTuples(NdbOperation::LM_CommittedRead) != 0)   ||
+                       ((server_id_ra = sop->getValue(server_id_col_num)) == NULL)  ||
+                       ((epoch_ra = sop->getValue(epoch_col_num)) == NULL)))
+          {
+            ndb_error = sop->getNdbError();
+            break;
+          }
+
+          if (trans->execute(NdbTransaction::Commit))
+          {
+            ndb_error = trans->getNdbError();
+            break;
+          }
+
+          int rc = 0;
+          while (0 == (rc= sop->nextResult(true)))
+          {
+            Uint32 serverid = server_id_ra->u_32_value();
+            Uint64 epoch = epoch_ra->u_64_value();
+
+            if ((serverid == ::server_id) ||
+                (ndb_mi_get_ignore_server_id(serverid)))
+            {
+              highestAppliedEpoch = MAX(epoch, highestAppliedEpoch);
+            }
+          }
+
+          if (rc != 1)
+          {
+            ndb_error = sop->getNdbError();
+            break;
+          }
+        } while (0);
+
+        trans->close();
+      } while(0);
+
+      if (ndb_error.code != 0)
+      {
+        sql_print_warning("NDB Slave : Could not determine maximum replicated epoch from %s.%s "
+                          "at Slave start, error %u %s",
+                          NDB_REP_DB,
+                          NDB_APPLY_TABLE,
+                          ndb_error.code, ndb_error.message);
+      }
+
+      /*
+        Set Global status variable to the Highest Applied Epoch from
+        the Cluster DB.
+        If none was found, this will be zero.
+      */
+      g_ndb_slave_state.max_rep_epoch = highestAppliedEpoch;
+      sql_print_information("NDB Slave : MaxReplicatedEpoch set to %llu (%u/%u) at Slave start",
+                            g_ndb_slave_state.max_rep_epoch,
+                            (Uint32)(g_ndb_slave_state.max_rep_epoch >> 32),
+                            (Uint32)(g_ndb_slave_state.max_rep_epoch & 0xffffffff));
+    } // Load highest replicated epoch
+  } // New Slave SQL thread run id
+#endif
+
+  DBUG_RETURN(0);
+}
+
+
 static int update_status_variables(Thd_ndb *thd_ndb,
                                    st_ndb_status *ns,
                                    Ndb_cluster_connection *c)
@@ -653,6 +844,7 @@ SHOW_VAR ndb_status_injector_variables[]
 
 SHOW_VAR ndb_status_slave_variables[]= {
   NDBAPI_COUNTERS("_slave", &g_slave_api_client_stats),
+  {"slave_max_replicated_epoch", (char*) &g_ndb_slave_state.max_rep_epoch, SHOW_LONGLONG},
   {NullS, NullS, SHOW_LONG}
 };
 
@@ -772,7 +964,7 @@ static int ndb_to_mysql_error(const NdbE
 }
 
 #ifdef HAVE_NDB_BINLOG
-extern Master_info *active_mi;
+
 /* Write conflicting row to exceptions table. */
 static int write_conflict_row(NDB_SHARE *share,
                               NdbTransaction *trans,
@@ -800,8 +992,8 @@ static int write_conflict_row(NDB_SHARE 
   }
   {
     uint32 server_id= (uint32)::server_id;
-    uint32 master_server_id= (uint32)active_mi->master_id;
-    uint64 master_epoch= (uint64)active_mi->master_epoch;
+    uint32 master_server_id= (uint32) ndb_mi_get_master_server_id();
+    uint64 master_epoch= (uint64) g_ndb_slave_state.current_master_server_epoch;
     uint32 count= (uint32)++(cfn_share->m_count);
     if (ex_op->setValue((Uint32)0, (const char *)&(server_id)) ||
         ex_op->setValue((Uint32)1, (const char *)&(master_server_id)) ||
@@ -4359,7 +4551,7 @@ bool ha_ndbcluster::isManualBinlogExec(T
 #ifndef EMBEDDED_LIBRARY
   return thd ? 
     ( thd->rli_fake? 
-      thd->rli_fake->get_flag(Relay_log_info::IN_STMT) : false)
+      ndb_mi_get_in_relay_log_statement(thd->rli_fake) : false)
     : false;
 #else
   /* For Embedded library, we can't determine if we're
@@ -4615,21 +4807,38 @@ handle_conflict_op_error(Thd_ndb* thd_nd
 #endif /* HAVE_NDB_BINLOG */
 
 
+#ifdef HAVE_NDB_BINLOG
+/*
+  is_serverid_local
+*/
+static bool is_serverid_local(Uint32 serverid)
+{
+  /*
+     If it's not our serverid, check the
+     IGNORE_SERVER_IDS setting to check if
+     it's local.
+  */
+  return ((serverid == ::server_id) ||
+          ndb_mi_get_ignore_server_id(serverid));
+}
+#endif
+
 int ha_ndbcluster::write_row(uchar *record)
 {
   DBUG_ENTER("ha_ndbcluster::write_row");
 #ifdef HAVE_NDB_BINLOG
   if (m_share == ndb_apply_status_share && table->in_use->slave_thread)
   {
-    uint32 sid, master_server_id= active_mi->master_id;
-    memcpy(&sid, table->field[0]->ptr + (record - table->record[0]), sizeof(sid));
-    if (sid == master_server_id)
-    {
-      uint64 master_epoch;
-      memcpy(&master_epoch, table->field[1]->ptr + (record - table->record[0]),
-             sizeof(master_epoch));
-      active_mi->master_epoch= master_epoch;
-    }
+    uint32 row_server_id, master_server_id= ndb_mi_get_master_server_id();
+    uint64 row_epoch;
+    memcpy(&row_server_id, table->field[0]->ptr + (record - table->record[0]),
+           sizeof(row_server_id));
+    memcpy(&row_epoch, table->field[1]->ptr + (record - table->record[0]),
+           sizeof(row_epoch));
+    g_ndb_slave_state.atApplyStatusWrite(master_server_id,
+                                         row_server_id,
+                                         row_epoch,
+                                         is_serverid_local(row_server_id));
   }
 #endif /* HAVE_NDB_BINLOG */
   DBUG_RETURN(ndb_write_row(record, FALSE, FALSE));
@@ -4653,6 +4862,10 @@ int ha_ndbcluster::ndb_write_row(uchar *
   Uint32 num_sets= 0;
   DBUG_ENTER("ha_ndbcluster::ndb_write_row");
 
+  error = check_slave_state(thd);
+  if (unlikely(error))
+    DBUG_RETURN(error);
+
   has_auto_increment= (table->next_number_field && record == table->record[0]);
 
   if (has_auto_increment && table_share->primary_key != MAX_KEY) 
@@ -5197,6 +5410,11 @@ int ha_ndbcluster::ndb_update_row(const 
 
   DBUG_ENTER("ndb_update_row");
   DBUG_ASSERT(trans);
+
+  error = check_slave_state(thd);
+  if (unlikely(error))
+    DBUG_RETURN(error);
+
   /*
    * If IGNORE the ignore constraint violations on primary and unique keys,
    * but check that it is not part of INSERT ... ON DUPLICATE KEY UPDATE
@@ -5531,6 +5749,10 @@ int ha_ndbcluster::ndb_delete_row(const 
   DBUG_ENTER("ndb_delete_row");
   DBUG_ASSERT(trans);
 
+  error = check_slave_state(thd);
+  if (unlikely(error))
+    DBUG_RETURN(error);
+
   ha_statistic_increment(&SSV::ha_delete_count);
   m_rows_changed++;
 
@@ -7097,30 +7319,15 @@ static int ndbcluster_update_apply_statu
     r|= op->setValue(1u, (Uint64)0);
     DBUG_ASSERT(r == 0);
   }
-#if MYSQL_VERSION_ID < 50600
   const char* group_master_log_name =
-    active_mi->rli.group_master_log_name;
+    ndb_mi_get_group_master_log_name();
   const Uint64 group_master_log_pos =
-    (Uint64)active_mi->rli.group_master_log_pos;
+    ndb_mi_get_group_master_log_pos();
   const Uint64 future_event_relay_log_pos =
-    (Uint64)active_mi->rli.future_event_relay_log_pos;
+    ndb_mi_get_future_event_relay_log_pos();
   const Uint64 group_relay_log_pos =
-    (Uint64)active_mi->rli.group_relay_log_pos;
-#else
-  /*
-    - Master_info's rli member returns Relay_log_info*
-    - Relay_log_info members are protected and must be accessed
-      using accessor functions
-  */
-  const char* group_master_log_name =
-    active_mi->rli->get_group_master_log_name();
-  const Uint64 group_master_log_pos =
-    (Uint64)active_mi->rli->get_group_master_log_pos();
-  const Uint64 future_event_relay_log_pos =
-    (Uint64)active_mi->rli->get_future_event_relay_log_pos();
-  const Uint64 group_relay_log_pos =
-    (Uint64)active_mi->rli->get_group_relay_log_pos();
-#endif
+    ndb_mi_get_group_relay_log_pos();
+
   // log_name
   char tmp_buf[FN_REFLEN];
   ndb_pack_varchar(ndbtab->getColumn(2u), tmp_buf,

=== modified file 'sql/ha_ndbcluster.h'
--- a/sql/ha_ndbcluster.h	2011-06-24 12:32:05 +0000
+++ b/sql/ha_ndbcluster.h	2011-06-30 08:49:22 +0000
@@ -349,13 +349,23 @@ struct st_ndb_slave_state
   /* Counter values for current slave transaction */
   Uint32 current_conflict_defined_op_count;
   Uint32 current_violation_count[CFT_NUMBER_OF_CFTS];
+  Uint64 current_master_server_epoch;
+  Uint64 current_max_rep_epoch;
 
   /* Cumulative counter values */
   Uint64 total_violation_count[CFT_NUMBER_OF_CFTS];
+  Uint64 max_rep_epoch;
+  Uint32 sql_run_id;
 
   /* Methods */
   void atTransactionCommit();
   void atTransactionAbort();
+  void atResetSlave();
+
+  void atApplyStatusWrite(Uint32 master_server_id,
+                          Uint32 row_server_id,
+                          Uint64 row_epoch,
+                          bool is_row_server_id_local);
 
   st_ndb_slave_state();
 };

=== modified file 'sql/ha_ndbcluster_binlog.cc'
--- a/sql/ha_ndbcluster_binlog.cc	2011-06-16 18:16:01 +0000
+++ b/sql/ha_ndbcluster_binlog.cc	2011-06-29 23:28:01 +0000
@@ -47,6 +47,7 @@ extern my_bool opt_ndb_log_updated_only;
 extern my_bool opt_ndb_log_binlog_index;
 extern my_bool opt_ndb_log_apply_status;
 extern ulong opt_ndb_extra_logging;
+extern st_ndb_slave_state g_ndb_slave_state;
 
 bool ndb_log_empty_epochs(void);
 
@@ -892,6 +893,8 @@ static void ndbcluster_reset_slave(THD *
     thd_stmt_da(thd)->reset_diagnostics_area();
   }
 
+  g_ndb_slave_state.atResetSlave();
+
   DBUG_VOID_RETURN;
 }
 

=== added file 'sql/ndb_mi.cc'
--- a/sql/ndb_mi.cc	1970-01-01 00:00:00 +0000
+++ b/sql/ndb_mi.cc	2011-06-29 23:28:01 +0000
@@ -0,0 +1,86 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#include "ndb_mi.h"
+#include "my_sys.h"
+#include "hash.h"
+#include "rpl_mi.h"
+
+#ifdef HAVE_NDB_BINLOG
+
+extern Master_info *active_mi;
+
+
+uint32 ndb_mi_get_master_server_id()
+{
+  return (uint32) active_mi->master_id;
+}
+
+const char* ndb_mi_get_group_master_log_name()
+{
+#if MYSQL_VERSION_ID < 50600
+  return active_mi->rli.group_master_log_name;
+#else
+  return active_mi->rli->get_group_master_log_name();
+#endif
+}
+
+uint64 ndb_mi_get_group_master_log_pos()
+{
+#if MYSQL_VERSION_ID < 50600
+  return (uint64) active_mi->rli.group_master_log_pos;
+#else
+  return (uint64) active_mi->rli->get_group_master_log_pos();
+#endif
+}
+
+uint64 ndb_mi_get_future_event_relay_log_pos()
+{
+#if MYSQL_VERSION_ID < 50600
+  return (uint64) active_mi->rli.future_event_relay_log_pos;
+#else
+  return (uint64) active_mi->rli->get_future_event_relay_log_pos();
+#endif
+}
+
+uint64 ndb_mi_get_group_relay_log_pos()
+{
+#if MYSQL_VERSION_ID < 50600
+  return (uint64) active_mi->rli.group_relay_log_pos;
+#else
+  return (uint64) active_mi->rli->get_group_relay_log_pos();
+#endif
+}
+
+bool ndb_mi_get_ignore_server_id(uint32 server_id)
+{
+  return (active_mi->shall_ignore_server_id(server_id) != 0);
+}
+
+uint32 ndb_mi_get_slave_run_id()
+{
+  return active_mi->rli.slave_run_id;
+}
+
+bool ndb_mi_get_in_relay_log_statement(Relay_log_info* rli)
+{
+  return (rli->get_flag(Relay_log_info::IN_STMT) != 0);
+}
+
+/* #ifdef HAVE_NDB_BINLOG */
+
+#endif

=== added file 'sql/ndb_mi.h'
--- a/sql/ndb_mi.h	1970-01-01 00:00:00 +0000
+++ b/sql/ndb_mi.h	2011-06-29 23:28:01 +0000
@@ -0,0 +1,47 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#ifndef NDB_MI_H
+#define NDB_MI_H
+
+#include <my_global.h>
+
+/*
+   This file defines methods for interacting with the
+   Master Info structure on a Slave MySQLD.
+   These methods are only valid when running in an
+   active slave thread.
+*/
+
+/*
+  Accessors
+*/
+uint32 ndb_mi_get_master_server_id();
+const char* ndb_mi_get_group_master_log_name();
+uint64 ndb_mi_get_group_master_log_pos();
+uint64 ndb_mi_get_future_event_relay_log_pos();
+uint64 ndb_mi_get_group_relay_log_pos();
+bool ndb_mi_get_ignore_server_id(uint32 server_id);
+uint32 ndb_mi_get_slave_run_id();
+
+/*
+   Relay log info related functions
+*/
+bool ndb_mi_get_in_relay_log_statement(class Relay_log_info* rli);
+
+// #ifndef NDB_MI_H
+#endif

=== modified file 'storage/ndb/CMakeLists.txt'
--- a/storage/ndb/CMakeLists.txt	2011-06-17 12:41:11 +0000
+++ b/storage/ndb/CMakeLists.txt	2011-06-30 08:49:22 +0000
@@ -148,7 +148,8 @@ SET(NDBCLUSTER_SOURCES
   ../../sql/ha_ndbcluster_connection.cc
   ../../sql/ha_ndbcluster_binlog.cc
   ../../sql/ha_ndb_index_stat.cc
-  ../../sql/ha_ndbinfo.cc)
+  ../../sql/ha_ndbinfo.cc
+  ../../sql/ndb_mi.cc)
 INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/storage/ndb/include)
 
 IF(EXISTS ${CMAKE_SOURCE_DIR}/storage/mysql_storage_engine.cmake)

=== modified file 'storage/ndb/include/ndbapi/NdbIndexStat.hpp'
--- a/storage/ndb/include/ndbapi/NdbIndexStat.hpp	2011-06-07 10:03:02 +0000
+++ b/storage/ndb/include/ndbapi/NdbIndexStat.hpp	2011-06-28 16:13:49 +0000
@@ -334,9 +334,9 @@ public:
   struct Mem {
     Mem();
     virtual ~Mem();
-    virtual void* mem_alloc(size_t size) = 0;
+    virtual void* mem_alloc(UintPtr size) = 0;
     virtual void mem_free(void* ptr) = 0;
-    virtual size_t mem_used() const = 0;
+    virtual UintPtr mem_used() const = 0;
   };
 
   /*

=== modified file 'storage/ndb/ndb_configure.m4'
--- a/storage/ndb/ndb_configure.m4	2011-05-26 15:04:45 +0000
+++ b/storage/ndb/ndb_configure.m4	2011-06-30 08:49:22 +0000
@@ -2,7 +2,7 @@
 # Should be updated when creating a new NDB version
 NDB_VERSION_MAJOR=7
 NDB_VERSION_MINOR=0
-NDB_VERSION_BUILD=26
+NDB_VERSION_BUILD=27
 NDB_VERSION_STATUS=""
 
 dnl for build ndb docs

=== modified file 'storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-05-26 15:04:45 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp	2011-06-30 08:49:22 +0000
@@ -978,10 +978,10 @@ ArrayPool<TupTriggerData> c_triggerPool;
       subscriptionDeleteTriggers(triggerPool),
       subscriptionUpdateTriggers(triggerPool),
       constraintUpdateTriggers(triggerPool),
-      tuxCustomTriggers(triggerPool),
       deferredInsertTriggers(triggerPool),
+      deferredUpdateTriggers(triggerPool),
       deferredDeleteTriggers(triggerPool),
-      deferredUpdateTriggers(triggerPool)
+      tuxCustomTriggers(triggerPool)
       {}
     
     Bitmask<MAXNROFATTRIBUTESINWORDS> notNullAttributeMask;

=== modified file 'storage/ndb/src/kernel/blocks/suma/Suma.cpp'
--- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp	2011-06-07 12:19:47 +0000
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp	2011-06-30 08:49:22 +0000
@@ -132,9 +132,9 @@ Suma::execREAD_CONFIG_REQ(Signal* signal
 
   // SumaParticipant
   Uint32 noTables, noAttrs, maxBufferedEpochs;
-  ndb_mgm_get_int_parameter(p, CFG_DB_NO_TABLES,  
+  ndb_mgm_get_int_parameter(p, CFG_DICT_TABLE,
 			    &noTables);
-  ndb_mgm_get_int_parameter(p, CFG_DB_NO_ATTRIBUTES,  
+  ndb_mgm_get_int_parameter(p, CFG_DICT_ATTRIBUTE,
 			    &noAttrs);
   ndb_mgm_get_int_parameter(p, CFG_DB_MAX_BUFFERED_EPOCHS,
                             &maxBufferedEpochs);

=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.cpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-06-22 08:57:03 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-06-27 06:26:01 +0000
@@ -1081,10 +1081,6 @@ MgmtSrvr::sendall_STOP_REQ(NodeBitmask &
         else
           failed++;
       }
-      else
-      {
-        failed++;
-      }
     }
   }
 

=== modified file 'storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp	2011-06-16 18:16:01 +0000
+++ b/storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp	2011-06-28 16:13:49 +0000
@@ -2257,7 +2257,7 @@ NdbIndexStatImpl::MemDefault::~MemDefaul
 }
 
 void*
-NdbIndexStatImpl::MemDefault::mem_alloc(size_t size)
+NdbIndexStatImpl::MemDefault::mem_alloc(UintPtr size)
 {
   if (size == 0 || size % 4 != 0)
   {
@@ -2290,7 +2290,7 @@ NdbIndexStatImpl::MemDefault::mem_free(v
   }
 }
 
-size_t
+UintPtr
 NdbIndexStatImpl::MemDefault::mem_used() const
 {
   return m_used;

=== modified file 'storage/ndb/src/ndbapi/NdbIndexStatImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbIndexStatImpl.hpp	2011-06-12 16:54:32 +0000
+++ b/storage/ndb/src/ndbapi/NdbIndexStatImpl.hpp	2011-06-28 16:13:49 +0000
@@ -281,9 +281,9 @@ public:
 
   // default memory allocator
   struct MemDefault : public Mem {
-    virtual void* mem_alloc(size_t bytes);
+    virtual void* mem_alloc(UintPtr bytes);
     virtual void mem_free(void* p);
-    virtual size_t mem_used() const;
+    virtual UintPtr mem_used() const;
     MemDefault();
     virtual ~MemDefault();
   private:

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilder.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-06-16 10:26:41 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-06-30 08:49:22 +0000
@@ -318,6 +318,11 @@ NdbQueryDef::isScanQuery() const
 { return m_impl.isScanQuery();
 }
 
+NdbQueryDef::QueryType
+NdbQueryDef::getQueryType() const
+{ return m_impl.getQueryType();
+}
+
 NdbQueryDefImpl& 
 NdbQueryDef::getImpl() const{
   return m_impl;
@@ -1095,24 +1100,10 @@ NdbQueryBuilderImpl::contains(const NdbQ
 const NdbQueryDefImpl*
 NdbQueryBuilderImpl::prepare()
 {
-  /* Check if query is sorted and has multiple scan operations. This 
-   * combination is not implemented.
-   */
-  if (m_operations.size() > 0 && 
-      m_operations[0]->isScanOperation() &&
-      m_operations[0]->getOrdering() 
-        != NdbQueryOptions::ScanOrdering_unordered &&
-      m_operations[0]->getOrdering() != NdbQueryOptions::ScanOrdering_void)
-  {
-    for (Uint32 i = 1; i<m_operations.size(); i++)
-    {
-      if (m_operations[i]->isScanOperation())
-      {
-        setErrorCode(QRY_MULTIPLE_SCAN_SORTED);
-        return NULL;
-      } 
-    }
-  }
+  const bool sorted =
+    m_operations.size() > 0 &&
+    m_operations[0]->getOrdering() != NdbQueryOptions::ScanOrdering_unordered &&
+    m_operations[0]->getOrdering() != NdbQueryOptions::ScanOrdering_void;
 
   int error;
   NdbQueryDefImpl* def = new NdbQueryDefImpl(m_operations, m_operands, error);
@@ -1127,6 +1118,16 @@ NdbQueryBuilderImpl::prepare()
     return NULL;
   }
 
+  /* Check if query is sorted and has multiple scan operations. This 
+   * combination is not implemented.
+   */
+  if (sorted && def->getQueryType() == NdbQueryDef::MultiScanQuery)
+  {
+    delete def;
+    setErrorCode(QRY_MULTIPLE_SCAN_SORTED);
+    return NULL;
+  }
+
   if (doPrintQueryTree)
   {
     ndbout << "Query tree:" << endl;
@@ -1256,6 +1257,20 @@ NdbQueryDefImpl::getQueryOperation(const
   return NULL;
 }
 
+NdbQueryDef::QueryType
+NdbQueryDefImpl::getQueryType() const
+{
+  if (!m_operations[0]->isScanOperation())
+    return NdbQueryDef::LookupQuery;
+
+  for (Uint32 i=1; i<m_operations.size(); ++i)
+  {
+    if (m_operations[i]->isScanOperation())
+      return NdbQueryDef::MultiScanQuery;
+  }
+  return NdbQueryDef::SingleScanQuery;
+}
+
 ////////////////////////////////////////////////////////////////
 // The (hidden) Impl of NdbQueryOperand (w/ various subclasses)
 ////////////////////////////////////////////////////////////////

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilder.hpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilder.hpp	2011-06-20 14:24:53 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilder.hpp	2011-06-30 08:49:22 +0000
@@ -493,6 +493,15 @@ class NdbQueryDef
 
 public:
 
+  /**
+   * The different types of query types supported
+   */
+  enum QueryType {
+    LookupQuery,     ///< All operations are PrimaryKey- or UniqueIndexAccess
+    SingleScanQuery, ///< Root is Table- or OrderedIndexScan, childs are 'lookup'
+    MultiScanQuery   ///< Root, and some childs are scans
+  };
+
   Uint32 getNoOfOperations() const;
 
   // Get a specific NdbQueryOperationDef by ident specified
@@ -504,6 +513,9 @@ public:
   // the client has completed access to it.
   bool isScanQuery() const;
 
+  // Return the 'enum QueryType' as defined above.
+  QueryType getQueryType() const;
+
   // Remove this NdbQueryDef including operation and operands it contains
   void destroy() const;
 

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-06-16 10:26:41 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-06-30 08:49:22 +0000
@@ -595,6 +595,8 @@ public:
   bool isScanQuery() const
   { return m_operations[0]->isScanOperation(); }
 
+  NdbQueryDef::QueryType getQueryType() const;
+
   Uint32 getNoOfOperations() const
   { return m_operations.size(); }
 

=== modified file 'storage/ndb/src/ndbapi/NdbQueryOperation.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-06-20 14:24:53 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryOperation.cpp	2011-06-30 08:49:22 +0000
@@ -4771,17 +4771,11 @@ NdbQueryOperationImpl::setOrdering(NdbQu
   /* Check if query is sorted and has multiple scan operations. This 
    * combination is not implemented.
    */
-  if (ordering != NdbQueryOptions::ScanOrdering_unordered)
+  if (ordering != NdbQueryOptions::ScanOrdering_unordered &&
+      getQueryDef().getQueryType() == NdbQueryDef::MultiScanQuery)
   {
-    for (Uint32 i = 1; i < getQuery().getNoOfOperations(); i++)
-    {
-      if (getQuery().getQueryOperation(i).getQueryOperationDef()
-          .isScanOperation())
-      {
-        getQuery().setErrorCode(QRY_MULTIPLE_SCAN_SORTED);
-        return -1;
-      }
-    }
+    getQuery().setErrorCode(QRY_MULTIPLE_SCAN_SORTED);
+    return -1;
   }
   
   m_ordering = ordering;

=== modified file 'storage/ndb/src/ndbapi/NdbTransaction.cpp'
--- a/storage/ndb/src/ndbapi/NdbTransaction.cpp	2011-05-26 15:04:45 +0000
+++ b/storage/ndb/src/ndbapi/NdbTransaction.cpp	2011-06-30 08:49:22 +0000
@@ -937,7 +937,10 @@ int NdbTransaction::refresh()
       scan_op != 0; scan_op = (NdbIndexScanOperation *) scan_op->theNext)
   {
     NdbTransaction* scan_trans = scan_op->theNdbCon;
-    scan_trans->sendTC_HBREP();
+    if (scan_trans)
+    {
+      scan_trans->sendTC_HBREP();
+    }
   }
   return sendTC_HBREP();
 }

=== modified file 'storage/ndb/test/ndbapi/testMgmd.cpp'
--- a/storage/ndb/test/ndbapi/testMgmd.cpp	2011-06-21 13:10:37 +0000
+++ b/storage/ndb/test/ndbapi/testMgmd.cpp	2011-06-27 06:26:01 +0000
@@ -280,6 +280,8 @@ public:
 
   }
 
+  NdbMgmHandle handle() { return m_mgmd_client.handle(); }
+
 private:
 
   bool get_section_string(const Properties& config,
@@ -1099,6 +1101,55 @@ int runTestBug12352191(NDBT_Context* ctx
 
 }
 
+int
+runBug61607(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NDBT_Workingdir wd("test_mgmd"); // temporary working directory
+
+  // Create config.ini
+  const int cnt_mgmd = 1;
+  Properties config = ConfigFactory::create(cnt_mgmd);
+  CHECK(ConfigFactory::write_config_ini(config,
+                                        path(wd.path(),
+                                             "config.ini",
+                                             NULL).c_str()));
+  // Start ndb_mgmd(s)
+  MgmdProcessList mgmds;
+  for (int i = 1; i <= cnt_mgmd; i++)
+  {
+    Mgmd* mgmd = new Mgmd(i);
+    mgmds.push_back(mgmd);
+    CHECK(mgmd->start_from_config_ini(wd.path()));
+  }
+
+  // Connect the ndb_mgmd(s)
+  for (unsigned i = 0; i < mgmds.size(); i++)
+    CHECK(mgmds[i]->connect(config));
+
+  // wait for confirmed config
+  for (unsigned i = 0; i < mgmds.size(); i++)
+    CHECK(mgmds[i]->wait_confirmed_config());
+
+  // Check binary config files created
+  CHECK(file_exists(path(wd.path(),
+                         "ndb_1_config.bin.1",
+                         NULL).c_str()));
+
+  int no_of_nodes = 0;
+  int * node_ids = 0;
+  int initialstart = 0;
+  int nostart = 0;
+  int abort = 0;
+  int force = 0;
+  int need_disconnect = 0;
+  int res = ndb_mgm_restart4(mgmds[0]->handle(), no_of_nodes, node_ids,
+                             initialstart, nostart, abort, force,
+                             &need_disconnect);
+
+
+  return res == 0 ? NDBT_OK : NDBT_FAILED;
+}
+
 NDBT_TESTSUITE(testMgmd);
 DRIVER(DummyDriver); /* turn off use of NdbApi */
 
@@ -1151,6 +1202,10 @@ TESTCASE("Bug12352191",
 {
   INITIALIZER(runTestBug12352191);
 }
+TESTCASE("Bug61607", "")
+{
+  INITIALIZER(runBug61607);
+}
 
 NDBT_TESTSUITE_END(testMgmd);
 

=== modified file 'storage/ndb/test/ndbapi/testScan.cpp'
--- a/storage/ndb/test/ndbapi/testScan.cpp	2011-04-07 07:22:49 +0000
+++ b/storage/ndb/test/ndbapi/testScan.cpp	2011-06-28 08:47:18 +0000
@@ -1424,6 +1424,86 @@ runBug54945(NDBT_Context* ctx, NDBT_Step
   return NDBT_OK;
 }
 
+int
+runCloseRefresh(NDBT_Context* ctx, NDBT_Step* step)
+{
+  Ndb * pNdb = GETNDB(step);
+
+  const Uint32 codeWords= 1;
+  Uint32 codeSpace[ codeWords ];
+  NdbInterpretedCode code(NULL, // Table is irrelevant
+                          &codeSpace[0],
+                          codeWords);
+  if ((code.interpret_exit_last_row() != 0) ||
+      (code.finalise() != 0))
+  {
+    ERR(code.getNdbError());
+    return NDBT_FAILED;
+  }
+
+  const NdbDictionary::Table*  pTab = ctx->getTab();
+  NdbTransaction* pTrans = pNdb->startTransaction();
+  NdbScanOperation* pOp = pTrans->getNdbScanOperation(pTab->getName());
+  if (pOp == NULL)
+  {
+    ERR(pTrans->getNdbError());
+    return NDBT_FAILED;
+  }
+
+  if (pOp->readTuples(NdbOperation::LM_CommittedRead) != 0)
+  {
+    ERR(pTrans->getNdbError());
+    return NDBT_FAILED;
+  }
+
+  if (pOp->setInterpretedCode(&code) == -1 )
+  {
+    ERR(pTrans->getNdbError());
+    pNdb->closeTransaction(pTrans);
+    return NDBT_FAILED;
+  }
+
+  if (pOp->getValue(NdbDictionary::Column::ROW_COUNT) == 0)
+  {
+    ERR(pTrans->getNdbError());
+    return NDBT_FAILED;
+  }
+
+  pTrans->execute(NdbTransaction::NoCommit);
+  pOp->close(); // close this
+
+  pOp = pTrans->getNdbScanOperation(pTab->getName());
+  if (pOp == NULL)
+  {
+    ERR(pTrans->getNdbError());
+    return NDBT_FAILED;
+  }
+
+  if (pOp->readTuples(NdbOperation::LM_CommittedRead) != 0)
+  {
+    ERR(pTrans->getNdbError());
+    return NDBT_FAILED;
+  }
+
+  if (pOp->setInterpretedCode(&code) == -1 )
+  {
+    ERR(pTrans->getNdbError());
+    pNdb->closeTransaction(pTrans);
+    return NDBT_FAILED;
+  }
+
+  if (pOp->getValue(NdbDictionary::Column::ROW_COUNT) == 0)
+  {
+    ERR(pTrans->getNdbError());
+    return NDBT_FAILED;
+  }
+
+  pTrans->execute(NdbTransaction::NoCommit);
+  pTrans->refresh();
+  pTrans->close();
+  return NDBT_OK;
+}
+
 #define CHK_RET_FAILED(x) if (!(x)) { ndbout_c("Failed on line: %u", __LINE__); return NDBT_FAILED; }
 
 int
@@ -2066,6 +2146,10 @@ TESTCASE("Bug42559", "") 
   FINALIZER(finalizeBug42559);
   FINALIZER(runClearTable);
 }
+TESTCASE("CloseRefresh", "")
+{
+  INITIALIZER(runCloseRefresh);
+}
 TESTCASE("Bug54945", "")
 {
   INITIALIZER(runBug54945);

=== modified file 'storage/ndb/test/run-test/daily-basic-tests.txt'
--- a/storage/ndb/test/run-test/daily-basic-tests.txt	2011-06-07 12:19:47 +0000
+++ b/storage/ndb/test/run-test/daily-basic-tests.txt	2011-06-30 08:49:22 +0000
@@ -517,6 +517,10 @@ args: -n NoCloseTransaction T6 D1 D2 
 
 max-time: 500
 cmd: testScan
+args: -n CloseRefresh T1
+
+max-time: 500
+cmd: testScan
 args: -n CheckInactivityTimeOut T6 D1 D2 
 
 max-time: 500

No bundle (reason: useless for push emails).