List: Commits
From: Frazer Clement  Date: June 23 2011 2:52pm
Subject: bzr push into mysql-5.1-telco-7.1 branch (frazer.clement:4253 to 4254)
 4254 Frazer Clement	2011-06-23
      Commit for club run2: add Ndb_slave_max_replicated_epoch status variable,
      tracking the highest epoch applied from a local server (own server_id or
      one listed in IGNORE_SERVER_IDS). The value is reloaded from
      mysql.ndb_apply_status at slave start and channel cutover, cleared by
      RESET SLAVE, and Master_info access is wrapped in the new ndb_mi.h/.cc.

    added:
      mysql-test/suite/ndb_rpl/r/ndb_rpl_circular_2ch_rep_status.result
      mysql-test/suite/ndb_rpl/r/ndb_rpl_init_rep_status.result
      mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.cnf
      mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.test
      mysql-test/suite/ndb_rpl/t/ndb_rpl_init_rep_status.test
      sql/ndb_mi.cc
      sql/ndb_mi.h
    modified:
      mysql-test/suite/ndb/r/ndb_basic.result
      sql/Makefile.am
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster.h
      sql/ha_ndbcluster_binlog.cc
 4253 magnus.blaudd@stripped	2011-06-21 [merge]
      Merge 7.0 -> 7.1

    modified:
      mysql-test/t/ctype_cp932_binlog_stm.test
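
For reference, the new status variable can be inspected from any slave mysqld
exactly as the new tests below do (a sketch only, not part of the committed
files):

  SHOW GLOBAL STATUS LIKE 'Ndb_slave_max_replicated_epoch';

  select variable_name, variable_value
    from information_schema.global_status
    where variable_name='Ndb_slave_max_replicated_epoch';
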
=== modified file 'mysql-test/suite/ndb/r/ndb_basic.result'
--- a/mysql-test/suite/ndb/r/ndb_basic.result	2011-06-16 18:16:01 +0000
+++ b/mysql-test/suite/ndb/r/ndb_basic.result	2011-06-23 14:50:59 +0000
@@ -76,6 +76,7 @@ Ndb_number_of_data_nodes	#
 Ndb_number_of_ready_data_nodes	#
 Ndb_pruned_scan_count	#
 Ndb_scan_count	#
+Ndb_slave_max_replicated_epoch	#
 SHOW GLOBAL VARIABLES LIKE 'ndb\_%';
 Variable_name	Value
 ndb_autoincrement_prefetch_sz	#

=== added file 'mysql-test/suite/ndb_rpl/r/ndb_rpl_circular_2ch_rep_status.result'
--- a/mysql-test/suite/ndb_rpl/r/ndb_rpl_circular_2ch_rep_status.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_rpl/r/ndb_rpl_circular_2ch_rep_status.result	2011-06-23 14:50:59 +0000
@@ -0,0 +1,169 @@
+include/rpl_init.inc [topology=1->2,4->3]
+include/rpl_connect.inc [creating master]
+include/rpl_connect.inc [creating master1]
+include/rpl_connect.inc [creating slave]
+include/rpl_connect.inc [creating slave1]
+include/rpl_start_slaves.inc
+Cluster A servers have no epoch replication info
+select count(1) from mysql.ndb_apply_status;
+count(1)
+0
+Cluster A servers have no max replicated epoch value
+Master(1)
+select variable_name, variable_value from information_schema.global_status
+where variable_name='Ndb_slave_max_replicated_epoch';
+variable_name	variable_value
+NDB_SLAVE_MAX_REPLICATED_EPOCH	0
+Master1(3)
+select variable_name, variable_value from information_schema.global_status
+where variable_name='Ndb_slave_max_replicated_epoch';
+variable_name	variable_value
+NDB_SLAVE_MAX_REPLICATED_EPOCH	0
+Make a change originating at Cluster A
+Master(1)
+use test;
+create table t1 (a int primary key, b varchar(100)) engine=ndb;
+insert into t1 values (1, "Venice");
+Allow it to propagate to Cluster B
+Originate a second unrelated change at Cluster B, to allow us to wait for
+reverse propagation in the testcase
+Slave1 (4)
+insert into t1 values (2, "Death");
+Allow it to propagate to Cluster A
+Observe new entry in ndb_apply_status on Cluster A
+Master (1)
+select server_id from mysql.ndb_apply_status order by server_id;
+server_id
+1
+4
+Non-slave server on Cluster A will have no value for Max Replicated Epoch
+select variable_name, variable_value from information_schema.global_status
+where variable_name='Ndb_slave_max_replicated_epoch';
+variable_name	variable_value
+NDB_SLAVE_MAX_REPLICATED_EPOCH	0
+Slave server on Cluster A has current value for Max Replicated Epoch
+Master1 (3)
+Expect count 1
+select
+count(1)
+from
+information_schema.global_status,
+mysql.ndb_apply_status
+where
+server_id = 1
+and
+variable_name='Ndb_slave_max_replicated_epoch'
+    and
+variable_value = epoch;
+count(1)
+1
+Now wait for all replication to quiesce
+Now swap replication channels around
+include/rpl_stop_slaves.inc
+include/rpl_change_topology.inc [new topology=2->1,3->4]
+Get current master status on Cluster A new master (next pos in Binlog)
+Master1 (3)
+Flush logs to ensure any pending update (e.g. reflected apply_status write row)
+is skipped over.
+flush logs;
+Setup slave on Cluster B to use it
+Slave1 (4)
+Get current master status on Cluster B new master (next pos in Binlog)
+Slave (2)
+Flush logs to ensure any pending update (e.g. reflected apply_status write row)
+is skipped over.
+flush logs;
+Setup slave on Cluster A to use it
+Master (1)
+Master (1)
+Show that Cluster A Slave server (old master) has no Max replicated epoch before receiving data
+select variable_name, variable_value from information_schema.global_status
+where variable_name='Ndb_slave_max_replicated_epoch';
+variable_name	variable_value
+NDB_SLAVE_MAX_REPLICATED_EPOCH	0
+Master1 (3)
+Cluster A Master server (old slave) has old Max replicated epoch
+select
+count(1)
+from
+information_schema.global_status,
+mysql.ndb_apply_status
+where
+server_id = 1
+and
+variable_name='Ndb_slave_max_replicated_epoch'
+    and
+variable_value = epoch;
+count(1)
+1
+Now start slaves up
+include/rpl_start_slaves.inc
+Show that applying something from Cluster B causes the
+old Max Rep Epoch to be loaded from ndb_apply_status
+There is no new Max Rep Epoch from Cluster A as it has not changed
+anything yet
+Slave (2)
+insert into test.t1 values (3, "From the Sea");
+Allow to propagate to Cluster A
+Master (1)
+New Slave server on Cluster A has loaded old Max-Replicated-Epoch
+select server_id from mysql.ndb_apply_status order by server_id;
+server_id
+1
+2
+4
+select
+count(1)
+from
+information_schema.global_status,
+mysql.ndb_apply_status
+where
+server_id = 1
+and
+variable_name='Ndb_slave_max_replicated_epoch'
+    and
+variable_value = epoch;
+count(1)
+1
+Now make a new Cluster A change and see that the Max Replicated Epoch advances
+once it has propagated
+Master1 (3)
+insert into test.t1 values (4, "Brooke");
+Propagate to Cluster B
+Make change on Cluster B to allow waiting for reverse propagation
+Slave (2)
+insert into test.t1 values (5, "Rupert");
+Wait for propagation back to Cluster A
+Master (1)
+Show that Cluster A now has 2 different server_id entries in ndb_apply_status
+Those from the new master (server_id 3) are highest.
+select server_id from mysql.ndb_apply_status order by server_id;
+server_id
+1
+2
+3
+4
+select
+count(1)
+from
+information_schema.global_status,
+mysql.ndb_apply_status
+where
+server_id = 3
+and
+variable_name='Ndb_slave_max_replicated_epoch'
+    and
+variable_value = epoch;
+count(1)
+1
+local_server_with_max_epoch
+3
+Done
+drop table t1;
+include/rpl_stop_slaves.inc
+CHANGE MASTER TO IGNORE_SERVER_IDS= ();
+CHANGE MASTER TO IGNORE_SERVER_IDS= ();
+CHANGE MASTER TO IGNORE_SERVER_IDS= ();
+CHANGE MASTER TO IGNORE_SERVER_IDS= ();
+include/rpl_start_slaves.inc
+include/rpl_end.inc
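
  The count(1) query repeated throughout this result checks a single thing:
  that the slave mysqld's Ndb_slave_max_replicated_epoch equals the epoch
  recorded in mysql.ndb_apply_status for the current master's server_id. An
  equivalent standalone sketch (alias names are illustrative only):

    select count(1) as epoch_matches
      from information_schema.global_status gs
      join mysql.ndb_apply_status aps
        on gs.variable_value = aps.epoch
      where gs.variable_name='Ndb_slave_max_replicated_epoch'
        and aps.server_id = 1;  -- server_id of this channel's master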

=== added file 'mysql-test/suite/ndb_rpl/r/ndb_rpl_init_rep_status.result'
--- a/mysql-test/suite/ndb_rpl/r/ndb_rpl_init_rep_status.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_rpl/r/ndb_rpl_init_rep_status.result	2011-06-23 14:50:59 +0000
@@ -0,0 +1,78 @@
+include/master-slave.inc
+[connection master]
+reset master;
+stop slave;
+Generate something in the Master's Binlog
+use test;
+create table t1 (a int primary key, b int) engine=ndb;
+insert into t1 values (1,1);
+Initial state
+select * from mysql.ndb_apply_status;
+server_id	epoch	log_name	start_pos	end_pos
+select variable_value from information_schema.global_status
+where variable_name like '%Ndb_slave_max_replicated_epoch%';
+variable_value
+0
+select @slave_server_id:=(variable_value+0) from information_schema.global_variables
+where variable_name like 'server_id';
+@slave_server_id:=(variable_value+0)
+2
+Default, no data, max replicated epoch will be 0.
+reset slave;
+start slave;
+select server_id from mysql.ndb_apply_status order by server_id;
+server_id
+1
+select variable_value from information_schema.global_status
+where variable_name like 'Ndb_slave_max_replicated_epoch';
+variable_value
+0
+Default, load of own serverid from ndb_apply_status, should be 111
+drop table test.t1;
+stop slave;
+reset slave;
+insert into mysql.ndb_apply_status values (@slave_server_id, 111, 'Fictional log', 222, 333);
+start slave;
+select server_id from mysql.ndb_apply_status order by server_id;
+server_id
+1
+2
+select variable_value from information_schema.global_status
+where variable_name like 'Ndb_slave_max_replicated_epoch';
+variable_value
+111
+drop table test.t1;
+Check that reset slave resets Ndb_slave_max_replicated_epoch
+stop slave;
+select variable_value from information_schema.global_status
+where variable_name like 'Ndb_slave_max_replicated_epoch';
+variable_value
+111
+reset slave;
+select variable_value from information_schema.global_status
+where variable_name like 'Ndb_slave_max_replicated_epoch';
+variable_value
+0
+Multiple-channel, load highest of configured serverids, should be 222
+set @other_local_server_id=@slave_server_id+1;
+set @other_remote_server_id=@slave_server_id+2;
+insert into mysql.ndb_apply_status values (@slave_server_id, 111, 'Fictional log', 222, 333);
+insert into mysql.ndb_apply_status values (@other_local_server_id, 222, 'Fictional log', 222, 333);
+insert into mysql.ndb_apply_status values (@other_remote_server_id, 444, 'Fictional log', 222, 333);
+CHANGE MASTER TO IGNORE_SERVER_IDS=(3);;
+start slave;
+select server_id from mysql.ndb_apply_status order by server_id;
+server_id
+1
+2
+3
+4
+select variable_value from information_schema.global_status
+where variable_name like 'Ndb_slave_max_replicated_epoch';
+variable_value
+222
+stop slave;
+CHANGE MASTER TO IGNORE_SERVER_IDS= ();
+start slave;
+drop table test.t1;
+include/rpl_end.inc
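
  The reset behaviour verified above (111 dropping back to 0) comes from the
  new st_ndb_slave_state::atResetSlave() hook called from
  ndbcluster_reset_slave() (see ha_ndbcluster_binlog.cc below). A minimal
  manual check would be:

    STOP SLAVE;
    SHOW GLOBAL STATUS LIKE 'Ndb_slave_max_replicated_epoch';  -- last loaded value
    RESET SLAVE;
    SHOW GLOBAL STATUS LIKE 'Ndb_slave_max_replicated_epoch';  -- back to 0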

=== added file 'mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.cnf'
--- a/mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.cnf	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.cnf	2011-06-23 14:50:59 +0000
@@ -0,0 +1,14 @@
+!include ndb_rpl_circular_2ch.cnf
+
+[mysqld.1.1]
+ndb-log-apply-status
+
+[mysqld.2.1]
+ndb-log-apply-status
+
+[mysqld.1.slave]
+ndb-log-apply-status
+
+[mysqld.2.slave]
+ndb-log-apply-status
+

=== added file 'mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.test'
--- a/mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_circular_2ch_rep_status.test	2011-06-23 14:50:59 +0000
@@ -0,0 +1,247 @@
+--source include/have_ndb.inc
+--source suite/ndb_rpl/ndb_master-slave_2ch.inc
+--source include/have_binlog_format_mixed_or_row.inc
+
+#
+# Test that the Maximum replicated epoch is maintained
+# as expected in a circular, 2 channel configuration.
+# The channels are swapped, and replication is restarted
+# The MaxReplicatedEpoch is reloaded from ndb_apply_status
+# for the Servers considered local (IGNORE_SERVER_IDS)
+#
+--connection master
+--echo Cluster A servers have no epoch replication info
+select count(1) from mysql.ndb_apply_status;
+
+--echo Cluster A servers have no max replicated epoch value
+--echo Master(1)
+select variable_name, variable_value from information_schema.global_status
+  where variable_name='Ndb_slave_max_replicated_epoch';
+--connection master1
+--echo Master1(3)
+select variable_name, variable_value from information_schema.global_status
+  where variable_name='Ndb_slave_max_replicated_epoch';
+
+--echo Make a change originating at Cluster A
+--connection master
+--echo Master(1)
+use test;
+create table t1 (a int primary key, b varchar(100)) engine=ndb;
+insert into t1 values (1, "Venice");
+
+--echo Allow it to propagate to Cluster B
+--sync_slave_with_master slave
+
+--echo Originate a second unrelated change at Cluster B, to allow us to wait for
+--echo reverse propagation in the testcase
+--connection slave1
+--echo Slave1 (4)
+insert into t1 values (2, "Death");
+
+--echo Allow it to propagate to Cluster A
+--sync_slave_with_master master1
+
+--echo Observe new entry in ndb_apply_status on Cluster A
+--connection master
+--echo Master (1)
+select server_id from mysql.ndb_apply_status order by server_id;
+
+--echo Non-slave server on Cluster A will have no value for Max Replicated Epoch
+select variable_name, variable_value from information_schema.global_status
+  where variable_name='Ndb_slave_max_replicated_epoch';
+
+--echo Slave server on Cluster A has current value for Max Replicated Epoch
+--connection master1
+--echo Master1 (3)
+--echo Expect count 1
+# Here we join the max rep epoch with ndb_apply_status for server id 1
+# (Our site's current master server)
+select
+    count(1)
+  from
+    information_schema.global_status,
+    mysql.ndb_apply_status
+  where
+    server_id = 1
+    and
+    variable_name='Ndb_slave_max_replicated_epoch'
+    and
+    variable_value = epoch;
+
+--echo Now wait for all replication to quiesce
+
+--echo Now swap replication channels around
+--source include/rpl_stop_slaves.inc
+--let $rpl_topology= 2->1,3->4
+--source include/rpl_change_topology.inc
+
+# We've changed the direction, but need to set binlog filenames
+# and positions
+
+#
+# 'Normally' we should use the ndb_apply_status max applied epoch,
+# then lookup ndb_binlog_index etc.
+# However, in this case (and probably in lots of real cases), no changes
+# have been made after the last applied epoch, so there is no epoch
+# after the current one, and therefore no entry in ndb_binlog_index
+# to get the correct position from.
+# We could just re-apply the last epoch applied, but that's imprecise,
+# and causes us to create an ndb_apply_status entry for Server 3 when
+# it has not really been master for those changes.
+# So we just look at the Master status instead.
+#
+#--echo Get max applied epochs from a server on each cluster
+#--connection slave
+#let $max_applied_cluster_a_epoch = query_get_value("SELECT MAX(epoch) AS epoch FROM mysql.ndb_apply_status WHERE server_id IN (1,3)", epoch, 1);
+#--connection master
+#let $max_applied_cluster_b_epoch = query_get_value("SELECT MAX(epoch) AS epoch FROM mysql.ndb_apply_status WHERE server_id IN (2,4)", epoch, 1);
+#
+#--echo Get corresponding Binlog filename + pos from new Master servers
+#--connection master1
+#eval select * from mysql.ndb_binlog_index where epoch > $max_applied_cluster_a_epoch ;
+#let $cluster_a_master_log_file = query_get_value("SELECT SUBSTRING_INDEX(File, '/', -1) as File from mysql.ndb_binlog_index WHERE epoch >= $max_applied_cluster_a_epoch", File, 1);
+#let $cluster_a_master_log_pos = query_get_value("SELECT Position from mysql.ndb_binlog_index WHERE epoch >= $max_applied_cluster_a_epoch", Position, 1);
+#--connection slave
+#eval select * from mysql.ndb_binlog_index where epoch > $max_applied_cluster_b_epoch;
+#let $cluster_b_master_log_file = query_get_value("SELECT SUBSTRING_INDEX(File, '/', -1) as File from mysql.ndb_binlog_index WHERE epoch >= $max_applied_cluster_b_epoch", File, 1);
+#let $cluster_b_master_log_pos = query_get_value("SELECT Position from mysql.ndb_binlog_index WHERE epoch >= $max_applied_cluster_b_epoch", Position, 1);
+#--echo Now change new Slave servers to new Master file + pos
+#--connection master
+#--echo Changing master to $cluster_b_master_log_file, $cluster_b_master_log_pos
+#eval CHANGE MASTER TO MASTER_LOG_FILE="$cluster_b_master_log_file", MASTER_LOG_POS=$cluster_b_master_log_pos;
+#--connection slave1
+#--echo Changing master to $cluster_a_master_log_file, $cluster_a_master_log_pos
+#eval CHANGE MASTER TO MASTER_LOG_FILE="$cluster_a_master_log_file", MASTER_LOG_POS=$cluster_a_master_log_pos;
+
+--echo Get current master status on Cluster A new master (next pos in Binlog)
+--connection master1
+--echo Master1 (3)
+--echo Flush logs to ensure any pending update (e.g. reflected apply_status write row)
+--echo is skipped over.
+flush logs;
+let $cluster_a_master_log_file = query_get_value("SHOW MASTER STATUS", "File", 1);
+let $cluster_a_master_log_pos = query_get_value("SHOW MASTER STATUS", "Position", 1);
+--echo Setup slave on Cluster B to use it
+--connection slave1
+--echo Slave1 (4)
+--disable_query_log
+eval CHANGE MASTER TO MASTER_LOG_FILE="$cluster_a_master_log_file", MASTER_LOG_POS=$cluster_a_master_log_pos;
+--enable_query_log
+
+--echo Get current master status on Cluster B new master (next pos in Binlog)
+--connection slave
+--echo Slave (2)
+--echo  Flush logs to ensure any pending update (e.g. reflected apply_status write row)
+--echo is skipped over.
+flush logs;
+let $cluster_b_master_log_file = query_get_value("SHOW MASTER STATUS", "File", 1);
+let $cluster_b_master_log_pos = query_get_value("SHOW MASTER STATUS", "Position", 1);
+--echo Setup slave on Cluster A to use it
+--connection master
+--echo Master (1)
+--disable_query_log
+eval CHANGE MASTER TO MASTER_LOG_FILE="$cluster_b_master_log_file", MASTER_LOG_POS=$cluster_b_master_log_pos;
+--enable_query_log
+
+--connection master
+--echo Master (1)
+--echo Show that Cluster A Slave server (old master) has no Max replicated epoch before receiving data
+select variable_name, variable_value from information_schema.global_status
+  where variable_name='Ndb_slave_max_replicated_epoch';
+
+--connection master1
+--echo Master1 (3)
+--echo Cluster A Master server (old slave) has old Max replicated epoch
+select
+    count(1)
+  from
+    information_schema.global_status,
+    mysql.ndb_apply_status
+  where
+    server_id = 1
+    and
+    variable_name='Ndb_slave_max_replicated_epoch'
+    and
+    variable_value = epoch;
+
+--echo Now start slaves up
+--source include/rpl_start_slaves.inc
+
+--echo Show that applying something from Cluster B causes the
+--echo old Max Rep Epoch to be loaded from ndb_apply_status
+--echo There is no new Max Rep Epoch from Cluster A as it has not changed
+--echo anything yet
+
+--connection slave
+--echo Slave (2)
+insert into test.t1 values (3, "From the Sea");
+
+--echo Allow to propagate to Cluster A
+--sync_slave_with_master master
+
+--connection master
+--echo Master (1)
+--echo New Slave server on Cluster A has loaded old Max-Replicated-Epoch
+select server_id from mysql.ndb_apply_status order by server_id;
+select
+    count(1)
+  from
+    information_schema.global_status,
+    mysql.ndb_apply_status
+  where
+    server_id = 1
+    and
+    variable_name='Ndb_slave_max_replicated_epoch'
+    and
+    variable_value = epoch;
+
+--echo Now make a new Cluster A change and see that the Max Replicated Epoch advances
+--echo once it has propagated
+
+--connection master1
+--echo Master1 (3)
+insert into test.t1 values (4, "Brooke");
+
+--echo Propagate to Cluster B
+--sync_slave_with_master slave1
+
+--echo Make change on Cluster B to allow waiting for reverse propagation
+--connection slave
+--echo Slave (2)
+insert into test.t1 values (5, "Rupert");
+
+--echo Wait for propagation back to Cluster A
+--sync_slave_with_master master
+
+--connection master
+--echo Master (1)
+--echo Show that Cluster A now has 2 different server_id entries in ndb_apply_status
+--echo Those from the new master (server_id 3) are highest.
+select server_id from mysql.ndb_apply_status order by server_id;
+select
+    count(1)
+  from
+    information_schema.global_status,
+    mysql.ndb_apply_status
+  where
+    server_id = 3
+    and
+    variable_name='Ndb_slave_max_replicated_epoch'
+    and
+    variable_value = epoch;
+
+let $max_epoch = query_get_value("select max(epoch) as epoch from mysql.ndb_apply_status where server_id in (1,3)","epoch", 1);
+--disable_query_log
+# We have to constrain the search to master server ids 1,3 in case the 
+# Slave happens to have similar epoch values
+eval select server_id as local_server_with_max_epoch from mysql.ndb_apply_status where epoch=$max_epoch and server_id in (1,3);
+--enable_query_log
+
+--echo Done
+
+--connection master1
+drop table t1;
+--sync_slave_with_master slave1
+
+--source suite/ndb_rpl/ndb_master-slave_2ch_end.inc
+
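
  Condensed, the channel cutover performed above on each new master/slave pair
  is: flush and read the next binlog position on the new master, then point the
  new slave at it. <file> and <position> are placeholders here; the real test
  reads them via query_get_value from SHOW MASTER STATUS and then restarts
  replication through include/rpl_start_slaves.inc:

    -- on the new master for the channel
    FLUSH LOGS;          -- skip any pending update (e.g. reflected apply_status write)
    SHOW MASTER STATUS;  -- note <file> and <position>

    -- on the new slave for the channel
    CHANGE MASTER TO MASTER_LOG_FILE='<file>', MASTER_LOG_POS=<position>;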

=== added file 'mysql-test/suite/ndb_rpl/t/ndb_rpl_init_rep_status.test'
--- a/mysql-test/suite/ndb_rpl/t/ndb_rpl_init_rep_status.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_init_rep_status.test	2011-06-23 14:50:59 +0000
@@ -0,0 +1,89 @@
+--source include/have_ndb.inc
+--source include/have_binlog_format_mixed_or_row.inc
+--source suite/ndb_rpl/ndb_master-slave.inc
+
+# Test Slave initialisation of Ndb_slave_max_replicated_epoch status var
+
+--connection slave
+reset master;
+stop slave;
+
+--connection master
+--echo Generate something in the Master's Binlog
+use test;
+create table t1 (a int primary key, b int) engine=ndb;
+
+insert into t1 values (1,1);
+
+--connection slave
+--echo Initial state
+select * from mysql.ndb_apply_status;
+select variable_value from information_schema.global_status
+  where variable_name like '%Ndb_slave_max_replicated_epoch%';
+select @slave_server_id:=(variable_value+0) from information_schema.global_variables
+  where variable_name like 'server_id';
+
+--echo Default, no data, max replicated epoch will be 0.
+reset slave;
+start slave;
+--connection master
+--sync_slave_with_master
+--connection slave
+--replace_column 3 # 4 # 5 #
+select server_id from mysql.ndb_apply_status order by server_id;
+select variable_value from information_schema.global_status
+  where variable_name like 'Ndb_slave_max_replicated_epoch';
+
+--echo Default, load of own serverid from ndb_apply_status, should be 111
+drop table test.t1;
+stop slave;
+reset slave;
+insert into mysql.ndb_apply_status values (@slave_server_id, 111, 'Fictional log', 222, 333);
+start slave;
+--connection master
+--sync_slave_with_master
+--connection slave
+--replace_column 3 # 4 # 5 #
+select server_id from mysql.ndb_apply_status order by server_id;
+select variable_value from information_schema.global_status
+  where variable_name like 'Ndb_slave_max_replicated_epoch';
+
+drop table test.t1;
+
+--echo Check that reset slave resets Ndb_slave_max_replicated_epoch
+stop slave;
+select variable_value from information_schema.global_status
+  where variable_name like 'Ndb_slave_max_replicated_epoch';
+reset slave;
+select variable_value from information_schema.global_status
+  where variable_name like 'Ndb_slave_max_replicated_epoch';
+
+--echo Multiple-channel, load highest of configured serverids, should be 222
+set @other_local_server_id=@slave_server_id+1;
+set @other_remote_server_id=@slave_server_id+2;
+insert into mysql.ndb_apply_status values (@slave_server_id, 111, 'Fictional log', 222, 333);
+insert into mysql.ndb_apply_status values (@other_local_server_id, 222, 'Fictional log', 222, 333);
+insert into mysql.ndb_apply_status values (@other_remote_server_id, 444, 'Fictional log', 222, 333);
+
+let $local_server_ids = `select @other_local_server_id`;
+
+--eval CHANGE MASTER TO IGNORE_SERVER_IDS=($local_server_ids);
+start slave;
+--connection master
+--sync_slave_with_master
+--connection slave
+--replace_column 3 # 4 # 5 #
+select server_id from mysql.ndb_apply_status order by server_id;
+select variable_value from information_schema.global_status
+  where variable_name like 'Ndb_slave_max_replicated_epoch';
+
+# Clean up
+stop slave;
+CHANGE MASTER TO IGNORE_SERVER_IDS= ();
+start slave;
+--connection master
+drop table test.t1;
+sync_slave_with_master;
+
+--source include/rpl_end.inc
+
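
  The "highest of configured serverids" case above (expected 222) mirrors the
  scan added in ha_ndbcluster::check_slave_state(): at slave start, rows in
  ndb_apply_status whose server_id is our own or is listed in
  IGNORE_SERVER_IDS count as local, and the maximum of their epochs seeds
  Ndb_slave_max_replicated_epoch. As an SQL sketch of that rule (the real code
  scans the NDB table via NdbApi; the IN list stands in for the channel's
  IGNORE_SERVER_IDS):

    select max(epoch)
      from mysql.ndb_apply_status
      where server_id = @@server_id
         or server_id in (3);  -- stand-in for IGNORE_SERVER_IDS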

=== modified file 'sql/Makefile.am'
--- a/sql/Makefile.am	2011-06-15 10:37:56 +0000
+++ b/sql/Makefile.am	2011-06-23 14:50:59 +0000
@@ -63,6 +63,7 @@ noinst_HEADERS =	item.h item_func.h item
 			ha_ndbcluster_lock_ext.h ha_ndbinfo.h \
 			ha_ndbcluster_glue.h \
 			ha_ndb_index_stat.h \
+                        ndb_mi.h \
 			ha_partition.h rpl_constants.h \
 			debug_sync.h \
 			opt_range.h protocol.h rpl_tblmap.h rpl_utility.h \
@@ -140,6 +141,8 @@ libndb_la_SOURCES=	ha_ndbcluster.cc \
 			ha_ndb_index_stat.cc \
 			ha_ndbinfo.cc
 
+libndb_extra_DIST=      ndb_mi.cc
+
 gen_lex_hash_SOURCES =	gen_lex_hash.cc
 gen_lex_hash_LDFLAGS =  @NOINST_LDFLAGS@
 
@@ -159,7 +162,7 @@ BUILT_SOURCES =		$(BUILT_MAINT_SRC) lex_
 EXTRA_DIST =		udf_example.c udf_example.def $(BUILT_MAINT_SRC) \
 			nt_servc.cc nt_servc.h \
 			message.mc  message.h message.rc MSG00001.bin \
-			CMakeLists.txt
+			CMakeLists.txt $(libndb_extra_DIST)
 
 CLEANFILES =        	lex_hash.h sql_yacc.output link_sources
 DISTCLEANFILES =        $(EXTRA_PROGRAMS)

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-06-21 13:50:33 +0000
+++ b/sql/ha_ndbcluster.cc	2011-06-23 14:50:59 +0000
@@ -30,8 +30,6 @@
 
 #include "ha_ndbcluster_glue.h"
 
-#include "rpl_mi.h"
-
 #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
 #include "ha_ndbcluster.h"
 #include <ndbapi/NdbApi.hpp>
@@ -46,6 +44,7 @@
 
 #include <mysql/plugin.h>
 #include <ndb_version.h>
+#include "ndb_mi.h"
 
 #ifdef ndb_dynamite
 #undef assert
@@ -459,7 +458,11 @@ update_slave_api_stats(Ndb* ndb)
 st_ndb_slave_state g_ndb_slave_state;
 
 st_ndb_slave_state::st_ndb_slave_state()
-  : current_conflict_defined_op_count(0)
+  : current_conflict_defined_op_count(0),
+    current_master_server_epoch(0),
+    current_max_rep_epoch(0),
+    max_rep_epoch(0),
+    sql_run_id(~Uint32(0))
 {
   memset(current_violation_count, 0, sizeof(current_violation_count));
   memset(total_violation_count, 0, sizeof(total_violation_count));
@@ -471,6 +474,7 @@ st_ndb_slave_state::atTransactionAbort()
   /* Reset current-transaction counters + state */
   memset(current_violation_count, 0, sizeof(current_violation_count));
   current_conflict_defined_op_count = 0;
+  current_max_rep_epoch = 0;
 }
 
 void
@@ -485,8 +489,196 @@ st_ndb_slave_state::atTransactionCommit(
     current_violation_count[i] = 0;
   }
   current_conflict_defined_op_count = 0;
+  if (current_max_rep_epoch > max_rep_epoch)
+  {
+    DBUG_PRINT("info", ("Max replicated epoch increases from %llu to %llu",
+                        max_rep_epoch,
+                        current_max_rep_epoch));
+
+    max_rep_epoch = current_max_rep_epoch;
+  }
+  current_max_rep_epoch = 0;
+}
+
+void
+st_ndb_slave_state::atApplyStatusWrite(Uint32 master_server_id,
+                                       Uint32 row_server_id,
+                                       Uint64 row_epoch,
+                                       bool is_row_server_id_local)
+{
+  if (row_server_id == master_server_id)
+  {
+    /*
+       WRITE_ROW to ndb_apply_status injected by MySQLD
+       immediately upstream of us.
+       Record epoch
+    */
+    current_master_server_epoch = row_epoch;
+    assert(! is_row_server_id_local);
+  }
+  else if (is_row_server_id_local)
+  {
+    DBUG_PRINT("info", ("Recording application of local server %u epoch %llu "
+                        " which is %s.",
+                        row_server_id, row_epoch,
+                        (row_epoch > g_ndb_slave_state.current_max_rep_epoch)?
+                        " new highest." : " older than previously applied"));
+    if (row_epoch > current_max_rep_epoch)
+    {
+      /*
+        Store new highest epoch in thdvar.  If we commit successfully
+        then this can become the new global max
+      */
+      current_max_rep_epoch = row_epoch;
+    }
+  }
+}
+
+void
+st_ndb_slave_state::atResetSlave()
+{
+  /* Reset the Maximum replicated epoch vars
+   * on slave reset
+   * No need to touch the sql_run_id as that
+   * will increment if the slave is started
+   * again.
+   */
+  current_max_rep_epoch = 0;
+  max_rep_epoch = 0;
+}
+
+int
+ha_ndbcluster::check_slave_state(THD* thd)
+{
+  DBUG_ENTER("check_slave_state");
+
+#ifdef HAVE_NDB_BINLOG
+  if (!thd->slave_thread)
+    DBUG_RETURN(0);
+
+  Uint32 runId = ndb_mi_get_slave_run_id();
+  DBUG_PRINT("info", ("Slave SQL thread run id is %u",
+                      runId));
+  if (unlikely(runId != g_ndb_slave_state.sql_run_id))
+  {
+    DBUG_PRINT("info", ("Slave run id changed from %u, "
+                        "treating as Slave restart",
+                        g_ndb_slave_state.sql_run_id));
+    g_ndb_slave_state.sql_run_id = runId;
+
+    /* Always try to load the Max Replicated Epoch info
+     * first.
+     * Could be made optional if it's a problem
+     */
+    {
+      /*
+         Load highest replicated epoch from a local
+         MySQLD from the cluster.
+      */
+      DBUG_PRINT("info", ("Loading applied epoch information from %s",
+                          NDB_APPLY_TABLE));
+      NdbError ndb_error;
+      Uint64 highestAppliedEpoch = 0;
+      do
+      {
+        Ndb* ndb= check_ndb_in_thd(thd);
+        NDBDICT* dict= ndb->getDictionary();
+        NdbTransaction* trans= NULL;
+        ndb->setDatabaseName(NDB_REP_DB);
+        Ndb_table_guard ndbtab_g(dict, NDB_APPLY_TABLE);
+
+        const NDBTAB* ndbtab= ndbtab_g.get_table();
+        if (unlikely(ndbtab == NULL))
+        {
+          ndb_error = dict->getNdbError();
+          break;
+        }
+
+        trans= ndb->startTransaction();
+        if (unlikely(trans == NULL))
+        {
+          ndb_error = ndb->getNdbError();
+          break;
+        }
+
+        do
+        {
+          NdbScanOperation* sop = trans->getNdbScanOperation(ndbtab);
+          if (unlikely(sop == NULL))
+          {
+            ndb_error = trans->getNdbError();
+            break;
+          }
+
+          const Uint32 server_id_col_num = 0;
+          const Uint32 epoch_col_num = 1;
+          NdbRecAttr* server_id_ra;
+          NdbRecAttr* epoch_ra;
+
+          if (unlikely((sop->readTuples(NdbOperation::LM_CommittedRead) != 0)   ||
+                       ((server_id_ra = sop->getValue(server_id_col_num)) == NULL)  ||
+                       ((epoch_ra = sop->getValue(epoch_col_num)) == NULL)))
+          {
+            ndb_error = sop->getNdbError();
+            break;
+          }
+
+          if (trans->execute(NdbTransaction::Commit))
+          {
+            ndb_error = trans->getNdbError();
+            break;
+          }
+
+          int rc = 0;
+          while (0 == (rc= sop->nextResult(true)))
+          {
+            Uint32 serverid = server_id_ra->u_32_value();
+            Uint64 epoch = epoch_ra->u_64_value();
+
+            if ((serverid == ::server_id) ||
+                (ndb_mi_get_ignore_server_id(serverid)))
+            {
+              highestAppliedEpoch = MAX(epoch, highestAppliedEpoch);
+            }
+          }
+
+          if (rc != 1)
+          {
+            ndb_error = sop->getNdbError();
+            break;
+          }
+        } while (0);
+
+        trans->close();
+      } while(0);
+
+      if (ndb_error.code != 0)
+      {
+        sql_print_warning("NDB Slave : Could not determine maximum replicated epoch from %s.%s "
+                          "at Slave start, error %u %s",
+                          NDB_REP_DB,
+                          NDB_APPLY_TABLE,
+                          ndb_error.code, ndb_error.message);
+      }
+
+      /*
+        Set Global status variable to the Highest Applied Epoch from
+        the Cluster DB.
+        If none was found, this will be zero.
+      */
+      g_ndb_slave_state.max_rep_epoch = highestAppliedEpoch;
+      sql_print_information("NDB Slave : MaxReplicatedEpoch set to %llu (%u/%u) at Slave start",
+                            g_ndb_slave_state.max_rep_epoch,
+                            (Uint32)(g_ndb_slave_state.max_rep_epoch >> 32),
+                            (Uint32)(g_ndb_slave_state.max_rep_epoch & 0xffffffff));
+    } // Load highest replicated epoch
+  } // New Slave SQL thread run id
+#endif
+
+  DBUG_RETURN(0);
 }
 
+
 static int update_status_variables(Thd_ndb *thd_ndb,
                                    st_ndb_status *ns,
                                    Ndb_cluster_connection *c)
@@ -616,6 +808,7 @@ SHOW_VAR ndb_status_injector_variables[]
 
 SHOW_VAR ndb_status_slave_variables[]= {
   NDBAPI_COUNTERS("_slave", &g_slave_api_client_stats),
+  {"slave_max_replicated_epoch", (char*) &g_ndb_slave_state.max_rep_epoch, SHOW_LONGLONG},
   {NullS, NullS, SHOW_LONG}
 };
 
@@ -708,7 +901,8 @@ static int ndb_to_mysql_error(const NdbE
 }
 
 #ifdef HAVE_NDB_BINLOG
-extern Master_info *active_mi;
+#include "ndb_mi.cc"
+
 /* Write conflicting row to exceptions table. */
 static int write_conflict_row(NDB_SHARE *share,
                               NdbTransaction *trans,
@@ -736,8 +930,8 @@ static int write_conflict_row(NDB_SHARE
   }
   {
     uint32 server_id= (uint32)::server_id;
-    uint32 master_server_id= (uint32)active_mi->master_id;
-    uint64 master_epoch= (uint64)active_mi->master_epoch;
+    uint32 master_server_id= (uint32) ndb_mi_get_master_server_id();
+    uint64 master_epoch= (uint64) g_ndb_slave_state.current_master_server_epoch;
     uint32 count= (uint32)++(cfn_share->m_count);
     if (ex_op->setValue((Uint32)0, (const char *)&(server_id)) ||
         ex_op->setValue((Uint32)1, (const char *)&(master_server_id)) ||
@@ -3910,7 +4104,7 @@ bool ha_ndbcluster::isManualBinlogExec(T
 #ifndef EMBEDDED_LIBRARY
   return thd ? 
     ( thd->rli_fake? 
-      thd->rli_fake->get_flag(Relay_log_info::IN_STMT) : false)
+      ndb_mi_get_in_relay_log_statement(thd->rli_fake) : false)
     : false;
 #else
   /* For Embedded library, we can't determine if we're
@@ -4166,25 +4360,42 @@ handle_conflict_op_error(Thd_ndb* thd_nd
 #endif /* HAVE_NDB_BINLOG */
 
 
+#ifdef HAVE_NDB_BINLOG
+/*
+  is_serverid_local
+*/
+static bool is_serverid_local(Uint32 serverid)
+{
+  /*
+     If it's not our serverid, check the
+     IGNORE_SERVER_IDS setting to check if
+     it's local.
+  */
+  return ((serverid == ::server_id) ||
+          ndb_mi_get_ignore_server_id(serverid));
+}
+#endif
+
 int ha_ndbcluster::write_row(uchar *record)
 {
   DBUG_ENTER("ha_ndbcluster::write_row");
 #ifdef HAVE_NDB_BINLOG
   if (m_share == ndb_apply_status_share && table->in_use->slave_thread)
   {
-    uint32 sid, master_server_id= active_mi->master_id;
-    memcpy(&sid, table->field[0]->ptr + (record - table->record[0]), sizeof(sid));
-    if (sid == master_server_id)
-    {
-      uint64 master_epoch;
-      memcpy(&master_epoch, table->field[1]->ptr + (record - table->record[0]),
-             sizeof(master_epoch));
-      active_mi->master_epoch= master_epoch;
-    }
+    uint32 row_server_id, master_server_id= ndb_mi_get_master_server_id();
+    uint64 row_epoch;
+    memcpy(&row_server_id, table->field[0]->ptr + (record - table->record[0]),
+           sizeof(row_server_id));
+    memcpy(&row_epoch, table->field[1]->ptr + (record - table->record[0]),
+           sizeof(row_epoch));
+    g_ndb_slave_state.atApplyStatusWrite(master_server_id,
+                                         row_server_id,
+                                         row_epoch,
+                                         is_serverid_local(row_server_id));
   }
 #endif /* HAVE_NDB_BINLOG */
   DBUG_RETURN(ndb_write_row(record, FALSE, FALSE));
-}
+};
 
 /**
   Insert one record into NDB
@@ -4204,6 +4415,10 @@ int ha_ndbcluster::ndb_write_row(uchar *
   Uint32 num_sets= 0;
   DBUG_ENTER("ha_ndbcluster::ndb_write_row");
 
+  error = check_slave_state(thd);
+  if (unlikely(error))
+    DBUG_RETURN(error);
+
   has_auto_increment= (table->next_number_field && record == table->record[0]);
 
   if (has_auto_increment && table_share->primary_key != MAX_KEY) 
@@ -4748,6 +4963,11 @@ int ha_ndbcluster::ndb_update_row(const
 
   DBUG_ENTER("ndb_update_row");
   DBUG_ASSERT(trans);
+
+  error = check_slave_state(thd);
+  if (unlikely(error))
+    DBUG_RETURN(error);
+
   /*
    * If IGNORE the ignore constraint violations on primary and unique keys,
    * but check that it is not part of INSERT ... ON DUPLICATE KEY UPDATE
@@ -5082,6 +5302,10 @@ int ha_ndbcluster::ndb_delete_row(const
   DBUG_ENTER("ndb_delete_row");
   DBUG_ASSERT(trans);
 
+  error = check_slave_state(thd);
+  if (unlikely(error))
+    DBUG_RETURN(error);
+
   ha_statistic_increment(&SSV::ha_delete_count);
   m_rows_changed++;
 
@@ -6543,30 +6767,15 @@ static int ndbcluster_update_apply_statu
     r|= op->setValue(1u, (Uint64)0);
     DBUG_ASSERT(r == 0);
   }
-#if MYSQL_VERSION_ID < 50600
-  const char* group_master_log_name =
-    active_mi->rli.group_master_log_name;
-  const Uint64 group_master_log_pos =
-    (Uint64)active_mi->rli.group_master_log_pos;
-  const Uint64 future_event_relay_log_pos =
-    (Uint64)active_mi->rli.future_event_relay_log_pos;
-  const Uint64 group_relay_log_pos =
-    (Uint64)active_mi->rli.group_relay_log_pos;
-#else
-  /*
-    - Master_info's rli member returns Relay_log_info*
-    - Relay_log_info members are protected and must be accessed
-      using accessor functions
-  */
   const char* group_master_log_name =
-    active_mi->rli->get_group_master_log_name();
+    ndb_mi_get_group_master_log_name();
   const Uint64 group_master_log_pos =
-    (Uint64)active_mi->rli->get_group_master_log_pos();
+    ndb_mi_get_group_master_log_pos();
   const Uint64 future_event_relay_log_pos =
-    (Uint64)active_mi->rli->get_future_event_relay_log_pos();
+    ndb_mi_get_future_event_relay_log_pos();
   const Uint64 group_relay_log_pos =
-    (Uint64)active_mi->rli->get_group_relay_log_pos();
-#endif
+    ndb_mi_get_group_relay_log_pos();
+
   // log_name
   char tmp_buf[FN_REFLEN];
   ndb_pack_varchar(ndbtab->getColumn(2u), tmp_buf,

=== modified file 'sql/ha_ndbcluster.h'
--- a/sql/ha_ndbcluster.h	2011-06-17 07:14:20 +0000
+++ b/sql/ha_ndbcluster.h	2011-06-23 14:50:59 +0000
@@ -345,13 +345,23 @@ struct st_ndb_slave_state
   /* Counter values for current slave transaction */
   Uint32 current_conflict_defined_op_count;
   Uint32 current_violation_count[CFT_NUMBER_OF_CFTS];
+  Uint64 current_master_server_epoch;
+  Uint64 current_max_rep_epoch;
 
   /* Cumulative counter values */
   Uint64 total_violation_count[CFT_NUMBER_OF_CFTS];
+  Uint64 max_rep_epoch;
+  Uint32 sql_run_id;
 
   /* Methods */
   void atTransactionCommit();
   void atTransactionAbort();
+  void atResetSlave();
+
+  void atApplyStatusWrite(Uint32 master_server_id,
+                          Uint32 row_server_id,
+                          Uint64 row_epoch,
+                          bool is_row_server_id_local);
 
   st_ndb_slave_state();
 };
@@ -886,6 +896,8 @@ private:
   int start_statement(THD *thd, Thd_ndb *thd_ndb, uint table_count);
   int init_handler_for_statement(THD *thd);
 
+  int check_slave_state(THD* thd);
+
   Thd_ndb *m_thd_ndb;
   NdbScanOperation *m_active_cursor;
   const NdbDictionary::Table *m_table;

=== modified file 'sql/ha_ndbcluster_binlog.cc'
--- a/sql/ha_ndbcluster_binlog.cc	2011-06-16 18:16:01 +0000
+++ b/sql/ha_ndbcluster_binlog.cc	2011-06-23 14:50:59 +0000
@@ -47,6 +47,7 @@ extern my_bool opt_ndb_log_updated_only;
 extern my_bool opt_ndb_log_binlog_index;
 extern my_bool opt_ndb_log_apply_status;
 extern ulong opt_ndb_extra_logging;
+extern st_ndb_slave_state g_ndb_slave_state;
 
 bool ndb_log_empty_epochs(void);
 
@@ -892,6 +893,9 @@ static void ndbcluster_reset_slave(THD *
     thd_stmt_da(thd)->reset_diagnostics_area();
   }
 
+  /* Reset Ndb_slave_max_replicated_epoch */
+  g_ndb_slave_state.atResetSlave();
+
   DBUG_VOID_RETURN;
 }
 

=== added file 'sql/ndb_mi.cc'
--- a/sql/ndb_mi.cc	1970-01-01 00:00:00 +0000
+++ b/sql/ndb_mi.cc	2011-06-23 14:50:59 +0000
@@ -0,0 +1,78 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#include "ndb_mi.h"
+#include "rpl_mi.h"
+
+extern Master_info *active_mi;
+
+
+Uint32 ndb_mi_get_master_server_id()
+{
+  return (Uint32) active_mi->master_id;
+}
+
+const char* ndb_mi_get_group_master_log_name()
+{
+#if MYSQL_VERSION_ID < 50600
+  return active_mi->rli.group_master_log_name;
+#else
+  return active_mi->rli->get_group_master_log_name();
+#endif
+}
+
+Uint64 ndb_mi_get_group_master_log_pos()
+{
+#if MYSQL_VERSION_ID < 50600
+  return (Uint64) active_mi->rli.group_master_log_pos;
+#else
+  return (Uint64) active_mi->rli->get_group_master_log_pos();
+#endif
+}
+
+Uint64 ndb_mi_get_future_event_relay_log_pos()
+{
+#if MYSQL_VERSION_ID < 50600
+  return (Uint64) active_mi->rli.future_event_relay_log_pos;
+#else
+  return (Uint64) active_mi->rli->get_future_event_relay_log_pos();
+#endif
+}
+
+Uint64 ndb_mi_get_group_relay_log_pos()
+{
+#if MYSQL_VERSION_ID < 50600
+  return (Uint64) active_mi->rli.group_relay_log_pos;
+#else
+  return (Uint64) active_mi->rli->get_group_relay_log_pos();
+#endif
+}
+
+bool ndb_mi_get_ignore_server_id(Uint32 server_id)
+{
+  return (active_mi->shall_ignore_server_id(server_id) != 0);
+}
+
+Uint32 ndb_mi_get_slave_run_id()
+{
+  return active_mi->rli.slave_run_id;
+}
+
+bool ndb_mi_get_in_relay_log_statement(Relay_log_info* rli)
+{
+  return (rli->get_flag(Relay_log_info::IN_STMT) != 0);
+}

=== added file 'sql/ndb_mi.h'
--- a/sql/ndb_mi.h	1970-01-01 00:00:00 +0000
+++ b/sql/ndb_mi.h	2011-06-23 14:50:59 +0000
@@ -0,0 +1,46 @@
+/*
+   Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; version 2 of the License.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+*/
+
+#ifndef NDB_MI_H
+#define NDB_MI_H
+
+/*
+   This file defines methods for interacting with the
+   Master Info structure on a Slave MySQLD.
+   These methods are only valid when running in an
+   active slave thread.
+*/
+
+/*
+  Accessors
+*/
+Uint32 ndb_mi_get_master_server_id();
+const char* ndb_mi_get_group_master_log_name();
+Uint64 ndb_mi_get_group_master_log_pos();
+Uint64 ndb_mi_get_future_event_relay_log_pos();
+Uint64 ndb_mi_get_group_relay_log_pos();
+bool ndb_mi_get_ignore_server_id(Uint32 server_id);
+Uint32 ndb_mi_get_slave_run_id();
+
+
+/*
+   Relay log info related functions
+*/
+bool ndb_mi_get_in_relay_log_statement(class Relay_log_info* rli);
+
+// #ifndef NDB_MI_H
+#endif
