List: Commits
From: Dao-Gang.Qu  Date: May 25 2010 3:36am
Subject: bzr commit into mysql-5.1-rep+2 branch (Dao-Gang.Qu:3185) Bug#49931 Bug#49932
#At file:///home/daogangqu/mysql/bzrwork/bug49932/mysql-5.1-rep%2B2/ based on revid:li-bing.song@stripped

 3185 Dao-Gang.Qu@stripped	2010-05-25
      Bug #49931  	Incorrect type in read_log_event error
      Bug #49932  	mysqlbinlog max_allowed_packet hard coded to 1GB
      
      The MySQL server can create a row event larger than 1GB and transmit
      it successfully, but when reading a log event back from the binlog
      the slave I/O thread and the slave SQL thread check the event size
      against 'max_allowed_packet', whose maximum value is 1073741824
      (1GB). A row event larger than that limit therefore fails with an
      'Event too large' or 'Packet too large' error.
      
      To allow row events larger than 1GB to be replicated, the slave I/O
      thread and the slave SQL thread now check the event size against the
      larger of 'max_allowed_packet' and 'binlog-row-event-max-size' when
      reading a log event from the binlog. In addition, mysqlbinlog gets a
      new 'binlog-row-event-max-size' option so it can read large row
      events.
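      
      A minimal sketch of the size check described above (illustration
      only; the names follow the sql/log_event.cc hunk below, and the
      surrounding error handling is omitted):
      
        /* Accept the event if it fits within the larger of the two limits,
           padded by MAX_LOG_EVENT_HEADER for the replication event header. */
        ulong limit= max(current_thd->variables.max_allowed_packet,
                         opt_binlog_rows_event_max_size + MAX_LOG_EVENT_HEADER);
        if (data_len < LOG_EVENT_MINIMAL_HEADER_LEN || data_len > limit)
          result= (data_len < LOG_EVENT_MINIMAL_HEADER_LEN) ?
                  LOG_READ_BOGUS : LOG_READ_TOO_LARGE;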
     @ client/mysqlbinlog.cc
        Added the new 'binlog-row-event-max-size' mysqlbinlog option for
        reading large row events (see the example invocation after this
        list).
     @ mysql-test/suite/rpl/r/rpl_log_event.result
        Test result for Bug#49931 and Bug#49932.
     @ mysql-test/suite/rpl/t/rpl_log_event.test
        Added a test file to verify that the slave I/O thread and the slave
        SQL thread check the larger of 'max_allowed_packet' and
        'binlog-row-event-max-size' when reading log events from the binlog,
        and that the added 'binlog-row-event-max-size' mysqlbinlog option
        works as expected.
     @ sql/log_event.cc
        Made the slave I/O thread and the slave SQL thread check the larger
        of 'max_allowed_packet' and 'binlog-row-event-max-size' when reading
        log events from the binlog.
     @ sql/slave.cc
        Set max_packet_size to the larger of 'max_packet_size' and
        'opt_binlog_rows_event_max_size' so that row events larger than
        1 GB can be replicated.
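      
      A hypothetical mysqlbinlog invocation using the new option (the size,
      binlog file name, and output file are placeholders; the test below
      passes --binlog-row-event-max-size=2048):
      
        mysqlbinlog --binlog-row-event-max-size=2147483648 \
            master-bin.000001 > large_row_events.sql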

    added:
      mysql-test/suite/rpl/r/rpl_log_event.result
      mysql-test/suite/rpl/t/rpl_log_event-master.opt
      mysql-test/suite/rpl/t/rpl_log_event-slave.opt
      mysql-test/suite/rpl/t/rpl_log_event.test
    modified:
      client/client_priv.h
      client/mysqlbinlog.cc
      sql/log_event.cc
      sql/slave.cc
=== modified file 'client/client_priv.h'
--- a/client/client_priv.h	2010-01-27 12:23:28 +0000
+++ b/client/client_priv.h	2010-05-25 03:36:24 +0000
@@ -91,5 +91,6 @@ enum options_client
   OPT_WRITE_BINLOG, OPT_DUMP_DATE,
   OPT_FIRST_SLAVE,
   OPT_ALL,
-  OPT_MAX_CLIENT_OPTION
+  OPT_MAX_CLIENT_OPTION,
+  OPT_BINLOG_ROWS_EVENT_MAX_SIZE
 };

=== modified file 'client/mysqlbinlog.cc'
--- a/client/mysqlbinlog.cc	2010-03-03 14:43:35 +0000
+++ b/client/mysqlbinlog.cc	2010-05-25 03:36:24 +0000
@@ -49,6 +49,7 @@ ulong server_id = 0;
 ulong bytes_sent = 0L, bytes_received = 0L;
 ulong mysqld_net_retry_count = 10L;
 ulong open_files_limit;
+ulong opt_binlog_rows_event_max_size;
 uint test_flags = 0; 
 static uint opt_protocol= 0;
 static FILE *result_file;
@@ -1160,6 +1161,16 @@ that may lead to an endless loop.",
    "Used to reserve file descriptors for use by this program.",
    (uchar**) &open_files_limit, (uchar**) &open_files_limit, 0, GET_ULONG,
    REQUIRED_ARG, MY_NFILE, 8, OS_FILE_LIMIT, 0, 1, 0},
+  {"binlog-row-event-max-size", OPT_BINLOG_ROWS_EVENT_MAX_SIZE,
+   "The maximum size of a row-based binary log event in bytes. Rows will be "
+   "grouped into events smaller than this size if possible. "
+   "The value has to be a multiple of 256.",
+   (uchar**) &opt_binlog_rows_event_max_size,
+   (uchar**) &opt_binlog_rows_event_max_size, 0,
+   GET_ULONG, REQUIRED_ARG,
+   /* def_value 4GB */ 4*1024L*1024L*1024L - 1, /* min_value */ 256,
+   /* max_value */ ULONG_MAX, /* sub_size */ 0,
+   /* block_size */ 256, /* app_type */ 0},
   {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
 };
 

=== added file 'mysql-test/suite/rpl/r/rpl_log_event.result'
--- a/mysql-test/suite/rpl/r/rpl_log_event.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl/r/rpl_log_event.result	2010-05-25 03:36:24 +0000
@@ -0,0 +1,29 @@
+stop slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+reset master;
+reset slave;
+drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9;
+start slave;
+CREATE TABLE t1 (a int not null auto_increment, data1 LONGBLOB,
+data2 LONGBLOB, PRIMARY KEY(a));
+INSERT INTO t1 (data1, data2) VALUES (repeat('a',1000), repeat('a', 1000));
+SELECT LENGTH(data1), LENGTH(data2) FROM t1 WHERE a = 1;
+LENGTH(data1)	LENGTH(data2)
+1000	1000
+FLUSH LOGS;
+# Test the added 'binlog-row-event-max-size' mysqlbinlog option works fine
+The size of the binlog with the row event: 3450
+INSERT INTO t1 (data1, data2) VALUES (repeat('a',1024), repeat('a', 1024));
+# On slave, test the row event data is replicated when the value of
+# the bigger one of max_allowed_packet and binlog_row_event_max_size
+# is larger than the required size.
+SELECT LENGTH(data1), LENGTH(data2) FROM t1 WHERE a = 1;
+LENGTH(data1)	LENGTH(data2)
+1000	1000
+# On slave, test the row event data is replicated when the value of
+# the bigger one of max_allowed_packet and binlog_row_event_max_size
+# is equal to the required size.
+SELECT LENGTH(data1), LENGTH(data2) FROM t1 WHERE a = 2;
+LENGTH(data1)	LENGTH(data2)
+1024	1024
+DROP TABLE t1;

=== added file 'mysql-test/suite/rpl/t/rpl_log_event-master.opt'
--- a/mysql-test/suite/rpl/t/rpl_log_event-master.opt	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl/t/rpl_log_event-master.opt	2010-05-25 03:36:24 +0000
@@ -0,0 +1 @@
+--max_allowed_packet=1024 --binlog-row-event-max-size=2048

=== added file 'mysql-test/suite/rpl/t/rpl_log_event-slave.opt'
--- a/mysql-test/suite/rpl/t/rpl_log_event-slave.opt	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl/t/rpl_log_event-slave.opt	2010-05-25 03:36:24 +0000
@@ -0,0 +1 @@
+--max_allowed_packet=1024 --binlog-row-event-max-size=2048 --slave-skip-error=1236

=== added file 'mysql-test/suite/rpl/t/rpl_log_event.test'
--- a/mysql-test/suite/rpl/t/rpl_log_event.test	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl/t/rpl_log_event.test	2010-05-25 03:36:24 +0000
@@ -0,0 +1,48 @@
+#
+# Bug #49931 and Bug #49932
+# This test verifies that the slave I/O thread and the slave SQL
+# thread check the larger of 'max_allowed_packet' and
+# 'binlog-row-event-max-size' when reading a log event from the
+# binlog, and that the added 'binlog-row-event-max-size'
+# mysqlbinlog option works as expected.
+# 
+
+source include/master-slave.inc;
+source include/have_binlog_format_row.inc;
+
+CREATE TABLE t1 (a int not null auto_increment, data1 LONGBLOB,
+                 data2 LONGBLOB, PRIMARY KEY(a));
+
+INSERT INTO t1 (data1, data2) VALUES (repeat('a',1000), repeat('a', 1000));
+
+SELECT LENGTH(data1), LENGTH(data2) FROM t1 WHERE a = 1;
+
+FLUSH LOGS;
+let $MYSQLD_DATADIR= `select @@datadir`;
+--echo # Test the added 'binlog-row-event-max-size' mysqlbinlog option works fine
+exec $MYSQL_BINLOG --binlog-row-event-max-size=2048  --start-position=378 --stop-position=2420 $MYSQLD_DATADIR/master-bin.000001 > $MYSQLTEST_VARDIR/tmp/binlog_row_event.binlog;
+
+--perl
+use File::stat;
+my $binlogpath = $ENV{'MYSQLTEST_VARDIR'}.'/tmp/binlog_row_event.binlog';
+my $filesize = stat($binlogpath)->size;
+print "The size of the binlog with the row event: $filesize\n";
+EOF
+
+INSERT INTO t1 (data1, data2) VALUES (repeat('a',1024), repeat('a', 1024));
+
+sync_slave_with_master;
+--echo # On slave, test the row event data is replicated when the value of
+--echo # the bigger one of max_allowed_packet and binlog_row_event_max_size
+--echo # is larger than the required size.
+SELECT LENGTH(data1), LENGTH(data2) FROM t1 WHERE a = 1;
+
+--echo # On slave, test the row event data is replicated when the value of
+--echo # the bigger one of max_allowed_packet and binlog_row_event_max_size
+--echo # is equal to the required size.
+SELECT LENGTH(data1), LENGTH(data2) FROM t1 WHERE a = 2;
+
+connection master;
+DROP TABLE t1;
+
+sync_slave_with_master;

=== modified file 'sql/log_event.cc'
--- a/sql/log_event.cc	2010-03-03 14:43:35 +0000
+++ b/sql/log_event.cc	2010-05-25 03:36:24 +0000
@@ -992,9 +992,10 @@ int Log_event::read_log_event(IO_CACHE* 
   }
   data_len= uint4korr(buf + EVENT_LEN_OFFSET);
   if (data_len < LOG_EVENT_MINIMAL_HEADER_LEN ||
-      data_len > current_thd->variables.max_allowed_packet)
+      data_len > max(current_thd->variables.max_allowed_packet,
+                     opt_binlog_rows_event_max_size + MAX_LOG_EVENT_HEADER))
   {
-    DBUG_PRINT("error",("data_len: %ld", data_len));
+    DBUG_PRINT("error",("data_len: %lu", data_len));
     result= ((data_len < LOG_EVENT_MINIMAL_HEADER_LEN) ? LOG_READ_BOGUS :
 	     LOG_READ_TOO_LARGE);
     goto end;
@@ -1088,7 +1089,7 @@ failed my_b_read"));
     */
     DBUG_RETURN(0);
   }
-  uint data_len = uint4korr(head + EVENT_LEN_OFFSET);
+  ulong data_len = uint4korr(head + EVENT_LEN_OFFSET);
   char *buf= 0;
   const char *error= 0;
   Log_event *res=  0;
@@ -1097,7 +1098,8 @@ failed my_b_read"));
   uint max_allowed_packet= thd ? thd->variables.max_allowed_packet : ~(ulong)0;
 #endif
 
-  if (data_len > max_allowed_packet)
+  if (data_len > max(max_allowed_packet,
+                     opt_binlog_rows_event_max_size + MAX_LOG_EVENT_HEADER))
   {
     error = "Event too big";
     goto err;
@@ -1131,7 +1133,7 @@ err:
   {
     DBUG_ASSERT(error != 0);
     sql_print_error("Error in Log_event::read_log_event(): "
-                    "'%s', data_len: %d, event_type: %d",
+                    "'%s', data_len: %lu, event_type: %d",
 		    error,data_len,head[EVENT_TYPE_OFFSET]);
     my_free(buf, MYF(MY_ALLOW_ZERO_PTR));
     /*

=== modified file 'sql/slave.cc'
--- a/sql/slave.cc	2010-02-12 23:30:44 +0000
+++ b/sql/slave.cc	2010-05-25 03:36:24 +0000
@@ -2693,10 +2693,15 @@ pthread_handler_t handle_slave_io(void *
 			  IO_RPL_LOG_NAME,
 			  llstr(mi->master_log_pos,llbuff));
   /*
+    Set max_packet_size to the larger of 'max_packet_size' and
+    'opt_binlog_rows_event_max_size' so that row events larger than
+    1 GB can be replicated.
     Adding MAX_LOG_EVENT_HEADER_LEN to the max_packet_size on the I/O
     thread, since a replication event can become this much larger than
     the corresponding packet (query) sent from client to master.
   */
+    thd->net.max_packet_size= max(thd->net.max_packet_size,
+                                  opt_binlog_rows_event_max_size);
     mysql->net.max_packet_size= thd->net.max_packet_size+= MAX_LOG_EVENT_HEADER;
   }
   else


Attachment: [text/bzr-bundle] bzr/dao-gang.qu@sun.com-20100525033624-bzy60esymnsnb8uw.bundle