List:Commits« Previous MessageNext Message »
From:guilhem Date:June 20 2006 8:14pm
Subject:bk commit into 5.0 tree (guilhem:1.2174) BUG#20188
View as plain text  
Below is the list of changes that have just been committed into a local
5.0 repository of guilhem. When guilhem does a push these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet
  1.2174 06/06/20 22:14:36 guilhem@stripped +3 -0
  Fix for BUG#20188 "REPLACE or ON DUPLICATE KEY UPDATE in
  auto_increment breaks binlog":
  if slave's table had a higher auto_increment counter than master's (even
  though all rows of the two tables were identical), then in some cases,
  REPLACE and INSERT ON DUPLICATE KEY UPDATE failed to replicate
  statement-based (it inserted different values on the slave than on the master).
  write_record() contained a "thd->next_insert_id=0" to force an adjustment
  of thd->next_insert_id after the update or replacement. But it is this
  assignment that introduced indeterminism of the statement on the slave, and
  thus the bug. For ON DUPLICATE, we replace that assignment by a call to
  handler::update_auto_increment() which is deterministic (does not depend
  on slave table's autoinc counter). For REPLACE, this assignment can simply
  be removed (as REPLACE can't insert a number larger than thd->next_insert_id).
  We also move a too early restore_auto_increment() down to when we really know
  that we can restore the value, otherwise duplicate key errors can happen
  in REPLACE: if a row fails write_row(), then succeeds delete_row() + 
  write_row(), then the next row would use the id of the previous row,
  and thus fail write_row(), assuming the two rows had NULL for their
  autoincrement column.

  sql/sql_insert.cc
    1.191 06/06/20 22:14:31 guilhem@stripped +7 -10
    restore_auto_increment() means "I know I won't use this autogenerated
    autoincrement value, you are free to reuse it for next row". But we were
    calling restore_auto_increment() in the case of REPLACE: if write_row() fails
    inserting the row, we don't know we won't use the value, as we are going to
    try again by doing internally an UPDATE of the existing row, or a DELETE
    of the existing row and then an INSERT. So I move restore_auto_increment()
    further down, when we know for sure we failed all possibilities for the row.
    Additionally, in case of REPLACE, we don't need to reset THD::next_insert_id:
    the value of thd->next_insert_id will be suitable for the next row.
    In case of ON DUPLICATE KEY UPDATE, resetting thd->next_insert_id is also
    wrong (breaks statement-based binlog), but cannot simply be removed, as
    thd->next_insert_id must be adjusted. We now do the adjustment by calling
    handler::update_auto_increment() (which, contrary to thd->next_insert_id=0,
    does not depend on the slave table's autoinc counter).

  mysql-test/t/rpl_insert_id.test
    1.17 06/06/20 22:14:31 guilhem@stripped +53 -0
    test for BUG#20188 "REPLACE or ON DUPLICATE KEY UPDATE in
    auto_increment breaks binlog".
    There is, in this order:
    - a test of the bug for the case of REPLACE
    - a test of basic ON DUPLICATE KEY UPDATE functionality which was not
    tested before (which is a reason why I managed to break it in a previous
    cset for this bug...)
    - a test of the bug for the case of ON DUPLICATE KEY UPDATE

  mysql-test/r/rpl_insert_id.result
    1.15 06/06/20 22:14:31 guilhem@stripped +55 -0
    result update; without the bugfix, slave's "3 350" were "4 350".

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	guilhem
# Host:	gbichot3.local
# Root:	/home/mysql_src/mysql-5.0

--- 1.190/sql/sql_insert.cc	2006-05-26 10:51:16 +02:00
+++ 1.191/sql/sql_insert.cc	2006-06-20 22:14:31 +02:00
@@ -955,7 +955,6 @@
       uint key_nr;
       if (error != HA_WRITE_SKIP)
 	goto err;
-      table->file->restore_auto_increment();
       if ((int) (key_nr = table->file->get_dup_key(error)) < 0)
       {
 	error=HA_WRITE_SKIP;			/* Database can't find key */
@@ -1030,9 +1029,11 @@
 
         if (thd->clear_next_insert_id)
         {
-          /* Reset auto-increment cacheing if we do an update */
-          thd->clear_next_insert_id= 0;
-          thd->next_insert_id= 0;
+          /*
+            This update may put in the auto_increment column a value larger
+            than thd->next_insert_id:
+          */
+          table->file->update_auto_increment();
         }
         if ((error=table->file->update_row(table->record[1],table->record[0])))
 	{
@@ -1067,12 +1068,6 @@
               table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
                                                 TRG_ACTION_BEFORE, TRUE))
             goto before_trg_err;
-          if (thd->clear_next_insert_id)
-          {
-            /* Reset auto-increment cacheing if we do an update */
-            thd->clear_next_insert_id= 0;
-            thd->next_insert_id= 0;
-          }
           if ((error=table->file->update_row(table->record[1],
 					     table->record[0])))
             goto err;
@@ -1135,6 +1130,7 @@
   DBUG_RETURN(trg_error);
 
 err:
+  table->file->restore_auto_increment();
   info->last_errno= error;
   /* current_select is NULL if this is a delayed insert */
   if (thd->lex->current_select)
@@ -1142,6 +1138,7 @@
   table->file->print_error(error,MYF(0));
 
 before_trg_err:
+  table->file->restore_auto_increment();
   if (key)
     my_safe_afree(key, table->s->max_unique_length, MAX_KEY_LENGTH);
   DBUG_RETURN(1);

--- 1.14/mysql-test/r/rpl_insert_id.result	2006-04-21 16:54:56 +02:00
+++ 1.15/mysql-test/r/rpl_insert_id.result	2006-06-20 22:14:31 +02:00
@@ -132,3 +132,58 @@
 drop function bug15728;
 drop function bug15728_insert;
 drop table t1, t2;
+create table t1 (n int primary key auto_increment not null,
+b int, unique(b));
+insert into t1 values(null,100);
+select * from t1 order by n;
+n	b
+1	100
+insert into t1 values(null,200),(null,300);
+delete from t1 where b <> 100;
+select * from t1 order by n;
+n	b
+1	100
+replace into t1 values(null,100),(null,350);
+select * from t1 order by n;
+n	b
+2	100
+3	350
+select * from t1 order by n;
+n	b
+2	100
+3	350
+insert into t1 values (NULL,400),(3,500),(NULL,600) on duplicate key UPDATE n=1000;
+select * from t1 order by n;
+n	b
+2	100
+4	400
+1000	350
+1001	600
+select * from t1 order by n;
+n	b
+2	100
+4	400
+1000	350
+1001	600
+drop table t1;
+create table t1 (n int primary key auto_increment not null,
+b int, unique(b));
+insert into t1 values(null,100);
+select * from t1 order by n;
+n	b
+1	100
+insert into t1 values(null,200),(null,300);
+delete from t1 where b <> 100;
+select * from t1 order by n;
+n	b
+1	100
+insert into t1 values(null,100),(null,350) on duplicate key update n=2;
+select * from t1 order by n;
+n	b
+2	100
+3	350
+select * from t1 order by n;
+n	b
+2	100
+3	350
+drop table t1;

--- 1.16/mysql-test/t/rpl_insert_id.test	2006-05-29 12:45:15 +02:00
+++ 1.17/mysql-test/t/rpl_insert_id.test	2006-06-20 22:14:31 +02:00
@@ -147,6 +147,59 @@
 drop function bug15728_insert;
 drop table t1, t2;
 
+# test of BUG#20188 REPLACE or ON DUPLICATE KEY UPDATE in
+# auto_increment breaks binlog
+
+create table t1 (n int primary key auto_increment not null,
+b int, unique(b));
+insert into t1 values(null,100);
+select * from t1 order by n;
+sync_slave_with_master;
+# make slave's table autoinc counter bigger
+insert into t1 values(null,200),(null,300);
+delete from t1 where b <> 100;
+# check that slave's table content is identical to master
+select * from t1 order by n;
+# only the auto_inc counter differs.
+
+connection master;
+replace into t1 values(null,100),(null,350);
+select * from t1 order by n;
+sync_slave_with_master;
+select * from t1 order by n;
+
+# Same test as for REPLACE, but for ON DUPLICATE KEY UPDATE
+
+# We first check that if we update a row using a value larger than the
+# table's counter, the counter for next row is bigger than the
+# after-value of the updated row.
+connection master;
+insert into t1 values (NULL,400),(3,500),(NULL,600) on duplicate key UPDATE n=1000;
+select * from t1 order by n;
+sync_slave_with_master;
+select * from t1 order by n;
+
+# and now test for the bug:
+connection master;
+drop table t1;
+create table t1 (n int primary key auto_increment not null,
+b int, unique(b));
+insert into t1 values(null,100);
+select * from t1 order by n;
+sync_slave_with_master;
+insert into t1 values(null,200),(null,300);
+delete from t1 where b <> 100;
+select * from t1 order by n;
+
+connection master;
+insert into t1 values(null,100),(null,350) on duplicate key update n=2;
+select * from t1 order by n;
+sync_slave_with_master;
+select * from t1 order by n;
+
+connection master;
+drop table t1;
+
 # End of 5.0 tests
 
 sync_slave_with_master;
Thread
bk commit into 5.0 tree (guilhem:1.2174) BUG#20188guilhem20 Jun