List:Commits« Previous MessageNext Message »
From:guilhem Date:July 5 2006 12:41pm
Subject:bk commit into 5.0 tree (guilhem:1.2174) BUG#20188
View as plain text  
Below is the list of changes that have just been committed into a local
5.0 repository of guilhem. When guilhem does a push these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet
  1.2174 06/07/05 14:41:35 guilhem@stripped +5 -0
  Fix for BUG#20188 "REPLACE or ON DUPLICATE KEY UPDATE in
  auto_increment breaks binlog":
  if slave's table had a higher auto_increment counter than master's (even
  though all rows of the two tables were identical), then in some cases,
  REPLACE and INSERT ON DUPLICATE KEY UPDATE failed to replicate
  statement-based (they inserted different values on the slave than on the master).
  write_record() contained a "thd->next_insert_id=0" to force an adjustment
  of thd->next_insert_id after the update or replacement. But it is this
  assignment that introduced nondeterminism of the statement on the slave, and
  thus the bug. For ON DUPLICATE, we replace that assignment by a call to
  handler::adjust_next_insert_id_after_explicit_value() which is deterministic
  (does not depend on slave table's autoinc counter). For REPLACE, this
  assignment can simply be removed (as REPLACE can't insert a number larger
  than thd->next_insert_id).
  We also move a too early restore_auto_increment() down to when we really know
  that we can restore the value.

  sql/sql_insert.cc
    1.191 06/07/05 14:40:58 guilhem@stripped +7 -13
    restore_auto_increment() means "I know I won't use this autogenerated
    autoincrement value, you are free to reuse it for next row". But we were
    calling restore_auto_increment() in the case of REPLACE: if write_row() fails
    inserting the row, we don't know that we won't use the value, as we are going to
    try again by doing internally an UPDATE of the existing row, or a DELETE
    of the existing row and then an INSERT. So I move restore_auto_increment()
    further down, when we know for sure we failed all possibilities for the row.
    Additionally, in case of REPLACE, we don't need to reset THD::next_insert_id:
    the value of thd->next_insert_id will be suitable for the next row.
    In case of ON DUPLICATE KEY UPDATE, resetting thd->next_insert_id is also
    wrong (breaks statement-based binlog), but cannot simply be removed, as
    thd->next_insert_id must be adjusted if the explicit value exceeds it.
    We now do the adjustment by calling
    handler::adjust_next_insert_id_after_explicit_value() (which, contrary to
    thd->next_insert_id=0, does not depend on the slave table's autoinc counter,
    and so is deterministic).

  sql/handler.h
    1.171 06/07/05 14:40:58 guilhem@stripped +1 -0
    see handler.cc

  sql/handler.cc
    1.213 06/07/05 14:40:58 guilhem@stripped +21 -11
    the adjustment of next_insert_id if inserting a big explicit value, is
    moved to a separate method to be used elsewhere.

  mysql-test/t/rpl_insert_id.test
    1.17 06/07/05 14:40:58 guilhem@stripped +63 -0
    test for BUG#20188 "REPLACE or ON DUPLICATE KEY UPDATE in
    auto_increment breaks binlog".
    There is, in this order:
    - a test of the bug for the case of REPLACE
    - a test of basic ON DUPLICATE KEY UPDATE functionality which was not
    tested before
    - a test of the bug for the case of ON DUPLICATE KEY UPDATE

  mysql-test/r/rpl_insert_id.result
    1.15 06/07/05 14:40:58 guilhem@stripped +65 -0
    result update; without the bugfix, the slave's "3 350" row was "4 350".

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	guilhem
# Host:	gbichot3.local
# Root:	/home/mysql_src/mysql-5.0

--- 1.212/sql/handler.cc	2006-04-13 09:25:52 +02:00
+++ 1.213/sql/handler.cc	2006-07-05 14:40:58 +02:00
@@ -1471,6 +1471,26 @@
 }
 
 
+void handler::adjust_next_insert_id_after_explicit_value(ulonglong nr)
+{
+  /*
+    If we have set THD::next_insert_id previously and plan to insert an
+    explicitely-specified value larger than this, we need to increase
+    THD::next_insert_id to be greater than the explicit value.
+  */
+  THD *thd= table->in_use;
+  if (thd->clear_next_insert_id && (nr >= thd->next_insert_id))
+  {
+    if (thd->variables.auto_increment_increment != 1)
+      nr= next_insert_id(nr, &thd->variables);
+    else
+      nr++;
+    thd->next_insert_id= nr;
+    DBUG_PRINT("info",("next_insert_id: %lu", (ulong) nr));
+  }
+}
+
+
 /*
   Update the auto_increment field if necessary
 
@@ -1547,17 +1567,7 @@
     /* Clear flag for next row */
     /* Mark that we didn't generate a new value **/
     auto_increment_column_changed=0;
-
-    /* Update next_insert_id if we have already generated a value */
-    if (thd->clear_next_insert_id && nr >= thd->next_insert_id)
-    {
-      if (variables->auto_increment_increment != 1)
-        nr= next_insert_id(nr, variables);
-      else
-        nr++;
-      thd->next_insert_id= nr;
-      DBUG_PRINT("info",("next_insert_id: %lu", (ulong) nr));
-    }
+    adjust_next_insert_id_after_explicit_value(nr);
     DBUG_RETURN(0);
   }
   if (!(nr= thd->next_insert_id))

--- 1.170/sql/handler.h	2006-05-09 22:34:24 +02:00
+++ 1.171/sql/handler.h	2006-07-05 14:40:58 +02:00
@@ -563,6 +563,7 @@
     {}
   virtual ~handler(void) { /* TODO: DBUG_ASSERT(inited == NONE); */ }
   int ha_open(const char *name, int mode, int test_if_locked);
+  void adjust_next_insert_id_after_explicit_value(ulonglong nr);
   bool update_auto_increment();
   virtual void print_error(int error, myf errflag);
   virtual bool get_error_message(int error, String *buf);

--- 1.190/sql/sql_insert.cc	2006-05-26 10:51:16 +02:00
+++ 1.191/sql/sql_insert.cc	2006-07-05 14:40:58 +02:00
@@ -955,7 +955,6 @@
       uint key_nr;
       if (error != HA_WRITE_SKIP)
 	goto err;
-      table->file->restore_auto_increment();
       if ((int) (key_nr = table->file->get_dup_key(error)) < 0)
       {
 	error=HA_WRITE_SKIP;			/* Database can't find key */
@@ -1028,20 +1027,20 @@
         if (res == VIEW_CHECK_ERROR)
           goto before_trg_err;
 
-        if (thd->clear_next_insert_id)
-        {
-          /* Reset auto-increment cacheing if we do an update */
-          thd->clear_next_insert_id= 0;
-          thd->next_insert_id= 0;
-        }
         if ((error=table->file->update_row(table->record[1],table->record[0])))
 	{
 	  if ((error == HA_ERR_FOUND_DUPP_KEY) && info->ignore)
+          {
+            table->file->restore_auto_increment();
             goto ok_or_after_trg_err;
+          }
           goto err;
 	}
         info->updated++;
 
+        if (table->next_number_field)
+          table->file->adjust_next_insert_id_after_explicit_value(table->next_number_field->val_int());
+
         trg_error= (table->triggers &&
                     table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
                                                       TRG_ACTION_AFTER, TRUE));
@@ -1067,12 +1066,6 @@
               table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
                                                 TRG_ACTION_BEFORE, TRUE))
             goto before_trg_err;
-          if (thd->clear_next_insert_id)
-          {
-            /* Reset auto-increment cacheing if we do an update */
-            thd->clear_next_insert_id= 0;
-            thd->next_insert_id= 0;
-          }
           if ((error=table->file->update_row(table->record[1],
 					     table->record[0])))
             goto err;
@@ -1142,6 +1135,7 @@
   table->file->print_error(error,MYF(0));
 
 before_trg_err:
+  table->file->restore_auto_increment();
   if (key)
     my_safe_afree(key, table->s->max_unique_length, MAX_KEY_LENGTH);
   DBUG_RETURN(1);

--- 1.14/mysql-test/r/rpl_insert_id.result	2006-04-21 16:54:56 +02:00
+++ 1.15/mysql-test/r/rpl_insert_id.result	2006-07-05 14:40:58 +02:00
@@ -132,3 +132,68 @@
 drop function bug15728;
 drop function bug15728_insert;
 drop table t1, t2;
+create table t1 (n int primary key auto_increment not null,
+b int, unique(b));
+set sql_log_bin=0;
+insert into t1 values(null,100);
+replace into t1 values(null,50),(null,100),(null,150);
+select * from t1 order by n;
+n	b
+2	50
+3	100
+4	150
+truncate table t1;
+set sql_log_bin=1;
+insert into t1 values(null,100);
+select * from t1 order by n;
+n	b
+1	100
+insert into t1 values(null,200),(null,300);
+delete from t1 where b <> 100;
+select * from t1 order by n;
+n	b
+1	100
+replace into t1 values(null,100),(null,350);
+select * from t1 order by n;
+n	b
+2	100
+3	350
+select * from t1 order by n;
+n	b
+2	100
+3	350
+insert into t1 values (NULL,400),(3,500),(NULL,600) on duplicate key UPDATE n=1000;
+select * from t1 order by n;
+n	b
+2	100
+4	400
+1000	350
+1001	600
+select * from t1 order by n;
+n	b
+2	100
+4	400
+1000	350
+1001	600
+drop table t1;
+create table t1 (n int primary key auto_increment not null,
+b int, unique(b));
+insert into t1 values(null,100);
+select * from t1 order by n;
+n	b
+1	100
+insert into t1 values(null,200),(null,300);
+delete from t1 where b <> 100;
+select * from t1 order by n;
+n	b
+1	100
+insert into t1 values(null,100),(null,350) on duplicate key update n=2;
+select * from t1 order by n;
+n	b
+2	100
+3	350
+select * from t1 order by n;
+n	b
+2	100
+3	350
+drop table t1;

--- 1.16/mysql-test/t/rpl_insert_id.test	2006-05-29 12:45:15 +02:00
+++ 1.17/mysql-test/t/rpl_insert_id.test	2006-07-05 14:40:58 +02:00
@@ -147,6 +147,69 @@
 drop function bug15728_insert;
 drop table t1, t2;
 
+# test of BUG#20188 REPLACE or ON DUPLICATE KEY UPDATE in
+# auto_increment breaks binlog
+
+create table t1 (n int primary key auto_increment not null,
+b int, unique(b));
+
+# First, test that we do not call restore_auto_increment() too early
+# in write_record():
+set sql_log_bin=0;
+insert into t1 values(null,100);
+replace into t1 values(null,50),(null,100),(null,150);
+select * from t1 order by n;
+truncate table t1;
+set sql_log_bin=1;
+
+insert into t1 values(null,100);
+select * from t1 order by n;
+sync_slave_with_master;
+# make slave's table autoinc counter bigger
+insert into t1 values(null,200),(null,300);
+delete from t1 where b <> 100;
+# check that slave's table content is identical to master
+select * from t1 order by n;
+# only the auto_inc counter differs.
+
+connection master;
+replace into t1 values(null,100),(null,350);
+select * from t1 order by n;
+sync_slave_with_master;
+select * from t1 order by n;
+
+# Same test as for REPLACE, but for ON DUPLICATE KEY UPDATE
+
+# We first check that if we update a row using a value larger than the
+# table's counter, the counter for next row is bigger than the
+# after-value of the updated row.
+connection master;
+insert into t1 values (NULL,400),(3,500),(NULL,600) on duplicate key UPDATE n=1000;
+select * from t1 order by n;
+sync_slave_with_master;
+select * from t1 order by n;
+
+# and now test for the bug:
+connection master;
+drop table t1;
+create table t1 (n int primary key auto_increment not null,
+b int, unique(b));
+insert into t1 values(null,100);
+select * from t1 order by n;
+sync_slave_with_master;
+insert into t1 values(null,200),(null,300);
+delete from t1 where b <> 100;
+select * from t1 order by n;
+
+connection master;
+insert into t1 values(null,100),(null,350) on duplicate key update n=2;
+select * from t1 order by n;
+sync_slave_with_master;
+select * from t1 order by n;
+
+connection master;
+drop table t1;
+
 # End of 5.0 tests
 
 sync_slave_with_master;
Thread
bk commit into 5.0 tree (guilhem:1.2174) BUG#20188guilhem5 Jul