List:Commits« Previous MessageNext Message »
From:Dmitry Shulga Date:January 9 2013 6:11am
Subject:bzr push into mysql-trunk branch (Dmitry.Shulga:5342 to 5343) Bug#13519770
View as plain text  
 5343 Dmitry Shulga	2013-01-09
      This patch fixes bug#13519770 (CODE REVIEW: WRITE_ROW() CRASHES ON
      INSERT...DUPLICATE KEY UPDATE).
      
      The root cause of this bug is that the return value of handler::get_dup_key
      wasn't checked for a valid range. It was possible for this method to
      return the constant MAX_KEY, and a subsequent call to key_copy()
      could then generate a segfault and crash the server. This bug was discovered
      during code review and doesn't produce any visible side effect.
      
      We treat the constant MAX_KEY as an indicator that no information is
      available about the key that provoked the duplication error. For such
      a case we require that the storage engine supports the HA_DUPLICATE_POS
      flag. If some future storage engine doesn't support the HA_DUPLICATE_POS
      flag and get_dup_key() returns the value MAX_KEY, then a debug build of
      the server will abort on this assert.
      In order to catch the condition mentioned above in a server built
      without debug support, we added an extra 'else' branch that returns an
      error if the key index is equal to MAX_KEY.

    modified:
      sql/handler.cc
      sql/log_event.cc
      sql/log_event_old.cc
      sql/sql_insert.cc
 5342 Sunny Bains	2013-01-09 [merge]
      Merge from mysql-5.6.

    modified:
      mysql-test/suite/sys_vars/r/innodb_sync_array_size_basic.result
      mysql-test/suite/sys_vars/t/innodb_sync_array_size_basic.test
      storage/innobase/handler/ha_innodb.cc
=== modified file 'sql/handler.cc'
--- a/sql/handler.cc	2012-12-11 02:20:50 +0000
+++ b/sql/handler.cc	2013-01-09 06:10:20 +0000
@@ -3665,7 +3665,7 @@ void handler::print_error(int error, myf
   {
     const char *ptr= "???";
     uint key_nr= table ? get_dup_key(error) : -1;
-    if ((int) key_nr >= 0)
+    if ((int) key_nr >= 0 && key_nr != MAX_KEY)
       ptr= table->key_info[key_nr].name;
     my_error(ER_DROP_INDEX_FK, errflag, ptr);
     DBUG_VOID_RETURN;

=== modified file 'sql/log_event.cc'
--- a/sql/log_event.cc	2012-12-19 09:35:15 +0000
+++ b/sql/log_event.cc	2013-01-09 06:10:20 +0000
@@ -12373,6 +12373,19 @@ Write_rows_log_event::write_row(const Re
       goto error;
     }
     /*
+      key index value is either valid in the range [0-MAX_KEY) or
+      has value MAX_KEY as a marker for the case when no information
+      about key can be found. In the last case we have to require
+      that storage engine has the flag HA_DUPLICATE_POS turned on.
+      If this invariant is false then DBUG_ASSERT will crash
+      the server built in debug mode. For the server that was built
+      without DEBUG we have additional check for the value of key index
+      in the code below in order to report about error in any case.
+    */
+    DBUG_ASSERT(keynum != MAX_KEY ||
+                (keynum == MAX_KEY &&
+                 (table->file->ha_table_flags() & HA_DUPLICATE_POS)));
+    /*
        We need to retrieve the old row into record[1] to be able to
        either update or delete the offending record.  We either:
 
@@ -12431,12 +12444,22 @@ Write_rows_log_event::write_row(const Re
         }
       }
 
-      key_copy((uchar*)key.get(), table->record[0], table->key_info + keynum,
-               0);
-      error= table->file->ha_index_read_idx_map(table->record[1], keynum,
-                                                (const uchar*)key.get(),
-                                                HA_WHOLE_KEY,
-                                                HA_READ_KEY_EXACT);
+      if ((uint)keynum < MAX_KEY)
+      {
+        key_copy((uchar*)key.get(), table->record[0], table->key_info + keynum,
+                 0);
+        error= table->file->ha_index_read_idx_map(table->record[1], keynum,
+                                                  (const uchar*)key.get(),
+                                                  HA_WHOLE_KEY,
+                                                  HA_READ_KEY_EXACT);
+      }
+      else
+        /*
+          For the server built in non-debug mode returns error if
+          handler::get_dup_key() returned MAX_KEY as the value of key index.
+        */
+        error= HA_ERR_FOUND_DUPP_KEY;
+
       if (error)
       {
         DBUG_PRINT("info",("ha_index_read_idx_map() returns %s", HA_ERR(error)));

=== modified file 'sql/log_event_old.cc'
--- a/sql/log_event_old.cc	2012-11-21 12:44:48 +0000
+++ b/sql/log_event_old.cc	2013-01-09 06:10:20 +0000
@@ -556,6 +556,20 @@ replace_record(THD *thd, TABLE *table,
     }
 
     /*
+      key index value is either valid in the range [0-MAX_KEY) or
+      has value MAX_KEY as a marker for the case when no information
+      about key can be found. In the last case we have to require
+      that storage engine has the flag HA_DUPLICATE_POS turned on.
+      If this invariant is false then DBUG_ASSERT will crash
+      the server built in debug mode. For the server that was built
+      without DEBUG we have additional check for the value of key index
+      in the code below in order to report about error in any case.
+    */
+    DBUG_ASSERT(keynum != MAX_KEY ||
+                (keynum == MAX_KEY &&
+                 (table->file->ha_table_flags() & HA_DUPLICATE_POS)));
+
+    /*
        We need to retrieve the old row into record[1] to be able to
        either update or delete the offending record.  We either:
 
@@ -591,12 +605,22 @@ replace_record(THD *thd, TABLE *table,
           DBUG_RETURN(ENOMEM);
       }
 
-      key_copy((uchar*)key.get(), table->record[0], table->key_info + keynum,
-               0);
-      error= table->file->ha_index_read_idx_map(table->record[1], keynum,
-                                                (const uchar*)key.get(),
-                                                HA_WHOLE_KEY,
-                                                HA_READ_KEY_EXACT);
+      if ((uint)keynum < MAX_KEY)
+      {
+        key_copy((uchar*)key.get(), table->record[0], table->key_info + keynum,
+                 0);
+        error= table->file->ha_index_read_idx_map(table->record[1], keynum,
+                                                  (const uchar*)key.get(),
+                                                  HA_WHOLE_KEY,
+                                                  HA_READ_KEY_EXACT);
+      }
+      else
+        /*
+          For the server built in non-debug mode returns error if
+          handler::get_dup_key() returned MAX_KEY as the value of key index.
+        */
+        error= HA_ERR_FOUND_DUPP_KEY;
+
       if (error)
       {
         DBUG_PRINT("info", ("ha_index_read_idx_map() returns error %d", error));
@@ -2052,7 +2076,19 @@ Old_rows_log_event::write_row(const Rela
       */
       DBUG_RETURN(error);
     }
-
+    /*
+      key index value is either valid in the range [0-MAX_KEY) or
+      has value MAX_KEY as a marker for the case when no information
+      about key can be found. In the last case we have to require
+      that storage engine has the flag HA_DUPLICATE_POS turned on.
+      If this invariant is false then DBUG_ASSERT will crash
+      the server built in debug mode. For the server that was built
+      without DEBUG we have additional check for the value of key index
+      in the code below in order to report about error in any case.
+    */
+    DBUG_ASSERT(keynum != MAX_KEY ||
+                (keynum == MAX_KEY &&
+                 (table->file->ha_table_flags() & HA_DUPLICATE_POS)));
     /*
        We need to retrieve the old row into record[1] to be able to
        either update or delete the offending record.  We either:
@@ -2096,12 +2132,22 @@ Old_rows_log_event::write_row(const Rela
         }
       }
 
-      key_copy((uchar*)key.get(), table->record[0], table->key_info + keynum,
-               0);
-      error= table->file->ha_index_read_idx_map(table->record[1], keynum,
-                                                (const uchar*)key.get(),
-                                                HA_WHOLE_KEY,
-                                                HA_READ_KEY_EXACT);
+      if ((uint)keynum < MAX_KEY)
+      {
+        key_copy((uchar*)key.get(), table->record[0], table->key_info + keynum,
+                 0);
+        error= table->file->ha_index_read_idx_map(table->record[1], keynum,
+                                                  (const uchar*)key.get(),
+                                                  HA_WHOLE_KEY,
+                                                  HA_READ_KEY_EXACT);
+      }
+      else
+        /*
+          For the server built in non-debug mode returns error if
+          handler::get_dup_key() returned MAX_KEY as the value of key index.
+        */
+        error= HA_ERR_FOUND_DUPP_KEY;
+
       if (error)
       {
         DBUG_PRINT("info",("ha_index_read_idx_map() returns error %d", error));

=== modified file 'sql/sql_insert.cc'
--- a/sql/sql_insert.cc	2012-12-11 11:58:31 +0000
+++ b/sql/sql_insert.cc	2013-01-09 06:10:20 +0000
@@ -1289,6 +1289,20 @@ int write_record(THD *thd, TABLE *table,
 	error= HA_ERR_FOUND_DUPP_KEY;         /* Database can't find key */
 	goto err;
       }
+      /*
+        key index value is either valid in the range [0-MAX_KEY) or
+        has value MAX_KEY as a marker for the case when no information
+        about key can be found. In the last case we have to require
+        that storage engine has the flag HA_DUPLICATE_POS turned on.
+        If this invariant is false then DBUG_ASSERT will crash
+        the server built in debug mode. For the server that was built
+        without DEBUG we have additional check for the value of key_nr
+        in the code below in order to report about error in any case.
+      */
+      DBUG_ASSERT(key_nr != MAX_KEY ||
+                  (key_nr == MAX_KEY &&
+                   (table->file->ha_table_flags() & HA_DUPLICATE_POS)));
+
       DEBUG_SYNC(thd, "write_row_replace");
 
       /* Read all columns for the row we are going to replace */
@@ -1308,7 +1322,11 @@ int write_record(THD *thd, TABLE *table,
         if (table->file->ha_rnd_pos(table->record[1],table->file->dup_ref))
           goto err;
       }
-      else
+      /*
+        If the key index is equal to MAX_KEY it's treated as unknown key case
+        and we shouldn't try to locate key info.
+      */
+      else if (key_nr < MAX_KEY)
       {
 	if (table->file->extra(HA_EXTRA_FLUSH_CACHE)) /* Not needed with NISAM */
 	{
@@ -1331,6 +1349,15 @@ int write_record(THD *thd, TABLE *table,
                                                        HA_READ_KEY_EXACT))))
 	  goto err;
       }
+      else
+      {
+        /*
+          For the server built in non-debug mode returns error if
+          handler::get_dup_key() returned MAX_KEY as the value of key index.
+        */
+        error= HA_ERR_FOUND_DUPP_KEY;         /* Database can't find key */
+        goto err;
+      }
       if (duplicate_handling == DUP_UPDATE)
       {
         int res= 0;

No bundle (reason: useless for push emails).
Thread
bzr push into mysql-trunk branch (Dmitry.Shulga:5342 to 5343) Bug#13519770 — Dmitry Shulga, 13 Feb