List: Commits
From: mattiasj  Date: March 14 2008 8:35am
Subject: bk commit into 5.1 tree (mattiasj:1.2520) BUG#33479
Below is the list of changes that have just been committed into a local
5.1 repository of mattiasj.  When mattiasj does a push, these changes
will be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet@stripped, 2008-03-14 09:35:13+01:00, mattiasj@witty. +7 -0
  Bug#33479: auto_increment failures in partitioning
  
  This patch is based on the previous patches.
  
  I have updated according to Guilhem's review comments.
  
  Changed the locking so that lock, execute, and unlock happen in
  the same block/function (a short sketch of this pattern is shown
  just before the diff below).
  Simplified the handling of auto_increment as a secondary index column.
  Better doxygen comments.

  mysql-test/suite/parts/inc/partition_auto_increment.inc@stripped, 2008-03-14 09:35:11+01:00, mattiasj@witty. +4 -6
    Bug#33479: auto_increment failures in partitioning
    
    I have updated according to Guilhem's review comments.

  mysql-test/suite/parts/r/partition_auto_increment_innodb.result@stripped, 2008-03-14 09:35:11+01:00, mattiasj@witty. +2 -0
    Bug#33479: auto_increment failures in partitioning
    
    I have updated according to Guilhem's review comments.

  mysql-test/suite/parts/r/partition_auto_increment_myisam.result@stripped, 2008-03-14 09:35:11+01:00, mattiasj@witty. +4 -0
    Bug#33479: auto_increment failures in partitioning
    
    I have updated according to Guilhem's review comments.

  sql/ha_partition.cc@stripped, 2008-03-14 09:35:11+01:00, mattiasj@witty. +61 -147
    Bug#33479: auto_increment failures in partitioning
    
    I have updated according to Guilhem's review comments.

  sql/ha_partition.h@stripped, 2008-03-14 09:35:11+01:00, mattiasj@witty. +19 -17
    Bug#33479: auto_increment failures in partitioning
    
    I have updated according to Guilhem's review comments.

  sql/handler.h@stripped, 2008-03-14 09:35:11+01:00, mattiasj@witty. +4 -4
    Bug#33479: auto_increment failures in partitioning
    
    I have updated according to Guilhem's review comments.

  sql/table.h@stripped, 2008-03-14 09:35:11+01:00, mattiasj@witty. +2 -2
    Bug#33479: auto_increment failures in partitioning
    
    I have updated according to Guilhem's review comments.

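A minimal standalone sketch of the "lock, execute, unlock in the same
block/function" pattern mentioned in the changeset comment (an editorial
illustration, not part of the patch; the names g_auto_inc_mutex and
g_next_auto_inc_val are hypothetical stand-ins for the mutex and counter
that the real code keeps in HA_DATA_PARTITION in the table share):

    #include <pthread.h>

    static pthread_mutex_t g_auto_inc_mutex = PTHREAD_MUTEX_INITIALIZER;
    /* first non-reserved auto_increment value, shared for the table */
    static unsigned long long g_next_auto_inc_val = 1;

    /* Simplified analogue of set_auto_increment_if_higher(): take the
       mutex, check and bump the shared counter, and release the mutex,
       all within one function. */
    void set_auto_increment_if_higher_sketch(unsigned long long nr)
    {
      pthread_mutex_lock(&g_auto_inc_mutex);    /* lock    */
      if (nr >= g_next_auto_inc_val)            /* execute */
        g_next_auto_inc_val = nr + 1;
      pthread_mutex_unlock(&g_auto_inc_mutex);  /* unlock  */
    }

Before this patch, write_row() took the lock and only released it at its
exit label; the ha_partition.h hunk below moves both the lock and the
unlock into set_auto_increment_if_higher() itself.
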
diff -Nrup a/mysql-test/suite/parts/inc/partition_auto_increment.inc b/mysql-test/suite/parts/inc/partition_auto_increment.inc
--- a/mysql-test/suite/parts/inc/partition_auto_increment.inc	2008-03-06 10:36:43 +01:00
+++ b/mysql-test/suite/parts/inc/partition_auto_increment.inc	2008-03-14 09:35:11 +01:00
@@ -333,12 +333,10 @@ ENGINE = $engine
 PARTITION BY HASH(c2)
 PARTITIONS 2;
 INSERT INTO t1 VALUES (1, 0);
--- error 0, ER_DUP_KEY
 INSERT INTO t1 VALUES (1, 1);
 INSERT INTO t1 VALUES (NULL, 1), (NULL, 2), (NULL, 3);
 INSERT INTO t1 VALUES (NULL, 3);
 INSERT INTO t1 VALUES (2, 0), (NULL, 2);
--- error 0, ER_DUP_KEY
 INSERT INTO t1 VALUES (2, 2);
 INSERT INTO t1 VALUES (2, 22);
 INSERT INTO t1 VALUES (NULL, 2);
@@ -354,12 +352,12 @@ ENGINE = $engine
 PARTITION BY HASH(c2)
 PARTITIONS 2;
 INSERT INTO t1 VALUES (1, 0);
--- error 0, ER_DUP_KEY
+-- error ER_DUP_KEY
 INSERT INTO t1 VALUES (1, 1);
 INSERT INTO t1 VALUES (1, NULL);
 INSERT INTO t1 VALUES (2, NULL), (3, 11), (3, NULL), (2, 0);
 INSERT INTO t1 VALUES (2, NULL);
--- error 0, ER_DUP_KEY
+-- error ER_DUP_KEY
 INSERT INTO t1 VALUES (2, 2);
 INSERT INTO t1 VALUES (2, 22);
 INSERT INTO t1 VALUES (2, NULL);
@@ -390,13 +388,13 @@ if (`SELECT @my_errno NOT IN (0,$ER_WRON
 if ($run)
 {
 INSERT INTO t1 VALUES (1, 0);
--- error 0, ER_DUP_KEY
+-- error ER_DUP_KEY
 INSERT INTO t1 VALUES (1, 1);
 INSERT INTO t1 VALUES (1, NULL);
 INSERT INTO t1 VALUES (2, NULL);
 INSERT INTO t1 VALUES (3, NULL);
 INSERT INTO t1 VALUES (3, NULL), (2, 0), (2, NULL);
--- error 0, ER_DUP_KEY
+-- error ER_DUP_KEY
 INSERT INTO t1 VALUES (2, 2);
 INSERT INTO t1 VALUES (2, 22), (2, NULL);
 SELECT * FROM t1 ORDER BY c1,c2;
diff -Nrup a/mysql-test/suite/parts/r/partition_auto_increment_innodb.result b/mysql-test/suite/parts/r/partition_auto_increment_innodb.result
--- a/mysql-test/suite/parts/r/partition_auto_increment_innodb.result	2008-03-06 10:36:43 +01:00
+++ b/mysql-test/suite/parts/r/partition_auto_increment_innodb.result	2008-03-14 09:35:11 +01:00
@@ -555,10 +555,12 @@ PARTITION BY HASH(c2)
 PARTITIONS 2;
 INSERT INTO t1 VALUES (1, 0);
 INSERT INTO t1 VALUES (1, 1);
+ERROR 23000: Can't write; duplicate key in table 't1'
 INSERT INTO t1 VALUES (1, NULL);
 INSERT INTO t1 VALUES (2, NULL), (3, 11), (3, NULL), (2, 0);
 INSERT INTO t1 VALUES (2, NULL);
 INSERT INTO t1 VALUES (2, 2);
+ERROR 23000: Can't write; duplicate key in table 't1'
 INSERT INTO t1 VALUES (2, 22);
 INSERT INTO t1 VALUES (2, NULL);
 SELECT * FROM t1 ORDER BY c1,c2;
diff -Nrup a/mysql-test/suite/parts/r/partition_auto_increment_myisam.result b/mysql-test/suite/parts/r/partition_auto_increment_myisam.result
--- a/mysql-test/suite/parts/r/partition_auto_increment_myisam.result	2008-03-06 10:36:43 +01:00
+++ b/mysql-test/suite/parts/r/partition_auto_increment_myisam.result	2008-03-14 09:35:11 +01:00
@@ -583,10 +583,12 @@ PARTITION BY HASH(c2)
 PARTITIONS 2;
 INSERT INTO t1 VALUES (1, 0);
 INSERT INTO t1 VALUES (1, 1);
+ERROR 23000: Can't write; duplicate key in table 't1'
 INSERT INTO t1 VALUES (1, NULL);
 INSERT INTO t1 VALUES (2, NULL), (3, 11), (3, NULL), (2, 0);
 INSERT INTO t1 VALUES (2, NULL);
 INSERT INTO t1 VALUES (2, 2);
+ERROR 23000: Can't write; duplicate key in table 't1'
 INSERT INTO t1 VALUES (2, 22);
 INSERT INTO t1 VALUES (2, NULL);
 SELECT * FROM t1 ORDER BY c1,c2;
@@ -611,11 +613,13 @@ PARTITION BY HASH(c2)
 PARTITIONS 2;
 INSERT INTO t1 VALUES (1, 0);
 INSERT INTO t1 VALUES (1, 1);
+ERROR 23000: Can't write; duplicate key in table 't1'
 INSERT INTO t1 VALUES (1, NULL);
 INSERT INTO t1 VALUES (2, NULL);
 INSERT INTO t1 VALUES (3, NULL);
 INSERT INTO t1 VALUES (3, NULL), (2, 0), (2, NULL);
 INSERT INTO t1 VALUES (2, 2);
+ERROR 23000: Can't write; duplicate key in table 't1'
 INSERT INTO t1 VALUES (2, 22), (2, NULL);
 SELECT * FROM t1 ORDER BY c1,c2;
 c1	c2
diff -Nrup a/sql/ha_partition.cc b/sql/ha_partition.cc
--- a/sql/ha_partition.cc	2008-03-05 23:56:03 +01:00
+++ b/sql/ha_partition.cc	2008-03-14 09:35:11 +01:00
@@ -1239,8 +1239,8 @@ void ha_partition::cleanup_new_partition
 
 int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
                                     const char *path,
-                                    ulonglong *copied,
-                                    ulonglong *deleted,
+                                    ulonglong * const copied,
+                                    ulonglong * const deleted,
                                     const uchar *pack_frm_data
                                     __attribute__((unused)),
                                     size_t pack_frm_len
@@ -1525,7 +1525,8 @@ int ha_partition::change_partitions(HA_C
     partitions.
 */
 
-int ha_partition::copy_partitions(ulonglong *copied, ulonglong *deleted)
+int ha_partition::copy_partitions(ulonglong * const copied,
+                                  ulonglong * const deleted)
 {
   uint reorg_part= 0;
   int result= 0;
@@ -2271,6 +2272,7 @@ int ha_partition::open(const char *name,
   uint alloc_len;
   handler **file;
   char name_buff[FN_REFLEN];
+  bool is_not_tmp_table= (table_share->tmp_table == NO_TMP_TABLE);
   DBUG_ENTER("ha_partition::open");
 
   ref_length= 0;
@@ -2356,22 +2358,22 @@ int ha_partition::open(const char *name,
     goto err_handler;
 
 
-  if (table_share->tmp_table == NO_TMP_TABLE)
+  if (is_not_tmp_table)
     pthread_mutex_lock(&table->s->mutex);
   if (!table_share->ha_data)
   {
     HA_DATA_PARTITION *ha_data;
     /* currently only needed for auto_increment */
-    table_share->ha_data= alloc_root(&table_share->mem_root,
-                                     sizeof(HA_DATA_PARTITION));
-    if (!table_share->ha_data)
+    table_share->ha_data= ha_data= (HA_DATA_PARTITION*)
+                                   alloc_root(&table_share->mem_root,
+                                              sizeof(HA_DATA_PARTITION));
+    if (!ha_data)
       goto err_handler;
-    ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
     bzero(ha_data, sizeof(HA_DATA_PARTITION));
-    if (table_share->tmp_table == NO_TMP_TABLE)
+    if (is_not_tmp_table)
       pthread_mutex_init(&ha_data->mutex, MY_MUTEX_INIT_FAST);
   }
-  if (table_share->tmp_table == NO_TMP_TABLE)
+  if (is_not_tmp_table)
     pthread_mutex_unlock(&table->s->mutex);
   /*
     Some handlers update statistics as part of the open call. This will in
@@ -2734,26 +2736,14 @@ int ha_partition::write_row(uchar * buf)
   */
   if (have_auto_increment)
   {
-    /*
-      must initialize it before write if it explicity set,
-      since the partitions can have higher auto_increment_value
-      which should be used.
-    */
     if (!ha_data->auto_inc_initialized &&
-        !table->s->next_number_keypart &&
-        table_share->tmp_table == NO_TMP_TABLE)
+        !table->s->next_number_keypart)
     {
       /*
-        If auto_increment in table_share is not initialized, we must
-        have a mutex around update_auto_increment,
-        get_auto_increment and write_row.
-        (get_auto_increment can be called from update_auto_increment
-        and update_auto_increment can be called from write_row).
-        Not needed for tmp-tables, or auto_increment in secondary
-        columns in multi-column index.
-        Also check ha_partition::get_auto_increment().
+        If auto_increment in table_share is not initialized, start by
+        initializing it.
       */
-      lock_auto_increment();
+      info(HA_STATUS_AUTO);
     }
     error= update_auto_increment();
     /*
@@ -2794,7 +2784,6 @@ int ha_partition::write_row(uchar * buf)
     set_auto_increment_if_higher();
   reenable_binlog(thd);
 exit:
-  unlock_auto_increment();
   DBUG_RETURN(error);
 }
 
@@ -2865,17 +2854,14 @@ int ha_partition::update_row(const uchar
     tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
     error= m_file[new_part_id]->ha_update_row(old_data, new_data);
     /*
-      if updating a auto_increment row, update
+      if updating an auto_increment column, update
       auto_increment_value in table_share if needed
       (not to be used if auto_increment on secondary field in a multi-
       column index)
     */
     if (table->next_number_field && new_data == table->record[0] &&
         !table->s->next_number_keypart)
-    {
       set_auto_increment_if_higher();
-      unlock_auto_increment();
-    }
     reenable_binlog(thd);
     goto exit;
   }
@@ -2980,7 +2966,7 @@ int ha_partition::delete_all_rows()
 {
   int error;
   handler **file;
-  THD *thd= current_thd;
+  THD *thd= ha_thd();
   DBUG_ENTER("ha_partition::delete_all_rows");
 
   if (thd->lex->sql_command == SQLCOM_TRUNCATE)
@@ -2996,9 +2982,6 @@ int ha_partition::delete_all_rows()
   {
     if ((error= (*file)->ha_delete_all_rows()))
       DBUG_RETURN(error);
-    /* must reset the auto_increment for some engines (eg MyISAM) */
-    if (thd->lex->sql_command == SQLCOM_TRUNCATE)
-      (*file)->ha_reset_auto_increment(0);
   } while (*(++file));
   DBUG_RETURN(0);
 }
@@ -4451,56 +4434,51 @@ int ha_partition::handle_ordered_prev(uc
 int ha_partition::info(uint flag)
 {
   handler *file, **file_array;
-  HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
-  DBUG_ENTER("ha_partition:info");
+  DBUG_ENTER("ha_partition::info");
 
   if (flag & HA_STATUS_AUTO)
   {
+    bool auto_inc_is_first_in_idx= (table_share->next_number_keypart == 0);
+    HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
     DBUG_PRINT("info", ("HA_STATUS_AUTO"));
-    if (ha_data->auto_inc_initialized && !table_share->next_number_keypart)
+    if (!table->found_next_number_field)
+      stats.auto_increment_value= 0;
+    else if (ha_data->auto_inc_initialized && auto_inc_is_first_in_idx)
     {
-      DBUG_PRINT("info", ("Using ha_data->next_auto_inc_val: %llu",
-                          ha_data->next_auto_inc_val));
-
       lock_auto_increment();
       stats.auto_increment_value= ha_data->next_auto_inc_val;
       unlock_auto_increment();
     }
     else
     {
-      ulonglong auto_increment_value= 0;
-      file_array= m_file;
-      DBUG_PRINT("info", ("checking all partitions for auto_increment_value"));
-      do
-      {
-        file= *file_array;
-        file->info(HA_STATUS_AUTO);
-        set_if_bigger(auto_increment_value, file->stats.auto_increment_value);
-        DBUG_PRINT("info", ("file->stats.auto_increment_value: %llu",
-                            file->stats.auto_increment_value));
-      } while (*(++file_array));
-
-      /*
-        This should not occur unless bugs in the other storage engine.
-        If locked, it is called via get_auto_increment as a part of
-        initializing and is handled there.
-      */
-      if (!auto_increment_lock && auto_increment_value == 0)
-        auto_increment_value= 1;
-      stats.auto_increment_value= auto_increment_value;
-      if (!table_share->next_number_keypart)
+      lock_auto_increment();
+      /* to avoid two concurrent initializations, check again when locked */
+      if (ha_data->auto_inc_initialized)
+        stats.auto_increment_value= ha_data->next_auto_inc_val;
+      else
       {
-        bool is_already_locked= auto_increment_lock;
-        if (!is_already_locked)
-          lock_auto_increment();
-        set_if_bigger(ha_data->next_auto_inc_val, auto_increment_value);
-        ha_data->auto_inc_initialized= TRUE;
-        if (!is_already_locked)
-          unlock_auto_increment();
+        ulonglong auto_increment_value= 0;
+        file_array= m_file;
+        DBUG_PRINT("info",
+                   ("checking all partitions for auto_increment_value"));
+        do
+        {
+          file= *file_array;
+          file->info(HA_STATUS_AUTO);
+          set_if_bigger(auto_increment_value,
+                        file->stats.auto_increment_value);
+        } while (*(++file_array));
+
+        DBUG_ASSERT(auto_increment_value);
+        stats.auto_increment_value= auto_increment_value;
+        if (auto_inc_is_first_in_idx)
+        {
+          set_if_bigger(ha_data->next_auto_inc_val, auto_increment_value);
+          ha_data->auto_inc_initialized= TRUE;
+        }
       }
+      unlock_auto_increment();
     }
-    DBUG_PRINT("info", ("stats.auto_increment_value: %llu",
-                        stats.auto_increment_value));
   }
   if (flag & HA_STATUS_VARIABLE)
   {
@@ -5661,11 +5639,10 @@ int ha_partition::reset_auto_increment(u
   int res;
   HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
   DBUG_ENTER("ha_partition::reset_auto_increment");
-  DBUG_PRINT("info", ("value: %llu", value));
   lock_auto_increment();
   ha_data->auto_inc_initialized= FALSE;
   ha_data->next_auto_inc_val= 0;
-  for (pos=m_file, end= m_file+ m_tot_parts; pos != end ; pos++)
+  for (pos=m_file, end= m_file + m_tot_parts; pos != end ; pos++)
   {
     if ((res= (*pos)->ha_reset_auto_increment(value)) != 0)
     {
@@ -5694,9 +5671,6 @@ void ha_partition::get_auto_increment(ul
 {
   HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
   DBUG_ENTER("ha_partition::get_auto_increment");
-  DBUG_PRINT("info", ("offset: %llu inc: %llu desired_values: %llu "
-                      "first_value: %llu", offset, increment,
-                      nb_desired_values, *first_value));
   DBUG_ASSERT(increment && nb_desired_values);
   *first_value= 0;
   if (table->s->next_number_keypart)
@@ -5706,17 +5680,17 @@ void ha_partition::get_auto_increment(ul
       column in the index (it is allowed in MyISAM)
     */
     DBUG_PRINT("info", ("next_number_keypart != 0"));
-    ulonglong first_value_part, last_value_part, nb_reserved_values_part,
-      last_value= ~ (ulonglong) 0;
+    ulonglong first_value_part, nb_reserved_values_part;
     handler **pos, **end;
     /*
-      Must lock and find highest value among all partitions
+      Must lock and find highest value among all partitions.
     */
     lock_auto_increment();
-    for (pos=m_file, end= m_file+ m_tot_parts; pos != end ; pos++)
+    for (pos=m_file, end= m_file + m_tot_parts; pos != end ; pos++)
     {
       first_value_part= 0;
-      (*pos)->get_auto_increment(offset, increment, nb_desired_values,
+      /* Only nb_desired_values = 1 makes sense */
+      (*pos)->get_auto_increment(offset, increment, 1,
                                  &first_value_part, &nb_reserved_values_part);
       if (first_value_part == ~(ulonglong)(0)) // error in one partition
       {
@@ -5725,75 +5699,24 @@ void ha_partition::get_auto_increment(ul
         unlock_auto_increment();
         DBUG_VOID_RETURN;
       }
-      DBUG_PRINT("info", ("partition loop, first_value_part: %llu", first_value_part));
-      /*
-        Partition has reserved an interval. Intersect it with the intervals
-        already reserved for the previous partitions.
-      */
-      last_value_part= (nb_reserved_values_part == ULONGLONG_MAX) ?
-        ULONGLONG_MAX : (first_value_part + nb_reserved_values_part * increment);
       set_if_bigger(*first_value, first_value_part);
-      set_if_smaller(last_value, last_value_part);
-    }
-    DBUG_PRINT("info", ("*first_value: %llu last_value: %llu", *first_value,
-                        last_value));
-    if (last_value < *first_value)
-    {
-      /*
-        Set last_value to *first_value. This is safe because of the mutex.
-      */
-      last_value= *first_value;
-    }
-    if (increment)                                // If not check for values
-    {
-      *nb_reserved_values= (last_value == ULONGLONG_MAX) ?
-        ULONGLONG_MAX : ((last_value - *first_value) / increment);
     }
+    *nb_reserved_values= 1;
     unlock_auto_increment();
   }
-  else if (!ha_data->auto_inc_initialized)
+  else
   {
     /*
-      Not initialized, but should be locked in ha_partition::write_row().
-      (Or is a temporary table).
-      Initialize to highest value among all partitions
+     This should have been done in the beginning of the first write_row call.
     */
-    info(HA_STATUS_AUTO);
-    *first_value= ha_data->next_auto_inc_val;
-    if (*first_value == 0 &&
-        !(table->auto_increment_field_not_null &&
-          table->next_number_field->val_int() == 0 &&
-          current_thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO))
-    {
-      /*
-        First auto_increment, set to 1
-        (if not explicit number 0 and not null and sql_mode set)
-      */
-      DBUG_PRINT("info", ("Setting *first_value to 1"));
-      *first_value= 1;
-    }
-    DBUG_PRINT("info", ("*first_value: %llu", *first_value));
-
-    ha_data->next_auto_inc_val= *first_value + nb_desired_values * increment;
-    ha_data->auto_inc_initialized= TRUE;
-    *nb_reserved_values= nb_desired_values;
-  }
-  else
-  {
+    DBUG_ASSERT(ha_data->auto_inc_initialized);
     /*
-      This is an optimized solution that not require a mutex around
-      write_row for auto_increment handling, instead only using a mutex
-      for picking the next auto_increment_value.
-
       Get a lock for handling the auto_increment in table_share->ha_data
       for avoiding two concurrent statements getting the same number.
     */ 
 
-    DBUG_PRINT("info", ("reserving %llu auto_inc values", nb_desired_values));
     lock_auto_increment();
 
-    DBUG_PRINT("info", ("ha_data->next_auto_inc_val: %llu",
-                        ha_data->next_auto_inc_val));
     /* this gets corrected (for offset/increment) in update_auto_increment */
     *first_value= ha_data->next_auto_inc_val;
     ha_data->next_auto_inc_val= *first_value + nb_desired_values * increment;
@@ -5801,10 +5724,6 @@ void ha_partition::get_auto_increment(ul
     unlock_auto_increment();
     *nb_reserved_values= nb_desired_values;
   }
-  DBUG_PRINT("info", ("first_value: %llu next_auto_inc_val: %llu "
-                      "nb_reserved_values: %llu", *first_value,
-                      ha_data->next_auto_inc_val,
-                      *nb_reserved_values));
 
   DBUG_VOID_RETURN;
 }
@@ -5821,20 +5740,15 @@ void ha_partition::release_auto_incremen
   else
   {
     HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
-    DBUG_PRINT("info", ("next_auto_inc_val: %llu cur_row_max: %llu min: %llu",
-                        ha_data->next_auto_inc_val,
-                        auto_inc_interval_for_cur_row.maximum(),
-                        auto_inc_interval_for_cur_row.minimum()));
     if (next_insert_id)
     {
       lock_auto_increment();
       if (next_insert_id < ha_data->next_auto_inc_val &&
-          auto_inc_interval_for_cur_row.maximum() >= ha_data->next_auto_inc_val)
+          auto_inc_interval_for_cur_row.maximum() >=
+          ha_data->next_auto_inc_val)
         ha_data->next_auto_inc_val= next_insert_id;
       unlock_auto_increment();
     }
-    DBUG_PRINT("info", ("next_auto_inc_val: %llu next_ins_id: %llu",
-                        ha_data->next_auto_inc_val, next_insert_id));
   }
   DBUG_VOID_RETURN;
 }
diff -Nrup a/sql/ha_partition.h b/sql/ha_partition.h
--- a/sql/ha_partition.h	2008-03-05 23:56:03 +01:00
+++ b/sql/ha_partition.h	2008-03-14 09:35:11 +01:00
@@ -37,12 +37,15 @@ typedef struct st_partition_share
 } PARTITION_SHARE;
 #endif
 
-/* TODO: move all partition specific data from TABLE_SHARE here */
+/**
+  Partition specific ha_data struct
+  @todo: move all partition specific data from TABLE_SHARE here
+*/
 typedef struct st_ha_data_partition
 {
-  ulonglong next_auto_inc_val;
+  ulonglong next_auto_inc_val;                 /**< first non reserved value */
   bool auto_inc_initialized;
-  pthread_mutex_t mutex;
+  pthread_mutex_t mutex;                       /**< mutex for auto_increment */
 } HA_DATA_PARTITION;
 
 #define PARTITION_BYTES_IN_POS 2
@@ -205,8 +208,8 @@ public:
   virtual char *update_table_comment(const char *comment);
   virtual int change_partitions(HA_CREATE_INFO *create_info,
                                 const char *path,
-                                ulonglong *copied,
-                                ulonglong *deleted,
+                                ulonglong * const copied,
+                                ulonglong * const deleted,
                                 const uchar *pack_frm_data,
                                 size_t pack_frm_len);
   virtual int drop_partitions(const char *path);
@@ -220,7 +223,7 @@ public:
   virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share);
 private:
   int prepare_for_delete();
-  int copy_partitions(ulonglong *copied, ulonglong *deleted);
+  int copy_partitions(ulonglong * const copied, ulonglong * const deleted);
   void cleanup_new_partition(uint part_count);
   int prepare_new_partition(TABLE *table, HA_CREATE_INFO *create_info,
                             handler *file, const char *part_name,
@@ -843,16 +846,17 @@ public:
   virtual void release_auto_increment();
 private:
   virtual int reset_auto_increment(ulonglong value);
-  inline virtual void lock_auto_increment()
+  virtual void lock_auto_increment()
   {
-    if(!auto_increment_lock && table_share->tmp_table == NO_TMP_TABLE)
+    DBUG_ASSERT(!auto_increment_lock);
+    if(table_share->tmp_table == NO_TMP_TABLE)
     {
       HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
       auto_increment_lock= TRUE;
       pthread_mutex_lock(&ha_data->mutex);
     }
   }
-  inline virtual void unlock_auto_increment()
+  virtual void unlock_auto_increment()
   {
     if(auto_increment_lock)
     {
@@ -861,18 +865,16 @@ private:
       auto_increment_lock= FALSE;
     }
   }
-  inline virtual void set_auto_increment_if_higher()
+  virtual void set_auto_increment_if_higher()
   {
     ulonglong nr= table->next_number_field->val_int();
     HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
+    lock_auto_increment();
+    /* must check when the mutex is taken */
     if (nr >= ha_data->next_auto_inc_val)
-    {
-      lock_auto_increment();
-      /* must check again when the mutex is taken */
-      if (nr >= ha_data->next_auto_inc_val)
-        ha_data->next_auto_inc_val= nr + 1;
-      ha_data->auto_inc_initialized= TRUE;
-    }
+      ha_data->next_auto_inc_val= nr + 1;
+    ha_data->auto_inc_initialized= TRUE;
+    unlock_auto_increment();
   }
 
 public:
diff -Nrup a/sql/handler.h b/sql/handler.h
--- a/sql/handler.h	2007-12-20 19:16:51 +01:00
+++ b/sql/handler.h	2008-03-14 09:35:11 +01:00
@@ -1138,8 +1138,8 @@ public:
 
   int ha_change_partitions(HA_CREATE_INFO *create_info,
                            const char *path,
-                           ulonglong *copied,
-                           ulonglong *deleted,
+                           ulonglong * const copied,
+                           ulonglong * const deleted,
                            const uchar *pack_frm_data,
                            size_t pack_frm_len);
   int ha_drop_partitions(const char *path);
@@ -1792,8 +1792,8 @@ private:
 
   virtual int change_partitions(HA_CREATE_INFO *create_info,
                                 const char *path,
-                                ulonglong *copied,
-                                ulonglong *deleted,
+                                ulonglong * const copied,
+                                ulonglong * const deleted,
                                 const uchar *pack_frm_data,
                                 size_t pack_frm_len)
   { return HA_ERR_WRONG_COMMAND; }
diff -Nrup a/sql/table.h b/sql/table.h
--- a/sql/table.h	2008-03-05 23:56:03 +01:00
+++ b/sql/table.h	2008-03-14 09:35:11 +01:00
@@ -358,8 +358,8 @@ typedef struct st_table_share
   */
   int cached_row_logging_check;
 
-  /* TODO: Move into *ha_data for partitioning */
 #ifdef WITH_PARTITION_STORAGE_ENGINE
+  /** @todo: Move into *ha_data for partitioning */
   bool auto_partitioned;
   const char *partition_info;
   uint  partition_info_len;
@@ -369,7 +369,7 @@ typedef struct st_table_share
   handlerton *default_part_db_type;
 #endif
 
-  /* place to store storage engine specific data */
+  /** place to store storage engine specific data */
   void *ha_data;
 
 
Thread:
  bk commit into 5.1 tree (mattiasj:1.2520) BUG#33479 (mattiasj, 14 Mar)
  • Re: bk commit into 5.1 tree (mattiasj:1.2520) BUG#33479 (Guilhem Bichot, 18 Mar)