List: Commits — « Previous Message | Next Message »
From:konstantin Date:December 19 2007 7:15pm
Subject:bk commit into 5.1 tree (kostja:1.2650) BUG#12713
View as plain text  
Below is the list of changes that have just been committed into a local
5.1 repository of kostja. When kostja does a push these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet@stripped, 2007-12-19 22:15:02+03:00, kostja@bodhi.(none) +4 -0
  Make handler::{write,delete,update}_row private. It's critical
  that the entire server uses their public ha_* counterparts instead,
  since only then can we ensure proper tracing of these calls that
  is necessary for Bug#12713.
  A pre-requisite for Bug#12713 "Error in a stored function called from
  a SELECT doesn't cause ROLLBACK of statement".

  sql/ha_partition.cc@stripped, 2007-12-19 22:14:59+03:00, kostja@bodhi.(none) +23 -6
    Use ha_write_row, ha_update_row, ha_delete_row instead of now-private
    write_row, update_row, delete_row. 
    In future ha_* calls will contain more than just a call to the binary
    log, so it's essential they are used consistently everywhere in the server.
    
    Disable the undesired effect of double binary logging of changes
    to partitioned tables with tmp_disable_binlog.

  sql/handler.h@stripped, 2007-12-19 22:14:59+03:00, kostja@bodhi.(none) +15 -16
    Make write_row, update_row, delete_row private. It's critical
    that the entire code base uses ha_write_row, ha_update_row, ha_delete_row
    instead -- in future, ha_* counterparts will have more common
    functionality than just a call to the binary log.

  sql/sql_select.cc@stripped, 2007-12-19 22:15:00+03:00, kostja@bodhi.(none) +15 -15
    Use ha_write_row, ha_update_row, ha_delete_row instead of
    write_row, update_row, delete_row respectively. 
    The change affects the join execution code that works with an
    intermediate internal temporary table. Do not disable binary logging,
    since it's unnecessary - temporary tables are not replicated
    by row level replication.

  sql/sql_table.cc@stripped, 2007-12-19 22:15:00+03:00, kostja@bodhi.(none) +1 -1
    Use ha_write_row in copy_data_between_tables - the function
    that writes data from the original table to a temporary copy
    when executing ALTER TABLE. Do not disable binary logging
    since temporary tables are not replicated by row level 
    replication anyway.

diff -Nrup a/sql/ha_partition.cc b/sql/ha_partition.cc
--- a/sql/ha_partition.cc	2007-12-14 16:01:09 +03:00
+++ b/sql/ha_partition.cc	2007-12-19 22:14:59 +03:00
@@ -1574,9 +1574,13 @@ int ha_partition::copy_partitions(ulongl
       }
       else
       {
+        THD *thd= ha_thd();
         /* Copy record to new handler */
         copied++;
-        if ((result= m_new_file[new_part]->write_row(m_rec0)))
+        tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
+        result= m_new_file[new_part]->ha_write_row(m_rec0);
+        reenable_binlog(thd);
+        if (result)
           goto error;
       }
     }
@@ -2694,6 +2698,7 @@ int ha_partition::write_row(uchar * buf)
   longlong func_value;
   bool autoincrement_lock= FALSE;
   my_bitmap_map *old_map;
+  THD *thd= ha_thd();
 #ifdef NOT_NEEDED
   uchar *rec0= m_rec0;
 #endif
@@ -2765,7 +2770,9 @@ int ha_partition::write_row(uchar * buf)
   }
   m_last_part= part_id;
   DBUG_PRINT("info", ("Insert in partition %d", part_id));
-  error= m_file[part_id]->write_row(buf);
+  tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
+  error= m_file[part_id]->ha_write_row(buf);
+  reenable_binlog(thd);
 exit:
   if (autoincrement_lock)
     pthread_mutex_unlock(&table_share->mutex);
@@ -2806,6 +2813,7 @@ exit:
 
 int ha_partition::update_row(const uchar *old_data, uchar *new_data)
 {
+  THD *thd= ha_thd();
   uint32 new_part_id, old_part_id;
   int error= 0;
   longlong func_value;
@@ -2840,16 +2848,25 @@ int ha_partition::update_row(const uchar
   if (new_part_id == old_part_id)
   {
     DBUG_PRINT("info", ("Update in partition %d", new_part_id));
-    error= m_file[new_part_id]->update_row(old_data, new_data);
+    tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
+    error= m_file[new_part_id]->ha_update_row(old_data, new_data);
+    reenable_binlog(thd);
     goto exit;
   }
   else
   {
     DBUG_PRINT("info", ("Update from partition %d to partition %d",
 			old_part_id, new_part_id));
-    if ((error= m_file[new_part_id]->write_row(new_data)))
+    tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
+    error= m_file[new_part_id]->ha_write_row(new_data);
+    reenable_binlog(thd);
+    if (error)
       goto exit;
-    if ((error= m_file[old_part_id]->delete_row(old_data)))
+
+    tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
+    error= m_file[old_part_id]->ha_delete_row(old_data);
+    reenable_binlog(thd);
+    if (error)
     {
 #ifdef IN_THE_FUTURE
       (void) m_file[new_part_id]->delete_last_inserted_row(new_data);
@@ -3980,7 +3997,7 @@ int ha_partition::partition_scan_set_up(
 
 int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
 {
-  handler *file= file= m_file[m_part_spec.start_part];
+  handler *file= m_file[m_part_spec.start_part];
   int error;
   DBUG_ENTER("ha_partition::handle_unordered_next");
 
diff -Nrup a/sql/handler.h b/sql/handler.h
--- a/sql/handler.h	2007-12-19 00:36:18 +03:00
+++ b/sql/handler.h	2007-12-19 22:14:59 +03:00
@@ -1674,22 +1674,6 @@ public:
 					 uint table_changes)
  { return COMPATIBLE_DATA_NO; }
 
- /** These are only called from sql_select for internal temporary tables */
-  virtual int write_row(uchar *buf __attribute__((unused)))
-  {
-    return HA_ERR_WRONG_COMMAND;
-  }
-
-  virtual int update_row(const uchar *old_data __attribute__((unused)),
-                         uchar *new_data __attribute__((unused)))
-  {
-    return HA_ERR_WRONG_COMMAND;
-  }
-
-  virtual int delete_row(const uchar *buf __attribute__((unused)))
-  {
-    return HA_ERR_WRONG_COMMAND;
-  }
   /**
     use_hidden_primary_key() is called in case of an update/delete when
     (table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
@@ -1721,6 +1705,21 @@ private:
   */
   virtual int rnd_init(bool scan)= 0;
   virtual int rnd_end() { return 0; }
+  virtual int write_row(uchar *buf __attribute__((unused)))
+  {
+    return HA_ERR_WRONG_COMMAND;
+  }
+
+  virtual int update_row(const uchar *old_data __attribute__((unused)),
+                         uchar *new_data __attribute__((unused)))
+  {
+    return HA_ERR_WRONG_COMMAND;
+  }
+
+  virtual int delete_row(const uchar *buf __attribute__((unused)))
+  {
+    return HA_ERR_WRONG_COMMAND;
+  }
   /**
     Reset state of file to after 'open'.
     This function is called after every statement for all tables used
diff -Nrup a/sql/sql_select.cc b/sql/sql_select.cc
--- a/sql/sql_select.cc	2007-12-14 16:01:11 +03:00
+++ b/sql/sql_select.cc	2007-12-19 22:15:00 +03:00
@@ -10554,13 +10554,13 @@ bool create_myisam_from_heap(THD *thd, T
   */
   while (!table->file->rnd_next(new_table.record[1]))
   {
-    write_err= new_table.file->write_row(new_table.record[1]);
+    write_err= new_table.file->ha_write_row(new_table.record[1]);
     DBUG_EXECUTE_IF("raise_error", write_err= HA_ERR_FOUND_DUPP_KEY ;);
     if (write_err)
       goto err;
   }
   /* copy row that filled HEAP table */
-  if ((write_err=new_table.file->write_row(table->record[0])))
+  if ((write_err=new_table.file->ha_write_row(table->record[0])))
   {
     if (new_table.file->is_fatal_error(write_err, HA_CHECK_DUP) ||
 	!ignore_last_dupp_key_error)
@@ -12023,7 +12023,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab
     {
       int error;
       join->found_records++;
-      if ((error=table->file->write_row(table->record[0])))
+      if ((error=table->file->ha_write_row(table->record[0])))
       {
         if (!table->file->is_fatal_error(error, HA_CHECK_DUP))
 	  goto end;
@@ -12085,8 +12085,8 @@ end_update(JOIN *join, JOIN_TAB *join_ta
   {						/* Update old record */
     restore_record(table,record[1]);
     update_tmptable_sum_func(join->sum_funcs,table);
-    if ((error=table->file->update_row(table->record[1],
-                                       table->record[0])))
+    if ((error=table->file->ha_update_row(table->record[1],
+                                          table->record[0])))
     {
       table->file->print_error(error,MYF(0));	/* purecov: inspected */
       DBUG_RETURN(NESTED_LOOP_ERROR);            /* purecov: inspected */
@@ -12109,7 +12109,7 @@ end_update(JOIN *join, JOIN_TAB *join_ta
   }
   init_tmptable_sum_functions(join->sum_funcs);
   copy_funcs(join->tmp_table_param.items_to_copy);
-  if ((error=table->file->write_row(table->record[0])))
+  if ((error=table->file->ha_write_row(table->record[0])))
   {
     if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
 				error, 0))
@@ -12145,7 +12145,7 @@ end_unique_update(JOIN *join, JOIN_TAB *
   copy_fields(&join->tmp_table_param);		// Groups are copied twice.
   copy_funcs(join->tmp_table_param.items_to_copy);
 
-  if (!(error=table->file->write_row(table->record[0])))
+  if (!(error=table->file->ha_write_row(table->record[0])))
     join->send_records++;			// New group
   else
   {
@@ -12161,8 +12161,8 @@ end_unique_update(JOIN *join, JOIN_TAB *
     }
     restore_record(table,record[1]);
     update_tmptable_sum_func(join->sum_funcs,table);
-    if ((error=table->file->update_row(table->record[1],
-                                       table->record[0])))
+    if ((error=table->file->ha_update_row(table->record[1],
+                                          table->record[0])))
     {
       table->file->print_error(error,MYF(0));	/* purecov: inspected */
       DBUG_RETURN(NESTED_LOOP_ERROR);            /* purecov: inspected */
@@ -12205,7 +12205,7 @@ end_write_group(JOIN *join, JOIN_TAB *jo
                        join->sum_funcs_end[send_group_parts]);
 	if (!join->having || join->having->val_int())
 	{
-          int error= table->file->write_row(table->record[0]);
+          int error= table->file->ha_write_row(table->record[0]);
           if (error && create_myisam_from_heap(join->thd, table,
                                                &join->tmp_table_param,
                                                error, 0))
@@ -13433,7 +13433,7 @@ static int remove_dup_with_compare(THD *
     }
     if (having && !having->val_int())
     {
-      if ((error=file->delete_row(record)))
+      if ((error=file->ha_delete_row(record)))
 	goto err;
       error=file->rnd_next(record);
       continue;
@@ -13460,7 +13460,7 @@ static int remove_dup_with_compare(THD *
       }
       if (compare_record(table, first_field) == 0)
       {
-	if ((error=file->delete_row(record)))
+	if ((error=file->ha_delete_row(record)))
 	  goto err;
       }
       else if (!found)
@@ -13557,7 +13557,7 @@ static int remove_dup_with_hash_index(TH
     }
     if (having && !having->val_int())
     {
-      if ((error=file->delete_row(record)))
+      if ((error=file->ha_delete_row(record)))
 	goto err;
       continue;
     }
@@ -13574,7 +13574,7 @@ static int remove_dup_with_hash_index(TH
     if (hash_search(&hash, org_key_pos, key_length))
     {
       /* Duplicated found ; Remove the row */
-      if ((error=file->delete_row(record)))
+      if ((error=file->ha_delete_row(record)))
 	goto err;
     }
     else
@@ -15582,7 +15582,7 @@ int JOIN::rollup_write_data(uint idx, TA
           item->save_in_result_field(1);
       }
       copy_sum_funcs(sum_funcs_end[i+1], sum_funcs_end[i]);
-      if ((write_error= table_arg->file->write_row(table_arg->record[0])))
+      if ((write_error= table_arg->file->ha_write_row(table_arg->record[0])))
       {
 	if (create_myisam_from_heap(thd, table_arg, &tmp_table_param,
                                     write_error, 0))
diff -Nrup a/sql/sql_table.cc b/sql/sql_table.cc
--- a/sql/sql_table.cc	2007-12-14 16:01:11 +03:00
+++ b/sql/sql_table.cc	2007-12-19 22:15:00 +03:00
@@ -7059,7 +7059,7 @@ copy_data_between_tables(TABLE *from,TAB
       copy_ptr->do_copy(copy_ptr);
     }
     prev_insert_id= to->file->next_insert_id;
-    error=to->file->write_row(to->record[0]);
+    error=to->file->ha_write_row(to->record[0]);
     to->auto_increment_field_not_null= FALSE;
     if (error)
     {
Thread
bk commit into 5.1 tree (kostja:1.2650) BUG#12713 — konstantin, 19 Dec