List: Commits
From: Alfranio Correia
Date: June 15 2011 3:00pm
Subject: bzr push into mysql-next-mr-wl5569 branch (alfranio.correia:3289 to 3290)
 3290 Alfranio Correia	2011-06-15
      Fixed replication valgrind failures caused by the MTS (a simplified
      sketch of the leak pattern being closed follows this entry's file lists).

    added:
      mysql-test/suite/rpl/r/rpl_parallel_innodb.result
    modified:
      sql/rpl_rli.cc
      sql/rpl_slave.cc
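
     The actual fix is in the sql/rpl_rli.cc and sql/rpl_slave.cc hunks further
     down; the following is only a minimal, stand-alone sketch of the leak
     pattern being closed (stand-in names, not the server code): arrays
     allocated each time the worker threads start were never freed when they
     stopped, and the SQL thread's NET buffer was not ended before teardown.

     #include <cstddef>

     struct Coordinator
     {
       unsigned long *worker_keys;   // stands in for the per-worker PSI arrays

       Coordinator() : worker_keys(NULL) {}

       void init_workers(unsigned long n_workers)
       {
         // Allocated every time the slave threads start ...
         worker_keys= new unsigned long[n_workers];
       }

       void deinit_workers()
       {
         // ... so it must be released when they stop; otherwise each
         // START SLAVE / STOP SLAVE cycle leaks the arrays and valgrind
         // reports them.
         delete [] worker_keys;
         worker_keys= NULL;
       }
     };

     int main()
     {
       Coordinator c;
       c.init_workers(4);
       c.deinit_workers();
       return 0;
     }
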
 3289 Andrei Elkin	2011-06-14
      wl#5569 MTS
      wl#5754 Query event parallel execution
      
      Fixing failing tests and a failure in gathering accessed databases that was caused
      by a recent merge from trunk.
     @ mysql-test/suite/rpl/r/rpl_parallel_multi_db.result
        results updated.
     @ mysql-test/suite/rpl/r/rpl_parallel_seconds_behind_master.result
        results updated.
     @ mysql-test/suite/rpl/r/rpl_parallel_start_stop.result
        results updated.
     @ mysql-test/suite/rpl/t/rpl_parallel_multi_db.test
        moving mtr.add_supp to eliminate the possibility of a warning in the slave's error log;
        adding graceful termination lines to the test.
     @ mysql-test/suite/rpl/t/rpl_parallel_seconds_behind_master.test
        moving mtr.add_supp to eliminate the possibility of a warning in the slave's error log.
     @ mysql-test/suite/rpl/t/rpl_parallel_start_stop.test
        Suppressions are added for errors that are expected by the test logic;
        adding graceful termination lines to the test.
     @ sql/log_event.cc
        fixing the last argument to report(), which should be a C string;
        fixing the gathering of accessed databases on the master side. Because a query can be
        preceded in the binlog by an engineered BEGIN (the current logging pattern from trunk),
        the reset can no longer be done in Query::write().
        However, another reset point exists at the end of the top-level query, and that suffices
        (a small illustrative sketch follows the file list below).
     @ sql/rpl_rli.h
        is_mts_in_group(), which mimics STS's is_in_group(), is added, though the semantics are different.
     @ sql/rpl_slave.cc
        further cleanup in sql_slave_killed() as requested by reviewers.

    modified:
      mysql-test/suite/rpl/r/rpl_parallel_multi_db.result
      mysql-test/suite/rpl/r/rpl_parallel_seconds_behind_master.result
      mysql-test/suite/rpl/r/rpl_parallel_start_stop.result
      mysql-test/suite/rpl/t/rpl_parallel_multi_db.test
      mysql-test/suite/rpl/t/rpl_parallel_seconds_behind_master.test
      mysql-test/suite/rpl/t/rpl_parallel_start_stop.test
      sql/log_event.cc
      sql/rpl_rli.h
      sql/rpl_slave.cc
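
     Regarding the sql/log_event.cc note above, here is a loose, self-contained
     sketch of the reset-point change it describes (all names are illustrative
     only, not the actual server code): the set of databases touched by a
     statement is accumulated while it executes and must survive until every
     event for the statement has been written (including the engineered BEGIN
     Query event), so it is now cleared once at the end of the top-level
     statement instead of inside the event's write routine.

     #include <iostream>
     #include <set>
     #include <string>

     struct StatementContext
     {
       std::set<std::string> accessed_dbs;

       void note_db(const std::string &db) { accessed_dbs.insert(db); }

       // Called for every Query-like event written to the binlog, including
       // the engineered BEGIN; it must no longer clear accessed_dbs.
       void write_query_event(const std::string &query)
       {
         std::cout << query << " /* dbs:";
         for (std::set<std::string>::const_iterator it= accessed_dbs.begin();
              it != accessed_dbs.end(); ++it)
           std::cout << " " << *it;
         std::cout << " */\n";
       }

       // The single remaining reset point, at the end of the top-level
       // statement.
       void end_of_statement() { accessed_dbs.clear(); }
     };

     int main()
     {
       StatementContext ctx;
       ctx.note_db("db1");
       ctx.write_query_event("BEGIN");              // engineered BEGIN first
       ctx.write_query_event("INSERT INTO t1 ..."); // still sees db1
       ctx.end_of_statement();                      // reset once, here
       return 0;
     }
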
=== added file 'mysql-test/suite/rpl/r/rpl_parallel_innodb.result'
--- a/mysql-test/suite/rpl/r/rpl_parallel_innodb.result	1970-01-01 00:00:00 +0000
+++ b/mysql-test/suite/rpl/r/rpl_parallel_innodb.result	2011-06-15 14:59:23 +0000
@@ -0,0 +1,53 @@
+include/master-slave.inc
+[connection master]
+call mtr.add_suppression('Slave: Error dropping database');
+include/stop_slave.inc
+start slave;
+stop slave sql_thread;
+use test0;
+insert into benchmark set state='slave ends load';
+use test;
+select * from test0.benchmark into outfile 'benchmark.out';
+select ts from test0.benchmark where state like 'master started load' into @m_0;
+select ts from test0.benchmark where state like 'master ends load' into @m_1;
+select ts from test0.benchmark where state like 'slave takes on load' into @s_m0;
+select ts from test0.benchmark where state like 'slave is supposed to finish with load' into @s_m1;
+select ts from test0.benchmark where state like 'slave ends load' into @s_1;
+select ts from test0.benchmark where state like 'slave is processing load' into @s_0;
+select time_to_sec(@m_1) - time_to_sec(@m_0) as 'delta_m', 
+time_to_sec(@s_1) - time_to_sec(@s_0) as 'delta_s',
+time_to_sec(@s_m1) - time_to_sec(@s_m0) as 'delta_sm'  into outfile 'delta.out';
+include/diff_tables.inc [master:test15.v_ti_nk, slave:test15.v_ti_nk]
+include/diff_tables.inc [master:test15.v_ti_wk, slave:test15.v_ti_wk]
+include/diff_tables.inc [master:test14.v_ti_nk, slave:test14.v_ti_nk]
+include/diff_tables.inc [master:test14.v_ti_wk, slave:test14.v_ti_wk]
+include/diff_tables.inc [master:test13.v_ti_nk, slave:test13.v_ti_nk]
+include/diff_tables.inc [master:test13.v_ti_wk, slave:test13.v_ti_wk]
+include/diff_tables.inc [master:test12.v_ti_nk, slave:test12.v_ti_nk]
+include/diff_tables.inc [master:test12.v_ti_wk, slave:test12.v_ti_wk]
+include/diff_tables.inc [master:test11.v_ti_nk, slave:test11.v_ti_nk]
+include/diff_tables.inc [master:test11.v_ti_wk, slave:test11.v_ti_wk]
+include/diff_tables.inc [master:test10.v_ti_nk, slave:test10.v_ti_nk]
+include/diff_tables.inc [master:test10.v_ti_wk, slave:test10.v_ti_wk]
+include/diff_tables.inc [master:test9.v_ti_nk, slave:test9.v_ti_nk]
+include/diff_tables.inc [master:test9.v_ti_wk, slave:test9.v_ti_wk]
+include/diff_tables.inc [master:test8.v_ti_nk, slave:test8.v_ti_nk]
+include/diff_tables.inc [master:test8.v_ti_wk, slave:test8.v_ti_wk]
+include/diff_tables.inc [master:test7.v_ti_nk, slave:test7.v_ti_nk]
+include/diff_tables.inc [master:test7.v_ti_wk, slave:test7.v_ti_wk]
+include/diff_tables.inc [master:test6.v_ti_nk, slave:test6.v_ti_nk]
+include/diff_tables.inc [master:test6.v_ti_wk, slave:test6.v_ti_wk]
+include/diff_tables.inc [master:test5.v_ti_nk, slave:test5.v_ti_nk]
+include/diff_tables.inc [master:test5.v_ti_wk, slave:test5.v_ti_wk]
+include/diff_tables.inc [master:test4.v_ti_nk, slave:test4.v_ti_nk]
+include/diff_tables.inc [master:test4.v_ti_wk, slave:test4.v_ti_wk]
+include/diff_tables.inc [master:test3.v_ti_nk, slave:test3.v_ti_nk]
+include/diff_tables.inc [master:test3.v_ti_wk, slave:test3.v_ti_wk]
+include/diff_tables.inc [master:test2.v_ti_nk, slave:test2.v_ti_nk]
+include/diff_tables.inc [master:test2.v_ti_wk, slave:test2.v_ti_wk]
+include/diff_tables.inc [master:test1.v_ti_nk, slave:test1.v_ti_nk]
+include/diff_tables.inc [master:test1.v_ti_wk, slave:test1.v_ti_wk]
+include/diff_tables.inc [master:test0.v_ti_nk, slave:test0.v_ti_nk]
+include/diff_tables.inc [master:test0.v_ti_wk, slave:test0.v_ti_wk]
+set @@global.mts_exp_slave_local_timestamp= @save.mts_exp_slave_local_timestamp;
+include/rpl_end.inc

=== modified file 'sql/rpl_rli.cc'
--- a/sql/rpl_rli.cc	2011-06-13 00:07:26 +0000
+++ b/sql/rpl_rli.cc	2011-06-15 14:59:23 +0000
@@ -105,7 +105,7 @@ Relay_log_info::Relay_log_info(bool is_s
 /**
    The method to invoke at slave threads start
 */
-void Relay_log_info::init_workers(ulong n)
+void Relay_log_info::init_workers(ulong n_workers)
 {
   /*
     Parallel slave parameters initialization is done regardless
@@ -114,15 +114,16 @@ void Relay_log_info::init_workers(ulong
   trans_jobs= stmt_jobs= pending_jobs= wait_jobs= 0;
   mts_wqs_underrun_cnt= mts_wqs_overfill_cnt= 0;
 
-  my_init_dynamic_array(&workers, sizeof(Slave_worker *), slave_parallel_workers, 4);
+  my_init_dynamic_array(&workers, sizeof(Slave_worker *), n_workers, 4);
   my_atomic_rwlock_init(&slave_open_temp_tables_lock);
 
 #ifdef HAVE_PSI_INTERFACE
-  key_mutex_slave_parallel_worker= new PSI_mutex_key[slave_parallel_workers];
-  key_cond_slave_parallel_worker= new PSI_cond_key[slave_parallel_workers];
-  worker_mutexes= new PSI_mutex_info[slave_parallel_workers];
-  worker_conds= new PSI_cond_info[slave_parallel_workers];
-  for (uint wi= 0; wi < slave_parallel_workers; wi++)
+  key_mutex_slave_parallel_worker= new PSI_mutex_key[n_workers];
+  key_cond_slave_parallel_worker= new PSI_cond_key[n_workers];
+  worker_mutexes= new PSI_mutex_info[n_workers];
+  worker_conds= new PSI_cond_info[n_workers];
+
+  for (uint wi= 0; wi < n_workers; wi++)
   {
      worker_mutexes[wi].m_key= (PSI_mutex_key *) &(key_mutex_slave_parallel_worker[wi]);
      worker_mutexes[wi].m_name= "Slave_worker::jobs_lock";
@@ -134,9 +135,9 @@ void Relay_log_info::init_workers(ulong
   if (PSI_server)
   {
     PSI_server->register_mutex("worker", worker_mutexes,
-                               slave_parallel_workers);
+                               n_workers);
     PSI_server->register_cond("worker", worker_conds,
-                               slave_parallel_workers);
+                               n_workers);
   }
   mysql_mutex_init(key_mutex_slave_parallel_pend_jobs, &pending_jobs_lock,
                    MY_MUTEX_INIT_FAST);
@@ -155,6 +156,16 @@ void Relay_log_info::init_workers(ulong
 */
 void Relay_log_info::deinit_workers()
 {
+  delete [] key_mutex_slave_parallel_worker;
+  delete [] key_cond_slave_parallel_worker;
+  delete [] worker_mutexes;
+  delete [] worker_conds;
+
+  key_mutex_slave_parallel_worker= NULL;
+  key_cond_slave_parallel_worker= NULL;
+  worker_mutexes= NULL;
+  worker_conds= NULL;
+
   mysql_mutex_destroy(&pending_jobs_lock);
   mysql_cond_destroy(&pending_jobs_cond);
   mysql_mutex_destroy(&mts_temp_tables_lock);

=== modified file 'sql/rpl_slave.cc'
--- a/sql/rpl_slave.cc	2011-06-14 18:23:13 +0000
+++ b/sql/rpl_slave.cc	2011-06-15 14:59:23 +0000
@@ -2042,6 +2042,7 @@ bool show_master_info(THD* thd, Master_i
   // TODO: fix this for multi-master
   List<Item> field_list;
   Protocol *protocol= thd->protocol;
+  char *slave_sql_running_state= NULL;
   DBUG_ENTER("show_master_info");
 
   field_list.push_back(new Item_empty_string("Slave_IO_State",
@@ -2141,7 +2142,7 @@ bool show_master_info(THD* thd, Master_i
     mysql_mutex_unlock(&mi->run_lock);
 
     mysql_mutex_lock(&mi->rli->run_lock);
-    const char *slave_sql_running_state= mi->rli->info_thd ? mi->rli->info_thd->proc_info : "";
+    slave_sql_running_state= const_cast<char *>(mi->rli->info_thd ? mi->rli->info_thd->proc_info : "");
     mysql_mutex_unlock(&mi->rli->run_lock);
 
     mysql_mutex_lock(&mi->data_lock);
@@ -3757,6 +3758,16 @@ err:
 
   if (thd)
   {
+    /*
+       The slave code is very bad. Notice that it is missing
+       several clean up calls here. I've just added what was
+       necessary to avoid valgrind errors.
+ 
+       /Alfranio
+    */
+    DBUG_ASSERT(thd->net.buff != 0);
+    net_end(&thd->net);
+
     mysql_mutex_lock(&LOCK_thread_count);
     THD_CHECK_SENTRY(thd);
     /*
@@ -3822,9 +3833,21 @@ bool mts_recovery_groups(Relay_log_info
     retrieve_job(worker, job_file);
     LOG_POS_COORD w_last= {worker->group_master_log_name, worker->group_master_log_pos};
     if (mts_event_coord_cmp(&w_last, &cp) > 0)
+    {
+      /*
+        Inserts information into a dynamic array for further processing.
+      */
       insert_dynamic(&above_lwm_jobs, (uchar*) &job_file);
+    }
     else
+    {
+      /*
+        Deletes the worker because its jobs are included in the latest
+        checkpoint.
+      */
+      worker->end_info();
       delete worker;
+    }
   };
 
   sort_dynamic(&above_lwm_jobs, (qsort_cmp) mts_event_coord_cmp);

No bundle (reason: useless for push emails).