List: Commits
From: Marc Alff
Date: August 10 2011 12:43am
Subject: bzr push into mysql-trunk branch (marc.alff:3350 to 3351) Bug#12346211
 3351 Marc Alff	2011-08-09
      Bug#12346211 - PERF IMPACT OF PERFORMANCE-SCHEMA WITH DISABLED INSTRUMENTS/CONSUMERS
      
      Performance improvement #3: Better allocation.
      
      Before this fix, the allocation algorithm used by the performance schema,
      for example when creating a mutex, was:
      - compute a random number I between 0 and N
      - scan for an empty slot at positions I modulo N,
        I+1 modulo N, I+2 modulo N, ...
      with each competing thread doing this independently (see the sketch below).
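
      As a rough, self-contained C++ sketch of this pre-fix scheme (the names
      Slot, try_claim and allocate_old are illustrative only; the real code in
      pfs_instr.cc uses PFS_scan and m_lock.free_to_dirty()):

      #include <atomic>
      #include <cstddef>

      struct Slot { std::atomic<bool> busy{false}; };

      /* Stand-in for m_lock.free_to_dirty(): atomically claim one slot. */
      static bool try_claim(Slot &s)
      {
        bool expected= false;
        return s.busy.compare_exchange_strong(expected, true);
      }

      /* Pre-fix: start at a per-call random index, then scan linearly. */
      static Slot* allocate_old(Slot *slots, std::size_t n, std::size_t random_start)
      {
        for (std::size_t k= 0; k < n; k++)
        {
          std::size_t i= (random_start + k) % n;  /* probe I, I+1, I+2 ... modulo N */
          if (try_claim(slots[i]))                /* threads that drew nearby values of I */
            return &slots[i];                     /* contend on the same slots here */
        }
        return NULL;                              /* all slots occupied */
      }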
      
      When only a few threads are running and the slots are mostly empty,
      this works well, since few collisions happen.
      
      As the number of competing threads grows and the slots become mostly
      occupied, this code can create significant contention between threads
      performing atomic operations on each slot's internal lock.
      
      With this fix, the allocation is changed to:
      - keep a static monotonic counter, I, for each buffer
      - atomically increment I
      - look for an empty slot at position I modulo N
      - repeat if necessary.
      
      Because the monotonic counter is shared between all threads, competing
      threads end up probing different slots at I0, I0+1, I0+2, etc., which
      avoids contention on each slot's internal lock.
      Also, because records are allocated in a circular fashion, starting the
      search after the last slot found increases the probability of finding an
      available slot and reduces the number of iterations during the scan, as
      the sketch below illustrates.
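
      A matching sketch of the post-fix scheme, reusing the Slot and try_claim
      helpers above (again only an illustration; the actual patch keeps a
      static uint per buffer and bumps it with PFS_atomic::add_u32, as the
      diff below shows):

      /* Post-fix: a shared monotonic counter spreads callers over the array. */
      static Slot* allocate_new(Slot *slots, std::size_t n)
      {
        static std::atomic<std::size_t> monotonic{0};
        for (std::size_t attempts= 0; attempts < n; attempts++)
        {
          std::size_t i= monotonic.fetch_add(1) % n;  /* each caller draws a fresh index */
          if (try_claim(slots[i]))                    /* so concurrent callers probe */
            return &slots[i];                         /* different slots */
        }
        return NULL;                                  /* give up after n attempts */
      }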
      
      While difficult to measure, this fix is expected to improve the scalability
      of the performance schema allocation code, which is used when:
      - creating a mutex,
      - creating a rwlock,
      - creating a cond,
      - opening a file,
      - opening a table,
      all of which are operations that can occur frequently while the server
      is running.

    modified:
      storage/perfschema/pfs_instr.cc
      storage/perfschema/pfs_instr_class.cc
      storage/perfschema/pfs_setup_actor.cc
      storage/perfschema/pfs_setup_object.cc
 3350 Marko Mäkelä	2011-08-08 [merge]
      Merge mysql-5.5 to mysql-trunk.

    modified:
      mysql-test/suite/innodb/r/innodb-zip.result
      mysql-test/suite/innodb/t/innodb-zip.test
      storage/innobase/handler/ha_innodb.cc
      support-files/mysql.spec.sh
=== modified file 'storage/perfschema/pfs_instr.cc'
--- a/storage/perfschema/pfs_instr.cc	2011-07-14 08:11:07 +0000
+++ b/storage/perfschema/pfs_instr.cc	2011-08-09 16:39:16 +0000
@@ -564,34 +564,52 @@ void PFS_scan::init(uint random, uint ma
 */
 PFS_mutex* create_mutex(PFS_mutex_class *klass, const void *identity)
 {
-  PFS_scan scan;
-  uint random= randomized_index(identity, mutex_max);
+  static uint mutex_monotonic_index= 0;
+  uint index;
+  uint attempts= 0;
+  PFS_mutex *pfs;
+
+  while (++attempts <= mutex_max)
+  {
+    /*
+      Problem:
+      Multiple threads running concurrently may need to create a new
+      instrumented mutex, and find an empty slot in mutex_array[].
+      With N1 threads running on a N2 core hardware:
+      - up to N2 hardware threads can run concurrently,
+      causing contention if looking at the same array[i] slot.
+      - up to N1 threads can run almost concurrently (with thread scheduling),
+      scanning maybe overlapping regions in the [0-mutex_max] array.
+
+      Solution:
+      Instead of letting different threads compete on the same array[i] entry,
+      this code forces all threads to cooperate with the monotonic_index.
+      Only one thread will be allowed to test a given array[i] slot.
+      All threads do scan from the same region, starting at monotonic_index.
+      Serializing on monotonic_index ensures that when a slot is found occupied
+      in a given loop by a given thread, other threads will not attempt this
+      slot.
+    */
+    PFS_atomic::add_u32(& mutex_monotonic_index, 1);
+    index= mutex_monotonic_index % mutex_max;
+    pfs= mutex_array + index;
 
-  for (scan.init(random, mutex_max);
-       scan.has_pass();
-       scan.next_pass())
-  {
-    PFS_mutex *pfs= mutex_array + scan.first();
-    PFS_mutex *pfs_last= mutex_array + scan.last();
-    for ( ; pfs < pfs_last; pfs++)
+    if (pfs->m_lock.is_free())
     {
-      if (pfs->m_lock.is_free())
+      if (pfs->m_lock.free_to_dirty())
       {
-        if (pfs->m_lock.free_to_dirty())
-        {
-          pfs->m_identity= identity;
-          pfs->m_class= klass;
-          pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
-          pfs->m_timed= klass->m_timed;
-          pfs->m_wait_stat.reset();
-          pfs->m_lock_stat.reset();
-          pfs->m_owner= NULL;
-          pfs->m_last_locked= 0;
-          pfs->m_lock.dirty_to_allocated();
-          if (klass->is_singleton())
-            klass->m_singleton= pfs;
-          return pfs;
-        }
+        pfs->m_identity= identity;
+        pfs->m_class= klass;
+        pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
+        pfs->m_timed= klass->m_timed;
+        pfs->m_wait_stat.reset();
+        pfs->m_lock_stat.reset();
+        pfs->m_owner= NULL;
+        pfs->m_last_locked= 0;
+        pfs->m_lock.dirty_to_allocated();
+        if (klass->is_singleton())
+          klass->m_singleton= pfs;
+        return pfs;
       }
     }
   }
@@ -625,37 +643,37 @@ void destroy_mutex(PFS_mutex *pfs)
 */
 PFS_rwlock* create_rwlock(PFS_rwlock_class *klass, const void *identity)
 {
-  PFS_scan scan;
-  uint random= randomized_index(identity, rwlock_max);
+  static uint rwlock_monotonic_index= 0;
+  uint index;
+  uint attempts= 0;
+  PFS_rwlock *pfs;
 
-  for (scan.init(random, rwlock_max);
-       scan.has_pass();
-       scan.next_pass())
-  {
-    PFS_rwlock *pfs= rwlock_array + scan.first();
-    PFS_rwlock *pfs_last= rwlock_array + scan.last();
-    for ( ; pfs < pfs_last; pfs++)
+  while (++attempts <= rwlock_max)
+  {
+    /* See create_mutex() */
+    PFS_atomic::add_u32(& rwlock_monotonic_index, 1);
+    index= rwlock_monotonic_index % rwlock_max;
+    pfs= rwlock_array + index;
+
+    if (pfs->m_lock.is_free())
     {
-      if (pfs->m_lock.is_free())
+      if (pfs->m_lock.free_to_dirty())
       {
-        if (pfs->m_lock.free_to_dirty())
-        {
-          pfs->m_identity= identity;
-          pfs->m_class= klass;
-          pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
-          pfs->m_timed= klass->m_timed;
-          pfs->m_wait_stat.reset();
-          pfs->m_lock.dirty_to_allocated();
-          pfs->m_read_lock_stat.reset();
-          pfs->m_write_lock_stat.reset();
-          pfs->m_writer= NULL;
-          pfs->m_readers= 0;
-          pfs->m_last_written= 0;
-          pfs->m_last_read= 0;
-          if (klass->is_singleton())
-            klass->m_singleton= pfs;
-          return pfs;
-        }
+        pfs->m_identity= identity;
+        pfs->m_class= klass;
+        pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
+        pfs->m_timed= klass->m_timed;
+        pfs->m_wait_stat.reset();
+        pfs->m_lock.dirty_to_allocated();
+        pfs->m_read_lock_stat.reset();
+        pfs->m_write_lock_stat.reset();
+        pfs->m_writer= NULL;
+        pfs->m_readers= 0;
+        pfs->m_last_written= 0;
+        pfs->m_last_read= 0;
+        if (klass->is_singleton())
+          klass->m_singleton= pfs;
+        return pfs;
       }
     }
   }
@@ -689,33 +707,33 @@ void destroy_rwlock(PFS_rwlock *pfs)
 */
 PFS_cond* create_cond(PFS_cond_class *klass, const void *identity)
 {
-  PFS_scan scan;
-  uint random= randomized_index(identity, cond_max);
+  static uint cond_monotonic_index= 0;
+  uint index;
+  uint attempts= 0;
+  PFS_cond *pfs;
+
+  while (++attempts <= cond_max)
+  {
+    /* See create_mutex() */
+    PFS_atomic::add_u32(& cond_monotonic_index, 1);
+    index= cond_monotonic_index % cond_max;
+    pfs= cond_array + index;
 
-  for (scan.init(random, cond_max);
-       scan.has_pass();
-       scan.next_pass())
-  {
-    PFS_cond *pfs= cond_array + scan.first();
-    PFS_cond *pfs_last= cond_array + scan.last();
-    for ( ; pfs < pfs_last; pfs++)
+    if (pfs->m_lock.is_free())
     {
-      if (pfs->m_lock.is_free())
+      if (pfs->m_lock.free_to_dirty())
       {
-        if (pfs->m_lock.free_to_dirty())
-        {
-          pfs->m_identity= identity;
-          pfs->m_class= klass;
-          pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
-          pfs->m_timed= klass->m_timed;
-          pfs->m_cond_stat.m_signal_count= 0;
-          pfs->m_cond_stat.m_broadcast_count= 0;
-          pfs->m_wait_stat.reset();
-          pfs->m_lock.dirty_to_allocated();
-          if (klass->is_singleton())
-            klass->m_singleton= pfs;
-          return pfs;
-        }
+        pfs->m_identity= identity;
+        pfs->m_class= klass;
+        pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
+        pfs->m_timed= klass->m_timed;
+        pfs->m_cond_stat.m_signal_count= 0;
+        pfs->m_cond_stat.m_broadcast_count= 0;
+        pfs->m_wait_stat.reset();
+        pfs->m_lock.dirty_to_allocated();
+        if (klass->is_singleton())
+          klass->m_singleton= pfs;
+        return pfs;
       }
     }
   }
@@ -759,116 +777,115 @@ PFS_thread* PFS_thread::get_current_thre
 PFS_thread* create_thread(PFS_thread_class *klass, const void *identity,
                           ulong thread_id)
 {
-  PFS_scan scan;
+  static uint thread_monotonic_index= 0;
   uint index;
-  uint random= randomized_index(identity, thread_max);
+  uint attempts= 0;
+  PFS_thread *pfs;
+
+  while (++attempts <= thread_max)
+  {
+    /* See create_mutex() */
+    PFS_atomic::add_u32(& thread_monotonic_index, 1);
+    index= thread_monotonic_index % thread_max;
+    pfs= thread_array + index;
 
-  for (scan.init(random, thread_max);
-       scan.has_pass();
-       scan.next_pass())
-  {
-    PFS_thread *pfs= thread_array + scan.first();
-    PFS_thread *pfs_last= thread_array + scan.last();
-    for ( ; pfs < pfs_last; pfs++)
+    if (pfs->m_lock.is_free())
     {
-      if (pfs->m_lock.is_free())
+      if (pfs->m_lock.free_to_dirty())
       {
-        if (pfs->m_lock.free_to_dirty())
-        {
-          pfs->m_thread_internal_id=
-            PFS_atomic::add_u32(&thread_internal_id_counter, 1);
-          pfs->m_parent_thread_internal_id= 0;
-          pfs->m_thread_id= thread_id;
-          pfs->m_event_id= 1;
-          pfs->m_enabled= true;
-          pfs->m_class= klass;
-          pfs->m_events_waits_count= WAIT_STACK_BOTTOM;
-          pfs->m_waits_history_full= false;
-          pfs->m_waits_history_index= 0;
-          pfs->m_stages_history_full= false;
-          pfs->m_stages_history_index= 0;
-          pfs->m_statements_history_full= false;
-          pfs->m_statements_history_index= 0;
-
-          pfs->reset_stats();
-
-          pfs->m_filename_hash_pins= NULL;
-          pfs->m_table_share_hash_pins= NULL;
-          pfs->m_setup_actor_hash_pins= NULL;
-          pfs->m_setup_object_hash_pins= NULL;
-
-          pfs->m_username_length= 0;
-          pfs->m_hostname_length= 0;
-          pfs->m_dbname_length= 0;
-          pfs->m_command= 0;
-          pfs->m_start_time= 0;
-          pfs->m_processlist_state_length= 0;
-          pfs->m_processlist_info_length= 0;
+        pfs->m_thread_internal_id=
+          PFS_atomic::add_u32(&thread_internal_id_counter, 1);
+        pfs->m_parent_thread_internal_id= 0;
+        pfs->m_thread_id= thread_id;
+        pfs->m_event_id= 1;
+        pfs->m_enabled= true;
+        pfs->m_class= klass;
+        pfs->m_events_waits_count= WAIT_STACK_BOTTOM;
+        pfs->m_waits_history_full= false;
+        pfs->m_waits_history_index= 0;
+        pfs->m_stages_history_full= false;
+        pfs->m_stages_history_index= 0;
+        pfs->m_statements_history_full= false;
+        pfs->m_statements_history_index= 0;
+
+        pfs->reset_stats();
+
+        pfs->m_filename_hash_pins= NULL;
+        pfs->m_table_share_hash_pins= NULL;
+        pfs->m_setup_actor_hash_pins= NULL;
+        pfs->m_setup_object_hash_pins= NULL;
+
+        pfs->m_username_length= 0;
+        pfs->m_hostname_length= 0;
+        pfs->m_dbname_length= 0;
+        pfs->m_command= 0;
+        pfs->m_start_time= 0;
+        pfs->m_processlist_state_length= 0;
+        pfs->m_processlist_info_length= 0;
 
-          PFS_events_waits *child_wait;
-          for (index= 0; index < WAIT_STACK_SIZE; index++)
-          {
-            child_wait= & pfs->m_events_waits_stack[index];
-            child_wait->m_thread_internal_id= pfs->m_thread_internal_id;
-            child_wait->m_event_id= 0;
-            child_wait->m_event_type= EVENT_TYPE_STATEMENT;
-            child_wait->m_wait_class= NO_WAIT_CLASS;
-          }
-
-          PFS_events_stages *child_stage= & pfs->m_stage_current;
-          child_stage->m_thread_internal_id= pfs->m_thread_internal_id;
-          child_stage->m_event_id= 0;
-          child_stage->m_event_type= EVENT_TYPE_STATEMENT;
-          child_stage->m_class= NULL;
-          child_stage->m_timer_start= 0;
-          child_stage->m_timer_end= 0;
-          child_stage->m_source_file= NULL;
-          child_stage->m_source_line= 0;
+        PFS_events_waits *child_wait;
+        for (index= 0; index < WAIT_STACK_SIZE; index++)
+        {
+          child_wait= & pfs->m_events_waits_stack[index];
+          child_wait->m_thread_internal_id= pfs->m_thread_internal_id;
+          child_wait->m_event_id= 0;
+          child_wait->m_event_type= EVENT_TYPE_STATEMENT;
+          child_wait->m_wait_class= NO_WAIT_CLASS;
+        }
 
-          PFS_events_statements *child_statement;
-          for (index= 0; index < statement_stack_max; index++)
-          {
-            child_statement= & pfs->m_statement_stack[index];
-            child_statement->m_thread_internal_id= pfs->m_thread_internal_id;
-            child_statement->m_event_id= 0;
-            child_statement->m_event_type= EVENT_TYPE_STATEMENT;
-            child_statement->m_class= NULL;
-            child_statement->m_timer_start= 0;
-            child_statement->m_timer_end= 0;
-            child_statement->m_lock_time= 0;
-            child_statement->m_source_file= NULL;
-            child_statement->m_source_line= 0;
-            child_statement->m_current_schema_name_length= 0;
-            child_statement->m_sqltext_length= 0;
-
-            child_statement->m_message_text[0]= '\0';
-            child_statement->m_sql_errno= 0;
-            child_statement->m_sqlstate[0]= '\0';
-            child_statement->m_error_count= 0;
-            child_statement->m_warning_count= 0;
-            child_statement->m_rows_affected= 0;
-
-            child_statement->m_rows_sent= 0;
-            child_statement->m_rows_examined= 0;
-            child_statement->m_created_tmp_disk_tables= 0;
-            child_statement->m_created_tmp_tables= 0;
-            child_statement->m_select_full_join= 0;
-            child_statement->m_select_full_range_join= 0;
-            child_statement->m_select_range= 0;
-            child_statement->m_select_range_check= 0;
-            child_statement->m_select_scan= 0;
-            child_statement->m_sort_merge_passes= 0;
-            child_statement->m_sort_range= 0;
-            child_statement->m_sort_rows= 0;
-            child_statement->m_sort_scan= 0;
-            child_statement->m_no_index_used= 0;
-            child_statement->m_no_good_index_used= 0;
-          }
-          pfs->m_events_statements_count= 0;
+        PFS_events_stages *child_stage= & pfs->m_stage_current;
+        child_stage->m_thread_internal_id= pfs->m_thread_internal_id;
+        child_stage->m_event_id= 0;
+        child_stage->m_event_type= EVENT_TYPE_STATEMENT;
+        child_stage->m_class= NULL;
+        child_stage->m_timer_start= 0;
+        child_stage->m_timer_end= 0;
+        child_stage->m_source_file= NULL;
+        child_stage->m_source_line= 0;
 
-          pfs->m_lock.dirty_to_allocated();
-          return pfs;
+        PFS_events_statements *child_statement;
+        for (index= 0; index < statement_stack_max; index++)
+        {
+          child_statement= & pfs->m_statement_stack[index];
+          child_statement->m_thread_internal_id= pfs->m_thread_internal_id;
+          child_statement->m_event_id= 0;
+          child_statement->m_event_type= EVENT_TYPE_STATEMENT;
+          child_statement->m_class= NULL;
+          child_statement->m_timer_start= 0;
+          child_statement->m_timer_end= 0;
+          child_statement->m_lock_time= 0;
+          child_statement->m_source_file= NULL;
+          child_statement->m_source_line= 0;
+          child_statement->m_current_schema_name_length= 0;
+          child_statement->m_sqltext_length= 0;
+
+          child_statement->m_message_text[0]= '\0';
+          child_statement->m_sql_errno= 0;
+          child_statement->m_sqlstate[0]= '\0';
+          child_statement->m_error_count= 0;
+          child_statement->m_warning_count= 0;
+          child_statement->m_rows_affected= 0;
+
+          child_statement->m_rows_sent= 0;
+          child_statement->m_rows_examined= 0;
+          child_statement->m_created_tmp_disk_tables= 0;
+          child_statement->m_created_tmp_tables= 0;
+          child_statement->m_select_full_join= 0;
+          child_statement->m_select_full_range_join= 0;
+          child_statement->m_select_range= 0;
+          child_statement->m_select_range_check= 0;
+          child_statement->m_select_scan= 0;
+          child_statement->m_sort_merge_passes= 0;
+          child_statement->m_sort_range= 0;
+          child_statement->m_sort_rows= 0;
+          child_statement->m_sort_scan= 0;
+          child_statement->m_no_index_used= 0;
+          child_statement->m_no_good_index_used= 0;
         }
+        pfs->m_events_statements_count= 0;
+
+        pfs->m_lock.dirty_to_allocated();
+        return pfs;
       }
     }
   }
@@ -974,7 +991,6 @@ find_or_create_file(PFS_thread *thread,
                     const char *filename, uint len)
 {
   PFS_file *pfs;
-  PFS_scan scan;
 
   if (! filename_hash_inited)
   {
@@ -1076,6 +1092,10 @@ find_or_create_file(PFS_thread *thread,
   PFS_file **entry;
   uint retry_count= 0;
   const uint retry_max= 3;
+  static uint file_monotonic_index= 0;
+  uint index;
+  uint attempts= 0;
+
 search:
   entry= reinterpret_cast<PFS_file**>
     (lf_hash_search(&filename_hash, thread->m_filename_hash_pins,
@@ -1088,60 +1108,56 @@ search:
     return pfs;
   }
 
-  /* filename is not constant, just using it for noise on create */
-  uint random= randomized_index(filename, file_max);
 
-  for (scan.init(random, file_max);
-       scan.has_pass();
-       scan.next_pass())
-  {
-    pfs= file_array + scan.first();
-    PFS_file *pfs_last= file_array + scan.last();
-    for ( ; pfs < pfs_last; pfs++)
+  while (++attempts <= file_max)
+  {
+    /* See create_mutex() */
+    PFS_atomic::add_u32(& file_monotonic_index, 1);
+    index= file_monotonic_index % file_max;
+    pfs= file_array + index;
+
+    if (pfs->m_lock.is_free())
     {
-      if (pfs->m_lock.is_free())
+      if (pfs->m_lock.free_to_dirty())
       {
-        if (pfs->m_lock.free_to_dirty())
+        pfs->m_class= klass;
+        pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
+        pfs->m_timed= klass->m_timed;
+        strncpy(pfs->m_filename, normalized_filename, normalized_length);
+        pfs->m_filename[normalized_length]= '\0';
+        pfs->m_filename_length= normalized_length;
+        pfs->m_wait_stat.reset();
+        pfs->m_file_stat.m_open_count= 1;
+        pfs->m_file_stat.m_io_stat.reset();
+
+        int res;
+        res= lf_hash_insert(&filename_hash, thread->m_filename_hash_pins,
+                            &pfs);
+        if (likely(res == 0))
         {
-          pfs->m_class= klass;
-          pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
-          pfs->m_timed= klass->m_timed;
-          strncpy(pfs->m_filename, normalized_filename, normalized_length);
-          pfs->m_filename[normalized_length]= '\0';
-          pfs->m_filename_length= normalized_length;
-          pfs->m_wait_stat.reset();
-          pfs->m_file_stat.m_open_count= 1;
-          pfs->m_file_stat.m_io_stat.reset();
-
-          int res;
-          res= lf_hash_insert(&filename_hash, thread->m_filename_hash_pins,
-                              &pfs);
-          if (likely(res == 0))
-          {
-            pfs->m_lock.dirty_to_allocated();
-            if (klass->is_singleton())
-              klass->m_singleton= pfs;
-            return pfs;
-          }
+          pfs->m_lock.dirty_to_allocated();
+          if (klass->is_singleton())
+            klass->m_singleton= pfs;
+          return pfs;
+        }
 
-          pfs->m_lock.dirty_to_free();
+        pfs->m_lock.dirty_to_free();
 
-          if (res > 0)
+        if (res > 0)
+        {
+          /* Duplicate insert by another thread */
+          if (++retry_count > retry_max)
           {
-            /* Duplicate insert by another thread */
-            if (++retry_count > retry_max)
-            {
-              /* Avoid infinite loops */
-              file_lost++;
-              return NULL;
-            }
-            goto search;
+            /* Avoid infinite loops */
+            file_lost++;
+            return NULL;
           }
-
-          /* OOM in lf_hash_insert */
-          file_lost++;
-          return NULL;
+          goto search;
         }
+
+        /* OOM in lf_hash_insert */
+        file_lost++;
+        return NULL;
       }
     }
   }
@@ -1198,35 +1214,35 @@ void destroy_file(PFS_thread *thread, PF
 PFS_table* create_table(PFS_table_share *share, PFS_thread *opening_thread,
                         const void *identity)
 {
-  PFS_scan scan;
-  uint random= randomized_index(identity, table_max);
+  static uint table_monotonic_index= 0;
+  uint index;
+  uint attempts= 0;
+  PFS_table *pfs;
+
+  while (++attempts <= table_max)
+  {
+    /* See create_mutex() */
+    PFS_atomic::add_u32(& table_monotonic_index, 1);
+    index= table_monotonic_index % table_max;
+    pfs= table_array + index;
 
-  for (scan.init(random, table_max);
-       scan.has_pass();
-       scan.next_pass())
-  {
-    PFS_table *pfs= table_array + scan.first();
-    PFS_table *pfs_last= table_array + scan.last();
-    for ( ; pfs < pfs_last; pfs++)
+    if (pfs->m_lock.is_free())
     {
-      if (pfs->m_lock.is_free())
+      if (pfs->m_lock.free_to_dirty())
       {
-        if (pfs->m_lock.free_to_dirty())
-        {
-          pfs->m_identity= identity;
-          pfs->m_share= share;
-          pfs->m_io_enabled= share->m_enabled &&
-            flag_global_instrumentation && global_table_io_class.m_enabled;
-          pfs->m_io_timed= share->m_timed && global_table_io_class.m_timed;
-          pfs->m_lock_enabled= share->m_enabled &&
-            flag_global_instrumentation && global_table_lock_class.m_enabled;
-          pfs->m_lock_timed= share->m_timed && global_table_lock_class.m_timed;
-          share->inc_refcount();
-          pfs->m_table_stat.reset();
-          pfs->m_thread_owner= opening_thread;
-          pfs->m_lock.dirty_to_allocated();
-          return pfs;
-        }
+        pfs->m_identity= identity;
+        pfs->m_share= share;
+        pfs->m_io_enabled= share->m_enabled &&
+          flag_global_instrumentation && global_table_io_class.m_enabled;
+        pfs->m_io_timed= share->m_timed && global_table_io_class.m_timed;
+        pfs->m_lock_enabled= share->m_enabled &&
+          flag_global_instrumentation && global_table_lock_class.m_enabled;
+        pfs->m_lock_timed= share->m_timed && global_table_lock_class.m_timed;
+        share->inc_refcount();
+        pfs->m_table_stat.reset();
+        pfs->m_thread_owner= opening_thread;
+        pfs->m_lock.dirty_to_allocated();
+        return pfs;
       }
     }
   }

=== modified file 'storage/perfschema/pfs_instr_class.cc'
--- a/storage/perfschema/pfs_instr_class.cc	2011-08-03 07:02:05 +0000
+++ b/storage/perfschema/pfs_instr_class.cc	2011-08-09 16:39:16 +0000
@@ -1045,6 +1045,10 @@ PFS_table_share* find_or_create_table_sh
   const uint retry_max= 3;
   bool enabled= true;
   bool timed= true;
+  static uint table_share_monotonic_index= 0;
+  uint index;
+  uint attempts= 0;
+  PFS_table_share *pfs;
 
 search:
   entry= reinterpret_cast<PFS_table_share**>
@@ -1052,7 +1056,6 @@ search:
                     key.m_hash_key, key.m_key_length));
   if (entry && (entry != MY_ERRPTR))
   {
-    PFS_table_share *pfs;
     pfs= *entry;
     pfs->inc_refcount() ;
     if (compare_keys(pfs, share) != 0)
@@ -1080,58 +1083,53 @@ search:
     */
   }
 
-  PFS_scan scan;
-  uint random= randomized_index(table_name, table_share_max);
-
-  for (scan.init(random, table_share_max);
-       scan.has_pass();
-       scan.next_pass())
+  while (++attempts <= table_share_max)
   {
-    PFS_table_share *pfs= table_share_array + scan.first();
-    PFS_table_share *pfs_last= table_share_array + scan.last();
-    for ( ; pfs < pfs_last; pfs++)
+    /* See create_mutex() */
+    PFS_atomic::add_u32(& table_share_monotonic_index, 1);
+    index= table_share_monotonic_index % table_share_max;
+    pfs= table_share_array + index;
+
+    if (pfs->m_lock.is_free())
     {
-      if (pfs->m_lock.is_free())
+      if (pfs->m_lock.free_to_dirty())
       {
-        if (pfs->m_lock.free_to_dirty())
+        pfs->m_key= key;
+        pfs->m_schema_name= &pfs->m_key.m_hash_key[1];
+        pfs->m_schema_name_length= schema_name_length;
+        pfs->m_table_name= &pfs->m_key.m_hash_key[schema_name_length + 2];
+        pfs->m_table_name_length= table_name_length;
+        pfs->m_enabled= enabled;
+        pfs->m_timed= timed;
+        pfs->init_refcount();
+        pfs->m_table_stat.reset();
+        set_keys(pfs, share);
+
+        int res;
+        res= lf_hash_insert(&table_share_hash, pins, &pfs);
+        if (likely(res == 0))
         {
-          pfs->m_key= key;
-          pfs->m_schema_name= &pfs->m_key.m_hash_key[1];
-          pfs->m_schema_name_length= schema_name_length;
-          pfs->m_table_name= &pfs->m_key.m_hash_key[schema_name_length + 2];
-          pfs->m_table_name_length= table_name_length;
-          pfs->m_enabled= enabled;
-          pfs->m_timed= timed;
-          pfs->init_refcount();
-          pfs->m_table_stat.reset();
-          set_keys(pfs, share);
-
-          int res;
-          res= lf_hash_insert(&table_share_hash, pins, &pfs);
-          if (likely(res == 0))
-          {
-            pfs->m_lock.dirty_to_allocated();
-            return pfs;
-          }
+          pfs->m_lock.dirty_to_allocated();
+          return pfs;
+        }
 
-          pfs->m_lock.dirty_to_free();
+        pfs->m_lock.dirty_to_free();
 
-          if (res > 0)
+        if (res > 0)
+        {
+          /* Duplicate insert by another thread */
+          if (++retry_count > retry_max)
           {
-            /* Duplicate insert by another thread */
-            if (++retry_count > retry_max)
-            {
-              /* Avoid infinite loops */
-              table_share_lost++;
-              return NULL;
-            }
-            goto search;
+            /* Avoid infinite loops */
+            table_share_lost++;
+            return NULL;
           }
-
-          /* OOM in lf_hash_insert */
-          table_share_lost++;
-          return NULL;
+          goto search;
         }
+
+        /* OOM in lf_hash_insert */
+        table_share_lost++;
+        return NULL;
       }
     }
   }

=== modified file 'storage/perfschema/pfs_setup_actor.cc'
--- a/storage/perfschema/pfs_setup_actor.cc	2011-06-30 15:50:45 +0000
+++ b/storage/perfschema/pfs_setup_actor.cc	2011-08-09 16:39:16 +0000
@@ -163,48 +163,45 @@ int insert_setup_actor(const String *use
   if (unlikely(pins == NULL))
     return HA_ERR_OUT_OF_MEM;
 
-  /* user is not constant, just using it for noise on insert */
-  uint i= randomized_index(user, setup_actor_max);
+  static uint setup_actor_monotonic_index= 0;
+  uint index;
+  uint attempts= 0;
+  PFS_setup_actor *pfs;
 
-  /*
-    Pass 1: [random, setup_actor_max - 1]
-    Pass 2: [0, setup_actor_max - 1]
-  */
-  int pass;
-  for (pass= 1; pass <= 2; i=0, pass++)
+  while (++attempts <= setup_actor_max)
   {
-    PFS_setup_actor *pfs= setup_actor_array + i;
-    PFS_setup_actor *pfs_last= setup_actor_array + setup_actor_max;
-    for ( ; pfs < pfs_last; pfs++)
+    /* See create_mutex() */
+    PFS_atomic::add_u32(& setup_actor_monotonic_index, 1);
+    index= setup_actor_monotonic_index % setup_actor_max;
+    pfs= setup_actor_array + index;
+
+    if (pfs->m_lock.is_free())
     {
-      if (pfs->m_lock.is_free())
+      if (pfs->m_lock.free_to_dirty())
       {
-        if (pfs->m_lock.free_to_dirty())
-        {
-          set_setup_actor_key(&pfs->m_key,
-                              user->ptr(), user->length(),
-                              host->ptr(), host->length(),
-                              role->ptr(), role->length());
-          pfs->m_username= &pfs->m_key.m_hash_key[0];
-          pfs->m_username_length= user->length();
-          pfs->m_hostname= pfs->m_username + pfs->m_username_length + 1;
-          pfs->m_hostname_length= host->length();
-          pfs->m_rolename= pfs->m_hostname + pfs->m_hostname_length + 1;
-          pfs->m_rolename_length= role->length();
-
-          int res;
-          res= lf_hash_insert(&setup_actor_hash, pins, &pfs);
-          if (likely(res == 0))
-          {
-            pfs->m_lock.dirty_to_allocated();
-            return 0;
-          }
+        set_setup_actor_key(&pfs->m_key,
+                            user->ptr(), user->length(),
+                            host->ptr(), host->length(),
+                            role->ptr(), role->length());
+        pfs->m_username= &pfs->m_key.m_hash_key[0];
+        pfs->m_username_length= user->length();
+        pfs->m_hostname= pfs->m_username + pfs->m_username_length + 1;
+        pfs->m_hostname_length= host->length();
+        pfs->m_rolename= pfs->m_hostname + pfs->m_hostname_length + 1;
+        pfs->m_rolename_length= role->length();
 
-          pfs->m_lock.dirty_to_free();
-          if (res > 0)
-            return HA_ERR_FOUND_DUPP_KEY;
-          return HA_ERR_OUT_OF_MEM;
+        int res;
+        res= lf_hash_insert(&setup_actor_hash, pins, &pfs);
+        if (likely(res == 0))
+        {
+          pfs->m_lock.dirty_to_allocated();
+          return 0;
         }
+
+        pfs->m_lock.dirty_to_free();
+        if (res > 0)
+          return HA_ERR_FOUND_DUPP_KEY;
+        return HA_ERR_OUT_OF_MEM;
       }
     }
   }

=== modified file 'storage/perfschema/pfs_setup_object.cc'
--- a/storage/perfschema/pfs_setup_object.cc	2011-05-05 06:11:49 +0000
+++ b/storage/perfschema/pfs_setup_object.cc	2011-08-09 16:39:16 +0000
@@ -157,47 +157,46 @@ int insert_setup_object(enum_object_type
   if (unlikely(pins == NULL))
     return HA_ERR_OUT_OF_MEM;
 
-  PFS_scan scan;
-  uint random= randomized_index(object, setup_object_max);
+  static uint setup_object_monotonic_index= 0;
+  uint index;
+  uint attempts= 0;
+  PFS_setup_object *pfs;
 
-  for (scan.init(random, setup_object_max);
-       scan.has_pass();
-       scan.next_pass())
+  while (++attempts <= setup_object_max)
   {
-    PFS_setup_object *pfs= setup_object_array + scan.first();
-    PFS_setup_object *pfs_last= setup_object_array + scan.last();
+    /* See create_mutex() */
+    PFS_atomic::add_u32(& setup_object_monotonic_index, 1);
+    index= setup_object_monotonic_index % setup_object_max;
+    pfs= setup_object_array + index;
 
-    for ( ; pfs < pfs_last; pfs++)
+    if (pfs->m_lock.is_free())
     {
-      if (pfs->m_lock.is_free())
+      if (pfs->m_lock.free_to_dirty())
       {
-        if (pfs->m_lock.free_to_dirty())
-        {
-          set_setup_object_key(&pfs->m_key, object_type,
-                               schema->ptr(), schema->length(),
-                               object->ptr(), object->length());
-          pfs->m_schema_name= &pfs->m_key.m_hash_key[1];
-          pfs->m_schema_name_length= schema->length();
-          pfs->m_object_name= pfs->m_schema_name + pfs->m_schema_name_length + 1;
-          pfs->m_object_name_length= object->length();
-          pfs->m_enabled= enabled;
-          pfs->m_timed= timed;
-
-          int res;
-          res= lf_hash_insert(&setup_object_hash, pins, &pfs);
-          if (likely(res == 0))
-          {
-            pfs->m_lock.dirty_to_allocated();
-            setup_objects_version++;
-            return 0;
-          }
+        set_setup_object_key(&pfs->m_key, object_type,
+                             schema->ptr(), schema->length(),
+                             object->ptr(), object->length());
+        pfs->m_schema_name= &pfs->m_key.m_hash_key[1];
+        pfs->m_schema_name_length= schema->length();
+        pfs->m_object_name= pfs->m_schema_name + pfs->m_schema_name_length + 1;
+        pfs->m_object_name_length= object->length();
+        pfs->m_enabled= enabled;
+        pfs->m_timed= timed;
 
-          pfs->m_lock.dirty_to_free();
-          if (res > 0)
-            return HA_ERR_FOUND_DUPP_KEY;
-          /* OOM in lf_hash_insert */
-          return HA_ERR_OUT_OF_MEM;
+        int res;
+        res= lf_hash_insert(&setup_object_hash, pins, &pfs);
+        if (likely(res == 0))
+        {
+          pfs->m_lock.dirty_to_allocated();
+          setup_objects_version++;
+          return 0;
         }
+
+        pfs->m_lock.dirty_to_free();
+        if (res > 0)
+          return HA_ERR_FOUND_DUPP_KEY;
+        /* OOM in lf_hash_insert */
+        return HA_ERR_OUT_OF_MEM;
       }
     }
   }

No bundle (reason: useless for push emails).