List: Commits
From: Marc Alff
Date: July 30 2009 3:27pm
Subject: bzr push into mysql-5.4-perfschema branch (marc.alff:2839 to 2840)
 2840 Marc Alff	2009-07-30
      Fixed my_atomic to never use safe_mutex
      Changed PFS_atomic to use my_atomic_rwlock
      modified:
        include/atomic/rwlock.h
        mysys/my_atomic.c
        storage/perfschema/pfs_atomic.cc
        storage/perfschema/pfs_atomic.h

 2839 Marc Alff	2009-07-30
      Implemented more review comments:
      - moved doxygen comments
      - fixed the number of bytes for file io on error
      modified:
        include/my_sys.h
        include/mysql/psi/mysql_file.h
        include/mysql/psi/psi.h
        mysys/my_static.c

=== modified file 'include/atomic/rwlock.h'
--- a/include/atomic/rwlock.h	2008-05-29 15:44:11 +0000
+++ b/include/atomic/rwlock.h	2009-07-30 15:22:07 +0000
@@ -1,4 +1,5 @@
-/* Copyright (C) 2006 MySQL AB
+/* Copyright (C) 2006 MySQL AB,
+   Copyright (C) 2009 Sun Microsystems, Inc
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -13,7 +14,9 @@
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
 
-typedef struct {pthread_mutex_t rw;} my_atomic_rwlock_t;
+#ifndef MY_ATOMIC_RWLOCK_H_INCLUDED
+#define MY_ATOMIC_RWLOCK_H_INCLUDED
+
 #define MY_ATOMIC_MODE_RWLOCKS 1
 
 #ifdef MY_ATOMIC_MODE_DUMMY
@@ -24,6 +27,9 @@ typedef struct {pthread_mutex_t rw;} my_
   implementations (another way is to run a UP build on an SMP box).
 */
 #warning MY_ATOMIC_MODE_DUMMY and MY_ATOMIC_MODE_RWLOCKS are incompatible
+
+typedef char my_atomic_rwlock_t;
+
 #define my_atomic_rwlock_destroy(name)
 #define my_atomic_rwlock_init(name)
 #define my_atomic_rwlock_rdlock(name)
@@ -31,7 +37,12 @@ typedef struct {pthread_mutex_t rw;} my_
 #define my_atomic_rwlock_rdunlock(name)
 #define my_atomic_rwlock_wrunlock(name)
 #define MY_ATOMIC_MODE "dummy (non-atomic)"
-#else
+#else /* not MY_ATOMIC_MODE_DUMMY */
+
+typedef struct {pthread_mutex_t rw;} my_atomic_rwlock_t;
+
+#ifndef SAFE_MUTEX
+
 /*
   we're using read-write lock macros but map them to mutex locks, and they're
   faster. Still, having semantically rich API we can change the
@@ -43,6 +54,38 @@ typedef struct {pthread_mutex_t rw;} my_
 #define my_atomic_rwlock_wrlock(name)      pthread_mutex_lock(& (name)->rw)
 #define my_atomic_rwlock_rdunlock(name)    pthread_mutex_unlock(& (name)->rw)
 #define my_atomic_rwlock_wrunlock(name)    pthread_mutex_unlock(& (name)->rw)
+
+#else /* SAFE_MUTEX */
+
+/*
+  SAFE_MUTEX pollutes the compilation namespace with macros
+  that alter pthread_mutex_t, pthread_mutex_init, etc.
+  Atomic operations should never use the safe mutex wrappers.
+  Unfortunately, there is no way to have both:
+  - safe mutex macros expanding pthread_mutex_lock to safe_mutex_lock
+  - my_atomic macros expanding to unmodified pthread_mutex_lock
+  inlined in the same compilation unit.
+  So, in case of SAFE_MUTEX, a function call is required.
+  Given that SAFE_MUTEX is a debugging facility,
+  this extra function call is not a performance concern for
+  production builds.
+*/
+C_MODE_START
+extern void plain_pthread_mutex_init(safe_mutex_t *);
+extern void plain_pthread_mutex_destroy(safe_mutex_t *);
+extern void plain_pthread_mutex_lock(safe_mutex_t *);
+extern void plain_pthread_mutex_unlock(safe_mutex_t *);
+C_MODE_END
+
+#define my_atomic_rwlock_destroy(name)     plain_pthread_mutex_destroy(& (name)->rw)
+#define my_atomic_rwlock_init(name)        plain_pthread_mutex_init(& (name)->rw)
+#define my_atomic_rwlock_rdlock(name)      plain_pthread_mutex_lock(& (name)->rw)
+#define my_atomic_rwlock_wrlock(name)      plain_pthread_mutex_lock(& (name)->rw)
+#define my_atomic_rwlock_rdunlock(name)    plain_pthread_mutex_unlock(& (name)->rw)
+#define my_atomic_rwlock_wrunlock(name)    plain_pthread_mutex_unlock(& (name)->rw)
+
+#endif /* SAFE_MUTEX */
+
 #define MY_ATOMIC_MODE "mutex"
 #ifndef MY_ATOMIC_MODE_RWLOCKS
 #define MY_ATOMIC_MODE_RWLOCKS 1
@@ -55,3 +98,5 @@ typedef struct {pthread_mutex_t rw;} my_
 #define make_atomic_load_body(S)    ret= *a;
 #define make_atomic_store_body(S)   *a= v;
 
+#endif /* MY_ATOMIC_RWLOCK_H_INCLUDED */
+

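The SAFE_MUTEX comment above is the heart of this patch and is worth a
standalone illustration. Below is a minimal sketch of the macro-shadowing
problem, with traced_mutex_t / traced_lock as hypothetical stand-ins for
the real safe_mutex machinery (which does far more bookkeeping):

  #include <pthread.h>
  #include <cstdio>

  /* Hypothetical debug wrapper, standing in for mysys' safe_mutex. */
  struct traced_mutex_t
  {
    pthread_mutex_t mutex;
  };

  static int traced_lock(traced_mutex_t *m, const char *file, int line)
  {
    std::printf("lock at %s:%d\n", file, line);
    return pthread_mutex_lock(&m->mutex);  /* still the plain call here */
  }

  /* Once a header defines this, every later textual use of
     pthread_mutex_lock in the translation unit is rewritten. */
  #define pthread_mutex_lock(M) traced_lock((M), __FILE__, __LINE__)

  int main()
  {
    traced_mutex_t m;
    pthread_mutex_init(&m.mutex, NULL);
    pthread_mutex_lock(&m);           /* expands to traced_lock(...) */
    pthread_mutex_unlock(&m.mutex);
    pthread_mutex_destroy(&m.mutex);
    return 0;
  }

Past the #define there is no way to spell an unmodified pthread_mutex_lock
inline; the only escapes are #undef (which mysys/my_atomic.c below uses) or
an out-of-line call compiled in another unit, which is exactly what
plain_pthread_mutex_lock() provides.
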
=== modified file 'mysys/my_atomic.c'
--- a/mysys/my_atomic.c	2008-07-22 14:16:22 +0000
+++ b/mysys/my_atomic.c	2009-07-30 15:22:07 +0000
@@ -43,3 +43,32 @@ int my_atomic_initialize()
 #endif
 }
 
+#ifdef SAFE_MUTEX
+#undef pthread_mutex_init
+#undef pthread_mutex_destroy
+#undef pthread_mutex_lock
+#undef pthread_mutex_unlock
+
+void plain_pthread_mutex_init(safe_mutex_t *m)
+{
+  pthread_mutex_init(& m->mutex, NULL);
+}
+
+void plain_pthread_mutex_destroy(safe_mutex_t *m)
+{
+  pthread_mutex_destroy(& m->mutex);
+}
+
+void plain_pthread_mutex_lock(safe_mutex_t *m)
+{
+  pthread_mutex_lock(& m->mutex);
+}
+
+void plain_pthread_mutex_unlock(safe_mutex_t *m)
+{
+  pthread_mutex_unlock(& m->mutex);
+}
+
+#endif
+
+

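For reference, this is the calling pattern my_atomic_rwlock_t is designed
for, sketched with hypothetical names (counter_lock, counter); it is not
code from this patch. With native atomics the lock/unlock macros expand to
nothing; in MY_ATOMIC_MODE_RWLOCKS builds they take the mutex, and under
SAFE_MUTEX they now route through the plain_pthread_mutex_* functions
added above:

  #include <my_global.h>
  #include <my_pthread.h>
  #include <my_atomic.h>

  static my_atomic_rwlock_t counter_lock;
  static volatile int32 counter= 0;

  void counter_init(void)
  {
    my_atomic_rwlock_init(&counter_lock);
  }

  int32 counter_increment(void)
  {
    int32 result;
    /* A no-op with native atomics; a plain mutex otherwise. */
    my_atomic_rwlock_wrlock(&counter_lock);
    result= my_atomic_add32(&counter, 1);
    my_atomic_rwlock_wrunlock(&counter_lock);
    return result;
  }
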
=== modified file 'storage/perfschema/pfs_atomic.cc'
--- a/storage/perfschema/pfs_atomic.cc	2009-07-08 23:52:29 +0000
+++ b/storage/perfschema/pfs_atomic.cc	2009-07-30 15:22:07 +0000
@@ -19,9 +19,9 @@
 */
 
 #include <my_global.h>
+#include <my_pthread.h>
 #include "pfs_atomic.h"
 
-#ifdef SAFE_MUTEX
 /*
   Using SAFE_MUTEX is impossible, because of recursion.
   - code locks mutex X
@@ -45,56 +45,28 @@
   this code is not inlined in pfs_atomic.h, but located here in pfs_atomic.cc.
 
   What is needed is a plain, unmodified, pthread_mutex_t.
+  This is provided by my_atomic_rwlock_t.
 */
-#undef pthread_mutex_t
-#undef pthread_mutex_init
-#undef pthread_mutex_destroy
-#undef pthread_mutex_lock
-#undef pthread_mutex_unlock
-#endif
 
-#ifdef MY_ATOMIC_MODE_RWLOCKS
 /**
-  Internal mutex array.
-  Using a single mutex for all atomic operations would be a bottleneck.
-  Using a mutex per performance schema structure would be too costly in
-  memory, and use too many mutex.
+  Internal rwlock array.
+  Using a single rwlock for all atomic operations would be a bottleneck.
+  Using a rwlock per performance schema structure would be too costly in
+  memory, and use too many rwlocks.
   The PFS_atomic implementation computes a hash value from the
   atomic variable, to spread the bottleneck across 256 buckets,
   while still providing --transparently for the caller-- an atomic
   operation.
 */
-static pthread_mutex_t m_mutex_array[256];
-
-static inline pthread_mutex_t *get_mutex(volatile void *ptr)
-{
-  /*
-    Divide an address by 8 to remove alignment,
-    modulo 256 to fall in the array.
-  */
-  uint index= (((intptr) ptr) >> 3) & 0xFF;
-  pthread_mutex_t *result= &m_mutex_array[index];
-  return result;
-}
-
-void PFS_atomic::lock(volatile void *ptr)
-{
-  pthread_mutex_lock(get_mutex(ptr));
-}
-
-void PFS_atomic::unlock(volatile void *ptr)
-{
-  pthread_mutex_unlock(get_mutex(ptr));
-}
-#endif
+my_atomic_rwlock_t PFS_atomic::m_rwlock_array[256];
 
 void PFS_atomic::init(void)
 {
 #ifdef MY_ATOMIC_MODE_RWLOCKS
   uint i;
 
-  for (i=0; i< array_elements(m_mutex_array); i++)
-    pthread_mutex_init(& m_mutex_array[i], NULL);
+  for (i=0; i< array_elements(m_rwlock_array); i++)
+    my_atomic_rwlock_init(& m_rwlock_array[i]);
 #endif
 }
 
@@ -103,8 +75,8 @@ void PFS_atomic::cleanup(void)
 #ifdef MY_ATOMIC_MODE_RWLOCKS
   uint i;
 
-  for (i=0; i< array_elements(m_mutex_array); i++)
-    pthread_mutex_destroy(& m_mutex_array[i]);
+  for (i=0; i< array_elements(m_rwlock_array); i++)
+    my_atomic_rwlock_destroy(& m_rwlock_array[i]);
 #endif
 }
 

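The 256-bucket hashing described in the comment above (and implemented by
get_rwlock() in the header below) is simple enough to verify by hand:
shifting an address right by 3 discards the 8-byte-alignment bits that
would otherwise crowd everything into a few buckets, and the mask keeps
the result in 0..255. A standalone illustration, with invented addresses:

  #include <cstdint>
  #include <cstdio>

  int main()
  {
    /* Three adjacent 8-byte-aligned variables, e.g. a counter array. */
    const std::uintptr_t addrs[]= { 0x7f0000001000u, 0x7f0000001008u,
                                    0x7f0000001010u };
    for (int i= 0; i < 3; i++)
    {
      unsigned bucket= (unsigned) ((addrs[i] >> 3) & 0xFF);
      std::printf("%#llx -> bucket %u\n",
                  (unsigned long long) addrs[i], bucket);
    }
    /* Prints buckets 0, 1 and 2: neighbouring variables spread over
       different rwlocks instead of all contending on one. */
    return 0;
  }
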
=== modified file 'storage/perfschema/pfs_atomic.h'
--- a/storage/perfschema/pfs_atomic.h	2009-07-08 23:52:29 +0000
+++ b/storage/perfschema/pfs_atomic.h	2009-07-30 15:22:07 +0000
@@ -34,9 +34,9 @@ public:
   static inline int32 load_32(volatile int32 *ptr)
   {
     int32 result;
-    lock(ptr);
+    rdlock(ptr);
     result= my_atomic_load32(ptr);
-    unlock(ptr);
+    rdunlock(ptr);
     return result;
   }
 
@@ -44,35 +44,35 @@ public:
   static inline uint32 load_u32(volatile uint32 *ptr)
   {
     uint32 result;
-    lock(ptr);
+    rdlock(ptr);
     result= (uint32) my_atomic_load32((int32*) ptr);
-    unlock(ptr);
+    rdunlock(ptr);
     return result;
   }
 
   /** Atomic store. */
   static inline void store_32(volatile int32 *ptr, int32 value)
   {
-    lock(ptr);
+    wrlock(ptr);
     my_atomic_store32(ptr, value);
-    unlock(ptr);
+    wrunlock(ptr);
   }
 
   /** Atomic store. */
   static inline void store_u32(volatile uint32 *ptr, uint32 value)
   {
-    lock(ptr);
+    wrlock(ptr);
     my_atomic_store32((int32*) ptr, (int32) value);
-    unlock(ptr);
+    wrunlock(ptr);
   }
 
   /** Atomic add. */
   static inline int32 add_32(volatile int32 *ptr, int32 value)
   {
     int32 result;
-    lock(ptr);
+    wrlock(ptr);
     result= my_atomic_add32(ptr, value);
-    unlock(ptr);
+    wrunlock(ptr);
     return result;
   }
 
@@ -80,9 +80,9 @@ public:
   static inline uint32 add_u32(volatile uint32 *ptr, uint32 value)
   {
     uint32 result;
-    lock(ptr);
+    wrlock(ptr);
     result= (uint32) my_atomic_add32((int32*) ptr, (int32) value);
-    unlock(ptr);
+    wrunlock(ptr);
     return result;
   }
 
@@ -91,9 +91,9 @@ public:
                             int32 new_value)
   {
     bool result;
-    lock(ptr);
+    wrlock(ptr);
     result= my_atomic_cas32(ptr, old_value, new_value);
-    unlock(ptr);
+    wrunlock(ptr);
     return result;
   }
 
@@ -102,23 +102,46 @@ public:
                              uint32 new_value)
   {
     bool result;
-    lock(ptr);
+    wrlock(ptr);
     result= my_atomic_cas32((int32*) ptr, (int32*) old_value,
                            (int32) new_value);
-    unlock(ptr);
+    wrunlock(ptr);
     return result;
   }
 
 private:
-#ifdef MY_ATOMIC_MODE_RWLOCKS
-  static void lock(volatile void *ptr);
-  static void unlock(volatile void *ptr);
-#else
-  static inline void lock(volatile void *ptr)
-  {}
-  static inline void unlock(volatile void *ptr)
-  {}
-#endif
+  static my_atomic_rwlock_t m_rwlock_array[256];
+
+  static inline my_atomic_rwlock_t *get_rwlock(volatile void *ptr)
+  {
+    /*
+      Divide an address by 8 to remove alignment,
+      modulo 256 to fall in the array.
+    */
+    uint index= (((intptr) ptr) >> 3) & 0xFF;
+    my_atomic_rwlock_t *result= &m_rwlock_array[index];
+    return result;
+  }
+
+  static inline void rdlock(volatile void *ptr)
+  {
+    my_atomic_rwlock_rdlock(get_rwlock(ptr));
+  }
+
+  static inline void wrlock(volatile void *ptr)
+  {
+    my_atomic_rwlock_wrlock(get_rwlock(ptr));
+  }
+
+  static inline void rdunlock(volatile void *ptr)
+  {
+    my_atomic_rwlock_rdunlock(get_rwlock(ptr));
+  }
+
+  static inline void wrunlock(volatile void *ptr)
+  {
+    my_atomic_rwlock_wrunlock(get_rwlock(ptr));
+  }
 };
 
 #endif

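Taken together, the new private helpers give each public operation the
appropriate lock flavour: loads take the read lock, while stores, adds and
compare-and-swap take the write lock, and every one of these calls
compiles away when native atomics are available. A sketch of a call site,
with file_io_count as a hypothetical counter rather than an actual
perfschema variable:

  #include <my_global.h>
  #include "pfs_atomic.h"

  static volatile uint32 file_io_count= 0;

  /* Writer side: each instrumented I/O bumps the counter atomically. */
  void record_file_io(void)
  {
    (void) PFS_atomic::add_u32(&file_io_count, 1);
  }

  /* Reader side: a concurrent read never observes a torn value. */
  uint32 current_file_io_count(void)
  {
    return PFS_atomic::load_u32(&file_io_count);
  }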