List:Commits« Previous MessageNext Message »
From:Vladislav Vaintroub Date:September 7 2010 5:15pm
Subject:bzr commit into mysql-trunk branch (vvaintroub:3194) Bug#56585
View as plain text  
#At file:///H:/bzr-new/trunk1/ based on revid:vvaintroub@stripped

 3194 Vladislav Vaintroub	2010-09-07
      Bug#56585 : Slowdown on Windows in readonly  benchmark.
      (Added fix for race condition while setting/resetting the event
      by the last exiting and first entering readers, spotted by Dmitry)
      
      Yet another take on this problem:
      implement rwlock using 2 critical sections, atomic variable and event.
      This implementation is cheaper than the general one based on condition
      variables, because it has less overhead.
      
      For writer-mostly scenarios, the lock is equivalent to critical section,
      with just Enter/LeaveCriticalSection per rwlock/rwunlock pair.
      
      For concurrent reader scenarios, it is again Enter/LeaveCriticalSection
      plus an atomic increment and decrement of the reader count, except for the
      first-time lock and last-time unlock (the first lock blocks writers via the
      event, the last unlock unblocks writers, so there is a bit more overhead).
      
      Compared to implementation on other platforms:
      1) reader and writer critical sections are split (so there is less 
        contention)
      2) Less critical section lock/unlock operations generally.
      
      Performance-wise this lock seems to bring a lot, outperforming
      even native reader-writer locks, as in the table below
      ("no fix" is 5.5, "srwlock" is the native Vista SRW lock, "thislock"
      is the implementation in this patch).
      
      -------------------------------------------------
      benchmark      | no fix     srwlock  thislock  
      -----------------------------------------------
      simple_ranges | 14051    17996   18459
      -----------------------------------------------
      point_select    | 20576     29120   29309
      
      
      Besides, this lock correctly implements (in Dmitry's sense of it)
      "prefer readers", i.e. a writer does not overtake or block newly incoming
      readers, and waits until the count of all (pending and active) readers
      goes down to 0.
      
      Also, this lock is recursive, for both readers and writers.

    modified:
      include/my_pthread.h
      mysys/thr_rwlock.c
=== modified file 'include/my_pthread.h'
--- a/include/my_pthread.h	2010-09-06 21:09:45 +0000
+++ b/include/my_pthread.h	2010-09-07 17:15:03 +0000
@@ -623,6 +623,25 @@ extern int rw_pr_init(rw_pr_lock_t *);
 /* Otherwise we have to use our own implementation of read/write locks. */
 #define NEED_MY_RW_LOCK 1
 struct st_my_rw_lock_t;
+
+#ifdef _WIN32
+/*
+  Windows "prefer readers" read-write lock, built from two critical
+  sections, an atomic reader counter and a manual-reset event.
+  NOTE(review): the tag _win_prlock begins with an underscore followed by a
+  lowercase letter, which is reserved at file scope in C - consider renaming
+  (e.g. st_win_prlock).
+*/
+typedef struct _win_prlock
+{
+  CRITICAL_SECTION reader_cs; /* serializes reader entry; first reader holds it while waiting for the writer */
+  CRITICAL_SECTION writer_cs; /* owned by the active writer (recursively, per Win32 CS semantics) */
+  volatile LONG reader_count; /*readers (all, pending and active) */
+  int  writer_recursion_count; /* recursion count for writer lock */
+  HANDLE allow_writer;        /* manual-reset event; signaled when writers may proceed */
+}rw_pr_lock_t;
+
+extern int rw_pr_init(rw_pr_lock_t *);
+extern int rw_pr_rdlock(rw_pr_lock_t *);
+extern int rw_pr_wrlock(rw_pr_lock_t *);
+extern int rw_pr_tryrdlock(rw_pr_lock_t *);
+extern int rw_pr_trywrlock(rw_pr_lock_t *);
+extern int rw_pr_unlock(rw_pr_lock_t *);
+extern int rw_pr_destroy(rw_pr_lock_t *);
+#else
 #define rw_pr_lock_t my_rw_lock_t
 extern int rw_pr_init(struct st_my_rw_lock_t *);
 #define rw_pr_rdlock(A) my_rw_rdlock((A))
@@ -634,7 +653,7 @@ extern int rw_pr_init(struct st_my_rw_lo
 #define rw_pr_lock_assert_write_owner(A) my_rw_lock_assert_write_owner((A))
 #define rw_pr_lock_assert_not_write_owner(A) my_rw_lock_assert_not_write_owner((A))
 #endif /* defined(HAVE_PTHREAD_RWLOCK_RDLOCK) && defined(HAVE_PTHREAD_RWLOCKATTR_SETKIND_NP) */
-
+#endif
 
 #ifdef NEED_MY_RW_LOCK
 /*

=== modified file 'mysys/thr_rwlock.c'
--- a/mysys/thr_rwlock.c	2010-08-12 13:50:23 +0000
+++ b/mysys/thr_rwlock.c	2010-09-07 17:15:03 +0000
@@ -192,12 +192,13 @@ int my_rw_unlock(my_rw_lock_t *rwp)
   return(0);
 }
 
-
+#ifndef _WIN32
 int rw_pr_init(struct st_my_rw_lock_t *rwlock)
 {
   my_bool prefer_readers_attr= TRUE;
   return my_rw_init(rwlock, &prefer_readers_attr);
 }
+#endif
 
 #else
 
@@ -218,4 +219,128 @@ int rw_pr_init(rw_pr_lock_t *rwlock)
 }
 
 #endif /* defined(NEED_MY_RW_LOCK) */
+
+#ifdef _WIN32
+/*
+  Initialize a prefer-readers rwlock.
+
+  The allow_writer event is manual-reset and initially signaled (no readers,
+  so writers may enter immediately).
+
+  Returns 0 on success, ENOMEM if the event cannot be created; on failure
+  the already-initialized critical sections are deleted, so the lock object
+  holds no resources.
+*/
+int rw_pr_init(rw_pr_lock_t *rwl)
+{
+  InitializeCriticalSection(&rwl->reader_cs);
+  InitializeCriticalSection(&rwl->writer_cs);
+  rwl->reader_count= 0;
+  rwl->writer_recursion_count= 0;
+  rwl->allow_writer= CreateEvent(NULL, TRUE, TRUE, NULL);
+  if(!rwl->allow_writer)
+  {
+    /* Do not leak the critical sections on partial initialization. */
+    DeleteCriticalSection(&rwl->reader_cs);
+    DeleteCriticalSection(&rwl->writer_cs);
+    return ENOMEM;
+  }
+  return 0;
+}
+
+/*
+  Acquire the lock for reading.
+  Readers are serialized on reader_cs only while updating the counter; once
+  this function returns, the read "section" is tracked solely by
+  reader_count, so concurrent readers run without holding any lock.
+  Always returns 0.
+*/
+int rw_pr_rdlock(rw_pr_lock_t *rwl)
+{
+  EnterCriticalSection(&rwl->reader_cs);
+  if (InterlockedIncrement(&rwl->reader_count) == 1)
+  {
+    /* 
+      The first reader waits for a possible writer to complete
+      (during this time it blocks other readers that are waiting on reader_cs).
+      After that, it disallows other writers, via ResetEvent(), until all
+      readers finish.
+    */
+    EnterCriticalSection(&rwl->writer_cs);
+    ResetEvent(rwl->allow_writer);
+    LeaveCriticalSection(&rwl->writer_cs);
+  }
+  LeaveCriticalSection(&rwl->reader_cs);
+  return 0;
+}
+
+
+/*
+  Acquire the lock for writing.
+  Ownership of writer_cs excludes other writers; if readers are present,
+  wait until the last reader signals allow_writer.  Recursive write locking
+  works because EnterCriticalSection is re-entrant and allow_writer stays
+  signaled while this thread owns the lock (ResetEvent is only called under
+  writer_cs).
+  NOTE(review): reader_count > 0 with allow_writer still signaled is possible
+  while a first reader is blocked on writer_cs (it resets the event only
+  after entering); that reader cannot proceed until we unlock, so this looks
+  benign - confirm.
+*/
+int rw_pr_wrlock(rw_pr_lock_t *rwl)
+{
+  EnterCriticalSection(&rwl->writer_cs);
+  if(rwl->reader_count > 0)
+    WaitForSingleObject(rwl->allow_writer, INFINITE);
+  rwl->writer_recursion_count++;
+  return 0;
+}
+
+
+/*
+  Release the lock; used for both read unlock and write unlock.
+  The two cases are distinguished via writer_recursion_count, which is
+  non-zero only while the calling thread owns writer_cs (see rw_pr_wrlock()).
+  NOTE(review): a thread that acquired the read lock while already holding
+  the write lock would have this unlock treated as a writer unlock first -
+  confirm that mixed write-then-read recursion is not expected by callers.
+*/
+int rw_pr_unlock(rw_pr_lock_t *rwl)
+{
+  if(rwl->writer_recursion_count > 0)
+  {
+    /* Unlock writer */
+    rwl->writer_recursion_count--;
+    LeaveCriticalSection(&rwl->writer_cs);
+  }
+  else
+  {
+    if (InterlockedDecrement(&rwl->reader_count) == 0)
+    {
+      /* 
+        Last reader exits, allow writers to run, unless some other reader
+        has started meanwhile.
+      */
+      if(TryEnterCriticalSection(&rwl->reader_cs))
+      {
+        /* Re-check under reader_cs: a reader may have entered concurrently. */
+        if(rwl->reader_count == 0)
+          SetEvent(rwl->allow_writer);
+        LeaveCriticalSection(&rwl->reader_cs);
+      }
+      /* 
+       If TryEnter above fails, it means that new reader started, and 
+       writer event can be left unset, until next time readers go down to zero.
+      */
+    }
+  }
+  return 0;
+}
+
+/*
+  Try to acquire the lock for reading without blocking on an active writer.
+  Note that this still blocks on reader_cs (i.e. on other readers that are
+  in the middle of entering).
+  Returns 0 on success, EBUSY if a writer currently owns writer_cs.
+  NOTE(review): if the calling thread already holds the write lock,
+  TryEnterCriticalSection() succeeds recursively (Win32 critical sections are
+  re-entrant), so the read lock is granted - confirm that is intended.
+*/
+int rw_pr_tryrdlock(rw_pr_lock_t *rwl)
+{
+  EnterCriticalSection(&rwl->reader_cs);
+  if (InterlockedIncrement(&rwl->reader_count) == 1)
+  {
+    /* Wait for possible writer to complete, disallow other writers */
+    if(TryEnterCriticalSection(&rwl->writer_cs))
+    {
+      ResetEvent(rwl->allow_writer);
+      LeaveCriticalSection(&rwl->writer_cs);
+    }
+    else
+    {
+      /* A writer holds the lock: roll back the optimistic increment. */
+      InterlockedDecrement(&rwl->reader_count);
+      LeaveCriticalSection(&rwl->reader_cs);
+      return EBUSY;
+    }
+  }
+  LeaveCriticalSection(&rwl->reader_cs);
+  return 0;
+}
+
+/*
+  Try to acquire the lock for writing without blocking.
+  Returns 0 on success, EBUSY if another writer owns writer_cs or readers
+  have not yet signaled allow_writer.
+  NOTE(review): as in rw_pr_wrlock(), reader_count > 0 can coexist with a
+  still-signaled allow_writer while a first reader is blocked on writer_cs;
+  in that window the try-lock succeeds - presumably benign since that reader
+  cannot proceed until we unlock; verify.
+*/
+int rw_pr_trywrlock(rw_pr_lock_t *rwl)
+{
+  if(!TryEnterCriticalSection(&rwl->writer_cs))
+    return EBUSY;
+
+   if(rwl->reader_count > 0)
+   {
+      /* Readers present: succeed only if the event is already signaled. */
+      if(WaitForSingleObject(rwl->allow_writer, 0) != WAIT_OBJECT_0)
+      {
+        LeaveCriticalSection(&rwl->writer_cs);
+        return EBUSY;
+      }
+   }
+
+  rwl->writer_recursion_count++;
+  return 0;
+}
+
+/*
+  Free the lock's resources.  The caller must ensure the lock is not held
+  and that no thread is still waiting on it.
+  NOTE(review): if rw_pr_init() failed with ENOMEM, allow_writer is NULL and
+  CloseHandle(NULL) merely fails; consider documenting that a failed-init
+  lock must not be destroyed.
+*/
+int rw_pr_destroy(rw_pr_lock_t *rwl)
+{
+  DeleteCriticalSection(&rwl->reader_cs);
+  DeleteCriticalSection(&rwl->writer_cs);
+  CloseHandle(rwl->allow_writer);
+  return 0;
+}
+#endif /* _WIN32 */
+
 #endif /* defined(THREAD) */


Attachment: [text/bzr-bundle] bzr/vvaintroub@mysql.com-20100907171503-74hgc30g0kv6vwz8.bundle
Thread
bzr commit into mysql-trunk branch (vvaintroub:3194) Bug#56585Vladislav Vaintroub7 Sep
Re: bzr commit into mysql-trunk branch (vvaintroub:3194) Bug#56585Dmitry Lenev9 Sep
  • RE: bzr commit into mysql-trunk branch (vvaintroub:3194) Bug#56585Vladislav Vaintroub11 Sep