
List: Commits
From: Dmitry Lenev
Date: October 29 2009 9:13am
Subject: bzr commit into mysql-6.0-codebase-bugfixing branch (dlenev:3627)
#At file:///home/dlenev/src/bzr/mysql-6.0-codebase-concurr/ based on revid:dlenev@stripped

 3627 Dmitry Lenev	2009-10-29
      Patch that changes the metadata locking subsystem to use a mutex per
      lock and a condition variable per context instead of one mutex and one
      condition variable for the whole subsystem.
      
      This should increase concurrency in this subsystem.
      
      It also opens the way for further changes which are necessary to solve
      such bugs as #46272 "MySQL 5.4.4, new MDL: unnecessary deadlock" and
      #37346 "innodb does not detect deadlock between update and alter table".
     @ mysql-test/include/handler.inc
        Adjusted handler_myisam.test and handler_innodb.test to account for
        the fact that exclusive metadata locks on tables are now acquired in
        alphabetical order of fully qualified table names instead of the
        order in which the tables are mentioned in the statement.
     @ mysql-test/r/handler_innodb.result
        Adjusted handler_myisam.test and handler_innodb.test to account for
        the fact that exclusive metadata locks on tables are now acquired in
        alphabetical order of fully qualified table names instead of the
        order in which the tables are mentioned in the statement.
     @ mysql-test/r/handler_myisam.result
        Adjusted handler_myisam.test and handler_innodb.test to account for
        the fact that exclusive metadata locks on tables are now acquired in
        alphabetical order of fully qualified table names instead of the
        order in which the tables are mentioned in the statement.
     @ mysql-test/r/mdl_sync.result
        Adjusted mdl_sync.test to account for the fact that exclusive
        metadata locks on tables are now acquired in alphabetical order of
        fully qualified table names instead of the order in which the tables
        are mentioned in the statement.
     @ mysql-test/t/mdl_sync.test
        Adjusted mdl_sync.test to account for the fact that exclusive
        metadata locks on tables are now acquired in alphabetical order of
        fully qualified table names instead of the order in which the tables
        are mentioned in the statement.
     @ sql/mdl.cc
        Changed the metadata locking subsystem to use a mutex per lock and a
        condition variable per context instead of one mutex and one condition
        variable for the whole subsystem (a simplified sketch of this scheme
        follows this list). To implement this change:
        - Removed LOCK_mdl mutex and COND_mdl condition variable.
        - Introduced LOCK_mdl_hash mutex which protects mdl_locks hash only.
        - Introduced MDL_lock::lock mutexes which protect individual lock
          objects.
        - Introduced the MDL_context::m_cond condition variable to be used
          for waiting until this context's pending request can be satisfied
          or its thread has to perform actions to resolve a potential
          deadlock. A context which wants to wait adds a ticket corresponding
          to its request to the appropriate queue of waiters in the MDL_lock
          or MDL_global_lock object, so that it can be noticed when other
          contexts change the state of the lock and be woken up by them
          through signalling on MDL_context::m_cond. As a consequence,
          MDL_ticket objects have to be used for any waiting in the metadata
          locking subsystem, including the waiting which happens in the
          MDL_context::wait_for_locks() method.
        - Introduced the LOCK_mdl_global mutex which protects the global_lock
          object. To support the waiting approach described above, added
          waiting queues to the MDL_global_lock class. In addition, since the
          global lock is no longer handled in the same critical section as
          per-object locks, moved operations on the global lock out to a
          separate set of methods. Changed the existing
          MDL_context::acquire_global_shared_lock() and
          release_global_shared_lock() accordingly.
        - To simplify waking up contexts waiting for a per-object lock, split
          the waiting queue in the MDL_lock class into two queues: one for
          pending requests for exclusive locks and another for requests for
          shared locks.
        - Adjusted the MDL_context::try_acquire_shared_lock()/exclusive_lock(),
          MDL_ticket::upgrade_shared_lock_to_exclusive_lock() and
          MDL_context::release_ticket() methods to use LOCK_mdl_hash and
          MDL_lock::lock instead of the single LOCK_mdl mutex, and to wake up
          waiters according to the approach described above. The latter method
          was also renamed to MDL_context::release_lock().
        - Since acquiring several exclusive locks can no longer happen under a
          single LOCK_mdl mutex, the approach to it had to be changed. Now we
          acquire them one by one, in alphabetical order of their keys, to
          avoid deadlocks. Changed MDL_context::acquire_exclusive_locks()
          accordingly (as part of this change, moved the code responsible for
          acquiring a single exclusive lock to the new
          MDL_context::acquire_exclusive_lock_impl() method). In addition, to
          avoid deadlocks between acquiring several exclusive locks and the
          global shared lock, ensured that global intention exclusive locks
          are acquired in a lock- and wait-free manner in the recursive case.
        - Since we no longer have a single LOCK_mdl mutex which protects all
          MDL_context::m_is_waiting_in_mdl members, using these members to
          determine whether we have really woken up a context holding a
          conflicting shared lock became inconvenient. Got rid of this member
          and changed the notify_shared_lock() helper function and the
          process of acquiring/upgrading to an exclusive lock not to rely on
          such information. Now in MDL_context::acquire_exclusive_lock_impl()
          and MDL_ticket::upgrade_shared_lock_to_exclusive_lock() we simply
          retry waking up threads holding conflicting shared locks after a
          small timeout.
        - Adjusted MDL_context::has_pending_conflicting_locks() and
          MDL_ticket::has_pending_conflicting_lock() to use per-lock
          mutexes instead of LOCK_mdl.
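        A minimal, self-contained sketch of the "mutex per lock, condition
        variable per context" waiting pattern described above (illustrative
        only; Lock, Context, wait_for_lock() and release_lock() are made-up
        stand-ins, not the real MDL_lock/MDL_context classes):

          #include <pthread.h>
          #include <list>

          struct Context
          {
            pthread_cond_t cond;          /* per-context condition variable */
            Context() { pthread_cond_init(&cond, NULL); }
            ~Context() { pthread_cond_destroy(&cond); }
          };

          struct Lock
          {
            pthread_mutex_t mutex;        /* per-lock mutex (MDL_lock::lock) */
            bool granted_exclusive;
            std::list<Context*> waiting;  /* contexts parked on this lock */
            Lock() : granted_exclusive(false)
            { pthread_mutex_init(&mutex, NULL); }
            ~Lock() { pthread_mutex_destroy(&mutex); }
          };

          /*
            A waiter queues itself on the lock and sleeps on its *own*
            condition variable, paired with the *lock's* mutex.
          */
          void wait_for_lock(Lock *lk, Context *ctx)
          {
            pthread_mutex_lock(&lk->mutex);
            lk->waiting.push_back(ctx);
            while (lk->granted_exclusive)
              pthread_cond_wait(&ctx->cond, &lk->mutex);
            lk->waiting.remove(ctx);
            lk->granted_exclusive= true;  /* grant the lock to ourselves */
            pthread_mutex_unlock(&lk->mutex);
          }

          /*
            A releaser wakes each parked context individually instead of
            broadcasting on one subsystem-wide condition variable
            (the analogue of MDL_context::wake_up()).
          */
          void release_lock(Lock *lk)
          {
            pthread_mutex_lock(&lk->mutex);
            lk->granted_exclusive= false;
            for (std::list<Context*>::iterator it= lk->waiting.begin();
                 it != lk->waiting.end(); ++it)
              pthread_cond_signal(&(*it)->cond);
            pthread_mutex_unlock(&lk->mutex);
          }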
     @ sql/mdl.h
        Changed the metadata locking subsystem to use a mutex per lock and a
        condition variable per context instead of one mutex and one condition
        variable for the whole subsystem (see also the lock-ordering sketch
        after this list). In order to implement this change:
        - Added MDL_key::cmp() method to be able to sort MDL_key objects
          alphabetically.
        - Changed MDL_ticket::get_ctx() to return a pointer to a non-const
          object so that the MDL_context::wake_up() method can be called on
          such contexts.
        - Got rid of the unlocked versions of the
          has_pending_conflicting_locks/lock() methods in MDL_context and
          MDL_ticket. We no longer have a single mutex which protects all
          locks, so one always has to use the versions of these methods which
          acquire per-lock mutexes.
        - The MDL_request_list list type now counts its elements.
        - Added the MDL_context::m_cond condition variable to be used for
          waiting until this context's pending request can be satisfied or
          its thread has to perform actions to resolve a potential deadlock.
          Added the wake_up() method to wake a context up from such a wait.
        - Added auxiliary methods to the MDL_context class in order to
          encapsulate work with the global metadata lock.
          Introduced the m_global_intention_exclusive_locks member to support
          lock-free acquisition of global intention exclusive locks in the
          recursive case.
        - Added the auxiliary MDL_context::acquire_exclusive_lock_impl()
          method which does all the work necessary to acquire an exclusive
          lock on one object, but should not be used directly as it does not
          enforce any asserts ensuring that no deadlocks are possible.
        - The MDL_context::release_ticket() method became release_lock().
        - Since we no longer need to know whether a thread trying to acquire
          an exclusive lock managed to wake up any threads holding conflicting
          shared locks (as we will try to wake up such threads again shortly
          anyway), the MDL_context::m_is_waiting_in_mdl member became
          unnecessary and notify_shared_lock() no longer needs to be a friend
          of MDL_context.
        - MDL_ticket::upgrade_shared_lock_to_exclusive() became a friend of
          MDL_context as it needs to use MDL_context::m_cond to wait until
          the lock can be upgraded.
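        An illustrative sketch (with made-up Request and acquire_one() names,
        not the server's API) of why acquiring exclusive locks in a fixed
        alphabetical order of their keys, as enabled by MDL_key::cmp(),
        cannot deadlock:

          #include <algorithm>
          #include <cstddef>
          #include <string>
          #include <vector>

          struct Request { std::string key; };

          /* Analogue of comparing MDL_key objects alphabetically. */
          static bool by_key(const Request *a, const Request *b)
          { return a->key < b->key; }

          bool acquire_all(std::vector<Request*> &requests,
                           bool (*acquire_one)(Request*))
          {
            /*
              Every thread sorts its requests the same way, so no two threads
              can each hold one lock while waiting for the other's: waits only
              go "forward" in key order, so the wait-for graph has no cycle.
            */
            std::sort(requests.begin(), requests.end(), by_key);
            for (size_t i= 0; i < requests.size(); i++)
              if (acquire_one(requests[i]))
                return true;  /* failure; caller releases what was acquired */
            return false;
          }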
     @ sql/sql_plist.h
        Added support for element counting to the I_P_List list template.
        One can use policy classes to specify whether such counting is
        needed for a particular list (see the sketch below).
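        A minimal sketch of the policy-class idea, assuming a much simplified
        intrusive list (the real I_P_List template takes more parameters and
        differs in the details):

          #include <cstddef>

          /* Counting policy: keeps the number of elements. */
          struct Counted
          {
            size_t m_count;
            Counted() : m_count(0) {}
            void inc() { m_count++; }
            void dec() { m_count--; }
            size_t elements() const { return m_count; }
          };

          /* Non-counting policy: all operations compile away. */
          struct Uncounted
          {
            void inc() {}
            void dec() {}
          };

          template <typename T, typename Count_policy>
          class Simple_list : public Count_policy
          {
            T *m_first;
          public:
            Simple_list() : m_first(NULL) {}
            void push_front(T *elem)
            {
              elem->next= m_first;
              m_first= elem;
              this->inc();   /* no-op for Uncounted, counter++ for Counted */
            }
            /* remove(), iterators, etc. omitted. */
          };

          /* Only lists that need elements() pay for the counter. */
          struct Node { Node *next; };
          typedef Simple_list<Node, Counted>   Counting_node_list;
          typedef Simple_list<Node, Uncounted> Plain_node_list;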

    modified:
      mysql-test/include/handler.inc
      mysql-test/r/handler_innodb.result
      mysql-test/r/handler_myisam.result
      mysql-test/r/mdl_sync.result
      mysql-test/t/mdl_sync.test
      sql/mdl.cc
      sql/mdl.h
      sql/sql_plist.h
=== modified file 'mysql-test/include/handler.inc'
--- a/mysql-test/include/handler.inc	2009-07-28 14:16:37 +0000
+++ b/mysql-test/include/handler.inc	2009-10-29 09:13:48 +0000
@@ -543,7 +543,7 @@ disconnect flush;
 #
 
 --disable_warnings
-drop table if exists t1,t2;
+drop table if exists t1, t0;
 --enable_warnings
 create table t1 (c1 int);
 --echo connection: default
@@ -552,25 +552,25 @@ handler t1 read first;
 connect (flush,localhost,root,,);
 connection flush;
 --echo connection: flush
---send rename table t1 to t2;
+--send rename table t1 to t0;
 connection waiter;
 --echo connection: waiter 
 let $wait_condition=
   select count(*) = 1 from information_schema.processlist
-  where state = "Waiting for table" and info = "rename table t1 to t2";
+  where state = "Waiting for table" and info = "rename table t1 to t0";
 --source include/wait_condition.inc
 connection default;
 --echo connection: default
-handler t2 open;
-handler t2 read first;
+handler t0 open;
+handler t0 read first;
 --error ER_NO_SUCH_TABLE
 handler t1 read next;
 handler t1 close;
-handler t2 close;
+handler t0 close;
 connection flush;
 reap;
 connection default;
-drop table t2;
+drop table t0;
 connection flush;
 disconnect flush;
 --source include/wait_until_disconnected.inc

=== modified file 'mysql-test/r/handler_innodb.result'
--- a/mysql-test/r/handler_innodb.result	2009-07-28 14:16:37 +0000
+++ b/mysql-test/r/handler_innodb.result	2009-10-29 09:13:48 +0000
@@ -560,24 +560,24 @@ c1
 handler t1 close;
 handler t2 close;
 drop table t1,t2;
-drop table if exists t1,t2;
+drop table if exists t1, t0;
 create table t1 (c1 int);
 connection: default
 handler t1 open;
 handler t1 read first;
 c1
 connection: flush
-rename table t1 to t2;;
+rename table t1 to t0;;
 connection: waiter 
 connection: default
-handler t2 open;
-handler t2 read first;
+handler t0 open;
+handler t0 read first;
 c1
 handler t1 read next;
 ERROR 42S02: Table 'test.t1' doesn't exist
 handler t1 close;
-handler t2 close;
-drop table t2;
+handler t0 close;
+drop table t0;
 drop table if exists t1;
 create temporary table t1 (a int, b char(1), key a(a), key b(a,b));
 insert into t1 values (0,"a"),(1,"b"),(2,"c"),(3,"d"),(4,"e"),

=== modified file 'mysql-test/r/handler_myisam.result'
--- a/mysql-test/r/handler_myisam.result	2009-08-21 08:51:52 +0000
+++ b/mysql-test/r/handler_myisam.result	2009-10-29 09:13:48 +0000
@@ -559,24 +559,24 @@ c1
 handler t1 close;
 handler t2 close;
 drop table t1,t2;
-drop table if exists t1,t2;
+drop table if exists t1, t0;
 create table t1 (c1 int);
 connection: default
 handler t1 open;
 handler t1 read first;
 c1
 connection: flush
-rename table t1 to t2;;
+rename table t1 to t0;;
 connection: waiter 
 connection: default
-handler t2 open;
-handler t2 read first;
+handler t0 open;
+handler t0 read first;
 c1
 handler t1 read next;
 ERROR 42S02: Table 'test.t1' doesn't exist
 handler t1 close;
-handler t2 close;
-drop table t2;
+handler t0 close;
+drop table t0;
 drop table if exists t1;
 create temporary table t1 (a int, b char(1), key a(a), key b(a,b));
 insert into t1 values (0,"a"),(1,"b"),(2,"c"),(3,"d"),(4,"e"),

=== modified file 'mysql-test/r/mdl_sync.result'
--- a/mysql-test/r/mdl_sync.result	2009-10-16 08:24:11 +0000
+++ b/mysql-test/r/mdl_sync.result	2009-10-29 09:13:48 +0000
@@ -23,7 +23,7 @@ SET DEBUG_SYNC= 'RESET';
 # Test coverage for basic deadlock detection in metadata
 # locking subsystem.
 #
-drop tables if exists t1, t2, t3, t4;
+drop tables if exists t0, t1, t2, t3, t4, t5;
 create table t1 (i int);
 create table t2 (j int);
 create table t3 (k int);
@@ -90,7 +90,7 @@ commit;
 #
 # Switching to connection 'deadlock_con1'.
 begin;
-insert into t1 values (2);
+insert into t2 values (2);
 #
 # Switching to connection 'default'.
 # Send:
@@ -98,11 +98,11 @@ rename table t2 to t0, t1 to t2, t0 to t
 #
 # Switching to connection 'deadlock_con1'.
 # Wait until the above RENAME TABLE is blocked because it has to wait
-# for 'deadlock_con1' which holds shared metadata lock on 't1'.
+# for 'deadlock_con1' which holds shared metadata lock on 't2'.
 # 
 # The below statement should not wait as doing so will cause deadlock.
 # Instead it should fail and emit ER_LOCK_DEADLOCK statement.
-select * from t2;
+select * from t1;
 ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
 #
 # Let us check that failure of the above statement has not released
@@ -141,7 +141,7 @@ select * from t2;;
 # for an exclusive metadata lock to go away.
 # Send RENAME TABLE statement that will deadlock with the
 # SELECT statement and thus should abort the latter.
-rename table t1 to t0, t2 to t1, t0 to t2;;
+rename table t1 to t5, t2 to t1, t5 to t2;;
 #
 # Switching to connection 'deadlock_con1'.
 # Since the latest RENAME TABLE entered in deadlock with SELECT
@@ -156,15 +156,17 @@ ERROR 40001: Deadlock found when trying 
 # Commit transaction to unblock this RENAME TABLE.
 commit;
 #
-# Switching to connection 'deadlock_con3'.
-# Reap RENAME TABLE t1 TO t0 ... .
-#
 # Switching to connection 'deadlock_con2'.
 # Commit transaction to unblock the first RENAME TABLE.
 commit;
 #
 # Switching to connection 'default'.
 # Reap RENAME TABLE t2 TO t0 ... .
+#
+# Switching to connection 'deadlock_con3'.
+# Reap RENAME TABLE t1 TO t5 ... .
+#
+# Switching to connection 'default'.
 drop tables t1, t2, t3, t4;
 #
 # Now, test case which shows that deadlock detection empiric

=== modified file 'mysql-test/t/mdl_sync.test'
--- a/mysql-test/t/mdl_sync.test	2009-10-16 08:24:11 +0000
+++ b/mysql-test/t/mdl_sync.test	2009-10-29 09:13:48 +0000
@@ -78,7 +78,7 @@ SET DEBUG_SYNC= 'RESET';
 --echo # locking subsystem.
 --echo #
 --disable_warnings
-drop tables if exists t1, t2, t3, t4;
+drop tables if exists t0, t1, t2, t3, t4, t5;
 --enable_warnings
 
 connect(deadlock_con1,localhost,root,,);
@@ -189,7 +189,7 @@ connection default;
 --echo # Switching to connection 'deadlock_con1'.
 connection deadlock_con1;
 begin;
-insert into t1 values (2);
+insert into t2 values (2);
 
 --echo #
 --echo # Switching to connection 'default'.
@@ -201,7 +201,7 @@ connection default;
 --echo # Switching to connection 'deadlock_con1'.
 connection deadlock_con1;
 --echo # Wait until the above RENAME TABLE is blocked because it has to wait
---echo # for 'deadlock_con1' which holds shared metadata lock on 't1'.
+--echo # for 'deadlock_con1' which holds shared metadata lock on 't2'.
 let $wait_condition=
   select count(*) = 1 from information_schema.processlist
   where state = "Waiting for table" and info = "rename table t2 to t0, t1 to t2, t0 to t1";
@@ -210,7 +210,7 @@ let $wait_condition=
 --echo # The below statement should not wait as doing so will cause deadlock.
 --echo # Instead it should fail and emit ER_LOCK_DEADLOCK statement.
 --error ER_LOCK_DEADLOCK
-select * from t2;
+select * from t1;
 
 --echo #
 --echo # Let us check that failure of the above statement has not released
@@ -276,7 +276,7 @@ let $wait_condition=
 
 --echo # Send RENAME TABLE statement that will deadlock with the
 --echo # SELECT statement and thus should abort the latter.
---send rename table t1 to t0, t2 to t1, t0 to t2;
+--send rename table t1 to t5, t2 to t1, t5 to t2;
 
 --echo #
 --echo # Switching to connection 'deadlock_con1'.
@@ -294,18 +294,12 @@ connection deadlock_con1;
 --echo # is blocked.
 let $wait_condition=
   select count(*) = 1 from information_schema.processlist
-  where state = "Waiting for table" and info = "rename table t1 to t0, t2 to t1, t0 to t2";
+  where state = "Waiting for table" and info = "rename table t1 to t5, t2 to t1, t5 to t2";
 --source include/wait_condition.inc
 --echo # Commit transaction to unblock this RENAME TABLE.
 commit;
 
 --echo #
---echo # Switching to connection 'deadlock_con3'.
-connection deadlock_con3;
---echo # Reap RENAME TABLE t1 TO t0 ... .
---reap;
-
---echo #
 --echo # Switching to connection 'deadlock_con2'.
 connection deadlock_con2;
 --echo # Commit transaction to unblock the first RENAME TABLE.
@@ -317,6 +311,16 @@ connection default;
 --echo # Reap RENAME TABLE t2 TO t0 ... .
 --reap
 
+--echo #
+--echo # Switching to connection 'deadlock_con3'.
+connection deadlock_con3;
+--echo # Reap RENAME TABLE t1 TO t5 ... .
+--reap;
+
+--echo #
+--echo # Switching to connection 'default'.
+connection default;
+
 drop tables t1, t2, t3, t4;
 
 --echo #

=== modified file 'sql/mdl.cc'
--- a/sql/mdl.cc	2009-10-16 08:24:11 +0000
+++ b/sql/mdl.cc	2009-10-29 09:13:48 +0000
@@ -49,19 +49,25 @@ public:
   MDL_key key;
   /** List of granted tickets for this lock. */
   Ticket_list granted;
+  /** Tickets for contexts waiting to acquire shared lock. */
+  Ticket_list waiting_shared;
   /**
+    Tickets for contexts waiting to acquire exclusive lock.
     There can be several upgraders and active exclusive
     locks belonging to the same context. E.g.
     in case of RENAME t1 to t2, t2 to t3, we attempt to
     exclusively lock t2 twice.
   */
-  Ticket_list waiting;
+  Ticket_list waiting_exclusive;
   void   *cached_object;
   mdl_cached_object_release_hook cached_object_release_hook;
+  /** Mutex protecting this lock context. */
+  pthread_mutex_t lock;
 
   bool is_empty() const
   {
-    return (granted.is_empty() && waiting.is_empty());
+    return (granted.is_empty() && waiting_shared.is_empty() &&
+            waiting_exclusive.is_empty());
   }
 
   bool can_grant_lock(const MDL_context *requestor_ctx,
@@ -76,12 +82,17 @@ private:
     cached_object(NULL),
     cached_object_release_hook(NULL)
   {
+    pthread_mutex_init(&lock, NULL);
+  }
+
+  ~MDL_lock()
+  {
+    pthread_mutex_destroy(&lock);
   }
 };
 
 
-static pthread_mutex_t LOCK_mdl;
-static pthread_cond_t  COND_mdl;
+static pthread_mutex_t LOCK_mdl_hash;
 static HASH mdl_locks;
 
 /**
@@ -95,19 +106,31 @@ static HASH mdl_locks;
 class MDL_global_lock
 {
 public:
-  uint waiting_shared;
+  /*
+    QQ: In theory we can get rid of these lists by using separate
+        pthread_cond_t used specifically for global lock.
+        But before doing this we should think about possible
+        consequences for deadlock detection/resolving.
+        Also long term it probably makes sense to have a more
+        generic/uniform structure.
+        Should we do this ?
+  */
+  MDL_lock::Ticket_list waiting_shared;
+  MDL_lock::Ticket_list waiting_intention_exclusive;
   uint active_shared;
   uint active_intention_exclusive;
 
   bool is_empty() const
   {
-    return (waiting_shared == 0 && active_shared == 0 &&
-            active_intention_exclusive == 0);
+    return (waiting_shared.is_empty() &&
+            waiting_intention_exclusive.is_empty() &&
+            active_shared == 0 && active_intention_exclusive == 0);
   }
   bool is_lock_type_compatible(enum_mdl_type type, bool is_upgrade) const;
 };
 
 
+static pthread_mutex_t LOCK_mdl_global;
 static MDL_global_lock global_lock;
 
 
@@ -147,8 +170,8 @@ void mdl_init()
 {
   DBUG_ASSERT(! mdl_initialized);
   mdl_initialized= TRUE;
-  pthread_mutex_init(&LOCK_mdl, NULL);
-  pthread_cond_init(&COND_mdl, NULL);
+  pthread_mutex_init(&LOCK_mdl_hash, NULL);
+  pthread_mutex_init(&LOCK_mdl_global, NULL);
   my_hash_init(&mdl_locks, &my_charset_bin, 16 /* FIXME */, 0, 0,
                mdl_locks_key, 0, 0);
   /* The global lock is zero-initialized by the loader. */
@@ -170,8 +193,8 @@ void mdl_destroy()
     mdl_initialized= FALSE;
     DBUG_ASSERT(!mdl_locks.records);
     DBUG_ASSERT(global_lock.is_empty());
-    pthread_mutex_destroy(&LOCK_mdl);
-    pthread_cond_destroy(&COND_mdl);
+    pthread_mutex_destroy(&LOCK_mdl_global);
+    pthread_mutex_destroy(&LOCK_mdl_hash);
     my_hash_free(&mdl_locks);
   }
 }
@@ -186,6 +209,7 @@ void mdl_destroy()
 void MDL_context::init(THD *thd_arg)
 {
   m_has_global_shared_lock= FALSE;
+  m_global_intention_exclusive_locks= 0;
   m_thd= thd_arg;
   /*
     FIXME: In reset_n_backup_open_tables_state,
@@ -195,7 +219,7 @@ void MDL_context::init(THD *thd_arg)
     to empty the list.
   */
   m_tickets.empty();
-  m_is_waiting_in_mdl= FALSE;
+  pthread_cond_init(&m_cond, NULL);
 }
 
 
@@ -214,7 +238,9 @@ void MDL_context::init(THD *thd_arg)
 void MDL_context::destroy()
 {
   DBUG_ASSERT(m_tickets.is_empty());
+  DBUG_ASSERT(! m_global_intention_exclusive_locks);
   DBUG_ASSERT(! m_has_global_shared_lock);
+  pthread_cond_destroy(&m_cond);
 }
 
 
@@ -235,6 +261,8 @@ void MDL_context::backup_and_reset(MDL_c
   m_tickets.swap(backup->m_tickets);
 
   backup->m_has_global_shared_lock= m_has_global_shared_lock;
+  backup->m_global_intention_exclusive_locks=
+            m_global_intention_exclusive_locks;
   /*
     When the main context is swapped out, one can not take
     the global shared lock, and one can not rely on it:
@@ -242,6 +270,7 @@ void MDL_context::backup_and_reset(MDL_c
     a temporary hack to support ad-hoc opening of system tables.
   */
   m_has_global_shared_lock= FALSE;
+  m_global_intention_exclusive_locks= 0;
 }
 
 
@@ -253,8 +282,11 @@ void MDL_context::restore_from_backup(MD
 {
   DBUG_ASSERT(m_tickets.is_empty());
   DBUG_ASSERT(m_has_global_shared_lock == FALSE);
+  DBUG_ASSERT(m_global_intention_exclusive_locks == 0);
 
   m_tickets.swap(backup->m_tickets);
+  m_global_intention_exclusive_locks=
+    backup->m_global_intention_exclusive_locks;
   m_has_global_shared_lock= backup->m_has_global_shared_lock;
 }
 
@@ -282,9 +314,11 @@ void MDL_context::merge(MDL_context *src
   }
   /*
     MDL_context::merge() is a hack used in one place only: to open
-    an SQL handler. We never acquire the global shared lock there.
+    an SQL handler. We never acquire the global shared lock or global
+    intention exclusive locks there.
   */
   DBUG_ASSERT(! src->m_has_global_shared_lock);
+  DBUG_ASSERT(! src->m_global_intention_exclusive_locks);
 }
 
 
@@ -423,18 +457,21 @@ void MDL_ticket::destroy(MDL_ticket *tic
         will probably introduce too much overhead.
 */
 
-#define MDL_ENTER_COND(A, B) mdl_enter_cond(A, B, __func__, __FILE__, __LINE__)
+#define MDL_ENTER_COND(A, B, C, D) \
+        mdl_enter_cond(A, B, C, D, __func__, __FILE__, __LINE__)
 
 static inline const char *mdl_enter_cond(THD *thd,
                                          st_my_thread_var *mysys_var,
+                                         pthread_cond_t *cond,
+                                         pthread_mutex_t *mutex,
                                          const char *calling_func,
                                          const char *calling_file,
                                          const unsigned int calling_line)
 {
-  safe_mutex_assert_owner(&LOCK_mdl);
+  safe_mutex_assert_owner(mutex);
 
-  mysys_var->current_mutex= &LOCK_mdl;
-  mysys_var->current_cond= &COND_mdl;
+  mysys_var->current_mutex= mutex;
+  mysys_var->current_cond= cond;
 
   DEBUG_SYNC(thd, "mdl_enter_cond");
 
@@ -442,18 +479,20 @@ static inline const char *mdl_enter_cond
                            calling_func, calling_file, calling_line);
 }
 
-#define MDL_EXIT_COND(A, B, C) mdl_exit_cond(A, B, C, __func__, __FILE__, __LINE__)
+#define MDL_EXIT_COND(A, B, C, D) \
+        mdl_exit_cond(A, B, C, D, __func__, __FILE__, __LINE__)
 
 static inline void mdl_exit_cond(THD *thd,
                                  st_my_thread_var *mysys_var,
+                                 pthread_mutex_t *mutex,
                                  const char* old_msg,
                                  const char *calling_func,
                                  const char *calling_file,
                                  const unsigned int calling_line)
 {
-  DBUG_ASSERT(&LOCK_mdl == mysys_var->current_mutex);
+  DBUG_ASSERT(mutex == mysys_var->current_mutex);
 
-  pthread_mutex_unlock(&LOCK_mdl);
+  pthread_mutex_unlock(mutex);
   pthread_mutex_lock(&mysys_var->mutex);
   mysys_var->current_mutex= 0;
   mysys_var->current_cond= 0;
@@ -515,7 +554,7 @@ MDL_global_lock::is_lock_type_compatible
     return TRUE;
     break;
   case MDL_SHARED_UPGRADABLE:
-    if (active_shared || waiting_shared)
+    if (active_shared || ! waiting_shared.is_empty())
     {
       /*
         We are going to obtain intention exclusive global lock and
@@ -540,7 +579,7 @@ MDL_global_lock::is_lock_type_compatible
     }
     else
     {
-      if (active_shared || waiting_shared)
+      if (active_shared || ! waiting_shared.is_empty())
       {
         /*
           We are going to obtain intention exclusive global lock and
@@ -604,7 +643,7 @@ MDL_lock::can_grant_lock(const MDL_conte
     if (type == MDL_lock::MDL_LOCK_SHARED)
     {
       /* Pending exclusive locks have higher priority over shared locks. */
-      if (waiting.is_empty() || type_arg == MDL_SHARED_HIGH_PRIO)
+      if (waiting_exclusive.is_empty() || type_arg == MDL_SHARED_HIGH_PRIO)
         can_grant= TRUE;
     }
     else if (granted.head()->get_ctx() == requestor_ctx)
@@ -684,6 +723,176 @@ MDL_context::find_ticket(MDL_request *md
 
 
 /**
+  Try to acquire global intention exclusive lock.
+
+  @param[in]  mdl_request  Lock request object for lock to be acquired
+  @param[out] acquired     FALSE - if lock was not acquired due to conflict.
+                           TRUE  - if lock was successfully acquired.
+
+  @retval  FALSE   Success. The lock may have not been acquired.
+                   One needs to check value of 'acquired' out-parameter
+                   to find out what has happened.
+  @retval  TRUE    Error.
+*/
+
+bool
+MDL_context::
+try_acquire_global_intention_exclusive_lock(MDL_request *mdl_request,
+                                            bool *acquired)
+{
+  DBUG_ASSERT(mdl_request->type == MDL_SHARED_UPGRADABLE ||
+              mdl_request->type == MDL_EXCLUSIVE);
+
+  *acquired= FALSE;
+
+  if (m_has_global_shared_lock)
+  {
+    my_error(ER_CANT_UPDATE_WITH_READLOCK, MYF(0));
+    return TRUE;
+  }
+
+  if (! m_global_intention_exclusive_locks)
+  {
+    pthread_mutex_lock(&LOCK_mdl_global);
+    if (!global_lock.is_lock_type_compatible(mdl_request->type, FALSE))
+    {
+      pthread_mutex_unlock(&LOCK_mdl_global);
+      return FALSE;
+    }
+    global_lock.active_intention_exclusive++;
+    pthread_mutex_unlock(&LOCK_mdl_global);
+  }
+
+  m_global_intention_exclusive_locks++;
+  *acquired= TRUE;
+
+  return FALSE;
+}
+
+
+/**
+  Acquire global intention exclusive lock.
+
+  @param[in]  mdl_request  Lock request object for lock to be acquired
+
+  @retval  FALSE   Success. The lock has been acquired.
+  @retval  TRUE    Error.
+*/
+
+bool
+MDL_context::acquire_global_intention_exclusive_lock(MDL_request *mdl_request)
+{
+  const char *old_msg;
+  st_my_thread_var *mysys_var= my_thread_var;
+
+  DBUG_ASSERT(mdl_request->type == MDL_EXCLUSIVE);
+
+  if (m_has_global_shared_lock)
+  {
+    my_error(ER_CANT_UPDATE_WITH_READLOCK, MYF(0));
+    return TRUE;
+  }
+
+  /*
+    Grant global intention exclusive lock without waiting if this context
+    already has global intention exclusive lock (or more precisely mark
+    in the context that another instance of such lock was acquired).
+
+    The fact that we don't wait in such situation allows to avoid deadlocks
+    in cases when pending request for global shared lock pops up after the
+    moment when thread has acquired its first intention exclusive lock but
+    before it has requested the second instance of such lock.
+  */
+  if (m_global_intention_exclusive_locks)
+  {
+    m_global_intention_exclusive_locks++;
+    return FALSE;
+  }
+
+  /*
+    Otherwise we might have to wait until active global shared lock or
+    pending requests will go away. Since we won't hold any resources
+    while doing it deadlocks are not possible,
+  */
+  DBUG_ASSERT(! has_locks());
+
+  pthread_mutex_lock(&LOCK_mdl_global);
+
+  old_msg= MDL_ENTER_COND(m_thd, mysys_var, &m_cond, &LOCK_mdl_global);
+
+  if (unlikely(! global_lock.is_lock_type_compatible(mdl_request->type, FALSE)))
+  {
+    MDL_ticket *pending_ticket;
+
+    /*
+      Create a temporary ticket and add it to waiters list, to allow
+      threads releasing shared global lock wake-up this thread.
+    */
+    if (! (pending_ticket= MDL_ticket::create(this, mdl_request->type)))
+    {
+      pthread_mutex_unlock(&LOCK_mdl_global);
+      return TRUE;
+    }
+    global_lock.waiting_intention_exclusive.push_front(pending_ticket);
+
+    do
+    {
+      pthread_cond_wait(&this->m_cond, &LOCK_mdl_global);
+    }
+    while (! global_lock.is_lock_type_compatible(mdl_request->type, FALSE) &&
+           ! mysys_var->abort);
+
+    global_lock.waiting_intention_exclusive.remove(pending_ticket);
+    MDL_ticket::destroy(pending_ticket);
+
+    if (mysys_var->abort)
+    {
+      MDL_EXIT_COND(m_thd, mysys_var, &LOCK_mdl_global, old_msg);
+      return TRUE;
+    }
+  }
+
+  global_lock.active_intention_exclusive++;
+
+  MDL_EXIT_COND(m_thd, mysys_var, &LOCK_mdl_global, old_msg);
+
+  m_global_intention_exclusive_locks++;
+
+  return FALSE;
+}
+
+
+/**
+  Release global intention exclusive lock.
+*/
+
+void MDL_context::release_global_intention_exclusive_lock()
+{
+  m_global_intention_exclusive_locks--;
+
+  if (! m_global_intention_exclusive_locks)
+  {
+    pthread_mutex_lock(&LOCK_mdl_global);
+    global_lock.active_intention_exclusive--;
+
+    /*
+      Wake up waiters if this was the last of global intention exclusive
+      locks and there are pending shared locks.
+    */
+    if (unlikely(global_lock.active_intention_exclusive == 0 &&
+                 ! global_lock.waiting_shared.is_empty()))
+    {
+      MDL_lock::Ticket_iterator it(global_lock.waiting_shared);
+      MDL_ticket *ticket;
+      while ((ticket= it++))
+        ticket->get_ctx()->wake_up();
+    }
+    pthread_mutex_unlock(&LOCK_mdl_global);
+  }
+}
+
+
+/**
   Try to acquire one shared lock.
 
   Unlike exclusive locks, shared locks are acquired one by
@@ -717,13 +926,6 @@ MDL_context::try_acquire_shared_lock(MDL
   mdl_request->ticket= NULL;
   safe_mutex_assert_not_owner(&LOCK_open);
 
-  if (m_has_global_shared_lock &&
-      mdl_request->type == MDL_SHARED_UPGRADABLE)
-  {
-    my_error(ER_CANT_UPDATE_WITH_READLOCK, MYF(0));
-    return TRUE;
-  }
-
   /*
     Check whether the context already holds a shared lock on the object,
     and if so, grant the request.
@@ -731,26 +933,29 @@ MDL_context::try_acquire_shared_lock(MDL
   if ((ticket= find_ticket(mdl_request)))
   {
     DBUG_ASSERT(ticket->m_state == MDL_ACQUIRED);
-    /* Only shared locks can be recursive. */
     DBUG_ASSERT(ticket->is_shared());
     mdl_request->ticket= ticket;
     return FALSE;
   }
 
-  pthread_mutex_lock(&LOCK_mdl);
-
-  if (!global_lock.is_lock_type_compatible(mdl_request->type, FALSE))
+  if (mdl_request->type == MDL_SHARED_UPGRADABLE)
   {
-    pthread_mutex_unlock(&LOCK_mdl);
-    return FALSE;
+    bool acquired;
+
+    if (try_acquire_global_intention_exclusive_lock(mdl_request, &acquired))
+      return TRUE;
+    if (! acquired)
+      return FALSE;
   }
 
   if (!(ticket= MDL_ticket::create(this, mdl_request->type)))
   {
-    pthread_mutex_unlock(&LOCK_mdl);
+    if (mdl_request->type == MDL_SHARED_UPGRADABLE)
+      release_global_intention_exclusive_lock();
     return TRUE;
   }
 
+  pthread_mutex_lock(&LOCK_mdl_hash);
   if (!(lock= (MDL_lock*) my_hash_search(&mdl_locks,
                                          key->ptr(), key->length())))
   {
@@ -759,12 +964,18 @@ MDL_context::try_acquire_shared_lock(MDL
     if (!lock || my_hash_insert(&mdl_locks, (uchar*)lock))
     {
       MDL_lock::destroy(lock);
+      pthread_mutex_unlock(&LOCK_mdl_hash);
       MDL_ticket::destroy(ticket);
-      pthread_mutex_unlock(&LOCK_mdl);
+      if (mdl_request->type == MDL_SHARED_UPGRADABLE)
+        release_global_intention_exclusive_lock();
       return TRUE;
     }
   }
 
+  pthread_mutex_lock(&lock->lock);
+
+  pthread_mutex_unlock(&LOCK_mdl_hash);
+
   if (lock->can_grant_lock(this, mdl_request->type, FALSE))
   {
     mdl_request->ticket= ticket;
@@ -772,16 +983,17 @@ MDL_context::try_acquire_shared_lock(MDL
     m_tickets.push_front(ticket);
     ticket->m_state= MDL_ACQUIRED;
     ticket->m_lock= lock;
-    if (mdl_request->type == MDL_SHARED_UPGRADABLE)
-      global_lock.active_intention_exclusive++;
+    pthread_mutex_unlock(&lock->lock);
   }
   else
   {
     /* We can't get here if we allocated a new lock. */
     DBUG_ASSERT(! lock->is_empty());
+    pthread_mutex_unlock(&lock->lock);
     MDL_ticket::destroy(ticket);
+    if (mdl_request->type == MDL_SHARED_UPGRADABLE)
+      release_global_intention_exclusive_lock();
   }
-  pthread_mutex_unlock(&LOCK_mdl);
 
   return FALSE;
 }
@@ -793,14 +1005,10 @@ MDL_context::try_acquire_shared_lock(MDL
 
   @param thd               Current thread context
   @param conflicting_ticket  Conflicting metadata lock
-
-  @retval TRUE   A thread was woken up
-  @retval FALSE  Lock is not a shared one or no thread was woken up
 */
 
-bool notify_shared_lock(THD *thd, MDL_ticket *conflicting_ticket)
+void notify_shared_lock(THD *thd, MDL_ticket *conflicting_ticket)
 {
-  bool woke= FALSE;
   if (conflicting_ticket->is_shared())
   {
     THD *conflicting_thd= conflicting_ticket->get_ctx()->get_thd();
@@ -808,196 +1016,264 @@ bool notify_shared_lock(THD *thd, MDL_ti
 
     /*
       If thread which holds conflicting lock is waiting inside of MDL
-      subsystem wake it up by broadcasting on COND_mdl.
-      Otherwise assume that it is waiting on table-level lock or some other
-      non-MDL resource and delegate its waking up to code outside of MDL.
+      subsystem it has to be woken up by calling MDL_context::wake_up().
     */
-    if (conflicting_ticket->get_ctx()->m_is_waiting_in_mdl)
-    {
-      pthread_cond_broadcast(&COND_mdl);
-      woke= TRUE;
-    }
-    else
-      woke= mysql_notify_thread_having_shared_lock(thd, conflicting_thd);
+    conflicting_ticket->get_ctx()->wake_up();
+    /*
+      If it is waiting on table-level lock or some other non-MDL resource
+      we delegate its waking up to code outside of MDL.
+    */
+    mysql_notify_thread_having_shared_lock(thd, conflicting_thd);
   }
-  return woke;
-}
-
-
-/**
-  Acquire a single exclusive lock. A convenience
-  wrapper around the method acquiring a list of locks.
-*/
-
-bool MDL_context::acquire_exclusive_lock(MDL_request *mdl_request)
-{
-  MDL_request_list mdl_requests;
-  mdl_requests.push_front(mdl_request);
-  return acquire_exclusive_locks(&mdl_requests);
 }
 
 
 /**
-  Acquire exclusive locks. The context must contain the list of
-  locks to be acquired. There must be no granted locks in the
-  context.
+  Auxiliary method for acquiring an exclusive lock.
 
-  This is a replacement of lock_table_names(). It is used in
-  RENAME, DROP and other DDL SQL statements.
+  @param mdl_request  Request for the lock to be acqured.
 
-  @note The MDL context may not have non-exclusive lock requests
-        or acquired locks.
+  @note Should not be used outside of MDL subsystem. Instead one should
+        call acquire_exclusive_lock() or acquire_exclusive_locks() methods
+        which ensure that conditions for deadlock-free lock acquisition are
+        fulfilled.
 
   @retval FALSE  Success
   @retval TRUE   Failure
 */
 
-bool MDL_context::acquire_exclusive_locks(MDL_request_list *mdl_requests)
+bool MDL_context::acquire_exclusive_lock_impl(MDL_request *mdl_request)
 {
   MDL_lock *lock;
-  bool signalled= FALSE;
   const char *old_msg;
-  MDL_request *mdl_request;
   MDL_ticket *ticket;
   st_my_thread_var *mysys_var= my_thread_var;
-  MDL_request_list::Iterator it(*mdl_requests);
+  MDL_key *key= &mdl_request->key;
+
+  DBUG_ASSERT(mdl_request->type == MDL_EXCLUSIVE &&
+              mdl_request->ticket == NULL);
 
   safe_mutex_assert_not_owner(&LOCK_open);
-  /* Exclusive locks must always be acquired first, all at once. */
-  DBUG_ASSERT(! has_locks());
 
-  if (m_has_global_shared_lock)
+  /* Don't take chances in production. */
+  mdl_request->ticket= NULL;
+
+  /*
+    Check whether the context already holds an exclusive lock on the object,
+    and if so, grant the request.
+  */
+  if ((ticket= find_ticket(mdl_request)))
   {
-    my_error(ER_CANT_UPDATE_WITH_READLOCK, MYF(0));
-    return TRUE;
+    DBUG_ASSERT(ticket->m_state == MDL_ACQUIRED);
+    DBUG_ASSERT(ticket->m_type == MDL_EXCLUSIVE);
+    mdl_request->ticket= ticket;
+    return FALSE;
   }
 
-  pthread_mutex_lock(&LOCK_mdl);
-
-  old_msg= MDL_ENTER_COND(m_thd, mysys_var);
+  if (acquire_global_intention_exclusive_lock(mdl_request))
+    return TRUE;
 
-  while ((mdl_request= it++))
+  /* Early allocation: ticket will be needed in any case. */
+  if (!(ticket= MDL_ticket::create(this, mdl_request->type)))
   {
-    MDL_key *key= &mdl_request->key;
-    DBUG_ASSERT(mdl_request->type == MDL_EXCLUSIVE &&
-                mdl_request->ticket == NULL);
-
-    /* Don't take chances in production. */
-    mdl_request->ticket= NULL;
-
-    /* Early allocation: ticket is used as a shortcut to the lock. */
-    if (!(ticket= MDL_ticket::create(this, mdl_request->type)))
-      goto err;
+    release_global_intention_exclusive_lock();
+    return TRUE;
+  }
 
-    if (!(lock= (MDL_lock*) my_hash_search(&mdl_locks,
-                                           key->ptr(), key->length())))
+  pthread_mutex_lock(&LOCK_mdl_hash);
+  if (!(lock= (MDL_lock*) my_hash_search(&mdl_locks, key->ptr(),
+                                         key->length())))
+  {
+    lock= MDL_lock::create(key);
+    if (!lock || my_hash_insert(&mdl_locks, (uchar*)lock))
     {
-      lock= MDL_lock::create(key);
-      if (!lock || my_hash_insert(&mdl_locks, (uchar*)lock))
-      {
-        MDL_ticket::destroy(ticket);
-        MDL_lock::destroy(lock);
-        goto err;
-      }
+      MDL_lock::destroy(lock);
+      pthread_mutex_unlock(&LOCK_mdl_hash);
+      MDL_ticket::destroy(ticket);
+      release_global_intention_exclusive_lock();
+      return TRUE;
     }
-
-    mdl_request->ticket= ticket;
-    lock->waiting.push_front(ticket);
-    ticket->m_lock= lock;
   }
 
-  while (1)
-  {
-    it.rewind();
-    while ((mdl_request= it++))
-    {
-      lock= mdl_request->ticket->m_lock;
+  pthread_mutex_lock(&lock->lock);
+  pthread_mutex_unlock(&LOCK_mdl_hash);
 
-      if (!global_lock.is_lock_type_compatible(mdl_request->type, FALSE))
-      {
-        /*
-          Someone owns or wants to acquire the global shared lock so
-          we have to wait until he goes away.
-        */
-        signalled= TRUE;
-        break;
-      }
-      else if (!lock->can_grant_lock(this, mdl_request->type, FALSE))
-      {
-        MDL_ticket *conflicting_ticket;
-        MDL_lock::Ticket_iterator it(lock->granted);
+  mdl_request->ticket= ticket;
+  lock->waiting_exclusive.push_front(ticket);
+  ticket->m_lock= lock;
 
-        signalled= (lock->type == MDL_lock::MDL_LOCK_EXCLUSIVE);
+  old_msg= MDL_ENTER_COND(m_thd, mysys_var, &m_cond, &lock->lock);
 
-        while ((conflicting_ticket= it++))
-          signalled|= notify_shared_lock(m_thd, conflicting_ticket);
+  while (!lock->can_grant_lock(this, mdl_request->type, FALSE))
+  {
+    MDL_ticket *conflicting_ticket;
+    MDL_lock::Ticket_iterator it(lock->granted);
 
-        break;
-      }
-    }
-    if (!mdl_request)
-      break;
+    while ((conflicting_ticket= it++))
+      notify_shared_lock(m_thd, conflicting_ticket);
 
     /* There is a shared or exclusive lock on the object. */
     DEBUG_SYNC(m_thd, "mdl_acquire_exclusive_locks_wait");
 
-    if (signalled)
-      pthread_cond_wait(&COND_mdl, &LOCK_mdl);
-    else
+    /*
+      Another thread might have obtained a shared MDL lock on some table
+      but has not yet opened it and/or tried to obtain data lock on it.
+      Also invocation of acquire_exclusive_lock() method and consequently
+      first call to notify_shared_lock() might have happened right after
+      thread holding shared metadata lock in wait_for_locks() method
+      checked that there are no pending conflicting locks but before
+      it has started waiting.
+      In both these cases we need to sleep until these threads will start
+      waiting and try to abort them once again.
+
+      QQ: What is the optimal value for this sleep?
+    */
+    struct timespec abstime;
+    set_timespec(abstime, 1);
+    pthread_cond_timedwait(&m_cond, &lock->lock, &abstime);
+
+    if (mysys_var->abort)
     {
       /*
-        Another thread obtained a shared MDL lock on some table but
-        has not yet opened it and/or tried to obtain data lock on
-        it. In this case we need to wait until this happens and try
-        to abort this thread once again.
+        Since we might have to delete MDL_lock object from the hash we have
+        to acquire LOCK_mdl_hash mutex.
+        To do this we need to temporarily release MDL_lock::lock mutex first.
+      */
+      MDL_EXIT_COND(m_thd, mysys_var, &lock->lock, old_msg);
+      pthread_mutex_lock(&LOCK_mdl_hash);
+      pthread_mutex_lock(&lock->lock);
+      /* Get rid of pending ticket. */
+      lock->waiting_exclusive.remove(ticket);
+      /*
+        If there are no active/pending exclusive locks wake up contexts
+        waiting for shared lock.
       */
-      struct timespec abstime;
-      set_timespec(abstime, 10);
-      pthread_cond_timedwait(&COND_mdl, &LOCK_mdl, &abstime);
+      if (lock->type == MDL_lock::MDL_LOCK_SHARED &&
+          ! lock->waiting_shared.is_empty() &&
+          lock->waiting_exclusive.is_empty())
+      {
+        MDL_lock::Ticket_iterator it(lock->waiting_shared);
+        MDL_ticket *wake_up_ticket;
+        while ((wake_up_ticket= it++))
+          pthread_cond_signal(&wake_up_ticket->get_ctx()->m_cond);
+        pthread_mutex_unlock(&lock->lock);
+      }
+      else if (lock->is_empty())
+      {
+        my_hash_delete(&mdl_locks, (uchar *)lock);
+        if (lock->cached_object)
+          (*lock->cached_object_release_hook)(lock->cached_object);
+        pthread_mutex_unlock(&lock->lock);
+        MDL_lock::destroy(lock);
+      }
+      pthread_mutex_unlock(&LOCK_mdl_hash);
+      MDL_ticket::destroy(ticket);
+      release_global_intention_exclusive_lock();
+      mdl_request->ticket= NULL;
+      return TRUE;
     }
-    if (mysys_var->abort)
-      goto err;
   }
-  it.rewind();
-  while ((mdl_request= it++))
+
+  lock->type= MDL_lock::MDL_LOCK_EXCLUSIVE;
+
+  lock->waiting_exclusive.remove(ticket);
+  lock->granted.push_front(ticket);
+  m_tickets.push_front(ticket);
+  ticket->m_state= MDL_ACQUIRED;
+
+  if (lock->cached_object)
+    (*lock->cached_object_release_hook)(lock->cached_object);
+  lock->cached_object= NULL;
+
+  MDL_EXIT_COND(m_thd, mysys_var, &lock->lock, old_msg);
+
+  return FALSE;
+}
+
+
+/**
+  Acquire an exclusive lock.
+
+  @param mdl_request  Request for the lock to be acqured.
+
+  @retval FALSE  Success
+  @retval TRUE   Failure
+*/
+
+bool MDL_context::acquire_exclusive_lock(MDL_request *mdl_request)
+{
+  /* Exclusive locks must always be acquired first, all at once. */
+  DBUG_ASSERT(! has_locks());
+
+  return acquire_exclusive_lock_impl(mdl_request);
+}
+
+
+extern "C" int mdl_request_ptr_cmp(const void* ptr1, const void* ptr2)
+{
+  MDL_request *req1= *(MDL_request**)ptr1;
+  MDL_request *req2= *(MDL_request**)ptr2;
+  return req1->key.cmp(&req2->key);
+}
+
+
+/**
+  Acquire exclusive locks. There must be no granted locks in the
+  context.
+
+  This is a replacement of lock_table_names(). It is used in
+  RENAME, DROP and other DDL SQL statements.
+
+  @param  mdl_requests  List of requests for locks to be acquired.
+
+  @note The list of requests should not contain non-exclusive lock requests.
+        There should not be any acquired locks in the context.
+
+  @retval FALSE  Success
+  @retval TRUE   Failure
+*/
+
+bool MDL_context::acquire_exclusive_locks(MDL_request_list *mdl_requests)
+{
+  MDL_request_list::Iterator it(*mdl_requests);
+  MDL_request **sort_buf;
+  uint i;
+
+  /* Exclusive locks must always be acquired first, all at once. */
+  DBUG_ASSERT(! has_locks());
+
+  if (mdl_requests->is_empty())
+    return FALSE;
+
+  /* Sort requests according to MDL_key. */
+  if (! (sort_buf= (MDL_request **)my_malloc(mdl_requests->elements() *
+                                             sizeof(MDL_request *),
+                                             MYF(MY_WME))))
+    return TRUE;
+
+  for (i= 0; i < mdl_requests->elements(); i++)
+    sort_buf[i]= it++;
+
+  my_qsort(sort_buf, mdl_requests->elements(), sizeof(MDL_request*),
+           mdl_request_ptr_cmp);
+
+  for (i= 0; i < mdl_requests->elements(); i++)
   {
-    global_lock.active_intention_exclusive++;
-    ticket= mdl_request->ticket;
-    lock= ticket->m_lock;
-    lock->type= MDL_lock::MDL_LOCK_EXCLUSIVE;
-    lock->waiting.remove(ticket);
-    lock->granted.push_front(ticket);
-    m_tickets.push_front(ticket);
-    ticket->m_state= MDL_ACQUIRED;
-    if (lock->cached_object)
-      (*lock->cached_object_release_hook)(lock->cached_object);
-    lock->cached_object= NULL;
+    if (acquire_exclusive_lock_impl(sort_buf[i]))
+      goto err;
   }
-  /* As a side-effect MDL_EXIT_COND() unlocks LOCK_mdl. */
-  MDL_EXIT_COND(m_thd, mysys_var, old_msg);
+  my_free(sort_buf, MYF(0));
   return FALSE;
 
 err:
-  /* Remove our pending tickets from the locks. */
-  it.rewind();
-  while ((mdl_request= it++) && mdl_request->ticket)
-  {
-    ticket= mdl_request->ticket;
-    DBUG_ASSERT(ticket->m_state == MDL_PENDING);
-    lock= ticket->m_lock;
-    lock->waiting.remove(ticket);
-    MDL_ticket::destroy(ticket);
+  /* Release locks we have managed to acquire so far. */
+  for (i= 0; i < mdl_requests->elements() && sort_buf[i]->ticket; i++)
+  {
+    release_lock(sort_buf[i]->ticket);
     /* Reset lock request back to its initial state. */
-    mdl_request->ticket= NULL;
-    if (lock->is_empty())
-    {
-      my_hash_delete(&mdl_locks, (uchar *)lock);
-      MDL_lock::destroy(lock);
-    }
+    sort_buf[i]->ticket= NULL;
   }
-  /* May be some pending requests for shared locks can be satisfied now. */
-  pthread_cond_broadcast(&COND_mdl);
-  MDL_EXIT_COND(m_thd, mysys_var, old_msg);
+  my_free(sort_buf, MYF(0));
   return TRUE;
 }
 
@@ -1041,7 +1317,13 @@ MDL_ticket::upgrade_shared_lock_to_exclu
   /* Only allow upgrades from MDL_SHARED_UPGRADABLE */
   DBUG_ASSERT(m_type == MDL_SHARED_UPGRADABLE);
 
-  pthread_mutex_lock(&LOCK_mdl);
+  /*
+    Since we should have already acquired an intention exclusive
+    global lock this call is only enforcing asserts.
+  */
+  DBUG_ASSERT(m_ctx->is_global_intention_exclusive_lock_owner());
+
+  pthread_mutex_lock(&m_lock->lock);
 
   /*
     Create an auxiliary ticket representing pending exclusive lock and
@@ -1049,26 +1331,19 @@ MDL_ticket::upgrade_shared_lock_to_exclu
   */
   if (! (pending_ticket= MDL_ticket::create(this->m_ctx, MDL_EXCLUSIVE)))
   {
-    pthread_mutex_unlock(&LOCK_mdl);
+    pthread_mutex_unlock(&m_lock->lock);
     DBUG_RETURN(TRUE);
   }
   pending_ticket->m_lock= m_lock;
-  m_lock->waiting.push_front(pending_ticket);
+  m_lock->waiting_exclusive.push_front(pending_ticket);
 
-  old_msg= MDL_ENTER_COND(thd, mysys_var);
-
-  /*
-    Since we should have already acquired an intention exclusive
-    global lock this call is only enforcing asserts.
-  */
-  DBUG_ASSERT(global_lock.is_lock_type_compatible(MDL_EXCLUSIVE, TRUE));
+  old_msg= MDL_ENTER_COND(thd, mysys_var, &m_ctx->m_cond, &m_lock->lock);
 
   while (1)
   {
     if (m_lock->can_grant_lock(m_ctx, MDL_EXCLUSIVE, TRUE))
       break;
 
-    bool signalled= FALSE;
     MDL_ticket *conflicting_ticket;
     MDL_lock::Ticket_iterator it(m_lock->granted);
 
@@ -1096,35 +1371,45 @@ MDL_ticket::upgrade_shared_lock_to_exclu
     while ((conflicting_ticket= it++))
     {
       if (conflicting_ticket->m_ctx != m_ctx)
-        signalled|= notify_shared_lock(thd, conflicting_ticket);
+        notify_shared_lock(thd, conflicting_ticket);
     }
 
     /* There is a shared or exclusive lock on the object. */
     DEBUG_SYNC(thd, "mdl_upgrade_shared_lock_to_exclusive_wait");
 
-    if (signalled)
-      pthread_cond_wait(&COND_mdl, &LOCK_mdl);
-    else
-    {
-      /*
-        Another thread obtained a shared MDL lock on some table but
-        has not yet opened it and/or tried to obtain data lock on
-        it. In this case we need to wait until this happens and try
-        to abort this thread once again.
-      */
-      struct timespec abstime;
-      set_timespec(abstime, 10);
-      DBUG_PRINT("info", ("Failed to wake-up from table-level lock ... sleeping"));
-      pthread_cond_timedwait(&COND_mdl, &LOCK_mdl, &abstime);
-    }
+    /*
+      Another thread might have obtained a shared MDL lock on some table
+      but has not yet opened it and/or tried to obtain data lock on it.
+      Also invocation of acquire_exclusive_lock() method and consequently
+      first call to notify_shared_lock() might have happened right after
+      thread holding shared metadata lock in wait_for_locks() method
+      checked that there are no pending conflicting locks but before
+      it has started waiting.
+      In both these cases we need to sleep until these threads will start
+      waiting and try to abort them once again.
+    */
+    struct timespec abstime;
+    set_timespec(abstime, 1);
+    pthread_cond_timedwait(&m_ctx->m_cond, &m_lock->lock, &abstime);
+
     if (mysys_var->abort)
     {
       /* Get rid of auxiliary pending ticket. */
-      m_lock->waiting.remove(pending_ticket);
+      m_lock->waiting_exclusive.remove(pending_ticket);
       MDL_ticket::destroy(pending_ticket);
-      /* Pending requests for shared locks can be satisfied now. */
-      pthread_cond_broadcast(&COND_mdl);
-      MDL_EXIT_COND(thd, mysys_var, old_msg);
+      /*
+        If there are no other pending requests for exclusive locks
+        wake up threads waiting for chance to acquire shared lock.
+      */
+      if (! m_lock->waiting_shared.is_empty() &&
+          m_lock->waiting_exclusive.is_empty())
+      {
+        MDL_lock::Ticket_iterator it(m_lock->waiting_shared);
+        MDL_ticket *wake_up_ticket;
+        while ((wake_up_ticket= it++))
+          wake_up_ticket->get_ctx()->wake_up();
+      }
+      MDL_EXIT_COND(thd, mysys_var, &m_lock->lock, old_msg);
       DBUG_RETURN(TRUE);
     }
   }
@@ -1134,15 +1419,14 @@ MDL_ticket::upgrade_shared_lock_to_exclu
   m_type= MDL_EXCLUSIVE;
 
   /* Get rid of auxiliary pending ticket. */
-  m_lock->waiting.remove(pending_ticket);
+  m_lock->waiting_exclusive.remove(pending_ticket);
   MDL_ticket::destroy(pending_ticket);
 
   if (m_lock->cached_object)
     (*m_lock->cached_object_release_hook)(m_lock->cached_object);
   m_lock->cached_object= 0;
 
-  /* As a side-effect MDL_EXIT_COND() unlocks LOCK_mdl. */
-  MDL_EXIT_COND(thd, mysys_var, old_msg);
+  MDL_EXIT_COND(thd, mysys_var, &m_lock->lock, old_msg);
   DBUG_RETURN(FALSE);
 }
 
@@ -1176,6 +1460,7 @@ MDL_context::try_acquire_exclusive_lock(
   MDL_lock *lock;
   MDL_ticket *ticket;
   MDL_key *key= &mdl_request->key;
+  bool acquired;
 
   DBUG_ASSERT(mdl_request->type == MDL_EXCLUSIVE &&
               mdl_request->ticket == NULL);
@@ -1184,7 +1469,19 @@ MDL_context::try_acquire_exclusive_lock(
 
   mdl_request->ticket= NULL;
 
-  pthread_mutex_lock(&LOCK_mdl);
+  if (try_acquire_global_intention_exclusive_lock(mdl_request, &acquired))
+    return TRUE;
+  /*
+    Since in MySQL this method is called only in cases when context
+    already has global intention exclusive lock the above call should
+    always succeed to acquire another instance of such lock.
+    But we prefer to play safe and handle failure to acquire global
+    lock as well.
+  */
+  if (! acquired)
+    return FALSE;
+
+  pthread_mutex_lock(&LOCK_mdl_hash);
 
   if (!(lock= (MDL_lock*) my_hash_search(&mdl_locks,
                                          key->ptr(), key->length())))
@@ -1195,18 +1492,24 @@ MDL_context::try_acquire_exclusive_lock(
     {
       MDL_ticket::destroy(ticket);
       MDL_lock::destroy(lock);
-      pthread_mutex_unlock(&LOCK_mdl);
+      pthread_mutex_unlock(&LOCK_mdl_hash);
       return TRUE;
     }
+    pthread_mutex_lock(&lock->lock);
+    pthread_mutex_unlock(&LOCK_mdl_hash);
     mdl_request->ticket= ticket;
     lock->type= MDL_lock::MDL_LOCK_EXCLUSIVE;
     lock->granted.push_front(ticket);
     m_tickets.push_front(ticket);
     ticket->m_state= MDL_ACQUIRED;
     ticket->m_lock= lock;
-    global_lock.active_intention_exclusive++;
+    pthread_mutex_unlock(&lock->lock);
+  }
+  else
+  {
+    pthread_mutex_unlock(&LOCK_mdl_hash);
+    release_global_intention_exclusive_lock();
   }
-  pthread_mutex_unlock(&LOCK_mdl);
   return FALSE;
 }
 
@@ -1225,29 +1528,54 @@ bool MDL_context::acquire_global_shared_
 {
   st_my_thread_var *mysys_var= my_thread_var;
   const char *old_msg;
+  MDL_ticket *pending_ticket;
 
   safe_mutex_assert_not_owner(&LOCK_open);
   DBUG_ASSERT(!m_has_global_shared_lock);
 
-  pthread_mutex_lock(&LOCK_mdl);
+  if (! (pending_ticket= MDL_ticket::create(this, MDL_SHARED)))
+    return TRUE;
+
+  pthread_mutex_lock(&LOCK_mdl_global);
+  /*
+    Add temporary ticket to the list of waiters so we can be properly
+    woken-up once all active intention exclusive locks go away.
+  */
+  global_lock.waiting_shared.push_front(pending_ticket);
 
-  global_lock.waiting_shared++;
-  old_msg= MDL_ENTER_COND(m_thd, mysys_var);
+  old_msg= MDL_ENTER_COND(m_thd, mysys_var, &m_cond, &LOCK_mdl_global);
 
-  while (!mysys_var->abort && global_lock.active_intention_exclusive)
-    pthread_cond_wait(&COND_mdl, &LOCK_mdl);
+  while (global_lock.active_intention_exclusive && ! mysys_var->abort)
+    pthread_cond_wait(&m_cond, &LOCK_mdl_global);
 
-  global_lock.waiting_shared--;
   if (mysys_var->abort)
   {
-    /* As a side-effect MDL_EXIT_COND() unlocks LOCK_mdl. */
-    MDL_EXIT_COND(m_thd, mysys_var, old_msg);
+    global_lock.waiting_shared.remove(pending_ticket);
+    /*
+      If this was the last request for global shared lock and there is
+      no active shared lock we need to wake up all waiters.
+    */
+    if (global_lock.active_shared == 0 &&
+        global_lock.waiting_shared.is_empty() &&
+        ! global_lock.waiting_intention_exclusive.is_empty())
+    {
+      MDL_lock::Ticket_iterator it(global_lock.waiting_intention_exclusive);
+      MDL_ticket *wake_up_ticket;
+      while ((wake_up_ticket= it++))
+        pthread_cond_signal(&wake_up_ticket->get_ctx()->m_cond);
+    }
+    MDL_EXIT_COND(m_thd, mysys_var, &LOCK_mdl_global, old_msg);
+    MDL_ticket::destroy(pending_ticket);
     return TRUE;
   }
+
+  global_lock.waiting_shared.remove(pending_ticket);
   global_lock.active_shared++;
   m_has_global_shared_lock= TRUE;
-  /* As a side-effect MDL_EXIT_COND() unlocks LOCK_mdl. */
-  MDL_EXIT_COND(m_thd, mysys_var, old_msg);
+  MDL_EXIT_COND(m_thd, mysys_var, &LOCK_mdl_global, old_msg);
+
+  MDL_ticket::destroy(pending_ticket);
+
   return FALSE;
 }
 
@@ -1256,12 +1584,10 @@ bool MDL_context::acquire_global_shared_
   Check if there are any pending exclusive locks which conflict with
   shared locks held by this thread.
 
-  @pre The caller already has acquired LOCK_mdl.
-
   @return TRUE if there are any conflicting locks, FALSE otherwise.
 */
 
-bool MDL_context::has_pending_conflicting_locks_unlocked() const
+bool MDL_context::has_pending_conflicting_locks() const
 {
   Ticket_iterator ticket_it(m_tickets);
   MDL_ticket *ticket;
@@ -1278,7 +1604,7 @@ bool MDL_context::has_pending_conflictin
     */
     DBUG_ASSERT(! ticket->is_upgradable_or_exclusive());
 
-    if (ticket->has_pending_conflicting_lock_unlocked())
+    if (ticket->has_pending_conflicting_lock())
       return TRUE;
   }
   return FALSE;
@@ -1286,23 +1612,6 @@ bool MDL_context::has_pending_conflictin
 
 
 /**
-  Check if there are any pending exclusive locks which conflict with
-  shared locks held by this thread.
-
-  @return TRUE if there are any conflicting locks, FALSE otherwise.
-*/
-
-bool MDL_context::has_pending_conflicting_locks() const
-{
-  bool result;
-  pthread_mutex_lock(&LOCK_mdl);
-  result= has_pending_conflicting_locks_unlocked();
-  pthread_mutex_unlock(&LOCK_mdl);
-  return result;
-}
-
-
-/**
   Wait until there will be no locks that conflict with lock requests
   in the given list.
 
@@ -1339,8 +1648,6 @@ MDL_context::wait_for_locks(MDL_request_
             COND_mdl because of above scenario.
     */
     mysql_ha_flush(m_thd);
-    pthread_mutex_lock(&LOCK_mdl);
-    old_msg= MDL_ENTER_COND(m_thd, mysys_var);
 
     /*
       In cases when we wait while still holding some metadata locks deadlocks
@@ -1353,9 +1660,8 @@ MDL_context::wait_for_locks(MDL_request_
       in situations when conflicts are rare (in our case this is true since
       DDL statements should be rare).
     */
-    if (has_pending_conflicting_locks_unlocked())
+    if (has_pending_conflicting_locks())
     {
-      MDL_EXIT_COND(m_thd, mysys_var, old_msg);
       my_error(ER_LOCK_DEADLOCK, MYF(0));
       return TRUE;
     }
@@ -1365,54 +1671,122 @@ MDL_context::wait_for_locks(MDL_request_
     {
       MDL_key *key= &mdl_request->key;
       DBUG_ASSERT(mdl_request->ticket == NULL);
-      if (!global_lock.is_lock_type_compatible(mdl_request->type, FALSE))
+
+      pthread_mutex_lock(&LOCK_mdl_global);
+      if (unlikely (!global_lock.is_lock_type_compatible(mdl_request->type, FALSE)))
+      {
+        MDL_ticket *pending_ticket;
+        if (! (pending_ticket= MDL_ticket::create(this, mdl_request->type)))
+        {
+          pthread_mutex_unlock(&LOCK_mdl_global);
+          return TRUE;
+        }
+        global_lock.waiting_intention_exclusive.push_front(pending_ticket);
+
+        old_msg= MDL_ENTER_COND(m_thd, mysys_var, &m_cond, &LOCK_mdl_global);
+
+        pthread_cond_wait(&m_cond, &LOCK_mdl_global);
+
+        global_lock.waiting_intention_exclusive.remove(pending_ticket);
+        MDL_ticket::destroy(pending_ticket);
+        /*
+          We might have been woken up to resolve a deadlock...
+        */
+        MDL_EXIT_COND(m_thd, mysys_var, &LOCK_mdl_global, old_msg);
         break;
+      }
+      else
+        pthread_mutex_unlock(&LOCK_mdl_global);
+
+
       /*
         To avoid starvation we don't wait if we have a conflict against
         request for MDL_EXCLUSIVE lock.
       */
-      if (mdl_request->is_shared() &&
-          (lock= (MDL_lock*) my_hash_search(&mdl_locks, key->ptr(),
-                                            key->length())) &&
-          !lock->can_grant_lock(this, mdl_request->type, FALSE))
+      if (mdl_request->is_shared())
+      {
+        pthread_mutex_lock(&LOCK_mdl_hash);
+        if (! (lock= (MDL_lock*) my_hash_search(&mdl_locks, key->ptr(),
+                                                key->length())))
+        {
+          pthread_mutex_unlock(&LOCK_mdl_hash);
+          continue;
+        }
+        pthread_mutex_lock(&lock->lock);
+        pthread_mutex_unlock(&LOCK_mdl_hash);
+        if (lock->can_grant_lock(this, mdl_request->type, FALSE))
+        {
+          pthread_mutex_unlock(&lock->lock);
+          continue;
+        }
+        MDL_ticket *pending_ticket;
+        if (! (pending_ticket= MDL_ticket::create(this, mdl_request->type)))
+        {
+          pthread_mutex_unlock(&lock->lock);
+          return TRUE;
+        }
+        lock->waiting_shared.push_front(pending_ticket);
+
+        old_msg= MDL_ENTER_COND(m_thd, mysys_var, &m_cond, &lock->lock);
+
+        pthread_cond_wait(&this->m_cond, &lock->lock);
+
+        MDL_EXIT_COND(m_thd, mysys_var, &lock->lock, old_msg);
+
+        pthread_mutex_lock(&LOCK_mdl_hash);
+        pthread_mutex_lock(&lock->lock);
+        lock->waiting_shared.remove(pending_ticket);
+        if (lock->is_empty())
+        {
+          my_hash_delete(&mdl_locks, (uchar *)lock);
+          if (lock->cached_object)
+            (*lock->cached_object_release_hook)(lock->cached_object);
+          pthread_mutex_unlock(&lock->lock);
+          MDL_lock::destroy(lock);
+        }
+        else
+          pthread_mutex_unlock(&lock->lock);
+        pthread_mutex_unlock(&LOCK_mdl_hash);
+        MDL_ticket::destroy(pending_ticket);
         break;
+      }
     }
     if (!mdl_request)
     {
-      pthread_mutex_unlock(&LOCK_mdl);
+      /* There are no conflicts for any locks! */
       break;
     }
-    m_is_waiting_in_mdl= TRUE;
-    pthread_cond_wait(&COND_mdl, &LOCK_mdl);
-    m_is_waiting_in_mdl= FALSE;
-    /* As a side-effect MDL_EXIT_COND() unlocks LOCK_mdl. */
-    MDL_EXIT_COND(m_thd, mysys_var, old_msg);
   }
   return mysys_var->abort;
 }
 
 
 /**
-  Auxiliary function which allows to release particular lock
-  ownership of which is represented by a lock ticket object.
+  Release lock.
+
+  @param ticket Ticket for lock to be released.
 */
 
-void MDL_context::release_ticket(MDL_ticket *ticket)
+void MDL_context::release_lock(MDL_ticket *ticket)
 {
   MDL_lock *lock= ticket->m_lock;
-  DBUG_ENTER("release_ticket");
+  DBUG_ENTER("MDL_context::release_lock");
   DBUG_PRINT("enter", ("db=%s name=%s", lock->key.db_name(),
                                         lock->key.name()));
 
-  safe_mutex_assert_owner(&LOCK_mdl);
+  DBUG_ASSERT(this == ticket->m_ctx);
+  safe_mutex_assert_not_owner(&LOCK_open);
 
-  m_tickets.remove(ticket);
+
+  /*
+    QQ: Can we do anything to minimize time during which LOCK_mdl_hash is held?
+  */
+  pthread_mutex_lock(&LOCK_mdl_hash);
+  pthread_mutex_lock(&lock->lock);
 
   switch (ticket->m_type)
   {
     case MDL_SHARED_UPGRADABLE:
-      global_lock.active_intention_exclusive--;
-      /* Fallthrough. */
     case MDL_SHARED:
     case MDL_SHARED_HIGH_PRIO:
       lock->granted.remove(ticket);
@@ -1420,14 +1794,11 @@ void MDL_context::release_ticket(MDL_tic
     case MDL_EXCLUSIVE:
       lock->type= MDL_lock::MDL_LOCK_SHARED;
       lock->granted.remove(ticket);
-      global_lock.active_intention_exclusive--;
       break;
     default:
       DBUG_ASSERT(0);
   }
 
-  MDL_ticket::destroy(ticket);
-
   if (lock->is_empty())
   {
     my_hash_delete(&mdl_locks, (uchar *)lock);
@@ -1435,8 +1806,37 @@ void MDL_context::release_ticket(MDL_tic
                         lock->cached_object));
     if (lock->cached_object)
       (*lock->cached_object_release_hook)(lock->cached_object);
+    pthread_mutex_unlock(&lock->lock);
     MDL_lock::destroy(lock);
   }
+  else
+  {
+    if (lock->type == MDL_lock::MDL_LOCK_SHARED)
+    {
+      MDL_lock::Ticket_iterator it(lock->waiting_shared);
+      MDL_lock::Ticket_iterator it2(lock->waiting_exclusive);
+      MDL_ticket *waiting_ticket;
+      /*
+        We wake up threads waiting for a shared lock even if there is a
+        pending exclusive lock, as some of them might be trying to acquire
+        a high priority shared lock.
+      */
+      while ((waiting_ticket= it++))
+        waiting_ticket->get_ctx()->wake_up();
+      while ((waiting_ticket= it2++))
+        waiting_ticket->get_ctx()->wake_up();
+    }
+    pthread_mutex_unlock(&lock->lock);
+  }
+
+  pthread_mutex_unlock(&LOCK_mdl_hash);
+
+  if (ticket->m_type == MDL_SHARED_UPGRADABLE ||
+      ticket->m_type == MDL_EXCLUSIVE)
+    release_global_intention_exclusive_lock();
+
+  m_tickets.remove(ticket);
+  MDL_ticket::destroy(ticket);
 
   DBUG_VOID_RETURN;
 }
@@ -1456,20 +1856,14 @@ void MDL_context::release_all_locks()
   Ticket_iterator it(m_tickets);
   DBUG_ENTER("MDL_context::release_all_locks");
 
-  safe_mutex_assert_not_owner(&LOCK_open);
-
   if (m_tickets.is_empty())
     DBUG_VOID_RETURN;
 
-  pthread_mutex_lock(&LOCK_mdl);
   while ((ticket= it++))
   {
     DBUG_PRINT("info", ("found lock to release ticket=%p", ticket));
-    release_ticket(ticket);
+    release_lock(ticket);
   }
-  /* Inefficient but will do for a while */
-  pthread_cond_broadcast(&COND_mdl);
-  pthread_mutex_unlock(&LOCK_mdl);
 
   m_tickets.empty();
 
@@ -1478,24 +1872,6 @@ void MDL_context::release_all_locks()
 
 
 /**
-  Release a lock.
-
-  @param ticket    Lock to be released
-*/
-
-void MDL_context::release_lock(MDL_ticket *ticket)
-{
-  DBUG_ASSERT(this == ticket->m_ctx);
-  safe_mutex_assert_not_owner(&LOCK_open);
-
-  pthread_mutex_lock(&LOCK_mdl);
-  release_ticket(ticket);
-  pthread_cond_broadcast(&COND_mdl);
-  pthread_mutex_unlock(&LOCK_mdl);
-}
-
-
-/**
   Release all locks in the context which correspond to the same name/
   object as this lock request.
 
@@ -1536,11 +1912,19 @@ void MDL_ticket::downgrade_exclusive_loc
   if (is_shared())
     return;
 
-  pthread_mutex_lock(&LOCK_mdl);
+  pthread_mutex_lock(&m_lock->lock);
   m_lock->type= MDL_lock::MDL_LOCK_SHARED;
   m_type= MDL_SHARED_UPGRADABLE;
-  pthread_cond_broadcast(&COND_mdl);
-  pthread_mutex_unlock(&LOCK_mdl);
+
+  if (! m_lock->waiting_shared.is_empty())
+  {
+    MDL_lock::Ticket_iterator it(m_lock->waiting_shared);
+    MDL_ticket *ticket;
+    while ((ticket= it++))
+      ticket->get_ctx()->wake_up();
+  }
+
+  pthread_mutex_unlock(&m_lock->lock);
 }
 
 
@@ -1553,11 +1937,23 @@ void MDL_context::release_global_shared_
   safe_mutex_assert_not_owner(&LOCK_open);
   DBUG_ASSERT(m_has_global_shared_lock);
 
-  pthread_mutex_lock(&LOCK_mdl);
+  pthread_mutex_lock(&LOCK_mdl_global);
   global_lock.active_shared--;
   m_has_global_shared_lock= FALSE;
-  pthread_cond_broadcast(&COND_mdl);
-  pthread_mutex_unlock(&LOCK_mdl);
+
+  /*
+    If we are releasing the last instance of the global shared lock,
+    we have to wake up all waiters.
+  */
+  if (global_lock.active_shared == 0 &&
+      ! global_lock.waiting_intention_exclusive.is_empty())
+  {
+    MDL_lock::Ticket_iterator it(global_lock.waiting_intention_exclusive);
+    MDL_ticket *wake_up_ticket;
+    while ((wake_up_ticket= it++))
+      wake_up_ticket->get_ctx()->wake_up();
+  }
+  pthread_mutex_unlock(&LOCK_mdl_global);
 }
 
 
@@ -1622,25 +2018,6 @@ MDL_context::is_lock_owner(MDL_key::enum
   existing shared lock.
 
   @pre The ticket must match an acquired lock.
-  @pre The caller already has acquired LOCK_mdl.
-
-  @return TRUE if there are any conflicting locks, FALSE otherwise.
-*/
-
-bool MDL_ticket::has_pending_conflicting_lock_unlocked() const
-{
-  DBUG_ASSERT(is_shared());
-  safe_mutex_assert_owner(&LOCK_mdl);
-
-  return !m_lock->waiting.is_empty();
-}
-
-
-/**
-  Check if we have any pending exclusive locks which conflict with
-  existing shared lock.
-
-  @pre The ticket must match an acquired lock.
 
   @return TRUE if there are any conflicting locks, FALSE otherwise.
 */
@@ -1650,10 +2027,11 @@ bool MDL_ticket::has_pending_conflicting
   bool result;
 
   safe_mutex_assert_not_owner(&LOCK_open);
+  DBUG_ASSERT(is_shared());
 
-  pthread_mutex_lock(&LOCK_mdl);
-  result= has_pending_conflicting_lock_unlocked();
-  pthread_mutex_unlock(&LOCK_mdl);
+  pthread_mutex_lock(&m_lock->lock);
+  result= !m_lock->waiting_exclusive.is_empty();
+  pthread_mutex_unlock(&m_lock->lock);
   return result;
 }
 

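The hunks above all follow one pattern: take the per-lock mutex
(lock->lock), push a ticket onto the lock's waiter queue, wait on the
context's own condition variable (m_cond) using that same mutex, and
have the thread that changes the lock state signal each waiter through
get_ctx()->wake_up(). Below is a minimal sketch of that handshake; it
is not the MDL implementation, and Resource, Waiter, acquire() and
release() are illustrative names only.

  #include <pthread.h>
  #include <list>

  struct Waiter
  {
    pthread_cond_t cond;    /* per-thread condvar, like MDL_context::m_cond */
    bool satisfied;         /* predicate re-checked after every wake-up */
    Waiter() : satisfied(false) { pthread_cond_init(&cond, NULL); }
    ~Waiter() { pthread_cond_destroy(&cond); }
  };

  struct Resource
  {
    pthread_mutex_t lock;   /* per-resource mutex, like MDL_lock::lock */
    bool busy;              /* is the resource held exclusively? */
    std::list<Waiter*> waiters;
    Resource() : busy(false) { pthread_mutex_init(&lock, NULL); }
    ~Resource() { pthread_mutex_destroy(&lock); }
  };

  /* Block until the resource is free, then take it. */
  void acquire(Resource *res, Waiter *self)
  {
    pthread_mutex_lock(&res->lock);
    while (res->busy)
    {
      res->waiters.push_back(self);
      /* Wait on our own condvar, protected by the resource's mutex. */
      while (! self->satisfied)
        pthread_cond_wait(&self->cond, &res->lock);
      self->satisfied= false;
      res->waiters.remove(self);
    }
    res->busy= true;
    pthread_mutex_unlock(&res->lock);
  }

  /* Release the resource and wake every queued waiter. */
  void release(Resource *res)
  {
    pthread_mutex_lock(&res->lock);
    res->busy= false;
    for (std::list<Waiter*>::iterator it= res->waiters.begin();
         it != res->waiters.end(); ++it)
    {
      (*it)->satisfied= true;
      pthread_cond_signal(&(*it)->cond);  /* like get_ctx()->wake_up() */
    }
    pthread_mutex_unlock(&res->lock);
  }

  int main()
  {
    Resource res;
    Waiter me;
    acquire(&res, &me);   /* no conflict, granted immediately */
    release(&res);
    return 0;
  }

The point of the per-waiter condvar is that a releasing thread can wake
exactly the threads queued on the lock it just changed, instead of
broadcasting on one global condition variable shared by all contexts.
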
=== modified file 'sql/mdl.h'
--- a/sql/mdl.h	2009-10-16 08:24:11 +0000
+++ b/sql/mdl.h	2009-10-29 09:13:48 +0000
@@ -119,6 +119,19 @@ public:
     return (m_length == rhs->m_length &&
             memcmp(m_ptr, rhs->m_ptr, m_length) == 0);
   }
+  int cmp(const MDL_key *rhs) const
+  {
+    int res;
+    if ((res= memcmp(m_ptr, rhs->m_ptr, min(m_length, rhs->m_length))))
+      return res;
+    else if (m_length < rhs->m_length)
+      return -1;
+    else if (m_length > rhs->m_length)
+      return 1;
+    else
+      return 0;
+  }
+
   MDL_key(const MDL_key *rhs)
   {
     mdl_key_init(rhs);
@@ -265,7 +278,7 @@ public:
   void *get_cached_object();
   void set_cached_object(void *cached_object,
                          mdl_cached_object_release_hook release_hook);
-  const MDL_context *get_ctx() const { return m_ctx; }
+  MDL_context *get_ctx() const { return m_ctx; }
   bool is_shared() const { return m_type < MDL_EXCLUSIVE; }
   bool is_upgradable_or_exclusive() const
   {
@@ -300,14 +313,13 @@ private:
 private:
   MDL_ticket(const MDL_ticket &);               /* not implemented */
   MDL_ticket &operator=(const MDL_ticket &);    /* not implemented */
-
-  bool has_pending_conflicting_lock_unlocked() const;
 };
 
 
 typedef I_P_List<MDL_request, I_P_List_adapter<MDL_request,
                  &MDL_request::next_in_list,
-                 &MDL_request::prev_in_list> >
+                 &MDL_request::prev_in_list>,
+                 I_P_List_Counter>
         MDL_request_list;
 
 /**
@@ -367,20 +379,44 @@ public:
   bool has_pending_conflicting_locks() const;
 
   inline THD *get_thd() const { return m_thd; }
+
+  void wake_up()
+  {
+    pthread_cond_signal(&m_cond);
+  }
+
 private:
   Ticket_list m_tickets;
   bool m_has_global_shared_lock;
   /**
-    Indicates that owner of the context is waiting in wait_for_locks() method.
+    Number of instances of global intention exclusive lock which were
+    recursively acquired by this context.
   */
-  bool m_is_waiting_in_mdl;
+  uint m_global_intention_exclusive_locks;
   THD *m_thd;
+  /**
+    Condition variable used for waiting until this context's pending
+    request can be satisfied or this thread has to perform actions to
+    resolve a potential deadlock (we subscribe to such notifications by
+    adding a ticket corresponding to the request to an appropriate queue
+    of waiters).
+  */
+  pthread_cond_t m_cond;
 private:
-  void release_ticket(MDL_ticket *ticket);
   MDL_ticket *find_ticket(MDL_request *mdl_req);
-  bool has_pending_conflicting_locks_unlocked() const;
 
-  friend bool notify_shared_lock(THD *thd, MDL_ticket *conflicting_ticket);
+  bool acquire_exclusive_lock_impl(MDL_request *mdl_request);
+
+  bool try_acquire_global_intention_exclusive_lock(MDL_request *mdl_request,
+                                                   bool *acquired);
+  bool acquire_global_intention_exclusive_lock(MDL_request *mdl_request);
+  void release_global_intention_exclusive_lock();
+  bool is_global_intention_exclusive_lock_owner()
+  {
+    return m_global_intention_exclusive_locks;
+  }
+
+  friend bool MDL_ticket::upgrade_shared_lock_to_exclusive();
 };
 
 

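MDL_key::cmp() above gives lock keys a total order. A standard use of
such an order, sketched below with illustrative stand-ins (Key and
acquire_exclusive() are not the real MDL calls), is to take several
exclusive locks in one fixed global sequence: if every thread sorts its
requests first, no two threads can end up holding the same pair of
locks in opposite orders, so that class of deadlock disappears.

  #include <algorithm>
  #include <cstdio>
  #include <cstring>
  #include <vector>

  struct Key
  {
    const char *name;
    /* Same shape as MDL_key::cmp(): negative, zero or positive. */
    int cmp(const Key *rhs) const { return strcmp(name, rhs->name); }
  };

  static bool key_less(const Key *a, const Key *b) { return a->cmp(b) < 0; }

  /* Stub standing in for a real blocking exclusive-lock acquisition. */
  static void acquire_exclusive(const Key *key)
  {
    printf("acquiring %s\n", key->name);
  }

  static void acquire_all(std::vector<const Key*> &keys)
  {
    /* Sort first so every thread takes the locks in the same order. */
    std::sort(keys.begin(), keys.end(), key_less);
    for (size_t i= 0; i < keys.size(); i++)
      acquire_exclusive(keys[i]);
  }

  int main()
  {
    Key t1= { "db1.t1" };
    Key t2= { "db2.t2" };
    std::vector<const Key*> keys;
    keys.push_back(&t2);
    keys.push_back(&t1);
    acquire_all(keys);    /* always db1.t1 before db2.t2 */
    return 0;
  }
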
=== modified file 'sql/sql_plist.h'
--- a/sql/sql_plist.h	2009-10-16 08:24:11 +0000
+++ b/sql/sql_plist.h	2009-10-29 09:13:48 +0000
@@ -18,7 +18,8 @@
 
 #include <my_global.h>
 
-template <typename T, typename B> class I_P_List_iterator;
+template <typename T, typename B, typename C> class I_P_List_iterator;
+class I_P_List_Null_Counter;
 
 
 /**
@@ -47,10 +48,14 @@ template <typename T, typename B> class 
                  return &el->prev;
                }
              };
+   @param C  Policy class specifying how counting of elements in the list
+             should be done. An instance of this class is also used as
+             the place where the number of list elements is stored.
+             @sa I_P_List_Null_Counter, I_P_List_Counter
 */
 
-template <typename T, typename B>
-class I_P_List
+template <typename T, typename B, typename C = I_P_List_Null_Counter>
+class I_P_List : public C
 {
   T *first;
 
@@ -61,7 +66,7 @@ class I_P_List
   */
 public:
   I_P_List() : first(NULL) { };
-  inline void empty()      { first= NULL; }
+  inline void empty()      { first= NULL; C::reset(); }
   inline bool is_empty() const { return (first == NULL); }
   inline void push_front(T* a)
   {
@@ -70,6 +75,7 @@ public:
       *B::prev_ptr(first)= B::next_ptr(a);
     first= a;
     *B::prev_ptr(a)= &first;
+    C::inc();
   }
   inline void remove(T *a)
   {
@@ -77,21 +83,23 @@ public:
     if (next)
       *B::prev_ptr(next)= *B::prev_ptr(a);
     **B::prev_ptr(a)= next;
+    C::dec();
   }
   inline T* head() { return first; }
   inline const T *head() const { return first; }
-  void swap(I_P_List<T,B> &rhs)
+  void swap(I_P_List<T, B, C> &rhs)
   {
     swap_variables(T *, first, rhs.first);
     if (first)
       *B::prev_ptr(first)= &first;
     if (rhs.first)
       *B::prev_ptr(rhs.first)= &rhs.first;
+    C::swap(rhs);
   }
 #ifndef _lint
-  friend class I_P_List_iterator<T, B>;
+  friend class I_P_List_iterator<T, B, C>;
 #endif
-  typedef I_P_List_iterator<T, B> Iterator;
+  typedef I_P_List_iterator<T, B, C> Iterator;
 };
 
 
@@ -99,14 +107,14 @@ public:
    Iterator for I_P_List.
 */
 
-template <typename T, typename B>
+template <typename T, typename B, typename C = I_P_List_Null_Counter>
 class I_P_List_iterator
 {
-  const I_P_List<T, B> *list;
+  const I_P_List<T, B, C> *list;
   T *current;
 public:
-  I_P_List_iterator(const I_P_List<T, B> &a) : list(&a), current(a.first) {}
-  inline void init(const I_P_List<T, B> &a)
+  I_P_List_iterator(const I_P_List<T, B, C> &a) : list(&a), current(a.first) {}
+  inline void init(const I_P_List<T, B, C> &a)
   {
     list= &a;
     current= a.first;
@@ -124,4 +132,39 @@ public:
   }
 };
 
+
+/**
+  Element counting policy class for I_P_List to be used in
+  cases when no element counting should be done.
+*/
+
+class I_P_List_Null_Counter
+{
+protected:
+  void reset() {}
+  void inc() {}
+  void dec() {}
+  void swap(I_P_List_Null_Counter &rhs) {}
+};
+
+
+/**
+  Element counting policy class for I_P_List which provides
+  basic element counting.
+*/
+
+class I_P_List_Counter
+{
+  uint m_counter;
+protected:
+  I_P_List_Counter() : m_counter (0) {}
+  void reset() {m_counter= 0;}
+  void inc() {m_counter++;}
+  void dec() {m_counter--;}
+  void swap(I_P_List_Counter &rhs)
+  { swap_variables(uint, m_counter, rhs.m_counter); }
+public:
+  uint elements() const { return m_counter; }
+};
+
 #endif
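
The sql_plist.h change makes element counting a policy class that
I_P_List inherits from: lists that do not need a count keep the old
zero-cost behaviour through the no-op policy, while lists instantiated
with I_P_List_Counter gain elements(). The same pattern in isolation,
as a compilable toy where Tiny_list, Null_counter and Counter are
illustrative names rather than the real classes:

  #include <cassert>
  #include <cstddef>

  class Null_counter
  {
  protected:
    void reset() {}
    void inc() {}
    void dec() {}
  };

  class Counter
  {
    unsigned m_counter;
  protected:
    Counter() : m_counter(0) {}
    void reset() { m_counter= 0; }
    void inc() { m_counter++; }
    void dec() { m_counter--; }
  public:
    unsigned elements() const { return m_counter; }
  };

  template <typename T, typename C = Null_counter>
  class Tiny_list : public C
  {
    struct Node { T val; Node *next; };
    Node *m_head;
  public:
    Tiny_list() : m_head(NULL) {}
    ~Tiny_list() { while (! is_empty()) pop_front(); }
    void push_front(const T &v)
    {
      Node *n= new Node;
      n->val= v;
      n->next= m_head;
      m_head= n;
      C::inc();                 /* compiles away for Null_counter */
    }
    void pop_front()
    {
      Node *n= m_head;
      m_head= n->next;
      delete n;
      C::dec();
    }
    bool is_empty() const { return m_head == NULL; }
  };

  int main()
  {
    Tiny_list<int, Counter> counted;
    counted.push_front(1);
    counted.push_front(2);
    assert(counted.elements() == 2);
    counted.pop_front();
    assert(counted.elements() == 1);
    return 0;
  }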


Attachment: [text/bzr-bundle] bzr/dlenev@mysql.com-20091029091348-l65a13khilp5yhpd.bundle