List: Commits
From: pekka  Date: July 9 2006 4:57pm
Subject: bk commit into 5.0 tree (pekka:1.2216) BUG#18781
Below is the list of changes that have just been committed into a local
5.0 repository of pekka. When pekka does a push, these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository,
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet@stripped, 2006-07-09 18:57:24+02:00, pekka@stripped +6 -0
  ndb - bug#18781: close a tiny window (last patch)

  ndb/src/kernel/blocks/dbdict/DictLock.txt@stripped, 2006-07-09 18:56:06+02:00, pekka@stripped +8 -4
    wait until SL_STARTED before sending DICT_UNLOCK_ORD (last patch to this bug:)

  ndb/src/kernel/blocks/dbdih/Dbdih.hpp@stripped, 2006-07-09 18:56:06+02:00, pekka@stripped +3 -0
    wait until SL_STARTED before sending DICT_UNLOCK_ORD (last patch to this bug:)

  ndb/src/kernel/blocks/dbdih/DbdihMain.cpp@stripped, 2006-07-09 18:56:06+02:00, pekka@stripped +22 -18
    wait until SL_STARTED before sending DICT_UNLOCK_ORD (last patch to this bug:)

  ndb/src/kernel/vm/SimulatedBlock.cpp@stripped, 2006-07-09 18:56:06+02:00, pekka@stripped +9 -0
    wait until SL_STARTED before sending DICT_UNLOCK_ORD (last patch to this bug:)

  ndb/src/kernel/vm/SimulatedBlock.hpp@stripped, 2006-07-09 18:56:06+02:00, pekka@stripped +1 -0
    wait until SL_STARTED before sending DICT_UNLOCK_ORD (last patch to this bug:)

  ndb/test/run-test/daily-basic-tests.txt@stripped, 2006-07-09 18:56:06+02:00, pekka@stripped +4 -0
    wait until SL_STARTED before sending DICT_UNLOCK_ORD (last patch to this bug:)

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	pekka
# Host:	orca.ndb.mysql.com
# Root:	/space_old/pekka/ndb/version/my50-bug18781

--- 1.38/ndb/test/run-test/daily-basic-tests.txt	2006-07-09 18:57:40 +02:00
+++ 1.39/ndb/test/run-test/daily-basic-tests.txt	2006-07-09 18:57:40 +02:00
@@ -500,6 +500,10 @@
 cmd: testDict
 args: -n TemporaryTables T1 T6 T7 T8 
 
+max-time: 1500
+cmd: testDict
+args: -n Restart_NR2 T1
+
 #
 # TEST NDBAPI
 #

--- 1.11/ndb/src/kernel/blocks/dbdih/Dbdih.hpp	2006-07-09 18:57:40 +02:00
+++ 1.12/ndb/src/kernel/blocks/dbdih/Dbdih.hpp	2006-07-09 18:57:40 +02:00
@@ -1599,6 +1599,9 @@
    */
   void startInfoReply(Signal *, Uint32 nodeId);
 
+  // DIH specifics for execNODE_START_REP (sendDictUnlockOrd)
+  void exec_node_start_rep(Signal* signal);
+
   /*
    * Lock master DICT.  Only current use is by starting node
    * during NR.  A pool of slave records is convenient anyway.

--- 1.53/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2006-07-09 18:57:40 +02:00
+++ 1.54/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp	2006-07-09 18:57:40 +02:00
@@ -1356,24 +1356,6 @@
     }
     ndbrequire(false);
     break;
-  case ZNDB_SPH7:
-    jam();
-    switch (typestart) {
-    case NodeState::ST_INITIAL_START:
-    case NodeState::ST_SYSTEM_RESTART:
-      jam();
-      ndbsttorry10Lab(signal, __LINE__);
-      return;
-    case NodeState::ST_NODE_RESTART:
-    case NodeState::ST_INITIAL_NODE_RESTART:
-      jam();
-      sendDictUnlockOrd(signal, c_dictLockSlavePtrI_nodeRestart);
-      c_dictLockSlavePtrI_nodeRestart = RNIL;
-      ndbsttorry10Lab(signal, __LINE__);
-      return;
-    }
-    ndbrequire(false);
-    break;
   default:
     jam();
     ndbsttorry10Lab(signal, __LINE__);
@@ -1382,6 +1364,27 @@
 }//Dbdih::execNDB_STTOR()
 
 void
+Dbdih::exec_node_start_rep(Signal* signal)
+{
+  /*
+   * Send DICT_UNLOCK_ORD when this node is SL_STARTED.
+   *
+   * Sending it before (sp 7) conflicts with code which assumes
+   * SL_STARTING means we are in copy phase of NR.
+   *
+   * NodeState::starting.restartType is not supposed to be used
+   * when SL_STARTED.  Also it seems NODE_START_REP can arrive twice.
+   *
+   * For these reasons there are no consistency checks and
+   * we rely on c_dictLockSlavePtrI_nodeRestart alone.
+   */
+  if (c_dictLockSlavePtrI_nodeRestart != RNIL) {
+    sendDictUnlockOrd(signal, c_dictLockSlavePtrI_nodeRestart);
+    c_dictLockSlavePtrI_nodeRestart = RNIL;
+  }
+}
+
+void
 Dbdih::createMutexes(Signal * signal, Uint32 count){
   Callback c = { safe_cast(&Dbdih::createMutex_done), count };
 
@@ -1605,6 +1608,7 @@
 void Dbdih::recvDictLockConf_nodeRestart(Signal* signal, Uint32 data, Uint32 ret)
 {
   ndbrequire(c_dictLockSlavePtrI_nodeRestart == RNIL);
+  ndbrequire(data != RNIL);
   c_dictLockSlavePtrI_nodeRestart = data;
 
   nodeRestartPh2Lab2(signal);
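
The comment above explains why there are no consistency checks: the only state
consulted is c_dictLockSlavePtrI_nodeRestart, so a NODE_START_REP that arrives
twice finds RNIL the second time and does nothing. A minimal, self-contained
sketch of that behaviour (simplified stand-ins, not the real NDB kernel types):

#include <cassert>
#include <cstdio>

typedef unsigned Uint32;
static const Uint32 RNIL = 0xffffff00;   // "no record" sentinel value

struct Signal {};                        // stand-in for the real Signal class

struct DihSketch {
  // Set when DICT_LOCK_CONF arrives during node restart, RNIL otherwise.
  Uint32 c_dictLockSlavePtrI_nodeRestart;
  int unlocksSent;

  DihSketch() : c_dictLockSlavePtrI_nodeRestart(RNIL), unlocksSent(0) {}

  void recvDictLockConf_nodeRestart(Signal*, Uint32 data) {
    assert(c_dictLockSlavePtrI_nodeRestart == RNIL);
    assert(data != RNIL);
    c_dictLockSlavePtrI_nodeRestart = data;
  }

  // Called once the node reaches SL_STARTED (via execNODE_START_REP).
  void exec_node_start_rep(Signal* signal) {
    if (c_dictLockSlavePtrI_nodeRestart != RNIL) {
      sendDictUnlockOrd(signal, c_dictLockSlavePtrI_nodeRestart);
      c_dictLockSlavePtrI_nodeRestart = RNIL;   // a second REP becomes a no-op
    }
  }

  void sendDictUnlockOrd(Signal*, Uint32 lockPtrI) {
    unlocksSent++;
    std::printf("DICT_UNLOCK_ORD for lock record %u\n", lockPtrI);
  }
};

int main() {
  Signal sig;
  DihSketch dih;
  dih.recvDictLockConf_nodeRestart(&sig, 7);
  dih.exec_node_start_rep(&sig);   // releases the DICT lock
  dih.exec_node_start_rep(&sig);   // duplicate delivery: harmless
  assert(dih.unlocksSent == 1);
  return 0;
}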

--- 1.24/ndb/src/kernel/vm/SimulatedBlock.cpp	2006-07-09 18:57:40 +02:00
+++ 1.25/ndb/src/kernel/vm/SimulatedBlock.cpp	2006-07-09 18:57:40 +02:00
@@ -917,6 +917,15 @@
 void
 SimulatedBlock::execNODE_START_REP(Signal* signal)
 {
+  // common stuff for all blocks
+
+  // block specific stuff by virtual method override (default empty)
+  exec_node_start_rep(signal);
+}
+
+void
+SimulatedBlock::exec_node_start_rep(Signal* signal)
+{
 }
 
 #ifdef VM_TRACE_TIME
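
The SimulatedBlock change is a small template-method style hook: the common
execNODE_START_REP handler runs for every block and then calls a virtual
exec_node_start_rep whose default body is empty, so only blocks that override
it (DBDIH in this patch) do anything extra. A short sketch of that dispatch,
with hypothetical class names standing in for the real blocks:

#include <cstdio>

struct Signal {};   // stand-in for the real Signal class

struct BlockSketch {
  // Common entry point: shared handling first, then the block-specific hook.
  void execNODE_START_REP(Signal* signal) {
    // common stuff for all blocks would go here
    exec_node_start_rep(signal);
  }
  virtual void exec_node_start_rep(Signal*) {}   // default: do nothing
  virtual ~BlockSketch() {}
};

struct DihLikeBlock : BlockSketch {
  void exec_node_start_rep(Signal*) {
    std::printf("DIH: node is SL_STARTED, release the DICT lock here\n");
  }
};

struct OtherBlock : BlockSketch {};   // keeps the empty default

int main() {
  Signal sig;
  DihLikeBlock dih;
  OtherBlock other;
  dih.execNODE_START_REP(&sig);     // the override runs
  other.execNODE_START_REP(&sig);   // silent: default hook is a no-op
  return 0;
}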

--- 1.17/ndb/src/kernel/vm/SimulatedBlock.hpp	2006-07-09 18:57:40 +02:00
+++ 1.18/ndb/src/kernel/vm/SimulatedBlock.hpp	2006-07-09 18:57:40 +02:00
@@ -424,6 +424,7 @@
   void execSIGNAL_DROPPED_REP(Signal* signal);
   void execCONTINUE_FRAGMENTED(Signal* signal);
   void execNODE_START_REP(Signal* signal);
+  virtual void exec_node_start_rep(Signal* signal);
 
   Uint32 c_fragmentIdCounter;
   ArrayPool<FragmentInfo> c_fragmentInfoPool;

--- 1.2/ndb/src/kernel/blocks/dbdict/DictLock.txt	2006-07-09 18:57:40 +02:00
+++ 1.3/ndb/src/kernel/blocks/dbdict/DictLock.txt	2006-07-09 18:57:40 +02:00
@@ -85,10 +85,14 @@
     START_MECONF
 DIH/s
 
-* sp7 - release DICT lock
+* (copy data, omitted)
 
-DIH/s
-    DICT_UNLOCK_ORD
-        DICT/m
+* SL_STARTED - release DICT lock
+
+CNTR/s
+    NODE_START_REP
+        DIH/s
+            DICT_UNLOCK_ORD
+                DICT/m
 
 # vim: set et sw=4:
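
The updated diagram can also be read as a tiny protocol: CNTR on the starting
node raises the start level to SL_STARTED and reports it via NODE_START_REP,
the local DIH reacts by sending DICT_UNLOCK_ORD, and the master DICT drops the
lock only then. A toy model of that sequence (single-process stand-ins; the
real blocks exchange signals between nodes):

#include <cassert>

// Illustrative subset of node start levels.
enum StartLevel { SL_STARTING, SL_STARTED };

struct DictMaster {
  bool locked;
  DictMaster() : locked(true) {}            // lock held for the duration of NR
  void execDICT_UNLOCK_ORD() { locked = false; }
};

struct DihSlave {
  DictMaster* dictMaster;
  void exec_node_start_rep() { dictMaster->execDICT_UNLOCK_ORD(); }
};

struct CntrSlave {
  DihSlave* dih;
  StartLevel sl;
  CntrSlave() : dih(0), sl(SL_STARTING) {}
  // NODE_START_REP goes out only when the start level reaches SL_STARTED.
  void setStarted() {
    sl = SL_STARTED;
    dih->exec_node_start_rep();
  }
};

int main() {
  DictMaster dict;
  DihSlave dih;
  dih.dictMaster = &dict;
  CntrSlave cntr;
  cntr.dih = &dih;

  assert(dict.locked);   // copy phase of NR: the master DICT lock is still held
  cntr.setStarted();     // SL_STARTED: NODE_START_REP -> DICT_UNLOCK_ORD
  assert(!dict.locked);  // released only now, not at start phase 7
  return 0;
}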