List: Commits
From: Frazer Clement  Date: April 8 2010 4:52pm
Subject: bzr commit into mysql-5.1-telco-6.3 branch (frazer:3177) Bug#51301
#At file:///home/frazer/bzr/mysql-5.1-telco-6.3/ based on revid:jonas@stripped

 3177 Frazer Clement	2010-04-08
      testUpgrade improvements (6.3) for bug#51301

    modified:
      storage/ndb/include/kernel/signaldata/CreateIndx.hpp
      storage/ndb/include/kernel/signaldata/CreateTable.hpp
      storage/ndb/include/kernel/signaldata/DropIndx.hpp
      storage/ndb/include/kernel/signaldata/DropTable.hpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/test/include/AtrtClient.hpp
      storage/ndb/test/include/NdbRestarter.hpp
      storage/ndb/test/ndbapi/testUpgrade.cpp
      storage/ndb/test/run-test/command.cpp
      storage/ndb/test/run-test/upgrade-tests.txt
      storage/ndb/test/src/AtrtClient.cpp
      storage/ndb/test/src/NdbRestarter.cpp
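
The signal-data header changes that follow map NDB error 763 ("Alter table requires cluster nodes to have exact same version") onto the Create/Drop Table/Index ref codes as IncompatibleVersions, and the Dbdict hunks refuse the corresponding DDL requests while data nodes run mixed versions. A minimal sketch of how an NDB API client might classify that failure during a rolling upgrade (the helper name and the retry policy are illustrative, not part of this patch):

  #include <NdbApi.hpp>

  // Hypothetical helper: decide whether a DDL failure only means the cluster
  // is mid-upgrade.  Error 763 is the code surfaced by the new
  // IncompatibleVersions entries below, so a caller can retry the DDL once
  // all data nodes run the same version instead of treating it as fatal.
  static bool isMixedVersionDdlError(const NdbError& err)
  {
    return err.code == 763;
  }
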
=== modified file 'storage/ndb/include/kernel/signaldata/CreateIndx.hpp'
--- a/storage/ndb/include/kernel/signaldata/CreateIndx.hpp	2010-01-13 08:56:02 +0000
+++ b/storage/ndb/include/kernel/signaldata/CreateIndx.hpp	2010-04-08 16:50:12 +0000
@@ -219,7 +219,8 @@ public:
     SingleUser = 299,
     TableIsTemporary = 776,
     TableIsNotTemporary = 777,
-    NoLoggingTemporaryIndex = 778
+    NoLoggingTemporaryIndex = 778,
+    IncompatibleVersions = 763
   };
 
   CreateIndxConf m_conf;

=== modified file 'storage/ndb/include/kernel/signaldata/CreateTable.hpp'
--- a/storage/ndb/include/kernel/signaldata/CreateTable.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/include/kernel/signaldata/CreateTable.hpp	2010-04-08 16:50:12 +0000
@@ -101,7 +101,8 @@ public:
     NotATablespace = 758,
     InvalidTablespaceVersion = 759,
     OutOfStringBuffer = 773,
-    NoLoggingTemporaryTable = 778
+    NoLoggingTemporaryTable = 778,
+    IncompatibleVersions = 763
   };
 
 private:

=== modified file 'storage/ndb/include/kernel/signaldata/DropIndx.hpp'
--- a/storage/ndb/include/kernel/signaldata/DropIndx.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/include/kernel/signaldata/DropIndx.hpp	2010-04-08 16:50:12 +0000
@@ -176,7 +176,8 @@ public:
     BadRequestType = 4247,
     InvalidName = 4248,
     NotAnIndex = 4254,
-    SingleUser = 299
+    SingleUser = 299,
+    IncompatibleVersions = 763
   };
   STATIC_CONST( SignalLength = DropIndxConf::SignalLength + 3 );
 

=== modified file 'storage/ndb/include/kernel/signaldata/DropTable.hpp'
--- a/storage/ndb/include/kernel/signaldata/DropTable.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/include/kernel/signaldata/DropTable.hpp	2010-04-08 16:50:12 +0000
@@ -62,7 +62,8 @@ public:
     DropInProgress      = 283,
     NoDropTableRecordAvailable = 1229,
     BackupInProgress = 761,
-    SingleUser = 299
+    SingleUser = 299,
+    IncompatibleVersions = 763
   };
 };
 

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2010-03-16 08:50:44 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2010-04-08 16:50:12 +0000
@@ -4134,6 +4134,13 @@ Dbdict::execCREATE_TABLE_REQ(Signal* sig
       break;
     }
 
+    if (!check_ndb_versions())
+    {
+      jam();
+      parseRecord.errorCode = CreateTableRef::IncompatibleVersions;
+      break;
+    }
+
     CreateTableRecordPtr createTabPtr;
     c_opCreateTable.seize(createTabPtr);
     
@@ -7192,6 +7199,13 @@ Dbdict::execDROP_TABLE_REQ(Signal* signa
     return;
   }
 
+  if (!check_ndb_versions())
+  {
+    jam();
+    dropTableRef(signal, req, DropTableRef::IncompatibleVersions);
+    return;
+  }
+
   const TableRecord::TabState tabState = tablePtr.p->tabState;
   bool ok = false;
   switch(tabState){
@@ -9446,6 +9460,11 @@ Dbdict::execDROP_INDX_REQ(Signal* signal
         jam();
         tmperr = DropIndxRef::SingleUser;
       }
+      else if (!check_ndb_versions())
+      {
+        jam();
+        tmperr = DropIndxRef::IncompatibleVersions;
+      }
       else 
       {
         DictLockReq lockReq;

=== modified file 'storage/ndb/test/include/AtrtClient.hpp'
--- a/storage/ndb/test/include/AtrtClient.hpp	2009-08-05 09:54:52 +0000
+++ b/storage/ndb/test/include/AtrtClient.hpp	2010-04-08 16:50:12 +0000
@@ -26,7 +26,8 @@ public:
 
   enum AtrtCommandType {
     ATCT_CHANGE_VERSION= 1,
-    ATCT_RESET_PROC= 2
+    ATCT_RESET_PROC= 2,
+    ATCT_RESET_VERSION= 3
   };
 
   AtrtClient(const char* _suffix= ".1.atrt");
@@ -37,6 +38,7 @@ public:
   // Command functions
   bool changeVersion(int process_id, const char* process_args);
   bool resetProc(int process_id);
+  bool resetVersion(int process_id, const char* process_args);
 
   // Query functions
   bool getConnectString(int cluster_id, SqlResultSet& result);

=== modified file 'storage/ndb/test/include/NdbRestarter.hpp'
--- a/storage/ndb/test/include/NdbRestarter.hpp	2010-02-18 23:01:15 +0000
+++ b/storage/ndb/test/include/NdbRestarter.hpp	2010-04-08 16:50:12 +0000
@@ -71,9 +71,12 @@ public:
 			  int _startphase, unsigned int _timeout = 120);
   int waitNodesNoStart(const int * _nodes, int _num_nodes,
 		       unsigned int _timeout = 120); 
+  int waitNodesStartedOrNoStart(const int * _nodes, int _num_nodes,
+                                unsigned int _timeout = 120);
 
   int checkClusterAlive(const int * deadnodes, int num_nodes);
 
+  int getNumRunningMgmdNodes();
   int getNumDbNodes();
   int insertErrorInNode(int _nodeId, int error);
   int insertErrorInAllNodes(int error);
@@ -114,6 +117,11 @@ protected:
 		       unsigned int _timeout,
 		       int _startphase = -1);  
 
+  enum HackyState
+  {
+    HACKY_NODESTATE_STARTED_OR_NOTSTARTED = 255
+  };
+
   int waitNodesState(const int * _nodes, int _num_nodes,
 		     ndb_mgm_node_status _status,
 		     unsigned int _timeout,

=== modified file 'storage/ndb/test/ndbapi/testUpgrade.cpp'
--- a/storage/ndb/test/ndbapi/testUpgrade.cpp	2010-03-22 08:48:37 +0000
+++ b/storage/ndb/test/ndbapi/testUpgrade.cpp	2010-04-08 16:50:12 +0000
@@ -24,6 +24,7 @@
 #include <AtrtClient.hpp>
 #include <Bitmask.hpp>
 #include <NdbBackup.hpp>
+#include <ndb_version.h>
 
 static Vector<BaseString> table_list;
 
@@ -158,6 +159,45 @@ createDropEvent(NDBT_Context* ctx, NDBT_
   return NDBT_OK;
 }
 
+static
+BaseString verStr(Uint32 ver)
+{
+  BaseString vStr;
+  vStr.appfmt("%u.%u.%u",
+              (ver >> 16) & 0xff,
+              (ver >> 8) & 0xff,
+              (ver >> 0) & 0xff);
+  return vStr;
+}
+
+static
+void outputVersionInfo(NdbOut& out)
+{
+  NdbRestarter restarter;
+  Uint32 apiVersion = ndbGetOwnVersion();
+  int masterNodeVersion = 0;
+  int minNdbdVer = 0;
+  int maxNdbdVer = 0;
+  int minMgmdVer = 0;
+  int maxMgmdVer = 0;
+  restarter.getMasterNodeVersion(masterNodeVersion);
+  restarter.getNodeTypeVersionRange(NDB_MGM_NODE_TYPE_NDB,
+                                    minNdbdVer,
+                                    maxNdbdVer);
+  restarter.getNodeTypeVersionRange(NDB_MGM_NODE_TYPE_MGM,
+                                    minMgmdVer,
+                                    maxMgmdVer);
+
+  out << "Mgmd min : " << verStr(minMgmdVer).c_str()
+      << " max : " << verStr(maxMgmdVer).c_str()
+      << "  Ndbd min : " << verStr(minNdbdVer).c_str()
+      << " max : " << verStr(maxNdbdVer).c_str()
+      << " (Master node (" << restarter.getMasterNodeId()
+      << ") : " << verStr(masterNodeVersion).c_str() << ")"
+      << " Api : " << verStr(apiVersion).c_str()
+      << endl;
+}
+
 /* An enum for expressing how many of the multiple nodes
  * of a given type an action should be applied to
  */
@@ -306,9 +346,110 @@ runBug48416(NDBT_Context* ctx, NDBT_Step
 
 static
 int
-runUpgrade_Half(NDBT_Context* ctx, NDBT_Step* step)
+determineVersion(int& versionA, int& versionB)
 {
-  // Assuming 2 replicas
+  /* Perform an MGMD upgrade + reset to determine versions */
+  AtrtClient atrt;
+
+  /* TODO : Deal with > 1 cluster sensibly? */
+  SqlResultSet clusters;
+  if (!atrt.getClusters(clusters))
+    return NDBT_FAILED;
+
+  while (clusters.next())
+  {
+    uint clusterId= clusters.columnAsInt("id");
+    SqlResultSet tmp_result;
+    if (!atrt.getConnectString(clusterId, tmp_result))
+      return NDBT_FAILED;
+
+    NdbRestarter restarter(tmp_result.column("connectstring"));
+    restarter.setReconnect(true); // Restarting mgmd
+    g_err << "Cluster '" << clusters.column("name")
+          << "@" << tmp_result.column("connectstring") << "'" << endl;
+
+    if(restarter.waitClusterStarted())
+      return NDBT_FAILED;
+
+    int min, max;
+
+    if (restarter.getNodeTypeVersionRange(NDB_MGM_NODE_TYPE_MGM, min, max))
+      return NDBT_FAILED;
+
+    if (min != max)
+    {
+      ndbout << "Error : min MGMD version != max MGMD version" << endl;
+      ndbout << min << " " << max << endl;
+      return NDBT_FAILED;
+    }
+    
+    versionA = min;
+
+    ndbout << "VersionA min = " << verStr(min).c_str()
+           << " max = " << verStr(max).c_str()
+           << endl;
+
+    // Restart ndb_mgmd(s)
+    SqlResultSet mgmds;
+    if (!atrt.getMgmds(clusterId, mgmds))
+      return NDBT_FAILED;
+    uint mgmdCount = mgmds.numRows();
+
+    mgmds.next();
+
+    int processId = mgmds.columnAsInt("id");
+
+    ndbout << "Temp upgrading mgmd " << processId << endl;
+    if (!atrt.changeVersion(processId, ""))
+      return NDBT_FAILED;
+    
+    if(restarter.waitConnected())
+      return NDBT_FAILED;
+
+    while (restarter.getNumRunningMgmdNodes() < (int) mgmdCount)
+    {
+      ndbout << "Waiting for Mgmd to recover" << endl;
+      NdbSleep_SecSleep(1);
+    }
+    
+    if (restarter.getNodeTypeVersionRange(NDB_MGM_NODE_TYPE_MGM, min, max))
+      return NDBT_FAILED;
+
+    ndbout << "VersionB min = " << verStr(min).c_str()
+           << " max = " << verStr(max).c_str()
+           << endl;
+    
+    if (min != versionA)
+      versionB = min;
+    else if (max != versionA)
+      versionB = max;
+    else
+    {
+      ndbout << "No version change" << endl;
+      versionB = versionA;
+    }
+
+    /* Reset back to the old version */
+    if (!atrt.resetVersion(processId, ""))
+    {
+      return NDBT_FAILED;
+    }
+
+    if (restarter.waitConnected())
+      return NDBT_FAILED;
+  }
+
+  return NDBT_OK;
+}
+
+static
+int
+runUpgrade_fast(NDBT_Context* ctx, NDBT_Step* step)
+{
+  /* Assuming 2 replicas
+   * We upgrade max of 1 node per nodegroup at a time
+   * This should be faster than upgrading nodes individually
+   */
 
   AtrtClient atrt;
 
@@ -322,6 +463,8 @@ runUpgrade_Half(NDBT_Context* ctx, NDBT_
 
   NodeSet mgmdNodeSet = (NodeSet) ctx->getProperty("MgmdNodeSet", Uint32(0));
   NodeSet ndbdNodeSet = (NodeSet) ctx->getProperty("NdbdNodeSet", Uint32(0));
+  bool forceMasterOnUpgradedNode = (ndbdNodeSet == NotAll) &&
+    (ctx->getProperty("ForceMasterOnUpgradedNode", Uint32(0)) == 1);
 
   SqlResultSet clusters;
   if (!atrt.getClusters(clusters))
@@ -348,18 +491,18 @@ runUpgrade_Half(NDBT_Context* ctx, NDBT_
       return NDBT_FAILED;
 
     uint mgmdCount = mgmds.numRows();
-    uint restartCount = getNodeCount(mgmdNodeSet, mgmdCount);
+    uint upgradeCount = getNodeCount(mgmdNodeSet, mgmdCount);
     
-    ndbout << "Restarting "
-             << restartCount << " of " << mgmdCount
+    ndbout << "Upgrading "
+             << upgradeCount << " of " << mgmdCount
             << " mgmds" << endl;
-      
-    while (mgmds.next() && restartCount --)
+
+    while (mgmds.next() && upgradeCount--)
     {
-      ndbout << "Restart mgmd" << mgmds.columnAsInt("node_id") << endl;
+      ndbout << "Upgrade mgmd" << mgmds.columnAsInt("node_id") << endl;
       if (!atrt.changeVersion(mgmds.columnAsInt("id"), ""))
         return NDBT_FAILED;
-
+      
       if(restarter.waitConnected())
         return NDBT_FAILED;
     }
@@ -382,103 +525,140 @@ runUpgrade_Half(NDBT_Context* ctx, NDBT_
     }
 
     uint ndbdCount = ndbds.numRows();
-    restartCount = getNodeCount(ndbdNodeSet, ndbdCount);
+    upgradeCount = getNodeCount(ndbdNodeSet, ndbdCount);
+    uint remainCount = ndbdCount - upgradeCount;
     
-    ndbout << "Restarting "
-             << restartCount << " of " << ndbdCount
-             << " ndbds" << endl;
+    ndbout << "Upgrading "
+           << upgradeCount << " of " << ndbdCount
+           << " ndbds" << endl;
     
-    int nodesarray[256];
-    int cnt= 0;
-
-    Bitmask<4> seen_groups;
     Bitmask<4> restarted_nodes;
-    for (Uint32 i = 0; (i<nodes.size() && restartCount); i++)
-    {
-      int nodeId = nodes[i].nodeId;
-      int processId = nodes[i].processId;
-      int nodeGroup= nodes[i].nodeGroup;
 
-      if (seen_groups.get(nodeGroup))
+    while (upgradeCount)
+    {    
+      Bitmask<4> seen_groups;
+      int nodesarray[256];
+      int cnt= 0;
+
+      for (Uint32 i = 0; (i<nodes.size() && upgradeCount); i++)
       {
-        // One node in this node group already down
-        continue;
+        int nodeId = nodes[i].nodeId;
+        int processId = nodes[i].processId;
+        int nodeGroup= nodes[i].nodeGroup;
+        
+        if (restarted_nodes.get(nodeId))
+        {
+          // Already upgraded
+          continue;
+        }
+        
+        if (seen_groups.get(nodeGroup))
+        {
+          // One node in this node group already down
+          continue;
+        }
+        seen_groups.set(nodeGroup);
+        restarted_nodes.set(nodeId);
+        
+        ndbout << "Upgrade node " << nodeId << endl;
+        
+        if (!atrt.changeVersion(processId, args))
+          return NDBT_FAILED;
+        
+        if (waitNode)
+        {
+          /* changeVersion will have no effect if the process
+           * is already upgraded, so we check for 'started' already
+           */
+          restarter.waitNodesStartedOrNoStart(&nodeId, 1);
+        }
+        
+        nodesarray[cnt++]= nodeId;
+        upgradeCount--;
       }
-      seen_groups.set(nodeGroup);
-      restarted_nodes.set(nodeId);
-
-      ndbout << "Restart node " << nodeId << endl;
-      
-      if (!atrt.changeVersion(processId, args))
-        return NDBT_FAILED;
       
-      if (waitNode)
+      if (!waitNode)
       {
-        restarter.waitNodesNoStart(&nodeId, 1);
+        /* changeVersion will have no effect if the process
+         * is already upgraded, so we check for 'started' already
+         */
+        if (restarter.waitNodesStartedOrNoStart(nodesarray, cnt))
+          return NDBT_FAILED;
       }
-
-      nodesarray[cnt++]= nodeId;
-      restartCount--;
-    }
-    
-    if (!waitNode)
-    {
-      if (restarter.waitNodesNoStart(nodesarray, cnt))
+      
+      ndbout << "Starting and wait for started..." << endl;
+      if (restarter.startAll())
         return NDBT_FAILED;
-    }
-
-    ndbout << "Starting and wait for started..." << endl;
-    if (restarter.startAll())
-      return NDBT_FAILED;
-
-    if (restarter.waitClusterStarted())
-      return NDBT_FAILED;
-
-    if (event && createDropEvent(ctx, step))
-    {
-      return NDBT_FAILED;
-    }
-
-    // Restart the remaining nodes
-    cnt= 0;
-    for (Uint32 i = 0; (i<nodes.size() && restartCount); i++)
-    {
-      int nodeId = nodes[i].nodeId;
-      int processId = nodes[i].processId;
-
-      if (restarted_nodes.get(nodeId))
-        continue;
       
-      ndbout << "Restart node " << nodeId << endl;
-      if (!atrt.changeVersion(processId, args))
+      if (restarter.waitClusterStarted())
         return NDBT_FAILED;
-
-      if (waitNode)
+      
+      if (event && createDropEvent(ctx, step))
       {
-        restarter.waitNodesNoStart(&nodeId, 1);
+        return NDBT_FAILED;
       }
-
-      nodesarray[cnt++]= nodeId;
-      restartCount --;
     }
 
-    
-    if (!waitNode)
+    if (forceMasterOnUpgradedNode)
     {
-      if (restarter.waitNodesNoStart(nodesarray, cnt))
-        return NDBT_FAILED;
-    }
+      ndbout << "Restarting "
+             << remainCount 
+             << " non-upgraded Ndbds to force master role to upgraded node" 
+             << endl;
+      
+      while (remainCount)
+      {
+        Bitmask<4> seen_groups;
+        int nodesarray[256];
+        int cnt= 0;
 
-    ndbout << "Starting and wait for started..." << endl;
-    if (restarter.startAll())
-      return NDBT_FAILED;
-    
-    if (restarter.waitClusterStarted())
-      return NDBT_FAILED;
+        for (Uint32 i = 0; (i<nodes.size() && remainCount); i++)
+        {
+          int nodeId = nodes[i].nodeId;
+          int processId = nodes[i].processId;
+          int nodeGroup= nodes[i].nodeGroup;
+          
+          if (seen_groups.get(nodeGroup))
+          {
+            /* Nodegroup already seen, not safe */
+            continue;
+          }
+          if (restarted_nodes.get(nodeId))
+          {
+            /* Node already restarted / upgraded, skip */
+            continue;
+          }
+          seen_groups.set(nodeGroup);
+          restarted_nodes.set(nodeId);
+          
+          ndbout << "Restart node " << nodeId << endl;
+          
+          if (!atrt.resetProc(processId))
+            return NDBT_FAILED;
+          
+          if (waitNode)
+          {
+            restarter.waitNodesNoStart(&nodeId, 1);
+          }
+          nodesarray[cnt++] = nodeId;
+          remainCount--;
+        }
+        
+        if (!waitNode)
+        {
+          if (restarter.waitNodesNoStart(nodesarray, cnt))
+            return NDBT_FAILED;
+        }
 
-    if (event && createDropEvent(ctx, step))
-    {
-      return NDBT_FAILED;
+        ndbout << "Starting and wait for started..." << endl;
+        if (restarter.startAll())
+          return NDBT_FAILED;
+        
+        if (restarter.waitClusterStarted())
+          return NDBT_FAILED;
+      }
+      
+      ndbout << "All remaining nodes restarted" << endl;
     }
   }
 
@@ -498,7 +678,7 @@ int runUpgrade_NR2(NDBT_Context* ctx, ND
 
   ctx->setProperty("WaitNode", 1);
   ctx->setProperty("CreateDropEvent", 1);
-  int res = runUpgrade_Half(ctx, step);
+  int res = runUpgrade_fast(ctx, step);
   ctx->stopTest();
   return res;
 }
@@ -513,7 +693,7 @@ int runUpgrade_NR3(NDBT_Context* ctx, ND
   // Assuming 2 replicas
 
   ctx->setProperty("CreateDropEvent", 1);
-  int res = runUpgrade_Half(ctx, step);
+  int res = runUpgrade_fast(ctx, step);
   ctx->stopTest();
   return res;
 }
@@ -524,7 +704,7 @@ int runUpgrade_NR3(NDBT_Context* ctx, ND
 int runUpgrade_NdbdOnly(NDBT_Context* ctx, NDBT_Step* step)
 {
   ctx->setProperty("MgmdNodeSet", (Uint32) NodeSet(None));
-  int res = runUpgrade_Half(ctx, step);
+  int res = runUpgrade_fast(ctx, step);
   ctx->stopTest();
   return res;
 }
@@ -536,12 +716,12 @@ int runUpgrade_NdbdOnly(NDBT_Context* ct
 int runUpgrade_NdbdFirst(NDBT_Context* ctx, NDBT_Step* step)
 {
   ctx->setProperty("MgmdNodeSet", (Uint32) NodeSet(None));
-  int res = runUpgrade_Half(ctx, step);
+  int res = runUpgrade_fast(ctx, step);
   if (res == NDBT_OK)
   {
     ctx->setProperty("MgmdNodeSet", (Uint32) NodeSet(All));
     ctx->setProperty("NdbdNodeSet", (Uint32) NodeSet(None));
-    res = runUpgrade_Half(ctx, step);
+    res = runUpgrade_fast(ctx, step);
   }
   ctx->stopTest();
   return res;
@@ -554,7 +734,7 @@ int runUpgrade_NotAllMGMD(NDBT_Context* 
 {
   ctx->setProperty("MgmdNodeSet", (Uint32) NodeSet(NotAll));
   ctx->setProperty("NdbdNodeSet", (Uint32) NodeSet(None));
-  int res = runUpgrade_Half(ctx, step);
+  int res = runUpgrade_fast(ctx, step);
   ctx->stopTest();
   return res;
 }
@@ -757,7 +937,7 @@ int runUpgrade_Traffic(NDBT_Context* ctx
   // Assuming 2 replicas
   
   ndbout_c("upgrading");
-  int res = runUpgrade_Half(ctx, step);
+  int res = runUpgrade_fast(ctx, step);
   if (res == NDBT_OK)
   {
     ndbout_c("rolling restarting");
@@ -767,9 +947,214 @@ int runUpgrade_Traffic(NDBT_Context* ctx
   return res;
 }
 
+/**
+   Upgrade subset of the MGMDs + NDBDs
+*/
+int runUpgrade_Subset_Kernel(NDBT_Context* ctx, NDBT_Step* step)
+{
+  /* Set properties to indicate only half system should
+   * be upgraded
+   */
+  ctx->setProperty("MgmdNodeSet", (Uint32) NodeSet(NotAll));
+  ctx->setProperty("NdbdNodeSet", (Uint32) NodeSet(NotAll));
+  int res = runUpgrade_fast(ctx, step);
+  ctx->stopTest();
+  return res;
+}
+
+
+bool versionsSpanBoundary(int verA, int verB, int incBoundaryVer)
+{
+  int minPeerVer = MIN(verA, verB);
+  int maxPeerVer = MAX(verA, verB);
+
+  return ( (minPeerVer <  incBoundaryVer) &&
+           (maxPeerVer >= incBoundaryVer) );
+}
+
+#define SchemaTransVersion NDB_MAKE_VERSION(6,4,0)
+
+/**
+   Upgrade subset of the MGMDs + NDBDs
+   Restart others to force 'new master' testing
+*/
+int runUpgrade_Subset_Kernel_WithForcedMaster(NDBT_Context* ctx, NDBT_Step* step)
+{
+  /* First check which versions we're dealing with */
+  int versionA, versionB;
+  if (determineVersion(versionA, versionB))
+    return NDBT_FAILED;
+  
+  if (versionsSpanBoundary(versionA, versionB, SchemaTransVersion))
+  {
+    /* Schema transactions introduce an incompatibility where the new DICT master
+     * sends a signal unknown to older DICT instances
+     */
+    ndbout << "Versions not suitable for new-version-master-takeover, skipping"
+           << verStr(versionA).c_str() << " -> " << verStr(versionB).c_str()
+           << endl;
+    ctx->stopTest();
+    return NDBT_OK;
+  }
+
+  ctx->setProperty("ForceMasterOnUpgradedNode", Uint32(1));
+  return runUpgrade_Subset_Kernel(ctx, step);
+}
+
+int isDDLPossible(bool& result)
+{
+  NdbRestarter restarter;
+  int minNdbVer = 0;
+  int maxNdbVer = 0;
+  result = true;
+  
+  if (restarter.getNodeTypeVersionRange(NDB_MGM_NODE_TYPE_NDB,
+                                        minNdbVer,
+                                        maxNdbVer) == -1)
+  {
+    g_err << "isDDLPossible() getNodeTypeVersionRange call failed" << endl;
+    result = false;
+    return NDBT_FAILED;
+  }
+
+  if (minNdbVer != maxNdbVer)
+  {
+    result = false;
+    ndbout << "Ndbd nodes have mixed versions, DDL not possible" << endl;
+  }
+
+  /* Version-specific checks */
+  if (versionsSpanBoundary(minNdbVer, NDB_VERSION_D, SchemaTransVersion))
+  {
+    result = false;
+    ndbout << "Ndbd and Api versions span schema-transaction change "
+           << "boundary.  DDL not possible" << endl;
+  }
+
+  return NDBT_OK;
+}
+
+int runCheckDDLBehaviour(NDBT_Context* ctx, NDBT_Step* step)
+{
+  /* Intention here is to check that selected operations 
+   * against the upgraded kernel nodes can succeed or fail
+   * cleanly from the original Api version
+   */
+
+  /* Basic DDL
+   * Drop and then re-create one of the tables
+   */
+  Ndb* ndb = GETNDB(step);
+  NdbDictionary::Dictionary* pDict = ndb->getDictionary();
+  bool ddlShouldPass = false;
+  
+  if (isDDLPossible(ddlShouldPass) == NDBT_FAILED)
+  {
+    return NDBT_FAILED;
+  }
+
+  if (runGetTableList(ctx, step) != NDBT_OK)
+    return NDBT_FAILED;
+
+  int numTables = table_list.size();
+  int aTable = rand() % numTables;
+  const char* tabName = table_list[aTable].c_str();
+
+  outputVersionInfo(ndbout);
+  ndbout_c("Checking drop/create of table %s", tabName);
+
+  int dropRc = pDict->dropTable(tabName);
+
+  if (dropRc == 0)
+  {
+    if (!ddlShouldPass)
+    {
+      g_err << "Drop table should not have passed for table "
+            << tabName << endl;
+      return NDBT_FAILED;
+    }
+  }
+  else
+  {
+    NdbError err = pDict->getNdbError();
+    g_err << "Failed to drop table : " << tabName
+          << " Error : "
+          << err << endl;
+    g_err << "At line " << __LINE__ << endl;
+    g_err << "My version : " << NDB_VERSION_D << endl;
+
+    if (ddlShouldPass)
+    {
+      g_err << "DDL was expected to pass" << endl;
+      return NDBT_FAILED;
+    }
+    else
+    {
+      /* 763 Alter table requires cluster nodes to have exact same version */
+      if (err.code == 763)
+      {
+        g_err << "DDL failed, as expected" << endl;
+        /* Drop out now */
+        return NDBT_OK;
+      }
+      g_err << "Unexpected error" << endl;
+      return NDBT_FAILED;
+    }
+  }
+
+  int createRc = NDBT_Tables::createTable(ndb, tabName);
+  
+  if (createRc == 0)
+  {
+    if (!ddlShouldPass)
+    {
+      g_err << "Create table was not expected to pass for table "
+            << tabName << endl;
+      return NDBT_FAILED;
+    }
+  }
+  else
+  {
+    NdbError err = pDict->getNdbError();
+    g_err << "Failed to re-create table : " << tabName
+          << " Error : "
+          << err << endl;
+    g_err << "At line " << __LINE__ << endl;
+    g_err << "My version : " << NDB_VERSION_D << endl;
+
+    if (ddlShouldPass)
+    {
+      g_err << "DDL was expected to pass" << endl;
+      return NDBT_FAILED;
+    }
+    else
+    {
+      /* 763 Alter table requires cluster nodes to have exact same version */
+      if (err.code == 763)
+      {
+        g_err << "DDL failed, as expected" << endl;
+        /* Drop out now */
+        return NDBT_OK;
+      }
+      g_err << "Unexpected error" << endl;
+      return NDBT_FAILED;
+    }
+  }
+
+  ndbout_c("OK");
+  return NDBT_OK;
+}
+
 int
 startPostUpgradeChecks(NDBT_Context* ctx, NDBT_Step* step)
 {
+  /*
+   * First check that DDL is behaving properly from the
+   * 'old' version of the Api
+   */
+  if (runCheckDDLBehaviour(ctx, step) != NDBT_OK)
+    return NDBT_FAILED;
+
   /**
    * This will restart *self* in new version
    */
@@ -830,9 +1215,9 @@ int
 runPostUpgradeChecks(NDBT_Context* ctx, NDBT_Step* step)
 {
   /**
-   * Table will be dropped/recreated
-   *   automatically by NDBT...
-   *   so when we enter here, this is already tested
+   * Check that the new version API succeeds / 
+   * gets sensible error messages when attempting
+   * DDL post upgrade.
    */
   NdbBackup backup(GETNDB(step)->getNodeId()+1);
 
@@ -915,16 +1300,114 @@ runWait(NDBT_Context* ctx, NDBT_Step* st
 
 int runPostUpgradeDecideDDL(NDBT_Context* ctx, NDBT_Step* step)
 {
-  /* We always support DDL post upgrade in 6.3 */
-  ctx->setProperty("NoDDL", 1);
+  /* We are running post-upgrade, now examine the versions
+   * of connected nodes and update the 'NoDDL' variable
+   * accordingly
+   */
+  /* DDL should be ok as long as
+   *  1) All data nodes have the same version
+   *  2) There is not some version specific exception
+   * TODO : Modify 7.0 DecideDDL to check for version-span problem
+   */
+  bool useDDL = true;
 
-  return NDBT_OK;
+  if (isDDLPossible(useDDL) == NDBT_FAILED)
+    return NDBT_FAILED;
+
+  ctx->setProperty("NoDDL", useDDL?0:1);
+
+  return runCheckDDLBehaviour(ctx, step);
 }
 
+/* testUpgrade
+ * Invocation
+ * ----------
+ * testUpgrade is run by ATRT against two cluster versions
+ * (which could even be the same cluster version).
+ *
+ * ATRT invokes testUpgrade -n <TC_name> for the first version
+ * against a cluster running at that version.  The testcase
+ * can upgrade some or all of the cluster nodes.  At some
+ * point the testcase can 'upgrade itself' which results in 
+ * ATRT invoking testUpgrade -n <TC_Name>--post-upgrade against
+ * the second version's implementation of testUpgrade.
+ * 
+ * For an upgrade from 6.3.X to 7.0.Y, the pre-upgrade steps
+ * will be carried out by the 6.3.X version of testUpgrade, 
+ * and the post-upgrade steps will be carried out by the 
+ * 7.0.Y version.
+ * 
+ * DDL
+ * ---
+ * For most releases, DDL is not possible while the data nodes
+ * are on mixed versions.  DDL *is* possible while the Api
+ * nodes are on different versions to the data nodes.
+ * 
+ * Note that for 6.3->7.0 upgrades, DDL is not possible until
+ * all NDBD and API nodes are upgraded.
+ *
+ * Upgrade Ordering
+ * ----------------
+ * Traditionally, Ndb supported only upgrades of the order :
+ *  - Upgrade all MGMD nodes
+ *  - Upgrade all NDBD nodes
+ *  - Upgrade all API nodes
+ * 
+ * e.g. at any time it is guaranteed that :
+ * MGMD version >= NDBD version >= API version.
+ * 
+ * Since later releases of 7.0, this has been relaxed to allow
+ * less constrained upgrade scenarios, e.g. API nodes first,
+ * NDBD nodes first, or some mixture of nodes.
+ * Some testcases below check that the system behaves correctly
+ * in these non-traditional upgrade order scenarios.
+ *
+ * Ordering variants across node types :
+ *   Traditional : All MGMDs, All NDBDs, All Apis
+ *                 MGMD version >= NDBD version >= API version.
+ *   Api first   : All Api, All MGMDs, All NDBDs
+ *                 API version >= MGMD version >= NDBD version
+ *   Reverse     : All Api, All NDBDs, All MGMDs
+ *                 API version >= NDBD version >= MGMD version
+ *   Mixed       : Part MGMD, All Apis, All NDBDs, Remain MGMDs
+ *                 MGMD version <=> API version >= NDBD version
+ *   
+ * Ordering variants within NDBDs
+ *   Single at a time
+ *   One node per nodegroup at a time, one stopped at a time
+ *   One node per nodegroup at a time, all stopped together
+ *   
+ * TODO :
+ *   Various blocks 'elect' a Master node which coordinates distributed
+ *   operations.  The 'election' is generally approximated as 'longest
+ *   running node'.  This has a side-effect that during a normal upgrade, 
+ *   the master role remains on the old version until all NDBD nodes are 
+ *   upgraded.  However, if all old version nodes underwent a non-upgrading
+ *   restart before the upgrade was complete then the master would be on
+ *   the new version, perhaps exposing bugs.
+ *
+ *   One node per nodegroup at a time, upgrade half, restart other half
+ *     Forces an upgraded node to take on 'Master' roles, in non-fully
+ *     upgraded cluster.
+ * 
+ * TODO :
+ *   Currently we run either the old or new version of testUpgrade at
+ *   any time, and can only perform one change of version.
+ *   It would be good to run old and new Api version concurrently, either
+ *   by running >1 test program, or allowing the single process to 'downgrade'
+ */
 
 NDBT_TESTSUITE(testUpgrade);
 TESTCASE("Upgrade_NR1",
 	 "Test that one node at a time can be upgraded"){
+  /* Upgrade MGMDs
+   * Upgrade cluster, each NDBD individually
+   *   Use --initial
+   *   After each node starts, check that an event can be 
+   *   created+dropped for each table
+   * Upgrade Api
+   *   Check backup possible etc.
+   */
   INITIALIZER(runCheckStarted);
   INITIALIZER(runBug48416);
   STEP(runUpgrade_NR1);
@@ -935,8 +1418,20 @@ POSTUPGRADE("Upgrade_NR1")
   INITIALIZER(runCheckStarted);
   INITIALIZER(runPostUpgradeChecks);
 }
+
 TESTCASE("Upgrade_NR2",
 	 "Test that one node in each nodegroup can be upgradde simultaneously"){
+  /* Upgrade MGMDs
+   * Upgrade cluster, half NDBDs at a time
+   *   Use --initial
+   *   Wait for NoStart state for each node individually before stopping next
+   *   Start all upgraded nodes
+   *   Test that an event can be created+dropped for each table 
+   *   Upgrade remaining nodes
+   *   Test that an event can be created+dropped for each table
+   * Upgrade Api
+   *   Check backup possible etc.
+   */
   INITIALIZER(runCheckStarted);
   STEP(runUpgrade_NR2);
   VERIFIER(startPostUpgradeChecks);
@@ -946,8 +1441,20 @@ POSTUPGRADE("Upgrade_NR2")
   INITIALIZER(runCheckStarted);
   INITIALIZER(runPostUpgradeChecks);
 }
+
 TESTCASE("Upgrade_NR3",
 	 "Test that one node in each nodegroup can be upgradde simultaneously"){
+  /* Upgrade MGMDs
+   * Upgrade cluster, half NDBDs at a time
+   *   Use --initial
+   *   Stop all nodes then wait for all to enter NoStart before starting
+   *   Start all upgraded nodes
+   *   Test that an event can be created+dropped for each table 
+   *   Upgrade remaining nodes
+   *   Test that an event can be created+dropped for each table
+   * Upgrade Api
+   *   Check backup possible etc.
+   */
   INITIALIZER(runCheckStarted);
   STEP(runUpgrade_NR3);
   VERIFIER(startPostUpgradeChecks);
@@ -957,9 +1464,21 @@ POSTUPGRADE("Upgrade_NR3")
   INITIALIZER(runCheckStarted);
   INITIALIZER(runPostUpgradeChecks);
 }
+
 TESTCASE("Upgrade_FS",
 	 "Test that one node in each nodegroup can be upgrade simultaneously")
 {
+  /* Upgrade MGMDs
+   * Restart one NDBD in each node group
+   *   Don't use --initial, so FS will be present
+   *   Wait for each node to enter NOT_STARTED before moving to next
+   *   Start all upgraded nodes
+   *   Test that an event can be created+dropped for each table 
+   *   Upgrade remaining nodes
+   *   Test that an event can be created+dropped for each table
+   * Upgrade Api
+   *   Check backup possible etc.
+   */
   TC_PROPERTY("KeepFS", 1);
   INITIALIZER(runCheckStarted);
   INITIALIZER(runCreateAllTables);
@@ -972,9 +1491,23 @@ POSTUPGRADE("Upgrade_FS")
   INITIALIZER(runCheckStarted);
   INITIALIZER(runPostUpgradeChecks);
 }
+
 TESTCASE("Upgrade_Traffic",
 	 "Test upgrade with traffic, all tables and restart --initial")
 {
+  /* Upgrade MGMDs
+   * Run basic traffic to tables
+   * Restart one NDBD in each node group
+   *   Use --initial
+   *   Wait for each node to enter NOT_STARTED before moving to next
+   *   Start all upgraded nodes
+   *   Test that an event can be created+dropped for each table 
+   *   Upgrade remaining nodes
+   *   Test that an event can be created+dropped for each table
+   *   Perform rolling restart of all nodes individually
+   * Upgrade Api
+   *   Check backup possible etc.
+   */
   INITIALIZER(runCheckStarted);
   INITIALIZER(runCreateAllTables);
   STEP(runUpgrade_Traffic);
@@ -986,9 +1519,23 @@ POSTUPGRADE("Upgrade_Traffic")
   INITIALIZER(runCheckStarted);
   INITIALIZER(runPostUpgradeChecks);
 }
+
 TESTCASE("Upgrade_Traffic_FS",
 	 "Test upgrade with traffic, all tables and restart using FS")
 {
+  /* Upgrade MGMDs
+   * Run basic traffic to tables
+   * Restart one NDBD in each node group
+   *   Don't use --initial, so FS will be present
+   *   Wait for each node to enter NOT_STARTED before moving to next
+   *   Start all upgraded nodes
+   *   Test that an event can be created+dropped for each table 
+   *   Upgrade remaining nodes
+   *   Test that an event can be created+dropped for each table
+   *   Perform rolling restart of all nodes individually
+   * Upgrade Api
+   *   Check backup possible etc.
+   */
   TC_PROPERTY("KeepFS", 1);
   INITIALIZER(runCheckStarted);
   INITIALIZER(runCreateAllTables);
@@ -1001,9 +1548,23 @@ POSTUPGRADE("Upgrade_Traffic_FS")
   INITIALIZER(runCheckStarted);
   INITIALIZER(runPostUpgradeChecks);
 }
+
 TESTCASE("Upgrade_Traffic_one",
 	 "Test upgrade with traffic, *one* table and restart --initial")
 {
+  /* Upgrade MGMDs
+   * Run basic traffic to one table
+   * Restart one NDBD in each node group
+   *   Use --initial
+   *   Wait for each node to enter NOT_STARTED before moving to next
+   *   Start all upgraded nodes
+   *   Test that an event can be created+dropped for each table 
+   *   Upgrade remaining nodes
+   *   Test that an event can be created+dropped for each table
+   *   Perform rolling restart of all nodes individually
+   * Upgrade Api
+   *   Check backup possible etc.
+   */
   INITIALIZER(runCheckStarted);
   INITIALIZER(runCreateOneTable);
   STEP(runUpgrade_Traffic);
@@ -1015,9 +1576,23 @@ POSTUPGRADE("Upgrade_Traffic_one")
   INITIALIZER(runCheckStarted);
   INITIALIZER(runPostUpgradeChecks);
 }
+
 TESTCASE("Upgrade_Traffic_FS_one",
 	 "Test upgrade with traffic, all tables and restart using FS")
 {
+  /* Upgrade MGMDs
+   * Run basic traffic to one table
+   * Restart one NDBD in each node group
+   *   Don't use --initial, so FS will be present
+   *   Wait for each node to enter NOT_STARTED before moving to next
+   *   Start all upgraded nodes
+   *   Test that an event can be created+dropped for each table 
+   *   Upgrade remaining nodes
+   *   Test that an event can be created+dropped for each table
+   *   Perform rolling restart of all nodes individually
+   * Upgrade Api
+   *   Check backup possible etc.
+   */
   TC_PROPERTY("KeepFS", 1);
   INITIALIZER(runCheckStarted);
   INITIALIZER(runCreateOneTable);
@@ -1030,9 +1605,14 @@ POSTUPGRADE("Upgrade_Traffic_FS_one")
   INITIALIZER(runCheckStarted);
   INITIALIZER(runPostUpgradeChecks);
 }
+
 TESTCASE("Upgrade_Api_Only",
          "Test that upgrading the Api node only works")
 {
+  /* Upgrade Api
+   *   Check backup possible etc
+   *   Run basic traffic
+   */
   INITIALIZER(runCheckStarted);
   INITIALIZER(runCreateAllTables);
   VERIFIER(startPostUpgradeChecksApiFirst);
@@ -1044,14 +1624,23 @@ POSTUPGRADE("Upgrade_Api_Only")
   INITIALIZER(runGetTableList);
   TC_PROPERTY("WaitSeconds", 30);
   STEP(runBasic);
-  STEP(runPostUpgradeChecks);
   STEP(runWait);
+  FINALIZER(runPostUpgradeChecks);
   FINALIZER(runClearAll);
 }
+
 TESTCASE("Upgrade_Api_Before_NR1",
          "Test that upgrading the Api node before the kernel works")
 {
-  /* Api, then MGMD(s), then NDBDs */
+  /* Upgrade Api
+   * Run basic traffic
+   * Upgrade MGMDs
+   * Upgrade data nodes individually
+   *   Use --initial
+   *   After each node starts, check that an event can be 
+   *   created+dropped for each table
+   * Check backup possible
+   */
   INITIALIZER(runCheckStarted);
   INITIALIZER(runCreateAllTables);
   VERIFIER(startPostUpgradeChecksApiFirst);
@@ -1066,9 +1655,18 @@ POSTUPGRADE("Upgrade_Api_Before_NR1")
   FINALIZER(runPostUpgradeChecks);
   FINALIZER(runClearAll);
 }
+
 TESTCASE("Upgrade_Api_NDBD_MGMD",
          "Test that updating in reverse order works")
 {
+  /* Upgrade Api
+   * Run basic traffic
+   *   Upgrade cluster, half NDBD nodes at a time
+   *     Use --initial
+   *     Check that an event can be created+dropped for each table
+   *   Upgrade MGMD nodes
+   * Check backup possible
+   */
   INITIALIZER(runCheckStarted);
   INITIALIZER(runCreateAllTables);
   VERIFIER(startPostUpgradeChecksApiFirst);
@@ -1083,9 +1681,20 @@ POSTUPGRADE("Upgrade_Api_NDBD_MGMD")
   FINALIZER(runPostUpgradeChecks);
   FINALIZER(runClearAll);
 }
+
 TESTCASE("Upgrade_Mixed_MGMD_API_NDBD",
          "Test that upgrading MGMD/API partially before data nodes works")
 {
+  /* Upgrade one MGMD
+   * Run Basic traffic
+   * Upgrade Api
+   * Run Basic traffic
+   * Upgrade cluster, half NDBD nodes at a time
+   *   Use --initial
+   *   Check that an event can be created+dropped for each table
+   * Upgrade remaining MGMD node(s)
+   * Check backup possible
+   */
   INITIALIZER(runCheckStarted);
   INITIALIZER(runCreateAllTables);
   STEP(runUpgrade_NotAllMGMD); /* Upgrade an MGMD */
@@ -1103,6 +1712,72 @@ POSTUPGRADE("Upgrade_Mixed_MGMD_API_NDBD
   FINALIZER(runPostUpgradeChecks);
   FINALIZER(runClearAll);
 }
+
+TESTCASE("Upgrade_SubsetKernel_API_Remain",
+         "Test that upgrading part of kernel MGMD + NDBD then API then remaining works")
+{
+  /*
+   * Run basic traffic from old version
+   *   Upgrade an MGMD
+   *   Upgrade some subset of NDBD nodes
+   * Upgrade API
+   * Run basic traffic from new version
+   *   Upgrade remaining MGMDs
+   *   Upgrade remaining NDBD nodes
+   *   Check backup etc.
+   */
+  INITIALIZER(runCheckStarted);
+  INITIALIZER(runCreateAllTables);
+  STEP(runUpgrade_Subset_Kernel);
+  STEP(runBasic);
+  VERIFIER(startPostUpgradeChecksApiFirst);
+}
+POSTUPGRADE("Upgrade_SubsetKernel_API_Remain")
+{
+  INITIALIZER(runCheckStarted);
+  INITIALIZER(runPostUpgradeDecideDDL);
+  INITIALIZER(runGetTableList);
+  INITIALIZER(runClearAll); /* Clear rows from old-ver basic run */
+  STEP(runBasic);
+  STEP(runUpgrade_NR3); /* Will upgrade all nodes */ 
+  FINALIZER(runPostUpgradeChecks); 
+  FINALIZER(runClearAll);
+}
+
+TESTCASE("Upgrade_SubsetKernel_NewMaster_API",
+         "Test that upgrading subset of kernel, switching master role to new NDBDs then Api works")
+{
+  /*
+   * Run basic traffic from old version
+   *   Upgrade an MGMD
+   *   Upgrade some subset of NDBD nodes
+   *   Restart remaining NDBD nodes (at same version) to 
+   *    force master onto upgraded nodes
+   * Upgrade API
+   * Run basic traffic from new version
+   *   Upgrade remaining MGMDs
+   *   Upgrade remaining NDBD nodes
+   *   Check backup etc.
+   */
+  INITIALIZER(runCheckStarted);
+  INITIALIZER(runCreateAllTables);
+  /* Restart non upgraded nodes to get master on new version */
+  TC_PROPERTY("ForceMasterOnUpgradedNode", Uint32(1));
+  STEP(runBasic);
+  STEP(runUpgrade_Subset_Kernel_WithForcedMaster);
+  VERIFIER(startPostUpgradeChecksApiFirst);
+}
+POSTUPGRADE("Upgrade_SubsetKernel_NewMaster_API")
+{
+  INITIALIZER(runCheckStarted);
+  INITIALIZER(runPostUpgradeDecideDDL);
+  INITIALIZER(runGetTableList);
+  INITIALIZER(runClearAll); /* Clear rows from old-ver basic run */
+  STEP(runBasic);
+  STEP(runUpgrade_NR3); /* Will upgrade all nodes */ 
+  FINALIZER(runPostUpgradeChecks); 
+  FINALIZER(runClearAll);
+}
   
 NDBT_TESTSUITE_END(testUpgrade);
 

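verStr() and versionsSpanBoundary() above rely on NDB's packed version word: major in bits 16-23, minor in bits 8-15, build in bits 0-7, which is also what NDB_MAKE_VERSION(6,4,0) produces for the SchemaTransVersion boundary. A small standalone illustration of that encoding (MAKE_VER stands in for NDB_MAKE_VERSION, and the version values are examples only):

  #include <cstdio>

  // Packs a version word the same way verStr() above unpacks one.
  #define MAKE_VER(M, m, b) (((M) << 16) | ((m) << 8) | (b))

  int main()
  {
    unsigned oldVer   = MAKE_VER(6, 3, 33);  // example old-version node
    unsigned newVer   = MAKE_VER(7, 0, 13);  // example new-version node
    unsigned boundary = MAKE_VER(6, 4, 0);   // schema-transaction boundary

    // Same test as versionsSpanBoundary(): true when one peer is below the
    // boundary and the other is at or above it.
    bool span = (oldVer < boundary) && (newVer >= boundary);
    printf("versions span the 6.4.0 boundary: %s\n", span ? "yes" : "no");
    return 0;
  }
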
=== modified file 'storage/ndb/test/run-test/command.cpp'
--- a/storage/ndb/test/run-test/command.cpp	2010-02-18 23:01:15 +0000
+++ b/storage/ndb/test/run-test/command.cpp	2010-04-08 16:50:12 +0000
@@ -107,16 +107,17 @@ Vector<atrt_process> g_saved_procs;
 
 static
 bool
-do_change_version(atrt_config& config, SqlResultSet& command,
-                  AtrtClient& atrtdb){
-  /**
-   * TODO make option to restart "not" initial
-   */
+do_modify_version(atrt_config& config, SqlResultSet& command,
+                  AtrtClient& atrtdb,
+                  const char* operationName,
+                  const char* verA,
+                  const char* verB)
+{
   uint process_id= command.columnAsInt("process_id");
   const char* process_args= command.column("process_args");
 
-  g_logger.info("Change version for process: %d, args: %s",
-                process_id, process_args);
+  g_logger.info("%s for process: %d, args: %s",
+                operationName, process_id, process_args);
 
   // Get the process
   if (process_id > config.m_processes.size()){
@@ -125,8 +126,14 @@ do_change_version(atrt_config& config, S
   }
   atrt_process& proc= *config.m_processes[process_id];
 
-  const char* new_prefix= g_prefix1 ? g_prefix1 : g_prefix;
-  const char* old_prefix= g_prefix;
+  const char* new_prefix= verB ? verB : verA;
+  const char* old_prefix= verA;
+  g_logger.info(" Existing path   : %s.",
+                proc.m_proc.m_path.c_str());
+  g_logger.info(" Old path prefix : %s.",
+                old_prefix);
+  g_logger.info(" New path prefix : %s.", 
+                new_prefix);
   const char *start= strstr(proc.m_proc.m_path.c_str(), old_prefix);
   if (!start){
     /* Process path does not contain old prefix.  
@@ -139,7 +146,7 @@ do_change_version(atrt_config& config, S
        * is ok
        * Alternatives could be - error, or downgrade.
        */
-      g_logger.info("Process already upgraded");
+      g_logger.info("%s for process already complete", operationName);
       return true;
     }
       
@@ -179,6 +186,25 @@ do_change_version(atrt_config& config, S
   return true;
 }
 
+static
+bool
+do_change_version(atrt_config& config, SqlResultSet& command,
+                  AtrtClient& atrtdb){
+  return do_modify_version(config, command, atrtdb,
+                           "Change version",
+                           g_prefix, g_prefix1);
+}
+
+static
+bool
+do_reset_version(atrt_config& config, SqlResultSet& command,
+                 AtrtClient& atrtdb){
+  /* Perform normal 'modify version' in reverse direction */
+  return do_modify_version(config, command, atrtdb,
+                           "Reset version",
+                           g_prefix1, g_prefix);
+}
+
 
 static
 bool
@@ -259,6 +285,11 @@ do_command(atrt_config& config){
       return false;
     break;
 
+  case AtrtClient::ATCT_RESET_VERSION:
+    if (!do_reset_version(config, command, atrtdb))
+      return false;
+    break;
+
   default:
     command.print();
     g_logger.error("got unknown command: %d", cmd);

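do_modify_version() locates the old installation prefix in the process path (the strstr() call above) and moves the process onto the binaries under the new prefix; do_reset_version() simply swaps the roles of g_prefix and g_prefix1. The rewriting step itself is outside this hunk, so the following is only an assumed illustration of the idea, with made-up prefixes:

  #include <string>
  #include <iostream>

  int main()
  {
    std::string path   = "/opt/atrt/6.3/bin/ndbd";  // illustrative process path
    std::string oldPfx = "/opt/atrt/6.3";           // stands in for g_prefix
    std::string newPfx = "/opt/atrt/7.0";           // stands in for g_prefix1

    // Mirror the strstr() check: only rewrite when the old prefix is present.
    std::string::size_type pos = path.find(oldPfx);
    if (pos != std::string::npos)
      path.replace(pos, oldPfx.size(), newPfx);

    std::cout << path << std::endl;  // prints /opt/atrt/7.0/bin/ndbd
    return 0;
  }
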
=== modified file 'storage/ndb/test/run-test/upgrade-tests.txt'
--- a/storage/ndb/test/run-test/upgrade-tests.txt	2010-02-18 23:01:15 +0000
+++ b/storage/ndb/test/run-test/upgrade-tests.txt	2010-04-08 16:50:12 +0000
@@ -38,3 +38,11 @@ cmd: testUpgrade
 args: -n Upgrade_Mixed_MGMD_API_NDBD T2
 max-time: 1200
 
+cmd: testUpgrade
+args: -n Upgrade_SubsetKernel_API_Remain T1
+max-time: 1200
+
+cmd: testUpgrade
+args: -n Upgrade_SubsetKernel_NewMaster_API T1
+max-time: 1200
+

=== modified file 'storage/ndb/test/src/AtrtClient.cpp'
--- a/storage/ndb/test/src/AtrtClient.cpp	2009-08-05 09:54:52 +0000
+++ b/storage/ndb/test/src/AtrtClient.cpp	2010-04-08 16:50:12 +0000
@@ -171,6 +171,16 @@ AtrtClient::resetProc(int process_id){
 
 
 bool
+AtrtClient::resetVersion(int process_id,
+                         const char* process_args)
+{
+  Properties args;
+  args.put("process_id", process_id);
+  args.put("process_args", process_args);
+  return doCommand(ATCT_RESET_VERSION, args);
+}
+
+bool
 AtrtClient::getConnectString(int cluster_id, SqlResultSet& result){
   Properties args;
   args.put("0", cluster_id);

=== modified file 'storage/ndb/test/src/NdbRestarter.cpp'
--- a/storage/ndb/test/src/NdbRestarter.cpp	2010-02-18 23:01:15 +0000
+++ b/storage/ndb/test/src/NdbRestarter.cpp	2010-04-08 16:50:12 +0000
@@ -441,7 +441,15 @@ NdbRestarter::waitNodesState(const int *
 	} else if (ndbNode->start_phase < _startphase)
 	  allInState = false;
       } else {
-	if (ndbNode->node_status !=  _status) 
+        if (_status == (ndb_mgm_node_status)HACKY_NODESTATE_STARTED_OR_NOTSTARTED)
+        {
+          if ((ndbNode->node_status != NDB_MGM_NODE_STATUS_STARTED) &&
+              (ndbNode->node_status != NDB_MGM_NODE_STATUS_NOT_STARTED))
+          {
+            allInState = false;
+          }
+        }
+        else if (ndbNode->node_status !=  _status) 
 	  allInState = false;
       }
     }
@@ -472,6 +480,14 @@ int NdbRestarter::waitNodesNoStart(const
 			  NDB_MGM_NODE_STATUS_NOT_STARTED, _timeout);  
 }
 
+int NdbRestarter::waitNodesStartedOrNoStart(const int * _nodes, int _num_nodes,
+                                            unsigned int _timeout)
+{
+  return waitNodesState(_nodes, _num_nodes,
+                        (ndb_mgm_node_status)HACKY_NODESTATE_STARTED_OR_NOTSTARTED, 
+                        _timeout);  
+}
+
 bool 
 NdbRestarter::isConnected(){
   if (connected == true)
@@ -592,6 +608,23 @@ NdbRestarter::getStatus(){
   return -1;
 }
 
+int NdbRestarter::getNumRunningMgmdNodes(){
+  if (!isConnected())
+    return -1;
+
+  if (getStatus() != 0)
+    return -1;
+
+  int nodeCount = 0;
+  for (unsigned i=0; i < mgmNodes.size(); i++)
+  {
+    /* A bit crude... */
+    if (mgmNodes[i].version > 0)
+      nodeCount++;
+  }
+
+  return nodeCount;
+}
 
 int NdbRestarter::getNumDbNodes(){
   if (!isConnected())


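The HACKY_NODESTATE_STARTED_OR_NOTSTARTED sentinel exists because changeVersion() is a no-op for a node already on the target version: such a node never re-enters NOT_STARTED, so the tests wait for "started or not started" instead. A short sketch of how a test step might use the new primitive (the node id and error handling are illustrative):

  #include <NdbRestarter.hpp>
  #include <NDBT.hpp>

  static int waitAfterUpgrade(NdbRestarter& restarter, int nodeId)
  {
    // After atrt changeVersion(): the node either restarts (passing through
    // NOT_STARTED) or, if already upgraded, simply stays STARTED.
    if (restarter.waitNodesStartedOrNoStart(&nodeId, 1) != 0)
      return NDBT_FAILED;

    // Bring any stopped node back up and wait for the whole cluster.
    if (restarter.startAll() != 0)
      return NDBT_FAILED;
    if (restarter.waitClusterStarted() != 0)
      return NDBT_FAILED;

    return NDBT_OK;
  }
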
Attachment: [text/bzr-bundle] bzr/frazer@mysql.com-20100408165012-3d38cj95cvqe9hkw.bundle