List:Commits« Previous MessageNext Message »
From:Jonas Oreland Date:August 5 2009 12:01pm
Subject:bzr push into mysql-5.1-telco-7.0 branch (jonas:2957 to 2960) Bug#46563
View as plain text  
 2960 Jonas Oreland	2009-08-05
      ndb - bug#46563
      Add code to detect the table-id of SYSTAB_0, as it's impossible to know it hard-coded,
          e.g. after an upgrade
      
        The incorrect table-id causes Sequences not to work in DbUtil,
        which causes BACKUP (and replication) not to work

    modified:
      storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp
      storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp
      storage/ndb/src/kernel/blocks/suma/Suma.cpp
      storage/ndb/test/ndbapi/testUpgrade.cpp
 2959 Jonas Oreland	2009-08-05
      ndb - bug#46494
        Problem was that after an upgrade from 6.3, no default hashmap existed
        Patch fixes this by always (silently) adding a "create default hashmap if not exists" to each
          create table operation (which works perfectly thx to schema transactions)
      
        Test is committed to 6.3, which as a post-upgrade-test will drop/recreate the table
     @ storage/ndb/include/kernel/signaldata/CreateTable.hpp
        fix unique error code for invalid hashmap
     @ storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
        add signal names
     @ storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
        implement
        - CreateHashMapReq::CreateDefault
        - CreateHashMapReq::CreateIfNotExists
     @ storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
        always add a "create default hashmap if not exists" to a create-table
          if it is hashmap-partitioned and has not specified a hashmap
     @ storage/ndb/src/ndbapi/ndberror.c
        new error code

    modified:
      storage/ndb/include/kernel/signaldata/CreateHashMap.hpp
      storage/ndb/include/kernel/signaldata/CreateTable.hpp
      storage/ndb/src/common/debugger/signaldata/SignalNames.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/ndbapi/NdbDictionary.cpp
      storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
      storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
      storage/ndb/src/ndbapi/ndberror.c
 2958 Jonas Oreland	2009-08-05 [merge]
      merge 63 to 70

    modified:
      storage/ndb/test/include/AtrtClient.hpp
      storage/ndb/test/include/NDBT_Test.hpp
      storage/ndb/test/ndbapi/testUpgrade.cpp
      storage/ndb/test/run-test/command.cpp
      storage/ndb/test/run-test/db.cpp
      storage/ndb/test/run-test/setup.cpp
      storage/ndb/test/src/AtrtClient.cpp
 2957 Jonas Oreland	2009-08-04 [merge]
      merge 63 to 70

=== modified file 'storage/ndb/include/kernel/signaldata/CreateHashMap.hpp'
--- a/storage/ndb/include/kernel/signaldata/CreateHashMap.hpp	2008-06-02 13:27:27 +0000
+++ b/storage/ndb/include/kernel/signaldata/CreateHashMap.hpp	2009-08-05 10:48:56 +0000
@@ -21,9 +21,13 @@
 struct CreateHashMapReq
 {
 
-  STATIC_CONST( SignalLength = 5 );
+  STATIC_CONST( SignalLength = 7 );
 
-  enum RequestType {
+  enum RequestType
+  {
+    CreateIfNotExists = 1,
+    CreateDefault     = 2,
+    CreateForReorg    = 4
   };
 
   Uint32 clientRef;

=== modified file 'storage/ndb/include/kernel/signaldata/CreateTable.hpp'
--- a/storage/ndb/include/kernel/signaldata/CreateTable.hpp	2009-05-27 15:21:45 +0000
+++ b/storage/ndb/include/kernel/signaldata/CreateTable.hpp	2009-08-05 10:48:56 +0000
@@ -74,7 +74,8 @@ struct CreateTableRef {
     NotATablespace = 758,
     InvalidTablespaceVersion = 759,
     OutOfStringBuffer = 773,
-    NoLoggingTemporaryTable = 778
+    NoLoggingTemporaryTable = 778,
+    InvalidHashMap = 790
   };
 
   Uint32 senderRef;

=== modified file 'storage/ndb/src/common/debugger/signaldata/SignalNames.cpp'
--- a/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp	2009-05-27 15:21:45 +0000
+++ b/storage/ndb/src/common/debugger/signaldata/SignalNames.cpp	2009-08-05 10:48:56 +0000
@@ -741,6 +741,8 @@ const GsnName SignalNames [] = {
   ,{ GSN_RELEASE_PAGES_REQ, "RELEASE_PAGES_REQ" }
   ,{ GSN_RELEASE_PAGES_CONF, "RELEASE_PAGES_CONF" }
 
-
+  ,{ GSN_CREATE_HASH_MAP_REQ, "CREATE_HASH_MAP_REQ" }
+  ,{ GSN_CREATE_HASH_MAP_REF, "CREATE_HASH_MAP_REF" }
+  ,{ GSN_CREATE_HASH_MAP_CONF, "CREATE_HASH_MAP_CONF" }
 };
 const unsigned short NO_OF_SIGNAL_NAMES = sizeof(SignalNames)/sizeof(GsnName);

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2009-06-16 12:19:58 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2009-08-05 10:48:56 +0000
@@ -4553,10 +4553,10 @@ void Dbdict::handleTabInfoInit(SimplePro
     jam();
     HashMapPtr hm_ptr;
     tabRequire(c_hash_map_hash.find(hm_ptr, tablePtr.p->hashMapObjectId),
-               CreateTableRef::InvalidTablespace);
+               CreateTableRef::InvalidHashMap);
 
     tabRequire(hm_ptr.p->m_object_version ==  tablePtr.p->hashMapVersion,
-               CreateTableRef::InvalidTablespace);
+               CreateTableRef::InvalidHashMap);
   }
   
   {
@@ -26134,6 +26134,13 @@ Dbdict::createHashMap_parse(Signal* sign
      * No info, create "default"
      */
     jam();
+    if (impl_req->requestType & CreateHashMapReq::CreateDefault)
+    {
+      jam();
+      impl_req->buckets = NDB_DEFAULT_HASHMAP_BUCKTETS;
+      impl_req->fragments = 0;
+    }
+
     Uint32 buckets = impl_req->buckets;
     Uint32 fragments = impl_req->fragments;
     if (fragments == 0)
@@ -26142,6 +26149,7 @@ Dbdict::createHashMap_parse(Signal* sign
 
       fragments = get_default_fragments();
     }
+
     BaseString::snprintf(hm.HashMapName, sizeof(hm.HashMapName),
                          "DEFAULT-HASHMAP-%u-%u",
                          buckets,
@@ -26173,6 +26181,9 @@ Dbdict::createHashMap_parse(Signal* sign
                                DictHashMapInfo::MappingSize, true);
     ndbrequire(s == SimpleProperties::Eof);
     w.getPtr(objInfoPtr);
+
+    handle.m_cnt = 1;
+    handle.m_ptr[CreateHashMapReq::INFO] = objInfoPtr;
   }
 
   Uint32 len = Uint32(strlen(hm.HashMapName) + 1);
@@ -26186,12 +26197,52 @@ Dbdict::createHashMap_parse(Signal* sign
     return;
   }
 
-  if(get_object(hm.HashMapName, len, hash) != 0)
+  DictObject * objptr = get_object(hm.HashMapName, len, hash);
+  if(objptr != 0)
   {
     jam();
-    setError(error, CreateTableRef::TableAlreadyExist, __LINE__);
+
+    if (! (impl_req->requestType & CreateHashMapReq::CreateIfNotExists))
+    {
+      jam();
+      setError(error, CreateTableRef::TableAlreadyExist, __LINE__);
+      return;
+    }
+
+    /**
+     * verify object found
+     */
+
+    if (objptr->m_type != DictTabInfo::HashMap)
+    {
+      jam();
+      setError(error, CreateTableRef::TableAlreadyExist, __LINE__);
+      return;
+    }
+
+    if (check_write_obj(objptr->m_id,
+                        trans_ptr.p->m_transId,
+                        SchemaFile::SF_CREATE, error))
+    {
+      jam();
+      return;
+    }
+
+    HashMapPtr hm_ptr;
+    ndbrequire(c_hash_map_hash.find(hm_ptr, objptr->m_id));
+
+    impl_req->objectId = objptr->m_id;
+    impl_req->objectVersion = hm_ptr.p->m_object_version;
     return;
   }
+  else
+  {
+    jam();
+    /**
+     * Clear the IfNotExistsFlag
+     */
+    impl_req->requestType &= ~Uint32(CreateHashMapReq::CreateIfNotExists);
+  }
 
   if (ERROR_INSERTED(6206))
   {
@@ -26402,13 +26453,19 @@ Dbdict::createHashMap_abortParse(Signal*
 {
   D("createHashMap_abortParse" << *op_ptr.p);
 
-  if (op_ptr.p->m_orig_entry_id != RNIL)
+  CreateHashMapRecPtr createHashMapPtr;
+  getOpRec(op_ptr, createHashMapPtr);
+  CreateHashMapImplReq* impl_req = &createHashMapPtr.p->m_request;
+
+  if (impl_req->requestType & CreateHashMapReq::CreateIfNotExists)
   {
     jam();
+    ndbrequire(op_ptr.p->m_orig_entry_id == RNIL);
+  }
 
-    CreateHashMapRecPtr createHashMapPtr;
-    getOpRec(op_ptr, createHashMapPtr);
-    CreateHashMapImplReq* impl_req = &createHashMapPtr.p->m_request;
+  if (op_ptr.p->m_orig_entry_id != RNIL)
+  {
+    jam();
 
     Ptr<HashMapRecord> hm_ptr;
     ndbrequire(c_hash_map_hash.find(hm_ptr, impl_req->objectId));
@@ -26479,6 +26536,13 @@ Dbdict::createHashMap_prepare(Signal* si
   getOpRec(op_ptr, createHashMapPtr);
   CreateHashMapImplReq* impl_req = &createHashMapPtr.p->m_request;
 
+  if (impl_req->requestType & CreateHashMapReq::CreateIfNotExists)
+  {
+    jam();
+    sendTransConf(signal, op_ptr);
+    return;
+  }
+
   Callback cb;
   cb.m_callbackData = op_ptr.p->op_key;
   cb.m_callbackFunction = safe_cast(&Dbdict::createHashMap_writeObjConf);

=== modified file 'storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp'
--- a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp	2009-05-27 15:21:45 +0000
+++ b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.cpp	2009-08-05 11:05:34 +0000
@@ -237,9 +237,24 @@ DbUtil::execSTTOR(Signal* signal) 
     c_transId[1] = 0;
   }
   
-  if(startphase == 6){
-    hardcodedPrepare();
-    connectTc(signal);
+  if(startphase == 6)
+  {
+    jam();
+
+    /**
+     * 1) get systab_0 table-id
+     * 2) run hardcodedPrepare (for sequences)
+     * 3) connectTc()
+     * 4) STTORRY
+     */
+
+    /**
+     * We need to find table-id of SYSTAB_0, as it can be after upgrade
+     *   we don't know what it will be...
+     */
+    get_systab_tableid(signal);
+
+    return;
   }
   
   signal->theData[0] = 0;
@@ -252,6 +267,35 @@ DbUtil::execSTTOR(Signal* signal) 
 }
 
 void
+DbUtil::get_systab_tableid(Signal* signal)
+{
+  static char NAME[] = "sys/def/SYSTAB_0";
+
+  GetTabInfoReq * req = (GetTabInfoReq *)signal->getDataPtrSend();
+  req->senderRef = reference();
+  req->senderData = RNIL;
+  req->schemaTransId = 0;
+  req->requestType = GetTabInfoReq::RequestByName |
+    GetTabInfoReq::LongSignalConf;
+
+  req->tableNameLen = sizeof(NAME);
+
+  /********************************************
+   * Code signal data and send signals to DICT
+   ********************************************/
+
+  Uint32 buf[(sizeof(NAME)+3)/4];
+  ndbrequire(sizeof(buf) >= sizeof(NAME));
+  memcpy(buf, NAME, sizeof(NAME));
+
+  LinearSectionPtr ptr[1];
+  ptr[0].p = buf;
+  ptr[0].sz = sizeof(NAME);
+  sendSignal(DBDICT_REF, GSN_GET_TABINFOREQ, signal,
+             GetTabInfoReq::SignalLength, JBB, ptr,1);
+}
+
+void
 DbUtil::execNDB_STTOR(Signal* signal) 
 {
   (void)signal;  // Don't want compiler warning
@@ -286,6 +330,16 @@ DbUtil::execTCSEIZECONF(Signal* signal){
   ptr.p->connectPtr = signal->theData[1];
   
   c_seizingTransactions.release(ptr);
+
+  if (c_seizingTransactions.isEmpty())
+  {
+    jam();
+    signal->theData[0] = 0;
+    signal->theData[3] = 1;
+    signal->theData[4] = 6;
+    signal->theData[5] = 255;
+    sendSignal(NDBCNTR_REF, GSN_STTORRY, signal, 6, JBB);
+  }
 }
 
 
@@ -970,7 +1024,7 @@ void DbUtil::readPrepareProps(Signal* si
   {
     GetTabInfoReq * req = (GetTabInfoReq *)signal->getDataPtrSend();
     req->senderRef = reference();
-    req->senderData = prepPtr.i;;           
+    req->senderData = prepPtr.i;
     req->schemaTransId = prepPtr.p->schemaTransId;
     if (tableKey == UtilPrepareReq::TableName) {
       jam();
@@ -1031,10 +1085,23 @@ DbUtil::execGET_TABINFO_CONF(Signal* sig
   handle.getSection(dictTabInfoPtr, GetTabInfoConf::DICT_TAB_INFO);
   ndbrequire(dictTabInfoPtr.sz == totalLen);
   
-  PreparePtr prepPtr;
-  c_runningPrepares.getPtr(prepPtr, prepI);
-  prepareOperation(signal, prepPtr, dictTabInfoPtr);
-  releaseSections(handle);
+  if (prepI != RNIL)
+  {
+    jam();
+    PreparePtr prepPtr;
+    c_runningPrepares.getPtr(prepPtr, prepI);
+    prepareOperation(signal, prepPtr, dictTabInfoPtr);
+    releaseSections(handle);
+    return;
+  }
+  else
+  {
+    jam();
+    // get_systab_tableid
+    releaseSections(handle);
+    hardcodedPrepare(signal, conf->tableId);
+    return;
+  }
 }
 
 void
@@ -1512,10 +1579,9 @@ DbUtil::execUTIL_RELEASE_REQ(Signal* sig
  *
  *  A service with a stored incrementable number
  **************************************************************************/
-#define SYSTAB_0 2
-
 void
-DbUtil::hardcodedPrepare() {
+DbUtil::hardcodedPrepare(Signal* signal, Uint32 SYSTAB_0)
+{
   /**
    * Prepare SequenceCurrVal (READ)
    */
@@ -1647,6 +1713,8 @@ DbUtil::hardcodedPrepare() {
     attrInfo[3] = 0; // FinalReadSize
     attrInfo[4] = 0; // SubroutineSize
   }
+
+  connectTc(signal);
 }
 
 void

=== modified file 'storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp'
--- a/storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp	2009-05-27 15:21:45 +0000
+++ b/storage/ndb/src/kernel/blocks/dbutil/DbUtil.hpp	2009-08-05 11:05:34 +0000
@@ -420,7 +420,8 @@ public:
 		   struct LinearSectionPtr sectionsPtr[]);
   void finishTransaction(Signal*, TransactionPtr);
   void releaseTransaction(TransactionPtr transPtr);
-  void hardcodedPrepare();
+  void get_systab_tableid(Signal*);
+  void hardcodedPrepare(Signal*, Uint32 SYSTAB_0);
   void connectTc(Signal* signal);
   void reportSequence(Signal*, const Transaction *);
   void readPrepareProps(Signal* signal, 

=== modified file 'storage/ndb/src/kernel/blocks/suma/Suma.cpp'
--- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp	2009-05-27 15:21:45 +0000
+++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp	2009-08-05 11:05:34 +0000
@@ -1653,6 +1653,11 @@ Suma::execUTIL_SEQUENCE_REF(Signal* sign
 
   SubscriberPtr subbPtr;
   c_subscriberPool.getPtr(subbPtr,subData);
+  if (err == UtilSequenceRef::TCError)
+  {
+    jam();
+    err = ref->TCErrorCode;
+  }
   sendSubIdRef(signal, subbPtr.p->m_senderRef, subbPtr.p->m_senderData, err);
   c_subscriberPool.release(subbPtr);
   DBUG_PRINT("info",("c_subscriberPool  size: %d free: %d",

=== modified file 'storage/ndb/src/ndbapi/NdbDictionary.cpp'
--- a/storage/ndb/src/ndbapi/NdbDictionary.cpp	2009-05-27 15:21:45 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp	2009-08-05 10:48:56 +0000
@@ -2875,6 +2875,7 @@ NdbDictionary::Dictionary::createHashMap
   int ret;
   DO_TRANS(ret,
            m_impl.m_receiver.create_hashmap(NdbHashMapImpl::getImpl(map),
-                                            &NdbDictObjectImpl::getImpl(*dst)));
+                                            &NdbDictObjectImpl::getImpl(*dst),
+                                            0));
   return ret;
 }

=== modified file 'storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2009-05-29 06:23:51 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2009-08-05 10:48:56 +0000
@@ -2860,13 +2860,32 @@ NdbDictInterface::createTable(Ndb & ndb,
 
   DBUG_ENTER("NdbDictInterface::createTable");
 
+  if (impl.m_fragmentType == NdbDictionary::Object::HashMapPartition)
+  {
+    if (impl.m_hash_map_id == RNIL && impl.m_hash_map_version == ~(Uint32)0)
+    {
+      /**
+       * Make sure that hashmap exists (i.e after upgrade or similar)
+       */
+      NdbHashMapImpl hashmap;
+      ret = create_hashmap(hashmap, 0,
+                           CreateHashMapReq::CreateDefault |
+                           CreateHashMapReq::CreateIfNotExists);
+      if (ret)
+      {
+        DBUG_RETURN(ret);
+      }
+    }
+  }
 
   syncInternalName(ndb, impl);
 
   UtilBufferWriter w(m_buffer);
   ret= serializeTableDesc(ndb, impl, w);
   if(ret != 0)
+  {
     DBUG_RETURN(ret);
+  }
 
   DBUG_RETURN(sendCreateTable(impl, w));
 }
@@ -6903,6 +6922,7 @@ NdbDictInterface::drop_file(const NdbFil
   req->senderData = 0;
   req->file_id = file.m_id;
   req->file_version = file.m_version;
+  req->requestInfo = 0;
   req->requestInfo |= m_tx.requestFlags();
   req->transId = m_tx.transId();
   req->transKey = m_tx.transKey();
@@ -7000,6 +7020,7 @@ NdbDictInterface::create_filegroup(const
   req->senderRef = m_reference;
   req->senderData = 0;
   req->objType = fg.FilegroupType;
+  req->requestInfo = 0;
   req->requestInfo |= m_tx.requestFlags();
   req->transId = m_tx.transId();
   req->transKey = m_tx.transKey();
@@ -7063,6 +7084,7 @@ NdbDictInterface::drop_filegroup(const N
   req->senderData = 0;
   req->filegroup_id = group.m_id;
   req->filegroup_version = group.m_version;
+  req->requestInfo = 0;
   req->requestInfo |= m_tx.requestFlags();
   req->transId = m_tx.transId();
   req->transKey = m_tx.transKey();
@@ -7531,7 +7553,8 @@ NdbDictInterface::parseHashMapInfo(NdbHa
 
 int
 NdbDictInterface::create_hashmap(const NdbHashMapImpl& src,
-                                 NdbDictObjectImpl* obj)
+                                 NdbDictObjectImpl* obj,
+                                 Uint32 flags)
 {
   DictHashMapInfo::HashMap hm; hm.init();
   BaseString::snprintf(hm.HashMapName, sizeof(hm.HashMapName), src.getName());
@@ -7566,6 +7589,7 @@ NdbDictInterface::create_hashmap(const N
   CreateHashMapReq* req = CAST_PTR(CreateHashMapReq, tSignal.getDataPtrSend());
   req->clientRef = m_reference;
   req->clientData = 0;
+  req->requestInfo = flags;
   req->requestInfo |= m_tx.requestFlags();
   req->transId = m_tx.transId();
   req->transKey = m_tx.transKey();
@@ -7582,7 +7606,12 @@ NdbDictInterface::create_hashmap(const N
     Send signal without time-out since creating files can take a very long
     time if the file is very big.
   */
-  int ret = dictSignal(&tSignal, ptr, 1,
+  Uint32 seccnt = 1;
+  if (flags & CreateHashMapReq::CreateDefault)
+  {
+    seccnt = 0;
+  }
+  int ret = dictSignal(&tSignal, ptr, seccnt,
 		       0, // master
 		       WAIT_CREATE_INDX_REQ,
 		       -1, 100,

=== modified file 'storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp	2009-05-27 15:21:45 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp	2009-08-05 10:48:56 +0000
@@ -679,7 +679,7 @@ public:
 					 NdbTableImpl* index_table,
 					 const NdbTableImpl* primary_table);
 
-  int create_hashmap(const NdbHashMapImpl&, NdbDictObjectImpl*);
+  int create_hashmap(const NdbHashMapImpl&, NdbDictObjectImpl*, Uint32 flags);
   int get_hashmap(NdbHashMapImpl&, Uint32 id);
   int get_hashmap(NdbHashMapImpl&, const char * name);
 

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2009-05-27 15:21:45 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2009-08-05 10:48:56 +0000
@@ -420,6 +420,7 @@ ErrorBundle ErrorCodes[] = {
   { 769,  DMEC, SE, "Drop undofile not supported, drop logfile group instead" },
   { 770,  DMEC, SE, "Cant drop file, file is used" },
   { 774,  DMEC, SE, "Invalid schema object for drop" },
+  { 790,  HA_WRONG_CREATE_OPTION, SE, "Invalid hashmap" },
   { 241,  HA_ERR_TABLE_DEF_CHANGED, SE, "Invalid schema object version" },
   { 283,  HA_ERR_NO_SUCH_TABLE, SE, "Table is being dropped" },
   { 284,  HA_ERR_TABLE_DEF_CHANGED, SE, "Table not defined in transaction coordinator" },

=== modified file 'storage/ndb/test/include/AtrtClient.hpp'
--- a/storage/ndb/test/include/AtrtClient.hpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/test/include/AtrtClient.hpp	2009-08-05 09:54:52 +0000
@@ -43,6 +43,7 @@ public:
   bool getClusters(SqlResultSet& result);
   bool getMgmds(int cluster_id, SqlResultSet& result);
   bool getNdbds(int cluster_id, SqlResultSet& result);
+  int getOwnProcessId();
 
 private:
   int writeCommand(AtrtCommandType _type,

=== modified file 'storage/ndb/test/include/NDBT_Test.hpp'
--- a/storage/ndb/test/include/NDBT_Test.hpp	2009-07-03 15:08:14 +0000
+++ b/storage/ndb/test/include/NDBT_Test.hpp	2009-08-05 09:57:22 +0000
@@ -493,4 +493,8 @@ C##suitname():NDBT_TestSuite(#suitname){
 // Helper functions for retrieving variables from NDBT_Step
 #define GETNDB(ps) ((NDBT_Step*)ps)->getNdb()
 
+#define POSTUPGRADE(testname) \
+  TESTCASE(testname "--post-upgrade", \
+           "checks being run after upgrade has completed")
+
 #endif

=== modified file 'storage/ndb/test/ndbapi/testUpgrade.cpp'
--- a/storage/ndb/test/ndbapi/testUpgrade.cpp	2009-07-02 14:01:58 +0000
+++ b/storage/ndb/test/ndbapi/testUpgrade.cpp	2009-08-05 11:05:34 +0000
@@ -23,6 +23,7 @@
 #include <NdbRestarter.hpp>
 #include <AtrtClient.hpp>
 #include <Bitmask.hpp>
+#include <NdbBackup.hpp>
 
 static Vector<BaseString> table_list;
 
@@ -452,21 +453,97 @@ int runUpgrade_Traffic(NDBT_Context* ctx
   return res;
 }
 
+int
+startPostUpgradeChecks(NDBT_Context* ctx, NDBT_Step* step)
+{
+  /**
+   * This will restart *self* in new version
+   */
+
+  /**
+   * mysql-getopt works so that passing "-n X -n Y" is ok
+   *   and is interpreted as "-n Y"
+   *
+   * so we restart ourselves with testcase-name and "--post-upgrade" appended
+   * e.g if testcase is "testUpgrade -n X"
+   *     this will restart it as "testUpgrade -n X -n X--post-upgrade"
+   */
+  BaseString tc;
+  tc.assfmt("-n %s--post-upgrade", ctx->getCase()->getName());
+
+  ndbout << "About to restart self with extra arg: " << tc.c_str() << endl;
+
+  AtrtClient atrt;
+  int process_id = atrt.getOwnProcessId();
+  if (process_id == -1)
+  {
+    g_err << "Failed to find own process id" << endl;
+    return NDBT_FAILED;
+  }
+
+  if (!atrt.changeVersion(process_id, tc.c_str()))
+    return NDBT_FAILED;
+
+  // Will not be reached...
+
+  return NDBT_OK;
+}
+
+int
+runPostUpgradeChecks(NDBT_Context* ctx, NDBT_Step* step)
+{
+  /**
+   * Table will be dropped/recreated
+   *   automatically by NDBT...
+   *   so when we enter here, this is already tested
+   */
+
+  NdbBackup backup(GETNDB(step)->getNodeId()+1);
+
+  ndbout << "Starting backup..." << flush;
+  if (backup.start() != 0)
+  {
+    ndbout << "Failed" << endl;
+    return NDBT_FAILED;
+  }
+  ndbout << "done" << endl;
+
+  return NDBT_OK;
+}
+
 NDBT_TESTSUITE(testUpgrade);
 TESTCASE("Upgrade_NR1",
 	 "Test that one node at a time can be upgraded"){
   INITIALIZER(runCheckStarted);
   STEP(runUpgrade_NR1);
+  VERIFIER(startPostUpgradeChecks);
+}
+POSTUPGRADE("Upgrade_NR1")
+{
+  INITIALIZER(runCheckStarted);
+  INITIALIZER(runPostUpgradeChecks);
 }
 TESTCASE("Upgrade_NR2",
 	 "Test that one node in each nodegroup can be upgradde simultaneously"){
   INITIALIZER(runCheckStarted);
   STEP(runUpgrade_NR2);
+  VERIFIER(startPostUpgradeChecks);
+}
+POSTUPGRADE("Upgrade_NR2")
+{
+  INITIALIZER(runCheckStarted);
+  INITIALIZER(runPostUpgradeChecks);
 }
 TESTCASE("Upgrade_NR3",
 	 "Test that one node in each nodegroup can be upgradde simultaneously"){
   INITIALIZER(runCheckStarted);
   STEP(runUpgrade_NR3);
+  VERIFIER(startPostUpgradeChecks);
+}
+POSTUPGRADE("Upgrade_NR3")
+{
+  INITIALIZER(runCheckStarted);
+  INITIALIZER(runPostUpgradeChecks);
 }
 TESTCASE("Upgrade_FS",
 	 "Test that one node in each nodegroup can be upgrade simultaneously")
@@ -476,6 +553,12 @@ TESTCASE("Upgrade_FS",
   INITIALIZER(runCreateAllTables);
   INITIALIZER(runLoadAll);
   STEP(runUpgrade_Traffic);
+  VERIFIER(startPostUpgradeChecks);
+}
+POSTUPGRADE("Upgrade_FS")
+{
+  INITIALIZER(runCheckStarted);
+  INITIALIZER(runPostUpgradeChecks);
 }
 TESTCASE("Upgrade_Traffic",
 	 "Test upgrade with traffic, all tables and restart --initial")
@@ -484,6 +567,12 @@ TESTCASE("Upgrade_Traffic",
   INITIALIZER(runCreateAllTables);
   STEP(runUpgrade_Traffic);
   STEP(runBasic);
+  VERIFIER(startPostUpgradeChecks);
+}
+POSTUPGRADE("Upgrade_Traffic")
+{
+  INITIALIZER(runCheckStarted);
+  INITIALIZER(runPostUpgradeChecks);
 }
 TESTCASE("Upgrade_Traffic_FS",
 	 "Test upgrade with traffic, all tables and restart using FS")
@@ -493,6 +582,12 @@ TESTCASE("Upgrade_Traffic_FS",
   INITIALIZER(runCreateAllTables);
   STEP(runUpgrade_Traffic);
   STEP(runBasic);
+  VERIFIER(startPostUpgradeChecks);
+}
+POSTUPGRADE("Upgrade_Traffic_FS")
+{
+  INITIALIZER(runCheckStarted);
+  INITIALIZER(runPostUpgradeChecks);
 }
 TESTCASE("Upgrade_Traffic_one",
 	 "Test upgrade with traffic, *one* table and restart --initial")
@@ -501,6 +596,12 @@ TESTCASE("Upgrade_Traffic_one",
   INITIALIZER(runCreateOneTable);
   STEP(runUpgrade_Traffic);
   STEP(runBasic);
+  VERIFIER(startPostUpgradeChecks);
+}
+POSTUPGRADE("Upgrade_Traffic_one")
+{
+  INITIALIZER(runCheckStarted);
+  INITIALIZER(runPostUpgradeChecks);
 }
 TESTCASE("Upgrade_Traffic_FS_one",
 	 "Test upgrade with traffic, all tables and restart using FS")
@@ -510,6 +611,12 @@ TESTCASE("Upgrade_Traffic_FS_one",
   INITIALIZER(runCreateOneTable);
   STEP(runUpgrade_Traffic);
   STEP(runBasic);
+  VERIFIER(startPostUpgradeChecks);
+}
+POSTUPGRADE("Upgrade_Traffic_FS_one")
+{
+  INITIALIZER(runCheckStarted);
+  INITIALIZER(runPostUpgradeChecks);
 }
 NDBT_TESTSUITE_END(testUpgrade);
 

=== modified file 'storage/ndb/test/run-test/command.cpp'
--- a/storage/ndb/test/run-test/command.cpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/test/run-test/command.cpp	2009-08-05 09:54:52 +0000
@@ -98,7 +98,7 @@ do_change_version(atrt_config& config, S
 
   const char* new_prefix= g_prefix1 ? g_prefix1 : g_prefix;
   const char* old_prefix= g_prefix;
-  proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", new_prefix);
+  proc.m_proc.m_env.appfmt(" MYSQL_BASE_DIR=%s", new_prefix);
   const char *start= strstr(proc.m_proc.m_path.c_str(), old_prefix);
   if (!start){
     g_logger.critical("Could not find '%s' in '%s'",

=== modified file 'storage/ndb/test/run-test/db.cpp'
--- a/storage/ndb/test/run-test/db.cpp	2009-05-27 15:21:45 +0000
+++ b/storage/ndb/test/run-test/db.cpp	2009-08-05 09:57:22 +0000
@@ -425,11 +425,11 @@ populate_db(atrt_config& config, atrt_pr
 	return false;
       }
 
-      if (populate_options(&mysqld->m_mysql, stmtopt, &option_id, i,
+      if (populate_options(&mysqld->m_mysql, stmtopt, &option_id, id,
 			   &proc->m_options.m_loaded) == false)
 	return false;
       
-      if (populate_options(&mysqld->m_mysql, stmtopt, &option_id, i,
+      if (populate_options(&mysqld->m_mysql, stmtopt, &option_id, id,
 			   &proc->m_cluster->m_options.m_loaded) == false)
 	return false;
       

=== modified file 'storage/ndb/test/run-test/setup.cpp'
--- a/storage/ndb/test/run-test/setup.cpp	2008-12-12 17:34:22 +0000
+++ b/storage/ndb/test/run-test/setup.cpp	2009-08-05 09:57:22 +0000
@@ -207,13 +207,13 @@ load_process(atrt_config& config, atrt_c
   atrt_host * host_ptr = find(hostname, config.m_hosts);
   atrt_process *proc_ptr = new atrt_process;
 
+  const size_t proc_no = config.m_processes.size();
   config.m_processes.push_back(proc_ptr);
   host_ptr->m_processes.push_back(proc_ptr);
   cluster.m_processes.push_back(proc_ptr);
   
   atrt_process& proc = *proc_ptr;
   
-  const size_t proc_no = config.m_processes.size();
   proc.m_index = idx;
   proc.m_type = type;
   proc.m_host = host_ptr;
@@ -232,6 +232,7 @@ load_process(atrt_config& config, atrt_c
   proc.m_proc.m_ulimit = "c:unlimited";
   proc.m_proc.m_env.assfmt("MYSQL_BASE_DIR=%s", g_prefix);
   proc.m_proc.m_env.appfmt(" MYSQL_HOME=%s", g_basedir);
+  proc.m_proc.m_env.appfmt(" ATRT_PID=%u", (unsigned)proc_no);
   proc.m_proc.m_shutdown_options = "";
 
   int argc = 1;

=== modified file 'storage/ndb/test/src/AtrtClient.cpp'
--- a/storage/ndb/test/src/AtrtClient.cpp	2009-05-26 18:53:34 +0000
+++ b/storage/ndb/test/src/AtrtClient.cpp	2009-08-05 09:54:52 +0000
@@ -19,6 +19,7 @@
 #include <AtrtClient.hpp>
 #include <NDBT_Output.hpp>
 #include <NdbSleep.h>
+#include <NdbEnv.h>
 
 AtrtClient::AtrtClient(const char* _group_suffix)
   : DbUtil("atrt", _group_suffix)
@@ -210,7 +211,16 @@ AtrtClient::getNdbds(int cluster_id, Sql
                   result);
 }
 
-
-
-
-
+int
+AtrtClient::getOwnProcessId()
+{
+  /**
+   * Put in env for simplicity
+   */
+  char buf[100];
+  if (NdbEnv_GetEnv("ATRT_PID", buf, sizeof(buf)))
+  {
+    return atoi(buf);
+  }
+  return -1;
+}


Attachment: [text/bzr-bundle] bzr/jonas@mysql.com-20090805110534-ovymemsb130twcrr.bundle
Thread
bzr push into mysql-5.1-telco-7.0 branch (jonas:2957 to 2960) Bug#46563Jonas Oreland5 Aug