List:Commits« Previous MessageNext Message »
From:Ole John Aske Date:May 13 2011 8:43am
Subject:bzr commit into mysql-5.1-telco-7.0-spj-scan-vs-scan branch
(ole.john.aske:3495)
View as plain text  
#At file:///net/fimafeng09/export/home/tmp/oleja/mysql/mysql-5.1-telco-7.0-spj-scan-scan/ based on revid:ole.john.aske@stripped

 3495 Ole John Aske	2011-05-13 [merge]
      Merge telco-7.0 -> SPJ

    modified:
      CMakeLists.txt
      mysql-test/suite/ndb/r/ndb_add_partition.result
      mysql-test/suite/ndb/r/ndb_rename.result
      mysql-test/suite/ndb/t/ndb_add_partition.test
      mysql-test/suite/ndb/t/ndb_rename.test
      mysql-test/suite/rpl_ndb/t/rpl_ndb_gap_event.test
      storage/ndb/src/common/portlib/NdbTCP.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/mgmsrv/MgmtSrvr.cpp
      storage/ndb/src/ndbapi/DictCache.cpp
      storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp
      storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp
=== modified file 'CMakeLists.txt'
--- a/CMakeLists.txt	2011-03-22 13:16:46 +0000
+++ b/CMakeLists.txt	2011-05-12 14:13:43 +0000
@@ -326,3 +326,20 @@ IF(WITH_EMBEDDED_SERVER) 
   ADD_SUBDIRECTORY(libmysqld/examples)
 ENDIF(WITH_EMBEDDED_SERVER)
 ADD_SUBDIRECTORY(mysql-test/lib/My/SafeProcess)
+
+# Dump cmake's output and error log to help diagnosing
+# platform checks
+MACRO(DUMP_FILE filename)
+  IF(EXISTS ${filename})
+    FILE(READ ${filename} content)
+    MESSAGE(STATUS "=vvvv= Dumping ${filename} ")
+    MESSAGE(STATUS "${content}")
+    MESSAGE(STATUS "=^^^^=")
+  ELSE()
+    MESSAGE(STATUS "'${filename}' does not exist")
+  ENDIF()
+ENDMACRO()
+ 
+DUMP_FILE("${CMAKE_BINARY_DIR}/CMakeFiles/CMakeError.log")
+DUMP_FILE("${CMAKE_BINARY_DIR}/CMakeFiles/CMakeOutput.log")
+

=== modified file 'mysql-test/suite/ndb/r/ndb_add_partition.result'
--- a/mysql-test/suite/ndb/r/ndb_add_partition.result	2009-05-09 15:49:27 +0000
+++ b/mysql-test/suite/ndb/r/ndb_add_partition.result	2011-05-12 11:31:21 +0000
@@ -144,8 +144,20 @@ a	b	c
 50	50	50
 alter online table t1 reorganize partition;
 alter online table t2 reorganize partition;
+partitions added to t1
+t1_added
+0
+partitions added to t2
+t2_added
+0
 alter online table t1 add partition partitions 1;
 alter online table t2 add partition partitions 4;
+partitions added to t1
+t1_added
+1
+partitions added to t2
+t2_added
+4
 alter online table t1 reorganize partition;
 ERROR HY000: REORGANIZE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs
 select count(*) from t1;
@@ -246,6 +258,12 @@ a	b	c
 50	50	50
 alter online table t1 add partition partitions 2;
 alter online table t2 add partition partitions 1;
+partitions added to t1
+t1_added
+3
+partitions added to t2
+t2_added
+5
 select count(*) from t1;
 count(*)
 100

=== modified file 'mysql-test/suite/ndb/r/ndb_rename.result'
--- a/mysql-test/suite/ndb/r/ndb_rename.result	2007-06-27 12:28:02 +0000
+++ b/mysql-test/suite/ndb/r/ndb_rename.result	2011-05-12 09:01:21 +0000
@@ -21,4 +21,11 @@ SELECT * FROM ndbtest.t2 WHERE attr1 = 1
 pk1	attr1	attr2	attr3
 1	1	1	one
 drop table ndbtest.t2;
+create table t1 (
+pk1 INT NOT NULL PRIMARY KEY,
+b blob
+) engine = ndbcluster;
+alter table t1 rename ndbtest.t1;
+alter table ndbtest.t1 rename test.t1;
+drop table test.t1;
 drop database ndbtest;

=== modified file 'mysql-test/suite/ndb/t/ndb_add_partition.test'
--- a/mysql-test/suite/ndb/t/ndb_add_partition.test	2009-02-12 15:21:46 +0000
+++ b/mysql-test/suite/ndb/t/ndb_add_partition.test	2011-05-12 11:31:21 +0000
@@ -33,6 +33,10 @@ STORAGE DISK
 TABLESPACE ts1
 partition by key(a);
 
+let $t1_part_count_start = query_get_value(select count(*) as Value from information_schema.partitions where table_schema = 'test' and table_name = 't1', Value, 1);
+
+let $t2_part_count_start = query_get_value(select count(*) as Value from information_schema.partitions where table_schema = 'test' and table_name = 't2', Value, 1);
+
 INSERT INTO t1 VALUES
 (1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
 (6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10),
@@ -91,9 +95,31 @@ select * from t2 where b = 50;
 alter online table t1 reorganize partition;
 alter online table t2 reorganize partition;
 
+let $t1_part_count_now = query_get_value(select count(*) as Value from information_schema.partitions where table_schema = 'test' and table_name = 't1', Value, 1);
+
+let $t2_part_count_now = query_get_value(select count(*) as Value from information_schema.partitions where table_schema = 'test' and table_name = 't2', Value, 1);
+
+--disable_query_log
+--echo partitions added to t1
+eval select $t1_part_count_now - $t1_part_count_start as t1_added;
+--echo partitions added to t2
+eval select $t2_part_count_now - $t2_part_count_start as t2_added;
+--enable_query_log
+
 alter online table t1 add partition partitions 1;
 alter online table t2 add partition partitions 4;
 
+let $t1_part_count_now = query_get_value(select count(*) as Value from information_schema.partitions where table_schema = 'test' and table_name = 't1', Value, 1);
+
+let $t2_part_count_now = query_get_value(select count(*) as Value from information_schema.partitions where table_schema = 'test' and table_name = 't2', Value, 1);
+
+--disable_query_log
+--echo partitions added to t1
+eval select $t1_part_count_now - $t1_part_count_start as t1_added;
+--echo partitions added to t2
+eval select $t2_part_count_now - $t2_part_count_start as t2_added;
+--enable_query_log
+
 # reorganize partition not support if not default partitioning
 # and after a add partition it's no longer default
 --error ER_REORG_NO_PARAM_ERROR
@@ -134,6 +160,17 @@ select * from t2 where b = 50;
 alter online table t1 add partition partitions 2;
 alter online table t2 add partition partitions 1;
 
+let $t1_part_count_now = query_get_value(select count(*) as Value from information_schema.partitions where table_schema = 'test' and table_name = 't1', Value, 1);
+
+let $t2_part_count_now = query_get_value(select count(*) as Value from information_schema.partitions where table_schema = 'test' and table_name = 't2', Value, 1);
+
+--disable_query_log
+--echo partitions added to t1
+eval select $t1_part_count_now - $t1_part_count_start as t1_added;
+--echo partitions added to t2
+eval select $t2_part_count_now - $t2_part_count_start as t2_added;
+--enable_query_log
+
 select count(*) from t1;
 select count(*) from t2;
 --sorted_result

=== modified file 'mysql-test/suite/ndb/t/ndb_rename.test'
--- a/mysql-test/suite/ndb/t/ndb_rename.test	2007-11-29 10:29:35 +0000
+++ b/mysql-test/suite/ndb/t/ndb_rename.test	2011-05-12 09:01:21 +0000
@@ -30,6 +30,17 @@ alter table t2 rename ndbtest.t2;
 SELECT * FROM ndbtest.t2 WHERE attr1 = 1;
 
 drop table ndbtest.t2;
+
+create table t1 (
+  pk1 INT NOT NULL PRIMARY KEY,
+  b blob
+) engine = ndbcluster;
+
+alter table t1 rename ndbtest.t1;
+alter table ndbtest.t1 rename test.t1;
+
+drop table test.t1;
+
 drop database ndbtest;
 
 # End of 4.1 tests

=== modified file 'mysql-test/suite/rpl_ndb/t/rpl_ndb_gap_event.test'
--- a/mysql-test/suite/rpl_ndb/t/rpl_ndb_gap_event.test	2011-04-08 11:06:53 +0000
+++ b/mysql-test/suite/rpl_ndb/t/rpl_ndb_gap_event.test	2011-05-12 09:52:16 +0000
@@ -22,6 +22,13 @@ enable_query_log;
 --connection slave
 select count(*) from t1;
 
+# Add suppression for the LOST_EVENTS error message
+# which will occur on slave when master is restarted while
+# slave is running
+--disable_query_log
+call mtr.add_suppression("Slave.*: The incident LOST_EVENTS occured on the master");
+--enable_query_log
+
 --connection master
 
 --echo Restarting mysqld

=== modified file 'storage/ndb/src/common/portlib/NdbTCP.cpp'
--- a/storage/ndb/src/common/portlib/NdbTCP.cpp	2011-05-11 19:12:56 +0000
+++ b/storage/ndb/src/common/portlib/NdbTCP.cpp	2011-05-12 12:53:51 +0000
@@ -19,6 +19,13 @@
 #include <ndb_global.h>
 #include <NdbTCP.h>
 
+
+/* On some operating systems (e.g. Solaris) INADDR_NONE is not defined */
+#ifndef INADDR_NONE
+#define INADDR_NONE -1                          /* Error value from inet_addr */
+#endif
+
+
 extern "C"
 int
 Ndb_getInAddr(struct in_addr * dst, const char *address)

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-04-12 08:57:18 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-05-13 08:43:41 +0000
@@ -8028,9 +8028,23 @@ Dbdict::alterTable_parse(Signal* signal,
 
     // the new temporary table record seized from pool
     newTablePtr = parseRecord.tablePtr;
+    alterTabPtr.p->m_newTable_realObjectId = newTablePtr.p->tableId;
     newTablePtr.p->tableId = impl_req->tableId; // set correct table id...(not the temporary)
   }
 
+
+  {
+    /**
+     * Mark SchemaObject as in-use so that it's won't be found by other op
+     *   choose a state that will be automatically cleaned incase we crash
+     */
+    SchemaFile::TableEntry * objEntry =
+      getTableEntry(alterTabPtr.p->m_newTable_realObjectId);
+    objEntry->m_tableType = DictTabInfo::SchemaTransaction;
+    objEntry->m_tableState = SchemaFile::SF_STARTED;
+    objEntry->m_transId = trans_ptr.p->m_transId + 1;
+  }
+
   // set the new version now
   impl_req->newTableVersion =
     newTablePtr.p->tableVersion =
@@ -9469,6 +9483,15 @@ Dbdict::alterTable_fromCommitComplete(Si
 	       JBB, ptr, 1);
   }
 
+  {
+    // Remark object as free
+    SchemaFile::TableEntry * objEntry =
+      getTableEntry(alterTabPtr.p->m_newTable_realObjectId);
+    objEntry->m_tableType = DictTabInfo::SchemaTransaction;
+    objEntry->m_tableState = SchemaFile::SF_UNUSED;
+    objEntry->m_transId = 0;
+  }
+
   releaseTableObject(alterTabPtr.p->m_newTablePtr.i, false);
   sendTransConf(signal, op_ptr);
 }
@@ -9551,6 +9574,16 @@ Dbdict::alterTable_abortParse(Signal* si
   if (!newTablePtr.isNull()) {
     jam();
     // release the temporary work table
+
+    {
+      // Remark object as free
+      SchemaFile::TableEntry * objEntry =
+        getTableEntry(alterTabPtr.p->m_newTable_realObjectId);
+      objEntry->m_tableType = DictTabInfo::SchemaTransaction;
+      objEntry->m_tableState = SchemaFile::SF_UNUSED;
+      objEntry->m_transId = 0;
+    }
+
     releaseTableObject(newTablePtr.i, false);
     newTablePtr.setNull();
   }

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp	2011-02-16 14:53:53 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp	2011-05-12 09:01:21 +0000
@@ -2365,6 +2365,7 @@ private:
     // current and new temporary work table
     TableRecordPtr m_tablePtr;
     TableRecordPtr m_newTablePtr;
+    Uint32 m_newTable_realObjectId;
 
     // before image
     RopeHandle m_oldTableName;

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-05-04 11:45:33 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-05-13 08:38:01 +0000
@@ -1150,7 +1150,7 @@ Dbspj::batchComplete(Signal* signal, Ptr
 /**
  * Locate next TreeNode(s) to retrieve more rows from.
  *
- *   Calcule set of 'm_active_nodes' we will receive from in NEXTREQ.
+ *   Calculate set of the 'm_active_nodes' we will receive from in NEXTREQ.
  *   Add these TreeNodes to the cursor list to be iterated.
  */
 void
@@ -1168,7 +1168,7 @@ Dbspj::prepareNextBatch(Signal* signal, 
   if (requestPtr.p->m_bits & Request::RT_REPEAT_SCAN_RESULT)
   {
     /**
-     * If REPEAT_SCAN_RESULT we handle byshy scans by return more *new* rows
+     * If REPEAT_SCAN_RESULT we handle bushy scans by return more *new* rows
      * from only one of the active child scans. If there are multiple 
      * bushy scans not being able to return their current result set in 
      * a single batch, result sets from the other child scans are repeated
@@ -1239,7 +1239,7 @@ Dbspj::prepareNextBatch(Signal* signal, 
   {
     /**
      * If not REPEAT_SCAN_RESULT multiple active TreeNodes may return their 
-     * remaining result simultaneously. In case of byshy-scans, these
+     * remaining result simultaneously. In case of bushy-scans, these
      * concurrent result streams are cross joins of each other
      * in SQL terms. In order to produce the cross joined result, it is
      * the responsibility of the API-client to buffer these streams and
@@ -1415,7 +1415,13 @@ Dbspj::releaseScanBuffers(Ptr<Request> r
         releaseNodeRows(requestPtr, treeNodePtr);
       }
       
-      if (treeNodePtr.p->m_state == TreeNode::TN_ACTIVE)
+      /**
+       * Cleanup ACTIVE nodes fetching more rows in a NEXTREQ,
+       * or nodes being in 'm_active_nodes' as they will 'repeat'.
+       * (and then become active)
+       */
+      if (treeNodePtr.p->m_state == TreeNode::TN_ACTIVE ||
+          requestPtr.p->m_active_nodes.get(treeNodePtr.p->m_node_no))
       {
         jam();
         cleanupChildBranch(requestPtr, treeNodePtr);
@@ -1423,9 +1429,11 @@ Dbspj::releaseScanBuffers(Ptr<Request> r
     }
 
     /**
-      * Build Bitmask of all nodes having TN_ACTIVE childs
+      * Collect ancestors of all nodes which are, or will
+      * become active in NEXTREQ (possibly repeated)
       */
-    if (treeNodePtr.p->m_state == TreeNode::TN_ACTIVE)
+    if (treeNodePtr.p->m_state == TreeNode::TN_ACTIVE ||
+        requestPtr.p->m_active_nodes.get(treeNodePtr.p->m_node_no))
     {
       ancestors_of_active.bitOR(treeNodePtr.p->m_ancestors);
     }

=== modified file 'storage/ndb/src/mgmsrv/MgmtSrvr.cpp'
--- a/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-04-15 08:09:04 +0000
+++ b/storage/ndb/src/mgmsrv/MgmtSrvr.cpp	2011-05-12 09:26:38 +0000
@@ -1345,7 +1345,7 @@ int MgmtSrvr::sendSTOP_REQ(const Vector<
     ndb_nodes_to_stop.copyto(NdbNodeBitmask::Size, stopReq->nodes);
     StopReq::setStopNodes(stopReq->requestInfo, 1);
   }
-  else
+  else if (ndb_nodes_to_stop.count() == 1)
   {
     Uint32 nodeId = ndb_nodes_to_stop.find(0);
     if (okToSendTo(nodeId, true) == 0)

=== modified file 'storage/ndb/src/ndbapi/DictCache.cpp'
--- a/storage/ndb/src/ndbapi/DictCache.cpp	2011-02-01 23:27:25 +0000
+++ b/storage/ndb/src/ndbapi/DictCache.cpp	2011-05-12 09:01:21 +0000
@@ -457,7 +457,6 @@ GlobalDictCache::alter_table_rep(const c
 				 bool altered)
 {
   DBUG_ENTER("GlobalDictCache::alter_table_rep");
-  assert(! is_ndb_blob_table(name));
   const Uint32 len = (Uint32)strlen(name);
   Vector<TableVersion> * vers = 
     m_tableHash.getData(name, len);
@@ -467,6 +466,7 @@ GlobalDictCache::alter_table_rep(const c
     DBUG_VOID_RETURN;
   }
 
+  assert(! is_ndb_blob_table(name));
   const Uint32 sz = vers->size();
   if(sz == 0)
   {

=== modified file 'storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2011-02-23 12:15:04 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp	2011-05-12 09:01:21 +0000
@@ -938,6 +938,72 @@ NdbTableImpl::getName() const
   return m_externalName.c_str();
 }
 
+int
+NdbTableImpl::getDbName(char buf[], size_t len) const
+{
+  if (len == 0)
+    return -1;
+
+  // db/schema/table
+  const char *ptr = m_internalName.c_str();
+
+  size_t pos = 0;
+  while (ptr[pos] && ptr[pos] != table_name_separator)
+  {
+    buf[pos] = ptr[pos];
+    pos++;
+
+    if (pos == len)
+      return -1;
+  }
+  buf[pos] = 0;
+  return 0;
+}
+
+int
+NdbTableImpl::getSchemaName(char buf[], size_t len) const
+{
+  if (len == 0)
+    return -1;
+
+  // db/schema/table
+  const char *ptr = m_internalName.c_str();
+
+  // skip over "db"
+  while (*ptr && *ptr != table_name_separator)
+    ptr++;
+
+  buf[0] = 0;
+  if (*ptr == table_name_separator)
+  {
+    ptr++;
+    size_t pos = 0;
+    while (ptr[pos] && ptr[pos] != table_name_separator)
+    {
+      buf[pos] = ptr[pos];
+      pos++;
+
+      if (pos == len)
+        return -1;
+    }
+    buf[pos] = 0;
+  }
+
+  return 0;
+}
+
+void
+NdbTableImpl::setDbSchema(const char * db, const char * schema)
+{
+  m_internalName.assfmt("%s%c%s%c%s",
+                        db,
+                        table_name_separator,
+                        schema,
+                        table_name_separator,
+                        m_externalName.c_str());
+  updateMysqlName();
+}
+
 void
 NdbTableImpl::computeAggregates()
 {
@@ -3090,7 +3156,8 @@ int NdbDictionaryImpl::alterTableGlobal(
 {
   DBUG_ENTER("NdbDictionaryImpl::alterTableGlobal");
   // Alter the table
-  int ret = m_receiver.alterTable(m_ndb, old_impl, impl);
+  Uint32 changeMask = 0;
+  int ret = m_receiver.alterTable(m_ndb, old_impl, impl, changeMask);
 #if ndb_bug41905
   old_impl.m_status = NdbDictionary::Object::Invalid;
 #endif
@@ -3107,18 +3174,93 @@ int NdbDictionaryImpl::alterTableGlobal(
     m_globalHash->unlock();
     if (ret != 0)
       m_error.code = 723;
+
+    if (ret == 0 && AlterTableReq::getNameFlag(changeMask) != 0)
+    {
+      char db0[MAX_TAB_NAME_SIZE];
+      char db1[MAX_TAB_NAME_SIZE];
+      if (old_impl.getDbName(db0, sizeof(db0)) != 0)
+      {
+        m_error.code = 705;
+        DBUG_RETURN(-1);
+      }
+      if (impl.getDbName(db1, sizeof(db1)) != 0)
+      {
+        m_error.code = 705;
+        DBUG_RETURN(-1);
+      }
+
+      bool db_change = strcmp(db0, db1) != 0;
+      if (old_impl.getSchemaName(db0, sizeof(db0)) != 0)
+      {
+        m_error.code = 705;
+        DBUG_RETURN(-1);
+      }
+      if (impl.getSchemaName(db1, sizeof(db1)) != 0)
+      {
+        m_error.code = 705;
+        DBUG_RETURN(-1);
+      }
+
+      bool schema_change = strcmp(db0, db1) != 0;
+      if (db_change || schema_change)
+      {
+        if (renameBlobTables(old_impl, impl) != 0)
+        {
+          DBUG_RETURN(-1);
+        }
+      }
+    }
     DBUG_RETURN(ret);
   }
   ERR_RETURN(getNdbError(), ret);
 }
 
 int
+NdbDictionaryImpl::renameBlobTables(const NdbTableImpl & old_tab,
+                                    const NdbTableImpl & new_tab)
+{
+  if (old_tab.m_noOfBlobs == 0)
+    return 0;
+
+  char db[MAX_TAB_NAME_SIZE];
+  char schema[MAX_TAB_NAME_SIZE];
+  new_tab.getDbName(db, sizeof(db));
+  new_tab.getSchemaName(schema, sizeof(schema));
+
+  for (unsigned i = 0; i < old_tab.m_columns.size(); i++)
+  {
+    NdbColumnImpl & c = *old_tab.m_columns[i];
+    if (! c.getBlobType() || c.getPartSize() == 0)
+      continue;
+    NdbTableImpl* _bt = c.m_blobTable;
+    if (_bt == NULL)
+    {
+      continue; // "force" mode on
+    }
+
+    NdbDictionary::Table& bt = * _bt->m_facade;
+    NdbDictionary::Table new_bt(bt);
+    new_bt.m_impl.setDbSchema(db, schema);
+
+    Uint32 changeMask = 0;
+    int ret = m_receiver.alterTable(m_ndb, bt.m_impl, new_bt.m_impl,changeMask);
+    if (ret != 0)
+    {
+      return ret;
+    }
+    assert(AlterTableReq::getNameFlag(changeMask) != 0);
+  }
+  return 0;
+}
+
+int
 NdbDictInterface::alterTable(Ndb & ndb,
                              const NdbTableImpl &old_impl,
-                             NdbTableImpl &impl)
+                             NdbTableImpl &impl,
+                             Uint32 & change_mask)
 {
   int ret;
-  Uint32 change_mask;
 
   DBUG_ENTER("NdbDictInterface::alterTable");
 
@@ -3168,8 +3310,9 @@ NdbDictInterface::compChangeMask(const N
                       impl.m_internalName.c_str()));
   if(impl.m_internalName != old_impl.m_internalName)
   {
-    if (unlikely(is_ndb_blob_table(old_impl.m_externalName.c_str()) ||
-                 is_ndb_blob_table(impl.m_externalName.c_str())))
+    bool old_blob = is_ndb_blob_table(old_impl.m_externalName.c_str());
+    bool new_blob = is_ndb_blob_table(impl.m_externalName.c_str());
+    if (unlikely(old_blob != new_blob))
     {
       /* Attempt to alter to/from Blob part table name */
       DBUG_PRINT("info", ("Attempt to alter to/from Blob part table name"));
@@ -3260,7 +3403,9 @@ NdbDictInterface::compChangeMask(const N
          col->m_autoIncrement ||                   // ToDo: allow this?
 	 (col->getBlobType() && col->getPartSize())
          )
+      {
         goto invalid_alter_table;
+      }
     }
     AlterTableReq::setAddAttrFlag(change_mask, true);
   }

=== modified file 'storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp	2011-02-23 12:15:04 +0000
+++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp	2011-05-12 09:01:21 +0000
@@ -185,14 +185,18 @@ public:
   int validate(NdbError& error);
 
   Uint32 m_primaryTableId;
-  BaseString m_internalName;
-  BaseString m_externalName;
-  BaseString m_mysqlName;
+  BaseString m_internalName; // db/schema/table
+  BaseString m_externalName; //           table
+  BaseString m_mysqlName;    //        db/table
   UtilBuffer m_frm; 
   Vector<Uint32> m_fd;
   Vector<Int32> m_range;
   NdbDictionary::Object::FragmentType m_fragmentType;
 
+  int getDbName(char * buf, size_t len) const;
+  int getSchemaName(char * buf, size_t len) const;
+  void setDbSchema(const char * db, const char * schema);
+
   /**
    * 
    */
@@ -617,7 +621,7 @@ public:
   int createTable(class Ndb & ndb, NdbTableImpl &);
   bool supportedAlterTable(const NdbTableImpl &,
 			   NdbTableImpl &);
-  int alterTable(class Ndb & ndb, const NdbTableImpl &, NdbTableImpl &);
+  int alterTable(class Ndb & ndb, const NdbTableImpl &, NdbTableImpl&, Uint32&);
   void syncInternalName(Ndb & ndb, NdbTableImpl &impl);
   int compChangeMask(const NdbTableImpl &old_impl,
                      const NdbTableImpl &impl,
@@ -828,6 +832,7 @@ public:
   int dropTable(const char * name);
   int dropTable(NdbTableImpl &);
   int dropBlobTables(NdbTableImpl &);
+  int renameBlobTables(const NdbTableImpl &old_impl, const NdbTableImpl &impl);
   int invalidateObject(NdbTableImpl &);
   int removeCachedObject(NdbTableImpl &);
 

No bundle (reason: revision is a merge).
Thread
bzr commit into mysql-5.1-telco-7.0-spj-scan-vs-scan branch(ole.john.aske:3495) Ole John Aske13 May