List: Commits
From: gni
Date: April 12 2007 2:34am
Subject: bk commit into 5.1 tree (gni:1.2489) BUG#22240
Below is the list of changes that have just been committed into a local
5.1 repository of root. When root does a push, these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository,
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet@stripped, 2007-04-12 10:34:05+08:00, gni@stripped +3 -0
  BUG#22240 Upgrading from cluster 5.0 to 5.1 does not resize VARCHARS as expected.

  storage/ndb/tools/restore/Restore.cpp@stripped, 2007-04-12 10:34:02+08:00, gni@stripped +22 -5
    This change has three tasks:
      1) Assign the correct array type for the varchar and varbinary types
      2) Add an "upgrade" flag that decides whether the array type of VAR columns is upgraded
      3) Replace the maximum-length define with the real length used by the VAR data
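
    A minimal standalone sketch of the remapping described in 1) and 2), based
    on the diff further below (the free function, its name, and the use of the
    public NdbDictionary::Table interface are only for illustration; the
    committed code does this inside the TableS constructor on NdbTableImpl):

      #include <NdbDictionary.hpp>

      // Remap VAR columns read from a 5.0 backup to the 5.1 variable-size
      // array types: ArrayTypeShortVar stores a 1-byte length prefix,
      // ArrayTypeMediumVar a 2-byte one.
      static void upgrade_var_array_types(NdbDictionary::Table* tab)
      {
        for (int i = 0; i < tab->getNoOfColumns(); i++) {
          NdbDictionary::Column* col = tab->getColumn(i);
          switch (col->getType()) {
          case NdbDictionary::Column::Varchar:
          case NdbDictionary::Column::Varbinary:
            col->setArrayType(NdbDictionary::Column::ArrayTypeShortVar);
            break;
          case NdbDictionary::Column::Longvarchar:
          case NdbDictionary::Column::Longvarbinary:
            col->setArrayType(NdbDictionary::Column::ArrayTypeMediumVar);
            break;
          default:
            break;  // all other column types are left untouched
          }
        }
      }

    Task 3) takes effect in the data path instead: the attribute size is taken
    from the length actually stored with each value (get_var_length() in the
    diff) rather than from the column's maximum length.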

  storage/ndb/tools/restore/Restore.hpp@stripped, 2007-04-12 10:34:02+08:00, gni@stripped +4 -2
    Add an 'upgrade' parameter to the TableS constructor that decides whether to upgrade the array type of VAR data

  storage/ndb/tools/restore/restore_main.cpp@stripped, 2007-04-12 10:34:02+08:00, gni@stripped +10 -0
    Add an --upgrade (-u) option to ndb_restore that decides whether to upgrade the array type of VAR data
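
    Pieced together, the three files wire the new flag through as outlined
    below (a sketch of the flow implied by the diffs, assuming the patched
    Restore.hpp; option parsing and error handling are omitted):

      #include "Restore.hpp"

      static bool ga_upgrade = false;   // set by the new --upgrade (-u) option

      // With -u, the flag is copied into RestoreMetaData, which passes it on
      // to the TableS constructor, which in turn remaps the array types of
      // VAR columns while parsing the table definitions.
      void restore_meta_sketch(const char* path, Uint32 nodeId, Uint32 backupId)
      {
        RestoreMetaData metaData(path, nodeId, backupId);
        if (ga_upgrade)
          metaData.upgrade = true;      // defaults to false, so a restore
                                        // without -u behaves exactly as before
        if (!metaData.readHeader())
          return;                       // as in restore_main.cpp, bail out on error
        // ... table parsing now ends up in TableS(version, tableImpl, upgrade)
      }

    In practice a metadata restore from a 5.0 backup can opt in with something
    like "ndb_restore -n <node_id> -b <backup_id> -m --upgrade <backup_path>"
    (ids and path are placeholders; only --upgrade/-u is new in this change).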

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	gni
# Host:	dev3-221.dev.cn.tlan
# Root:	/home/ngb/mysql/mysql-5.1/mysql-5.1-bug22240

--- 1.47/storage/ndb/tools/restore/Restore.cpp	2007-04-12 10:34:12 +08:00
+++ 1.48/storage/ndb/tools/restore/Restore.cpp	2007-04-12 10:34:12 +08:00
@@ -85,6 +85,7 @@
   
   debug << "RestoreMetaData constructor" << endl;
   setCtlFile(nodeId, bNo, path);
+  upgrade = false;
 }
 
 RestoreMetaData::~RestoreMetaData(){
@@ -417,7 +418,7 @@
   return true;
 }
 
-TableS::TableS(Uint32 version, NdbTableImpl* tableImpl)
+TableS::TableS(Uint32 version, NdbTableImpl* tableImpl, bool upgrade)
   : m_dictTable(tableImpl)
 {
   m_dictTable = tableImpl;
@@ -428,8 +429,19 @@
   backupVersion = version;
   isSysTable = false;
   
-  for (int i = 0; i < tableImpl->getNoOfColumns(); i++)
+  NdbDictionary::Column::Type t;
+  for (int i = 0; i < tableImpl->getNoOfColumns(); i++){
+    t = tableImpl->getColumn(i)->getType();
+    if (upgrade){
+      if (t == NdbDictionary::Column::Varchar ||
+          t == NdbDictionary::Column::Varbinary)
+        tableImpl->getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeShortVar);
+      if (t == NdbDictionary::Column::Longvarchar ||
+          t == NdbDictionary::Column::Longvarbinary)
+        tableImpl->getColumn(i)->setArrayType(NdbDictionary::Column::ArrayTypeMediumVar);
+    }
     createAttr(tableImpl->getColumn(i));
+  }
 }
 
 TableS::~TableS()
@@ -455,7 +467,7 @@
     return false;
 
   debug << "parseTableInfo " << tableImpl->getName() << " done" << endl;
-  TableS * table = new TableS(m_fileHeader.NdbVersion, tableImpl);
+  TableS * table = new TableS(m_fileHeader.NdbVersion, tableImpl, upgrade);
   if(table == NULL) {
     return false;
   }
@@ -647,10 +659,15 @@
       sz *= 4;
     }
     
+    Uint32 vardata_real_len = 0;
+    NdbColumnImpl & col = NdbColumnImpl::getImpl(*attr_desc->m_column);
+    if (!col.get_var_length((void *)&data->Data[0],vardata_real_len))
+	return NULL;
+
     attr_data->null = false;
     attr_data->void_value = &data->Data[0];
-    attr_data->size = sz;
-
+    attr_data->size = vardata_real_len;
+    
     //if (m_currentTable->getTableId() >= 2) { ndbout << "var off=" << ptr-buf_ptr << " attrId=" << attrId << endl; }
 
     /**

--- 1.28/storage/ndb/tools/restore/Restore.hpp	2007-04-12 10:34:12 +08:00
+++ 1.29/storage/ndb/tools/restore/Restore.hpp	2007-04-12 10:34:12 +08:00
@@ -150,7 +150,7 @@
 
 public:
   class NdbDictionary::Table* m_dictTable;
-  TableS (Uint32 version, class NdbTableImpl* dictTable);
+  TableS (Uint32 version, class NdbTableImpl* dictTable, bool upgrade = false);
   ~TableS();
 
   Uint32 getTableId() const { 
@@ -308,7 +308,7 @@
   bool parseTableDescriptor(const Uint32 * data, Uint32 len);
 
   Vector<DictObject> m_objects;
-  
+   
 public:
   RestoreMetaData(const char * path, Uint32 nodeId, Uint32 bNo);
   virtual ~RestoreMetaData();
@@ -325,6 +325,8 @@
   void* getObjPtr(Uint32 i) const { return m_objects[i].m_objPtr; }
   
   Uint32 getStopGCP() const;
+  
+  bool upgrade; // for upgrade ArrayType from 5.0 backup file.
 }; // RestoreMetaData
 
 

--- 1.53/storage/ndb/tools/restore/restore_main.cpp	2007-04-12 10:34:12 +08:00
+++ 1.54/storage/ndb/tools/restore/restore_main.cpp	2007-04-12 10:34:12 +08:00
@@ -33,6 +33,7 @@
 static int ga_nParallelism = 128;
 static int ga_backupId = 0;
 static bool ga_dont_ignore_systab_0 = false;
+static bool ga_upgrade = false;
 static Vector<class BackupConsumer *> g_consumers;
 
 static const char* ga_backupPath = "." DIR_SEPARATOR;
@@ -82,6 +83,10 @@
     "Restore meta data into NDB Cluster using NDBAPI",
     (gptr*) &_restore_meta, (gptr*) &_restore_meta,  0,
     GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+  { "upgrade", 'u',
+    "upgrade array type for var attributes, which can resize VAR data",
+    (gptr*) &ga_upgrade, (gptr*) &ga_upgrade, 0,
+    GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
   { "no-restore-disk-objects", 'd',
     "Dont restore disk objects (tablespace/logfilegroups etc)",
     (gptr*) &_no_restore_disk, (gptr*) &_no_restore_disk,  0,
@@ -463,6 +468,8 @@
   g_options.appfmt(" -n %d", ga_nodeId);
   if (_restore_meta)
     g_options.appfmt(" -m");
+  if (ga_upgrade)
+    g_options.appfmt(" -u");
   if (ga_skip_table_check)
     g_options.appfmt(" -s");
   if (_restore_data)
@@ -480,6 +487,9 @@
    */
   debug << "Start restoring meta data" << endl;
   RestoreMetaData metaData(ga_backupPath, ga_nodeId, ga_backupId);
+  if (ga_upgrade)
+    metaData.upgrade = true;
+
   if (!metaData.readHeader())
   {
     err << "Failed to read " << metaData.getFilename() << endl << endl;