List: Commits — « Previous Message | Next Message »
From: lzhou  Date: October 31 2006, 10:32am
Subject:bk commit into 5.1 tree (lzhou:1.2317)
View as plain text  
Below is the list of changes that have just been committed into a local
5.1 repository of root. When root does a push these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet@stripped, 2006-10-31 10:32:38+00:00, lzhou@stripped +12 -0
  Merge dev3-138.dev.cn.tlan:/home/zhl/mysql/mysql-5.0/bug21052
  into  dev3-138.dev.cn.tlan:/home/zhl/mysql/mysql-5.1/bug21052
  MERGE: 1.1810.1698.135

  mysql-test/r/ctype_utf8.result@stripped, 2006-10-31 10:32:33+00:00, lzhou@stripped +0 -0
    Auto merged
    MERGE: 1.85.1.12

  mysql-test/t/ctype_utf8.test@stripped, 2006-10-31 10:32:33+00:00, lzhou@stripped +0 -0
    Auto merged
    MERGE: 1.84.1.5

  sql/sql_base.cc@stripped, 2006-10-31 10:32:33+00:00, lzhou@stripped +0 -0
    Auto merged
    MERGE: 1.235.1.117

  sql/sql_lex.h@stripped, 2006-10-31 10:32:33+00:00, lzhou@stripped +0 -0
    Auto merged
    MERGE: 1.175.1.52

  sql/sql_view.cc@stripped, 2006-10-31 10:32:33+00:00, lzhou@stripped +0 -0
    Auto merged
    MERGE: 1.78.1.17

  storage/ndb/include/ndbapi/ndberror.h@stripped, 2006-10-31 10:32:33+00:00, lzhou@stripped +0 -0
    Auto merged
    MERGE: 1.8.1.2

  storage/ndb/include/ndbapi/ndberror.h@stripped, 2006-10-31 10:32:33+00:00, lzhou@stripped +0 -0
    Merge rename: ndb/include/ndbapi/ndberror.h -> storage/ndb/include/ndbapi/ndberror.h

  storage/ndb/src/mgmclient/main.cpp@stripped, 2006-10-31 10:32:33+00:00, lzhou@stripped +0 -0
    Auto merged
    MERGE: 1.20.5.2

  storage/ndb/src/mgmclient/main.cpp@stripped, 2006-10-31 10:32:33+00:00, lzhou@stripped +0 -0
    Merge rename: ndb/src/mgmclient/main.cpp -> storage/ndb/src/mgmclient/main.cpp

  storage/ndb/src/mgmsrv/Services.cpp@stripped, 2006-10-31 10:32:33+00:00, lzhou@stripped +0 -0
    Auto merged
    MERGE: 1.45.31.2

  storage/ndb/src/mgmsrv/Services.cpp@stripped, 2006-10-31 10:32:33+00:00, lzhou@stripped +0 -0
    Merge rename: ndb/src/mgmsrv/Services.cpp -> storage/ndb/src/mgmsrv/Services.cpp

  storage/ndb/src/ndbapi/NdbScanOperation.cpp@stripped, 2006-10-31 10:32:33+00:00, lzhou@stripped +0 -0
    Auto merged
    MERGE: 1.66.15.2

  storage/ndb/src/ndbapi/NdbScanOperation.cpp@stripped, 2006-10-31 10:32:33+00:00, lzhou@stripped +0 -0
    Merge rename: ndb/src/ndbapi/NdbScanOperation.cpp -> storage/ndb/src/ndbapi/NdbScanOperation.cpp

  storage/ndb/src/ndbapi/ndberror.c@stripped, 2006-10-31 10:32:33+00:00, lzhou@stripped +0 -0
    Auto merged
    MERGE: 1.30.32.2

  storage/ndb/src/ndbapi/ndberror.c@stripped, 2006-10-31 10:32:33+00:00, lzhou@stripped +0 -0
    Merge rename: ndb/src/ndbapi/ndberror.c -> storage/ndb/src/ndbapi/ndberror.c

  storage/ndb/tools/ndb_condig.cpp@stripped, 2006-10-31 10:32:34+00:00, lzhou@stripped +0 -0
    Auto merged
    MERGE: 1.4.7.7

  storage/ndb/tools/ndb_condig.cpp@stripped, 2006-10-31 10:32:33+00:00, lzhou@stripped +0 -0
    Merge rename: ndb/tools/ndb_config.cpp -> storage/ndb/tools/ndb_condig.cpp

  storage/ndb/tools/restore/restore_main.cpp@stripped, 2006-10-31 10:32:34+00:00, lzhou@stripped +0 -0
    Auto merged
    MERGE: 1.29.8.3

  storage/ndb/tools/restore/restore_main.cpp@stripped, 2006-10-31 10:32:33+00:00, lzhou@stripped +0 -0
    Merge rename: ndb/tools/restore/restore_main.cpp -> storage/ndb/tools/restore/restore_main.cpp

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	lzhou
# Host:	dev3-138.dev.cn.tlan
# Root:	/home/zhl/mysql/mysql-5.1/bug21052/RESYNC

--- 1.29.8.2/ndb/tools/restore/restore_main.cpp	2006-10-31 10:32:44 +00:00
+++ 1.50/storage/ndb/tools/restore/restore_main.cpp	2006-10-31 10:32:44 +00:00
@@ -19,6 +19,7 @@
 #include <Vector.hpp>
 #include <ndb_limits.h>
 #include <NdbTCP.h>
+#include <NdbMem.h>
 #include <NdbOut.hpp>
 #include <NDBT_ReturnCodes.h>
 
@@ -37,11 +38,17 @@
 
 static const char* ga_backupPath = "." DIR_SEPARATOR;
 
+static const char *opt_nodegroup_map_str= 0;
+static unsigned opt_nodegroup_map_len= 0;
+static NODE_GROUP_MAP opt_nodegroup_map[MAX_NODE_GROUP_MAPS];
+#define OPT_NDB_NODEGROUP_MAP 'z'
+
 NDB_STD_OPTS_VARS;
 
 /**
  * print and restore flags
  */
+static bool ga_restore_epoch = false;
 static bool ga_restore = false;
 static bool ga_print = false;
 static int _print = 0;
@@ -50,6 +57,7 @@
 static int _print_log = 0;
 static int _restore_data = 0;
 static int _restore_meta = 0;
+static int _no_restore_disk = 0;
 BaseString g_options("ndb_restore");
 
 const char *load_default_groups[]= { "mysql_cluster","ndb_restore",0 };
@@ -74,6 +82,16 @@
     "Restore meta data into NDB Cluster using NDBAPI",
     (gptr*) &_restore_meta, (gptr*) &_restore_meta,  0,
     GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+  { "no-restore-disk-objects", 'd',
+    "Dont restore disk objects (tablespace/logfilegroups etc)",
+    (gptr*) &_no_restore_disk, (gptr*) &_no_restore_disk,  0,
+    GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+  { "restore_epoch", 'e', 
+    "Restore epoch info into the status table. Convenient on a MySQL Cluster "
+    "replication slave, for starting replication. The row in "
+    NDB_REP_DB "." NDB_APPLY_TABLE " with id 0 will be updated/inserted.", 
+    (gptr*) &ga_restore_epoch, (gptr*) &ga_restore_epoch,  0,
+    GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
   { "parallelism", 'p',
     "No of parallel transactions during restore of data."
     "(parallelism can be 1 to 1024)", 
@@ -91,13 +109,131 @@
   { "print_log", 259, "Print log to stdout",
     (gptr*) &_print_log, (gptr*) &_print_log,  0,
     GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+  { "backup_path", 260, "Path to backup files",
+    (gptr*) &ga_backupPath, (gptr*) &ga_backupPath, 0,
+    GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
   { "dont_ignore_systab_0", 'f',
     "Experimental. Do not ignore system table during restore.", 
     (gptr*) &ga_dont_ignore_systab_0, (gptr*) &ga_dont_ignore_systab_0, 0,
     GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+  { "ndb-nodegroup-map", OPT_NDB_NODEGROUP_MAP,
+    "Nodegroup map for ndbcluster. Syntax: list of (source_ng, dest_ng)",
+    (gptr*) &opt_nodegroup_map_str,
+    (gptr*) &opt_nodegroup_map_str,
+    0,
+    GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
   { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
 };
 
+
+static char* analyse_one_map(char *map_str, uint16 *source, uint16 *dest)
+{
+  char *end_ptr;
+  int number;
+  DBUG_ENTER("analyse_one_map");
+  /*
+    Search for pattern ( source_ng , dest_ng )
+  */
+
+  while (isspace(*map_str)) map_str++;
+
+  if (*map_str != '(')
+  {
+    DBUG_RETURN(NULL);
+  }
+  map_str++;
+
+  while (isspace(*map_str)) map_str++;
+
+  number= strtol(map_str, &end_ptr, 10);
+  if (!end_ptr || number < 0 || number >= MAX_NODE_GROUP_MAPS)
+  {
+    DBUG_RETURN(NULL);
+  }
+  *source= (uint16)number;
+  map_str= end_ptr;
+
+  while (isspace(*map_str)) map_str++;
+
+  if (*map_str != ',')
+  {
+    DBUG_RETURN(NULL);
+  }
+  map_str++;
+
+  number= strtol(map_str, &end_ptr, 10);
+  if (!end_ptr || number < 0 || number >= UNDEF_NODEGROUP)
+  {
+    DBUG_RETURN(NULL);
+  }
+  *dest= (uint16)number;
+  map_str= end_ptr;
+
+  if (*map_str != ')')
+  {
+    DBUG_RETURN(NULL);
+  }
+  map_str++;
+
+  while (isspace(*map_str)) map_str++;
+  DBUG_RETURN(map_str);
+}
+
+static bool insert_ng_map(NODE_GROUP_MAP *ng_map,
+                          uint16 source_ng, uint16 dest_ng)
+{
+  uint index= source_ng;
+  uint ng_index= ng_map[index].no_maps;
+
+  opt_nodegroup_map_len++;
+  if (ng_index >= MAX_MAPS_PER_NODE_GROUP)
+    return true;
+  ng_map[index].no_maps++;
+  ng_map[index].map_array[ng_index]= dest_ng;
+  return false;
+}
+
+static void init_nodegroup_map()
+{
+  uint i,j;
+  NODE_GROUP_MAP *ng_map = &opt_nodegroup_map[0];
+
+  for (i = 0; i < MAX_NODE_GROUP_MAPS; i++)
+  {
+    ng_map[i].no_maps= 0;
+    for (j= 0; j < MAX_MAPS_PER_NODE_GROUP; j++)
+      ng_map[i].map_array[j]= UNDEF_NODEGROUP;
+  }
+}
+
+static bool analyse_nodegroup_map(const char *ng_map_str,
+                                  NODE_GROUP_MAP *ng_map)
+{
+  uint16 source_ng, dest_ng;
+  char *local_str= (char*)ng_map_str;
+  DBUG_ENTER("analyse_nodegroup_map");
+
+  do
+  {
+    if (!local_str)
+    {
+      DBUG_RETURN(TRUE);
+    }
+    local_str= analyse_one_map(local_str, &source_ng, &dest_ng);
+    if (!local_str)
+    {
+      DBUG_RETURN(TRUE);
+    }
+    if (insert_ng_map(ng_map, source_ng, dest_ng))
+    {
+      DBUG_RETURN(TRUE);
+    }
+    if (!(*local_str))
+      break;
+  } while (TRUE);
+  DBUG_RETURN(FALSE);
+}
+
 static void short_usage_sub(void)
 {
   printf("Usage: %s [OPTIONS] [<path to backup files>]\n", my_progname);
@@ -126,6 +262,7 @@
       printf("Error in --nodeid,-n setting, see --help\n");
       exit(NDBT_ProgramExit(NDBT_WRONGARGS));
     }
+    info << "Nodeid = " << ga_nodeId << endl;
     break;
   case 'b':
     if (ga_backupId == 0)
@@ -133,6 +270,20 @@
       printf("Error in --backupid,-b setting, see --help\n");
       exit(NDBT_ProgramExit(NDBT_WRONGARGS));
     }
+    info << "Backup Id = " << ga_backupId << endl;
+    break;
+  case OPT_NDB_NODEGROUP_MAP:
+    /*
+      This option is used to set a map from nodegroup in original cluster
+      to nodegroup in new cluster.
+    */
+    opt_nodegroup_map_len= 0;
+    info << "Analyse node group map" << endl;
+    if (analyse_nodegroup_map(opt_nodegroup_map_str,
+                              &opt_nodegroup_map[0]))
+    {
+      exit(NDBT_ProgramExit(NDBT_WRONGARGS));
+    }
     break;
   }
   return 0;
@@ -140,17 +291,55 @@
 bool
 readArguments(int *pargc, char*** pargv) 
 {
+  Uint32 i;
+  debug << "Load defaults" << endl;
+  const char *load_default_groups[]= { "mysql_cluster","ndb_restore",0 };
+
+  init_nodegroup_map();
   load_defaults("my",load_default_groups,pargc,pargv);
+  debug << "handle_options" << endl;
   if (handle_options(pargc, pargv, my_long_options, get_one_option))
   {
     exit(NDBT_ProgramExit(NDBT_WRONGARGS));
   }
+  for (i = 0; i < MAX_NODE_GROUP_MAPS; i++)
+    opt_nodegroup_map[i].curr_index = 0;
 
-  BackupPrinter* printer = new BackupPrinter();
+#if 0
+  /*
+    Test code written t{
+o verify nodegroup mapping
+  */
+  printf("Handled options successfully\n");
+  Uint16 map_ng[16];
+  Uint32 j;
+  for (j = 0; j < 4; j++)
+  {
+  for (i = 0; i < 4 ; i++)
+    map_ng[i] = i;
+  map_nodegroups(&map_ng[0], (Uint32)4);
+  for (i = 0; i < 4 ; i++)
+    printf("NG %u mapped to %u \n", i, map_ng[i]);
+  }
+  for (j = 0; j < 4; j++)
+  {
+  for (i = 0; i < 8 ; i++)
+    map_ng[i] = i >> 1;
+  map_nodegroups(&map_ng[0], (Uint32)8);
+  for (i = 0; i < 8 ; i++)
+    printf("NG %u mapped to %u \n", i >> 1, map_ng[i]);
+  }
+  exit(NDBT_ProgramExit(NDBT_WRONGARGS));
+#endif
+
+  BackupPrinter* printer = new BackupPrinter(opt_nodegroup_map,
+                                             opt_nodegroup_map_len);
   if (printer == NULL)
     return false;
 
-  BackupRestore* restore = new BackupRestore(ga_nParallelism);
+  BackupRestore* restore = new BackupRestore(opt_nodegroup_map,
+                                             opt_nodegroup_map_len,
+                                             ga_nParallelism);
   if (restore == NULL) 
   {
     delete printer;
@@ -191,6 +380,16 @@
     restore->m_restore_meta = true;
   }
 
+  if (_no_restore_disk)
+  {
+    restore->m_no_restore_disk = true;
+  }
+  
+  if (ga_restore_epoch)
+  {
+    restore->m_restore_epoch = true;
+  }
+
   {
     BackupConsumer * c = printer;
     g_consumers.push_back(c);
@@ -204,7 +403,7 @@
   {
     ga_backupPath = *pargv[0];
   }
-
+  info << "backup path = " << ga_backupPath << endl;
   return true;
 }
 
@@ -216,14 +415,17 @@
   g_consumers.clear();
 }
 
-static bool
-checkSysTable(const char *tableName) 
+static inline bool
+checkSysTable(const TableS* table)
+{
+  return ga_dont_ignore_systab_0 || ! table->getSysTable();
+}
+
+static inline bool
+checkSysTable(const RestoreMetaData& metaData, uint i)
 {
-  return ga_dont_ignore_systab_0 ||
-    (strcmp(tableName, "SYSTAB_0") != 0 &&
-     strcmp(tableName, "NDB$EVENTS_0") != 0 &&
-     strcmp(tableName, "sys/def/SYSTAB_0") != 0 &&
-     strcmp(tableName, "sys/def/NDB$EVENTS_0") != 0);
+  assert(i < metaData.getNoOfTables());
+  return checkSysTable(metaData[i]);
 }
 
 static void
@@ -248,6 +450,7 @@
 {
   NDB_INIT(argv[0]);
 
+  debug << "Start readArguments" << endl;
   if (!readArguments(&argc, &argv))
   {
     exitHandler(NDBT_FAILED);
@@ -259,6 +462,10 @@
     g_options.appfmt(" -m");
   if (_restore_data)
     g_options.appfmt(" -r");
+  if (ga_restore_epoch)
+    g_options.appfmt(" -e");
+  if (_no_restore_disk)
+    g_options.appfmt(" -d");
   g_options.appfmt(" -p %d", ga_nParallelism);
 
   g_connect_string = opt_connect_str;
@@ -266,10 +473,11 @@
   /**
    * we must always load meta data, even if we will only print it to stdout
    */
+  debug << "Start restoring meta data" << endl;
   RestoreMetaData metaData(ga_backupPath, ga_nodeId, ga_backupId);
   if (!metaData.readHeader())
   {
-    ndbout << "Failed to read " << metaData.getFilename() << endl << endl;
+    err << "Failed to read " << metaData.getFilename() << endl << endl;
     exitHandler(NDBT_FAILED);
   }
 
@@ -277,66 +485,107 @@
   const Uint32 version = tmp.NdbVersion;
   
   char buf[NDB_VERSION_STRING_BUF_SZ];
-  ndbout << "Ndb version in backup files: " 
+  info << "Ndb version in backup files: " 
 	 <<  getVersionString(version, 0, buf, sizeof(buf)) << endl;
-  
+
   /**
    * check wheater we can restore the backup (right version).
    */
+  // in these versions there was an error in how replica info was
+  // stored on disk
+  if (version >= MAKE_VERSION(5,1,3) && version <= MAKE_VERSION(5,1,9))
+  {
+    err << "Restore program incompatible with backup versions between "
+        << getVersionString(MAKE_VERSION(5,1,3), 0, buf, sizeof(buf))
+        << " and "
+        << getVersionString(MAKE_VERSION(5,1,9), 0, buf, sizeof(buf))
+        << endl;
+    exitHandler(NDBT_FAILED);
+  }
+
+  if (version > NDB_VERSION)
+  {
+    err << "Restore program older than backup version. Not supported. "
+        << "Use new restore program" << endl;
+    exitHandler(NDBT_FAILED);
+  }
+
+  debug << "Load content" << endl;
   int res  = metaData.loadContent();
   
   if (res == 0)
   {
-    ndbout_c("Restore: Failed to load content");
+    err << "Restore: Failed to load content" << endl;
     exitHandler(NDBT_FAILED);
   }
-  
+  debug << "Get no of Tables" << endl; 
   if (metaData.getNoOfTables() == 0) 
   {
-    ndbout_c("Restore: The backup contains no tables ");
+    err << "The backup contains no tables" << endl;
     exitHandler(NDBT_FAILED);
   }
-
+  debug << "Validate Footer" << endl;
 
   if (!metaData.validateFooter()) 
   {
-    ndbout_c("Restore: Failed to validate footer.");
+    err << "Restore: Failed to validate footer." << endl;
     exitHandler(NDBT_FAILED);
   }
-
+  debug << "Init Backup objects" << endl;
   Uint32 i;
   for(i= 0; i < g_consumers.size(); i++)
   {
     if (!g_consumers[i]->init())
     {
       clearConsumers();
+      err << "Failed to initialize consumers" << endl;
       exitHandler(NDBT_FAILED);
     }
 
   }
-
+  debug << "Restore objects (tablespaces, ..)" << endl;
+  for(i = 0; i<metaData.getNoOfObjects(); i++)
+  {
+    for(Uint32 j= 0; j < g_consumers.size(); j++)
+      if (!g_consumers[j]->object(metaData.getObjType(i),
+				  metaData.getObjPtr(i)))
+      {
+	err << "Restore: Failed to restore table: ";
+        err << metaData[i]->getTableName() << " ... Exiting " << endl;
+	exitHandler(NDBT_FAILED);
+      } 
+  }
+  debug << "Restoring tables" << endl; 
   for(i = 0; i<metaData.getNoOfTables(); i++)
   {
-    if (checkSysTable(metaData[i]->getTableName()))
+    if (checkSysTable(metaData, i))
     {
       for(Uint32 j= 0; j < g_consumers.size(); j++)
 	if (!g_consumers[j]->table(* metaData[i]))
 	{
-	  ndbout_c("Restore: Failed to restore table: %s. "
-		   "Exiting...", 
-		   metaData[i]->getTableName());
+	  err << "Restore: Failed to restore table: ";
+          err << metaData[i]->getTableName() << " ... Exiting " << endl;
 	  exitHandler(NDBT_FAILED);
 	} 
+    } else {
+      for(Uint32 j= 0; j < g_consumers.size(); j++)
+        if (!g_consumers[j]->createSystable(* metaData[i]))
+        {
+          err << "Restore: Failed to restore system table: ";
+          err << metaData[i]->getTableName() << " ... Exiting " << endl;
+          exitHandler(NDBT_FAILED);
+        }
+
     }
   }
-  
+  debug << "Close tables" << endl; 
   for(i= 0; i < g_consumers.size(); i++)
     if (!g_consumers[i]->endOfTables())
     {
-      ndbout_c("Restore: Failed while closing tables");
+      err << "Restore: Failed while closing tables" << endl;
       exitHandler(NDBT_FAILED);
     } 
-  
+  debug << "Iterate over data" << endl; 
   if (ga_restore || ga_print) 
   {
     if(_restore_data || _print_data)
@@ -346,30 +595,30 @@
       // Read data file header
       if (!dataIter.readHeader())
       {
-	ndbout << "Failed to read header of data file. Exiting..." ;
+	err << "Failed to read header of data file. Exiting..." << endl;
 	exitHandler(NDBT_FAILED);
       }
       
-      
-      while (dataIter.readFragmentHeader(res= 0))
+      Uint32 fragmentId; 
+      while (dataIter.readFragmentHeader(res= 0, &fragmentId))
       {
 	const TupleS* tuple;
 	while ((tuple = dataIter.getNextTuple(res= 1)) != 0)
 	{
-	  if (checkSysTable(tuple->getTable()->getTableName()))
+	  if (checkSysTable(tuple->getTable()))
 	    for(Uint32 i= 0; i < g_consumers.size(); i++) 
-	      g_consumers[i]->tuple(* tuple);
+	      g_consumers[i]->tuple(* tuple, fragmentId);
 	} // while (tuple != NULL);
 	
 	if (res < 0)
 	{
-	  ndbout_c("Restore: An error occured while restoring data. "
-		   "Exiting...");
+	  err <<" Restore: An error occured while restoring data. Exiting...";
+          err << endl;
 	  exitHandler(NDBT_FAILED);
 	}
 	if (!dataIter.validateFragmentFooter()) {
-	  ndbout_c("Restore: Error validating fragment footer. "
-		   "Exiting...");
+	  err << "Restore: Error validating fragment footer. ";
+          err << "Exiting..." << endl;
 	  exitHandler(NDBT_FAILED);
 	}
       } // while (dataIter.readFragmentHeader(res))
@@ -377,7 +626,7 @@
       if (res < 0)
       {
 	err << "Restore: An error occured while restoring data. Exiting... "
-	    << "res=" << res << endl;
+	    << "res= " << res << endl;
 	exitHandler(NDBT_FAILED);
       }
       
@@ -400,7 +649,7 @@
       const LogEntry * logEntry = 0;
       while ((logEntry = logIter.getNextLogEntry(res= 0)) != 0)
       {
-	if (checkSysTable(logEntry->m_table->getTableName()))
+	if (checkSysTable(logEntry->m_table))
 	  for(Uint32 i= 0; i < g_consumers.size(); i++)
 	    g_consumers[i]->logEntry(* logEntry);
       }
@@ -419,20 +668,30 @@
     {
       for(i = 0; i<metaData.getNoOfTables(); i++)
       {
-	if (checkSysTable(metaData[i]->getTableName()))
+	if (checkSysTable(metaData, i))
 	{
 	  for(Uint32 j= 0; j < g_consumers.size(); j++)
 	    if (!g_consumers[j]->finalize_table(* metaData[i]))
 	    {
-	      ndbout_c("Restore: Failed to finalize restore table: %s. "
-		       "Exiting...", 
-		       metaData[i]->getTableName());
+	      err << "Restore: Failed to finalize restore table: %s. ";
+              err << "Exiting... " << metaData[i]->getTableName() << endl;
 	      exitHandler(NDBT_FAILED);
 	    } 
 	}
       }
     }
   }
+
+  if (ga_restore_epoch)
+  {
+    for (i= 0; i < g_consumers.size(); i++)
+      if (!g_consumers[i]->update_apply_status(metaData))
+      {
+	err << "Restore: Failed to restore epoch" << endl;
+	return -1;
+      }
+  }
+
   for(Uint32 i= 0; i < g_consumers.size(); i++) 
   {
     if (g_consumers[i]->has_temp_error())
@@ -440,10 +699,9 @@
       clearConsumers();
       ndbout_c("\nRestore successful, but encountered temporary error, "
                "please look at configuration.");
-      return NDBT_ProgramExit(NDBT_TEMPORARY);
-    }
+    }               
   }
-
+  
   clearConsumers();
   return NDBT_ProgramExit(NDBT_OK);
 } // main

--- 1.20.5.1/ndb/src/mgmclient/main.cpp	2006-10-31 10:32:44 +00:00
+++ 1.27/storage/ndb/src/mgmclient/main.cpp	2006-10-31 10:32:44 +00:00
@@ -19,7 +19,7 @@
 
 // copied from mysql.cc to get readline
 extern "C" {
-#if defined( __WIN__) || defined(OS2)
+#if defined( __WIN__)
 #include <conio.h>
 #elif !defined(__NETWARE__)
 #include <readline/readline.h>

--- 1.45.31.1/ndb/src/mgmsrv/Services.cpp	2006-10-31 10:32:44 +00:00
+++ 1.76/storage/ndb/src/mgmsrv/Services.cpp	2006-10-31 10:32:44 +00:00
@@ -123,8 +123,6 @@
 
 const
 ParserRow<MgmApiSession> commands[] = {
-  MGM_CMD("get statport", &MgmApiSession::getStatPort, ""),
-  
   MGM_CMD("get config", &MgmApiSession::getConfig, ""),
     MGM_ARG("version", Int, Mandatory, "Configuration version number"),
     MGM_ARG("node", Int, Optional, "Node ID"),
@@ -272,6 +270,13 @@
     MGM_ARG("length", Int, Mandatory, "Length"),
     MGM_ARG("data", String, Mandatory, "Data"),
 
+  MGM_CMD("list sessions", &MgmApiSession::listSessions, ""),
+
+  MGM_CMD("get session id", &MgmApiSession::getSessionId, ""),
+
+  MGM_CMD("get session", &MgmApiSession::getSession, ""),
+    MGM_ARG("id", Int, Mandatory, "SessionID"),
+
   MGM_END()
 };
 
@@ -284,7 +289,7 @@
   NDB_TICKS tick;
 };
 
-MgmApiSession::MgmApiSession(class MgmtSrvr & mgm, NDB_SOCKET_TYPE sock)
+MgmApiSession::MgmApiSession(class MgmtSrvr & mgm, NDB_SOCKET_TYPE sock, Uint64 session_id)
   : SocketServer::Session(sock), m_mgmsrv(mgm)
 {
   DBUG_ENTER("MgmApiSession::MgmApiSession");
@@ -293,6 +298,9 @@
   m_parser = new Parser_t(commands, *m_input, true, true, true);
   m_allocated_resources= new MgmtSrvr::Allocated_resources(m_mgmsrv);
   m_stopSelf= 0;
+  m_ctx= NULL;
+  m_session_id= session_id;
+  m_mutex= NdbMutex_Create();
   DBUG_VOID_RETURN;
 }
 
@@ -316,6 +324,7 @@
     g_RestartServer= true;
   if(m_stopSelf)
     g_StopServer= true;
+  NdbMutex_Destroy(m_mutex);
   DBUG_VOID_RETURN;
 }
 
@@ -325,11 +334,19 @@
   DBUG_ENTER("MgmApiSession::runSession");
 
   Parser_t::Context ctx;
-  while(!m_stop) {
+  ctx.m_mutex= m_mutex;
+  m_ctx= &ctx;
+  bool stop= false;
+  while(!stop) {
+    NdbMutex_Lock(m_mutex);
+
     m_parser->run(ctx, *this);
 
     if(ctx.m_currentToken == 0)
+    {
+      NdbMutex_Unlock(m_mutex);
       break;
+    }
 
     switch(ctx.m_status) {
     case Parser_t::UnknownCommand:
@@ -350,13 +367,19 @@
     default:
       break;
     }
-  }
+
+    stop= m_stop;
+    NdbMutex_Unlock(m_mutex);
+  };
+
+  NdbMutex_Lock(m_mutex);
+  m_ctx= NULL;
   if(m_socket != NDB_INVALID_SOCKET)
   {
     NDB_CLOSE_SOCKET(m_socket);
     m_socket= NDB_INVALID_SOCKET;
   }
-
+  NdbMutex_Unlock(m_mutex);
   DBUG_VOID_RETURN;
 }
 
@@ -504,7 +527,8 @@
     NDB_TICKS tick= 0;
     /* only report error on second attempt as not to clog the cluster log */
     while (!m_mgmsrv.alloc_node_id(&tmp, (enum ndb_mgm_node_type)nodetype, 
-                                   (struct sockaddr*)&addr, &addrlen, error_code, error_string,
+                                   (struct sockaddr*)&addr, &addrlen,
+                                   error_code, error_string,
                                    tick == 0 ? 0 : log_event))
     {
       /* NDB_MGM_ALLOCID_CONFIG_MISMATCH is a non retriable error */
@@ -668,15 +692,6 @@
 }
 
 void
-MgmApiSession::getStatPort(Parser_t::Context &, 
-			   const class Properties &) {
-
-  m_output->println("get statport reply");
-  m_output->println("tcpport: %d", 0);
-  m_output->println("");
-}
-
-void
 MgmApiSession::insertError(Parser<MgmApiSession>::Context &,
 			   Properties const &args) {
   Uint32 node = 0, error = 0;
@@ -1605,11 +1620,6 @@
     result = -1;
     goto done;
   }
-  
-  m_mgmsrv.m_event_listner.add_listener(le);
-  
-  m_stop = true;
-  m_socket = NDB_INVALID_SOCKET;
 
 done:
   m_output->println("listen event");
@@ -1617,6 +1627,13 @@
   if(result != 0)
     m_output->println("msg: %s", msg.c_str());
   m_output->println("");
+
+  if(result==0)
+  {
+    m_mgmsrv.m_event_listner.add_listener(le);
+    m_stop = true;
+    m_socket = NDB_INVALID_SOCKET;
+  }
 }
 
 void
@@ -1716,6 +1733,123 @@
   m_mgmsrv.eventReport(data);
   m_output->println("report event reply");
   m_output->println("result: ok");
+  m_output->println("");
+}
+
+void
+MgmApiSession::list_session(SocketServer::Session *_s, void *data)
+{
+  MgmApiSession *s= (MgmApiSession *)_s;
+  MgmApiSession *lister= (MgmApiSession*) data;
+
+  if(s!=lister)
+    NdbMutex_Lock(s->m_mutex);
+
+  Uint64 id= s->m_session_id;
+  lister->m_output->println("session: %llu",id);
+  lister->m_output->println("session.%llu.m_stopSelf: %d",id,s->m_stopSelf);
+  lister->m_output->println("session.%llu.m_stop: %d",id,s->m_stop);
+  lister->m_output->println("session.%llu.allocated.nodeid: %d",id,s->m_allocated_resources->get_nodeid());
+  if(s->m_ctx)
+  {
+    int l= strlen(s->m_ctx->m_tokenBuffer);
+    char *buf= (char*) malloc(2*l+1);
+    char *b= buf;
+    for(int i=0; i<l;i++)
+      if(s->m_ctx->m_tokenBuffer[i]=='\n')
+      {
+        *b++='\\';
+        *b++='n';
+      }
+      else
+      {
+        *b++= s->m_ctx->m_tokenBuffer[i];
+      }
+    *b= '\0';
+
+    lister->m_output->println("session.%llu.parser.buffer.len: %u",id,l);
+    lister->m_output->println("session.%llu.parser.buffer: %s",id,buf);
+    lister->m_output->println("session.%llu.parser.status: %d",id,s->m_ctx->m_status);
+
+    free(buf);
+  }
+
+  if(s!=lister)
+    NdbMutex_Unlock(s->m_mutex);
+}
+
+void
+MgmApiSession::listSessions(Parser_t::Context &ctx,
+                            Properties const &args) {
+  m_mgmsrv.get_socket_server()->foreachSession(list_session,(void*)this);
+
+  m_output->println("");
+}
+
+void
+MgmApiSession::getSessionId(Parser_t::Context &ctx,
+                                 Properties const &args) {
+  m_output->println("get session id reply");
+  m_output->println("id: %llu",m_session_id);
+  m_output->println("");
+}
+
+struct get_session_param {
+  MgmApiSession *l;
+  Uint64 id;
+  int found;
+};
+
+void
+MgmApiSession::get_session(SocketServer::Session *_s, void *data)
+{
+  struct get_session_param *p= (struct get_session_param*)data;
+  MgmApiSession *s= (MgmApiSession *)_s;
+
+  if(s!=p->l)
+    NdbMutex_Lock(s->m_mutex);
+
+  if(p->id != s->m_session_id)
+  {
+    if(s!=p->l)
+      NdbMutex_Unlock(s->m_mutex);
+    return;
+  }
+
+  p->found= true;
+  p->l->m_output->println("id: %llu",s->m_session_id);
+  p->l->m_output->println("m_stopSelf: %d",s->m_stopSelf);
+  p->l->m_output->println("m_stop: %d",s->m_stop);
+  p->l->m_output->println("nodeid: %d",s->m_allocated_resources->get_nodeid());
+  if(s->m_ctx)
+  {
+    int l= strlen(s->m_ctx->m_tokenBuffer);
+    p->l->m_output->println("parser_buffer_len: %u",l);
+    p->l->m_output->println("parser_status: %d",s->m_ctx->m_status);
+  }
+
+  if(s!=p->l)
+    NdbMutex_Unlock(s->m_mutex);
+}
+
+void
+MgmApiSession::getSession(Parser_t::Context &ctx,
+                          Properties const &args) {
+  Uint64 id;
+  struct get_session_param p;
+
+  args.get("id", &id);
+
+  p.l= this;
+  p.id= id;
+  p.found= false;
+
+  m_output->println("get session reply");
+  m_mgmsrv.get_socket_server()->foreachSession(get_session,(void*)&p);
+
+  if(p.found==false)
+    m_output->println("id: 0");
+
   m_output->println("");
 }
 

--- 1.66.15.1/ndb/src/ndbapi/NdbScanOperation.cpp	2006-10-31 10:32:44 +00:00
+++ 1.99/storage/ndb/src/ndbapi/NdbScanOperation.cpp	2006-10-31 10:32:44 +00:00
@@ -50,6 +50,7 @@
   m_receivers = 0;
   m_array = new Uint32[1]; // skip if on delete in fix_receivers
   theSCAN_TABREQ = 0;
+  m_executed = false;
 }
 
 NdbScanOperation::~NdbScanOperation()
@@ -111,6 +112,7 @@
   theNdbCon->theMagicNumber = 0xFE11DF;
   theNoOfTupKeyLeft = tab->m_noOfDistributionKeys;
   m_read_range_no = 0;
+  m_executed = false;
   return 0;
 }
 
@@ -162,7 +164,22 @@
   }
 
   m_keyInfo = ((scan_flags & SF_KeyInfo) || lockExcl) ? 1 : 0;
+  bool tupScan = (scan_flags & SF_TupScan);
 
+#if 1 // XXX temp for testing
+  { char* p = getenv("NDB_USE_TUPSCAN");
+    if (p != 0) {
+      unsigned n = atoi(p); // 0-10
+      if ((unsigned int) (::time(0) % 10) < n) tupScan = true;
+    }
+  }
+#endif
+  if (scan_flags & SF_DiskScan)
+  {
+    tupScan = true;
+    m_no_disk_flag = false;
+  }
+  
   bool rangeScan = false;
   if (m_accessTable->m_indexType == NdbDictionary::Index::OrderedIndex)
   {
@@ -177,12 +194,9 @@
     theStatus = GetValue;
     theOperationType  = OpenRangeScanRequest;
     rangeScan = true;
-  }
-
-  bool tupScan = (scan_flags & SF_TupScan);
-  if (tupScan && rangeScan)
     tupScan = false;
-
+  }
+  
   if (rangeScan && (scan_flags & SF_OrderBy))
     parallel = fragCount;
   
@@ -202,7 +216,7 @@
   theSCAN_TABREQ->setSignal(GSN_SCAN_TABREQ);
   ScanTabReq * req = CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend());
   req->apiConnectPtr = theNdbCon->theTCConPtr;
-  req->tableId = m_accessTable->m_tableId;
+  req->tableId = m_accessTable->m_id;
   req->tableSchemaVersion = m_accessTable->m_version;
   req->storedProcId = 0xFFFF;
   req->buddyConPtr = theNdbCon->theBuddyConPtr;
@@ -363,7 +377,7 @@
 int
 NdbScanOperation::executeCursor(int nodeId){
   NdbTransaction * tCon = theNdbCon;
-  TransporterFacade* tp = TransporterFacade::instance();
+  TransporterFacade* tp = theNdb->theImpl->m_transporter_facade;
   Guard guard(tp->theMutexPtr);
 
   Uint32 magic = tCon->theMagicNumber;
@@ -385,6 +399,7 @@
     if (doSendScan(nodeId) == -1)
       return -1;
 
+    m_executed= true; // Mark operation as executed
     return 0;
   } else {
     if (!(tp->get_node_stopping(nodeId) &&
@@ -464,18 +479,28 @@
   }
   
   Uint32 nodeId = theNdbCon->theDBnode;
-  TransporterFacade* tp = TransporterFacade::instance();
-  Guard guard(tp->theMutexPtr);
-  if(theError.code)
-    return -1;
+  TransporterFacade* tp = theNdb->theImpl->m_transporter_facade;
+  /*
+    The PollGuard has an implicit call of unlock_and_signal through the
+    ~PollGuard method. This method is called implicitly by the compiler
+    in all places where the object is out of context due to a return,
+    break, continue or simply end of statement block
+  */
+  PollGuard poll_guard(tp, &theNdb->theImpl->theWaiter,
+                       theNdb->theNdbBlockNumber);
 
-  Uint32 seq = theNdbCon->theNodeSequence;
-  if(seq == tp->getNodeSequence(nodeId) && send_next_scan(idx, false,
-							  forceSend) == 0){
+  const Uint32 seq = theNdbCon->theNodeSequence;
+
+  if(theError.code)
+  {
+    goto err4;
+  }
+  
+  if(seq == tp->getNodeSequence(nodeId) && send_next_scan(idx, false) == 0)
+  {
       
     idx = m_current_api_receiver;
     last = m_api_receivers_count;
-
     Uint32 timeout = tp->m_waitfor_timeout;
       
     do {
@@ -502,12 +527,10 @@
 	/**
 	 * No completed...
 	 */
-	theNdb->theImpl->theWaiter.m_node = nodeId;
-	theNdb->theImpl->theWaiter.m_state = WAIT_SCAN;
-	int return_code = theNdb->receiveResponse(3*timeout);
-	if (return_code == 0 && seq == tp->getNodeSequence(nodeId)) {
+        int ret_code= poll_guard.wait_scan(3*timeout, nodeId, forceSend);
+	if (ret_code == 0 && seq == tp->getNodeSequence(nodeId)) {
 	  continue;
-	} else if(return_code == -1){
+	} else if(ret_code == -1){
 	  retVal = -1;
 	} else {
 	  idx = last;
@@ -557,6 +580,10 @@
     if(theError.code == 0)
       setErrorCode(4028); // seq changed = Node fail
     break;
+  case -4:
+err4:
+    setErrorCode(theError.code);
+    break;
   }
     
   theNdbCon->theTransactionIsStarted = false;
@@ -566,8 +593,8 @@
 }
 
 int
-NdbScanOperation::send_next_scan(Uint32 cnt, bool stopScanFlag,
-				 bool forceSend){  
+NdbScanOperation::send_next_scan(Uint32 cnt, bool stopScanFlag)
+{
   if(cnt > 0){
     NdbApiSignal tSignal(theNdb->theMyRef);
     tSignal.setSignal(GSN_SCAN_NEXTREQ);
@@ -602,7 +629,7 @@
     if(sent)
     {
       Uint32 nodeId = theNdbCon->theDBnode;
-      TransporterFacade * tp = TransporterFacade::instance();
+      TransporterFacade * tp = theNdb->theImpl->m_transporter_facade;
       if(cnt > 21){
 	tSignal.setLength(4);
 	LinearSectionPtr ptr[3];
@@ -614,9 +641,6 @@
 	ret = tp->sendSignal(&tSignal, nodeId);
       }
     }
-    
-    if (!ret) checkForceSend(forceSend);
-
     m_sent_receivers_count = last + sent;
     m_api_receivers_count -= cnt;
     m_current_api_receiver = 0;
@@ -626,15 +650,6 @@
   return 0;
 }
 
-void NdbScanOperation::checkForceSend(bool forceSend)
-{
-  if (forceSend) {
-    TransporterFacade::instance()->forceSend(theNdb->theNdbBlockNumber);
-  } else {
-    TransporterFacade::instance()->checkForceSend(theNdb->theNdbBlockNumber);
-  }//if
-}
-
 int 
 NdbScanOperation::prepareSend(Uint32  TC_ConnectPtr, Uint64  TransactionId)
 {
@@ -669,10 +684,16 @@
 	       m_conf_receivers_count,
 	       m_sent_receivers_count);
     
-    TransporterFacade* tp = TransporterFacade::instance();
-    Guard guard(tp->theMutexPtr);
-    close_impl(tp, forceSend);
-    
+    TransporterFacade* tp = theNdb->theImpl->m_transporter_facade;
+    /*
+      The PollGuard has an implicit call of unlock_and_signal through the
+      ~PollGuard method. This method is called implicitly by the compiler
+      in all places where the object is out of context due to a return,
+      break, continue or simply end of statement block
+    */
+    PollGuard poll_guard(tp, &theNdb->theImpl->theWaiter,
+                         theNdb->theNdbBlockNumber);
+    close_impl(tp, forceSend, &poll_guard);
   }
 
   NdbConnection* tCon = theNdbCon;
@@ -680,9 +701,27 @@
   theNdbCon = NULL;
   m_transConnection = NULL;
 
-  if (releaseOp && tTransCon) {
+  if (tTransCon && releaseOp) 
+  {
     NdbIndexScanOperation* tOp = (NdbIndexScanOperation*)this;
-    tTransCon->releaseExecutedScanOperation(tOp);
+
+    bool ret = true;
+    if (theStatus != WaitResponse)
+    {
+      /**
+       * Not executed yet
+       */
+      ret = 
+	tTransCon->releaseScanOperation(&tTransCon->m_theFirstScanOperation,
+					&tTransCon->m_theLastScanOperation,
+					tOp);
+    }
+    else
+    {
+      ret = tTransCon->releaseScanOperation(&tTransCon->m_firstExecutedScanOp,
+					    0, tOp);
+    }
+    assert(ret);
   }
   
   tCon->theScanningOp = 0;
@@ -777,6 +816,7 @@
    */
   Uint32 reqInfo = req->requestInfo;
   ScanTabReq::setKeyinfoFlag(reqInfo, keyInfo);
+  ScanTabReq::setNoDiskFlag(reqInfo, m_no_disk_flag);
   req->requestInfo = reqInfo;
   
   for(Uint32 i = 0; i<theParallelism; i++){
@@ -827,7 +867,7 @@
   req->requestInfo = tmp;
   tSignal->setLength(ScanTabReq::StaticLength + theDistrKeyIndicator_);
 
-  TransporterFacade *tp = TransporterFacade::instance();
+  TransporterFacade *tp = theNdb->theImpl->m_transporter_facade;
   LinearSectionPtr ptr[3];
   ptr[0].p = m_prepared_receivers;
   ptr[0].sz = theParallelism;
@@ -911,13 +951,20 @@
  *     the scan process. 
  ****************************************************************************/
 int
-NdbScanOperation::getKeyFromKEYINFO20(Uint32* data, unsigned size)
+NdbScanOperation::getKeyFromKEYINFO20(Uint32* data, Uint32 & size)
 {
   NdbRecAttr * tRecAttr = m_curr_row;
   if(tRecAttr)
   {
     const Uint32 * src = (Uint32*)tRecAttr->aRef();
-    memcpy(data, src, 4*size);
+
+    assert(tRecAttr->get_size_in_bytes() > 0);
+    assert(tRecAttr->get_size_in_bytes() < 65536);
+    const Uint32 len = (tRecAttr->get_size_in_bytes() + 3)/4-1;
+
+    assert(size >= len);
+    memcpy(data, src, 4*len);
+    size = len;
     return 0;
   }
   return -1;
@@ -942,8 +989,10 @@
     }
     pTrans->theSimpleState = 0;
     
-    const Uint32 len = (tRecAttr->attrSize() * tRecAttr->arraySize() + 3)/4-1;
-
+    assert(tRecAttr->get_size_in_bytes() > 0);
+    assert(tRecAttr->get_size_in_bytes() < 65536);
+    const Uint32 len = (tRecAttr->get_size_in_bytes() + 3)/4-1;
+    
     newOp->theTupKeyLen = len;
     newOp->theOperationType = opType;
     switch (opType) {
@@ -1041,23 +1090,23 @@
 
 int
 NdbIndexScanOperation::setBound(const char* anAttrName, int type, 
-				const void* aValue, Uint32 len)
+				const void* aValue)
 {
-  return setBound(m_accessTable->getColumn(anAttrName), type, aValue, len);
+  return setBound(m_accessTable->getColumn(anAttrName), type, aValue);
 }
 
 int
 NdbIndexScanOperation::setBound(Uint32 anAttrId, int type, 
-				const void* aValue, Uint32 len)
+				const void* aValue)
 {
-  return setBound(m_accessTable->getColumn(anAttrId), type, aValue, len);
+  return setBound(m_accessTable->getColumn(anAttrId), type, aValue);
 }
 
 int
 NdbIndexScanOperation::equal_impl(const NdbColumnImpl* anAttrObject, 
-				  const char* aValue, 
-				  Uint32 len){
-  return setBound(anAttrObject, BoundEQ, aValue, len);
+				  const char* aValue)
+{
+  return setBound(anAttrObject, BoundEQ, aValue);
 }
 
 NdbRecAttr*
@@ -1067,7 +1116,7 @@
     return NdbScanOperation::getValue_impl(attrInfo, aValue);
   }
   
-  int id = attrInfo->m_attrId;                // In "real" table
+  int id = attrInfo->getColumnNo();                // In "real" table
   assert(m_accessTable->m_index);
   int sz = (int)m_accessTable->m_index->m_key_ids.size();
   if(id >= sz || (id = m_accessTable->m_index->m_key_ids[id]) == -1){
@@ -1104,7 +1153,7 @@
  */
 int
 NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo, 
-				int type, const void* aValue, Uint32 len)
+				int type, const void* aValue)
 {
   if (!tAttrInfo)
   {
@@ -1112,24 +1161,23 @@
     return -1;
   }
   if (theOperationType == OpenRangeScanRequest &&
-      (0 <= type && type <= 4) &&
-      len <= 8000) {
+      (0 <= type && type <= 4)) {
     // insert bound type
     Uint32 currLen = theTotalNrOfKeyWordInSignal;
     Uint32 remaining = KeyInfo::DataLength - currLen;
-    Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
     bool tDistrKey = tAttrInfo->m_distributionKey;
 
-    len = aValue != NULL ? sizeInBytes : 0;
-    if (len != sizeInBytes && (len != 0)) {
-      setErrorCodeAbort(4209);
-      return -1;
-    }
+    Uint32 len = 0;
+    if (aValue != NULL)
+      if (! tAttrInfo->get_var_length(aValue, len)) {
+        setErrorCodeAbort(4209);
+        return -1;
+      }
 
     // insert attribute header
     Uint32 tIndexAttrId = tAttrInfo->m_attrId;
     Uint32 sizeInWords = (len + 3) / 4;
-    AttributeHeader ah(tIndexAttrId, sizeInWords);
+    AttributeHeader ah(tIndexAttrId, sizeInWords << 2);
     const Uint32 ahValue = ah.m_value;
 
     const Uint32 align = (UintPtr(aValue) & 7);
@@ -1223,6 +1271,31 @@
   return -1;
 }
 
+Uint32
+NdbIndexScanOperation::getKeyFromSCANTABREQ(Uint32* data, Uint32 size)
+{
+  DBUG_ENTER("NdbIndexScanOperation::getKeyFromSCANTABREQ");
+  assert(size >= theTotalNrOfKeyWordInSignal);
+  size = theTotalNrOfKeyWordInSignal;
+  NdbApiSignal* tSignal = theSCAN_TABREQ->next();
+  Uint32 pos = 0;
+  while (pos < size) {
+    assert(tSignal != NULL);
+    Uint32* tData = tSignal->getDataPtrSend();
+    Uint32 rem = size - pos;
+    if (rem > KeyInfo::DataLength)
+      rem = KeyInfo::DataLength;
+    Uint32 i = 0;
+    while (i < rem) {
+      data[pos + i] = tData[KeyInfo::HeaderLength + i];
+      i++;
+    }
+    pos += rem;
+  }
+  DBUG_DUMP("key", (char*)data, size << 2);
+  DBUG_RETURN(size);
+}
+
 int
 NdbIndexScanOperation::readTuples(LockMode lm,
 				  Uint32 scan_flags,
@@ -1322,10 +1395,11 @@
       return (r1_null ? -1 : 1) * jdir;
     }
     const NdbColumnImpl & col = NdbColumnImpl::getImpl(* r1->m_column);
-    Uint32 len = r1->theAttrSize * r1->theArraySize;
+    Uint32 len1 = r1->get_size_in_bytes();
+    Uint32 len2 = r2->get_size_in_bytes();
     if(!r1_null){
       const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getType(col.m_type);
-      int r = (*sqlType.m_cmp)(col.m_cs, d1, len, d2, len, true);
+      int r = (*sqlType.m_cmp)(col.m_cs, d1, len1, d2, len2, true);
       if(r){
 	assert(r != NdbSqlUtil::CmpUnknown);
 	return r * jdir;
@@ -1363,26 +1437,31 @@
   if(fetchNeeded){
     if(fetchAllowed){
       if(DEBUG_NEXT_RESULT) ndbout_c("performing fetch...");
-      TransporterFacade* tp = TransporterFacade::instance();
-      Guard guard(tp->theMutexPtr);
+      TransporterFacade* tp = theNdb->theImpl->m_transporter_facade;
+      /*
+        The PollGuard has an implicit call of unlock_and_signal through the
+        ~PollGuard method. This method is called implicitly by the compiler
+        in all places where the object is out of context due to a return,
+        break, continue or simply end of statement block
+      */
+      PollGuard poll_guard(tp, &theNdb->theImpl->theWaiter,
+                           theNdb->theNdbBlockNumber);
       if(theError.code)
 	return -1;
       Uint32 seq = theNdbCon->theNodeSequence;
       Uint32 nodeId = theNdbCon->theDBnode;
       Uint32 timeout = tp->m_waitfor_timeout;
       if(seq == tp->getNodeSequence(nodeId) &&
-	 !send_next_scan_ordered(s_idx, forceSend)){
+	 !send_next_scan_ordered(s_idx)){
 	Uint32 tmp = m_sent_receivers_count;
 	s_idx = m_current_api_receiver; 
 	while(m_sent_receivers_count > 0 && !theError.code){
-	  theNdb->theImpl->theWaiter.m_node = nodeId;
-	  theNdb->theImpl->theWaiter.m_state = WAIT_SCAN;
-	  int return_code = theNdb->receiveResponse(3*timeout);
-	  if (return_code == 0 && seq == tp->getNodeSequence(nodeId)) {
+          int ret_code= poll_guard.wait_scan(3*timeout, nodeId, forceSend);
+	  if (ret_code == 0 && seq == tp->getNodeSequence(nodeId)) {
 	    continue;
 	  }
 	  if(DEBUG_NEXT_RESULT) ndbout_c("return -1");
-	  if(return_code == -1){
+	  if(ret_code == -1){
 	    setErrorCode(4008);
 	  } else {
 	    setErrorCode(4028);
@@ -1469,7 +1548,8 @@
 }
 
 int
-NdbIndexScanOperation::send_next_scan_ordered(Uint32 idx, bool forceSend){  
+NdbIndexScanOperation::send_next_scan_ordered(Uint32 idx)
+{
   if(idx == theParallelism)
     return 0;
   
@@ -1504,15 +1584,16 @@
   m_sent_receivers_count = last + 1;
   
   Uint32 nodeId = theNdbCon->theDBnode;
-  TransporterFacade * tp = TransporterFacade::instance();
+  TransporterFacade * tp = theNdb->theImpl->m_transporter_facade;
   tSignal.setLength(4+1);
   int ret= tp->sendSignal(&tSignal, nodeId);
-  if (!ret) checkForceSend(forceSend);
   return ret;
 }
 
 int
-NdbScanOperation::close_impl(TransporterFacade* tp, bool forceSend){
+NdbScanOperation::close_impl(TransporterFacade* tp, bool forceSend,
+                             PollGuard *poll_guard)
+{
   Uint32 seq = theNdbCon->theNodeSequence;
   Uint32 nodeId = theNdbCon->theDBnode;
   
@@ -1523,15 +1604,12 @@
   }
   
   Uint32 timeout = tp->m_waitfor_timeout;
-
   /**
    * Wait for outstanding
    */
   while(theError.code == 0 && m_sent_receivers_count)
   {
-    theNdb->theImpl->theWaiter.m_node = nodeId;
-    theNdb->theImpl->theWaiter.m_state = WAIT_SCAN;
-    int return_code = theNdb->receiveResponse(3*timeout);
+    int return_code= poll_guard->wait_scan(3*timeout, nodeId, forceSend);
     switch(return_code){
     case 0:
       break;
@@ -1588,7 +1666,7 @@
   }
   
   // Send close scan
-  if(send_next_scan(api+conf, true, forceSend) == -1)
+  if(send_next_scan(api+conf, true) == -1)
   {
     theNdbCon->theReleaseOnClose = true;
     return -1;
@@ -1599,9 +1677,7 @@
    */
   while(m_sent_receivers_count+m_api_receivers_count+m_conf_receivers_count)
   {
-    theNdb->theImpl->theWaiter.m_node = nodeId;
-    theNdb->theImpl->theWaiter.m_state = WAIT_SCAN;
-    int return_code = theNdb->receiveResponse(3*timeout);
+    int return_code= poll_guard->wait_scan(3*timeout, nodeId, forceSend);
     switch(return_code){
     case 0:
       break;
@@ -1640,13 +1716,20 @@
 NdbScanOperation::restart(bool forceSend)
 {
   
-  TransporterFacade* tp = TransporterFacade::instance();
-  Guard guard(tp->theMutexPtr);
+  TransporterFacade* tp = theNdb->theImpl->m_transporter_facade;
+  /*
+    The PollGuard has an implicit call of unlock_and_signal through the
+    ~PollGuard method. This method is called implicitly by the compiler
+    in all places where the object is out of context due to a return,
+    break, continue or simply end of statement block
+  */
+  PollGuard poll_guard(tp, &theNdb->theImpl->theWaiter,
+                       theNdb->theNdbBlockNumber);
   Uint32 nodeId = theNdbCon->theDBnode;
   
   {
     int res;
-    if((res= close_impl(tp, forceSend)))
+    if((res= close_impl(tp, forceSend, &poll_guard)))
     {
       return res;
     }
@@ -1660,7 +1743,6 @@
   theError.code = 0;
   if (doSendScan(nodeId) == -1)
     return -1;
-  
   return 0;
 }
 
@@ -1669,9 +1751,16 @@
   int res;
   
   {
-    TransporterFacade* tp = TransporterFacade::instance();
-    Guard guard(tp->theMutexPtr);
-    res= close_impl(tp, forceSend);
+    TransporterFacade* tp = theNdb->theImpl->m_transporter_facade;
+    /*
+      The PollGuard has an implicit call of unlock_and_signal through the
+      ~PollGuard method. This method is called implicitly by the compiler
+      in all places where the object is out of context due to a return,
+      break, continue or simply end of statement block
+    */
+    PollGuard poll_guard(tp, &theNdb->theImpl->theWaiter,
+                         theNdb->theNdbBlockNumber);
+    res= close_impl(tp, forceSend, &poll_guard);
   }
 
   if(!res)

--- 1.8.1.1/ndb/include/ndbapi/ndberror.h	2006-10-31 10:32:44 +00:00
+++ 1.12/storage/ndb/include/ndbapi/ndberror.h	2006-10-31 10:32:44 +00:00
@@ -50,7 +50,8 @@
   ndberror_cl_unknown_error_code = 14,
   ndberror_cl_node_shutdown = 15,
   ndberror_cl_configuration = 16,
-  ndberror_cl_schema_object_already_exists = 17 
+  ndberror_cl_schema_object_already_exists = 17,
+  ndberror_cl_internal_temporary = 18
 } ndberror_classification_enum;
 
 
@@ -70,6 +71,11 @@
    * Error code
    */
   int code;
+
+  /**
+   * Mysql error code
+   */
+  int mysql_code;
 
   /**
    * Error message

--- 1.30.32.1/ndb/src/ndbapi/ndberror.c	2006-10-31 10:32:44 +00:00
+++ 1.77/storage/ndb/src/ndbapi/ndberror.c	2006-10-31 10:32:44 +00:00
@@ -16,11 +16,13 @@
 
 
 #include <ndb_global.h>
+#include <my_base.h>
 #include <ndberror.h>
 #include <m_string.h>
 
 typedef struct ErrorBundle {
   int code;
+  int mysql_code;
   ndberror_classification classification;
   const char * message;
 } ErrorBundle;
@@ -57,6 +59,11 @@
 
 #define OE ndberror_cl_schema_object_already_exists
 
+#define IT ndberror_cl_internal_temporary
+
+/* default mysql error code for unmapped codes */
+#define DMEC -1
+
 static const char* empty_string = "";
 
 /*
@@ -71,6 +78,10 @@
  *  900 - TUX
  * 1200 - LQH
  * 1300 - BACKUP
+ * 1400 - SUMA
+ * 1500 - LGMAN
+ * 1600 - TSMAN
+ * 1700 - QMGR
  * 4000 - API
  * 4100 - ""
  * 4200 - ""
@@ -88,442 +99,522 @@
   /**
    * No error
    */
-  { 0,    NE, "No error" },
+  { 0,    0, NE, "No error" },
   
   /**
    * NoDataFound
    */
-  { 626,  ND, "Tuple did not exist" },
+  { 626,  HA_ERR_KEY_NOT_FOUND, ND, "Tuple did not exist" },
 
   /**
    * ConstraintViolation 
    */
-  { 630,  CV, "Tuple already existed when attempting to insert" },
-  { 840,  CV, "Trying to set a NOT NULL attribute to NULL" },
-  { 893,  CV, "Constraint violation e.g. duplicate value in unique index" },
+  { 630,  HA_ERR_FOUND_DUPP_KEY, CV, "Tuple already existed when attempting to insert" },
+  { 839,  DMEC, CV, "Illegal null attribute" },
+  { 840,  DMEC, CV, "Trying to set a NOT NULL attribute to NULL" },
+  { 893,  HA_ERR_FOUND_DUPP_KEY, CV, "Constraint violation e.g. duplicate value in unique index" },
 
   /**
    * Node recovery errors
    */
-  {  286, NR, "Node failure caused abort of transaction" }, 
-  {  250, NR, "Node where lock was held crashed, restart scan transaction" },
-  {  499, NR, "Scan take over error, restart scan transaction" },  
-  { 1204, NR, "Temporary failure, distribution changed" },
-  { 4002, NR, "Send to NDB failed" },
-  { 4010, NR, "Node failure caused abort of transaction" }, 
-  { 4025, NR, "Node failure caused abort of transaction" }, 
-  { 4027, NR, "Node failure caused abort of transaction" },
-  { 4028, NR, "Node failure caused abort of transaction" },
-  { 4029, NR, "Node failure caused abort of transaction" },
-  { 4031, NR, "Node failure caused abort of transaction" },
-  { 4033, NR, "Send to NDB failed" },
-  { 4115, NR, 
+  {  286, DMEC, NR, "Node failure caused abort of transaction" }, 
+  {  250, DMEC, NR, "Node where lock was held crashed, restart scan transaction" },
+  {  499, DMEC, NR, "Scan take over error, restart scan transaction" },  
+  { 1204, DMEC, NR, "Temporary failure, distribution changed" },
+  { 4002, DMEC, NR, "Send to NDB failed" },
+  { 4010, DMEC, NR, "Node failure caused abort of transaction" }, 
+  { 4025, DMEC, NR, "Node failure caused abort of transaction" }, 
+  { 4027, DMEC, NR, "Node failure caused abort of transaction" },
+  { 4028, DMEC, NR, "Node failure caused abort of transaction" },
+  { 4029, DMEC, NR, "Node failure caused abort of transaction" },
+  { 4031, DMEC, NR, "Node failure caused abort of transaction" },
+  { 4033, DMEC, NR, "Send to NDB failed" },
+  { 4115, DMEC, NR, 
     "Transaction was committed but all read information was not "
     "received due to node crash" },
-  { 4119, NR, "Simple/dirty read failed due to node failure" },
+  { 4119, DMEC, NR, "Simple/dirty read failed due to node failure" },
   
   /**
    * Node shutdown
    */
-  {  280, NS, "Transaction aborted due to node shutdown" },
+  {  280, DMEC, NS, "Transaction aborted due to node shutdown" },
   /* This scan trans had an active fragment scan in a LQH which have crashed */
-  {  270, NS, "Transaction aborted due to node shutdown" }, 
-  { 1223, NS, "Read operation aborted due to node shutdown" },
-  { 4023, NS, "Transaction aborted due to node shutdown" },
-  { 4030, NS, "Transaction aborted due to node shutdown" },
-  { 4034, NS, "Transaction aborted due to node shutdown" },
+  {  270, DMEC, NS, "Transaction aborted due to node shutdown" }, 
+  { 1223, DMEC, NS, "Read operation aborted due to node shutdown" },
+  { 4023, DMEC, NS, "Transaction aborted due to node shutdown" },
+  { 4030, DMEC, NS, "Transaction aborted due to node shutdown" },
+  { 4034, DMEC, NS, "Transaction aborted due to node shutdown" },
 
 
   
   /**
    * Unknown result
    */
-  { 4008, UR, "Receive from NDB failed" },
-  { 4009, UR, "Cluster Failure" },
-  { 4012, UR, 
+  { 4008, DMEC, UR, "Receive from NDB failed" },
+  { 4009, DMEC, UR, "Cluster Failure" },
+  { 4012, DMEC, UR, 
     "Request ndbd time-out, maybe due to high load or communication problems"}, 
-  { 4024, UR, 
+  { 4024, DMEC, UR, 
     "Time-out, most likely caused by simple read or cluster failure" }, 
   
   /**
    * TemporaryResourceError
    */
-  { 217,  TR, "217" },
-  { 218,  TR, "218" },
-  { 219,  TR, "219" },
-  { 233,  TR,
+  { 217,  DMEC, TR, "217" },
+  { 218,  DMEC, TR, "218" },
+  { 219,  DMEC, TR, "219" },
+  { 233,  DMEC, TR,
     "Out of operation records in transaction coordinator (increase MaxNoOfConcurrentOperations)" },
-  { 275,  TR, "275" },
-  { 279,  TR, "Out of transaction markers in transaction coordinator" },
-  { 414,  TR, "414" },
-  { 418,  TR, "Out of transaction buffers in LQH" },
-  { 419,  TR, "419" },
-  { 245,  TR, "Too many active scans" },
-  { 488,  TR, "Too many active scans" },
-  { 490,  TR, "Too many active scans" },
-  { 805,  TR, "Out of attrinfo records in tuple manager" },
-  { 830,  TR, "Out of add fragment operation records" },
-  { 873,  TR, "Out of attrinfo records for scan in tuple manager" },
-  { 1217, TR, "Out of operation records in local data manager (increase MaxNoOfLocalOperations)" },
-  { 1220, TR, "REDO log files overloaded, consult online manual (decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)" },
-  { 1222, TR, "Out of transaction markers in LQH" },
-  { 1224, TR, "Out of Send Buffer space in LQH" },
-  { 4021, TR, "Out of Send Buffer space in NDB API" },
-  { 4022, TR, "Out of Send Buffer space in NDB API" },
-  { 4032, TR, "Out of Send Buffer space in NDB API" },
-  {  288, TR, "Out of index operations in transaction coordinator (increase MaxNoOfConcurrentIndexOperations)" },
+  { 275,  DMEC, TR, "275" },
+  { 279,  DMEC, TR, "Out of transaction markers in transaction coordinator" },
+  { 414,  DMEC, TR, "414" },
+  { 418,  DMEC, TR, "Out of transaction buffers in LQH" },
+  { 419,  DMEC, TR, "419" },
+  { 245,  DMEC, TR, "Too many active scans" },
+  { 488,  DMEC, TR, "Too many active scans" },
+  { 490,  DMEC, TR, "Too many active scans" },
+  { 805,  DMEC, TR, "Out of attrinfo records in tuple manager" },
+  { 830,  DMEC, TR, "Out of add fragment operation records" },
+  { 873,  DMEC, TR, "Out of attrinfo records for scan in tuple manager" },
+  { 899,  DMEC, TR, "Rowid already allocated" },
+  { 1217, DMEC, TR, "Out of operation records in local data manager (increase MaxNoOfLocalOperations)" },
+  { 1220, DMEC, TR, "REDO log files overloaded, consult online manual (decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)" },
+  { 1222, DMEC, TR, "Out of transaction markers in LQH" },
+  { 4021, DMEC, TR, "Out of Send Buffer space in NDB API" },
+  { 4022, DMEC, TR, "Out of Send Buffer space in NDB API" },
+  { 4032, DMEC, TR, "Out of Send Buffer space in NDB API" },
+  { 1501, DMEC, TR, "Out of undo space" },
+  {  288, DMEC, TR, "Out of index operations in transaction coordinator (increase MaxNoOfConcurrentIndexOperations)" },
+
   /**
    * InsufficientSpace
    */
-  { 623,  IS, "623" },
-  { 624,  IS, "624" },
-  { 625,  IS, "Out of memory in Ndb Kernel, hash index part (increase IndexMemory)" },
-  { 640,  IS, "Too many hash indexes (should not happen)" },
-  { 826,  IS, "Too many tables and attributes (increase MaxNoOfAttributes or MaxNoOfTables)" },
-  { 827,  IS, "Out of memory in Ndb Kernel, table data (increase DataMemory)" },
-  { 902,  IS, "Out of memory in Ndb Kernel, ordered index data (increase DataMemory)" },
-  { 903,  IS, "Too many ordered indexes (increase MaxNoOfOrderedIndexes)" },
-  { 904,  IS, "Out of fragment records (increase MaxNoOfOrderedIndexes)" },
-  { 905,  IS, "Out of attribute records (increase MaxNoOfAttributes)" },
-
+  { 623,  HA_ERR_RECORD_FILE_FULL, IS, "623" },
+  { 624,  HA_ERR_RECORD_FILE_FULL, IS, "624" },
+  { 625,  HA_ERR_INDEX_FILE_FULL, IS, "Out of memory in Ndb Kernel, hash index part (increase IndexMemory)" },
+  { 640,  DMEC, IS, "Too many hash indexes (should not happen)" },
+  { 826,  HA_ERR_RECORD_FILE_FULL, IS, "Too many tables and attributes (increase MaxNoOfAttributes or MaxNoOfTables)" },
+  { 827,  HA_ERR_RECORD_FILE_FULL, IS, "Out of memory in Ndb Kernel, table data (increase DataMemory)" },
+  { 902,  HA_ERR_RECORD_FILE_FULL, IS, "Out of memory in Ndb Kernel, ordered index data (increase DataMemory)" },
+  { 903,  HA_ERR_INDEX_FILE_FULL, IS, "Too many ordered indexes (increase MaxNoOfOrderedIndexes)" },
+  { 904,  HA_ERR_INDEX_FILE_FULL, IS, "Out of fragment records (increase MaxNoOfOrderedIndexes)" },
+  { 905,  DMEC, IS, "Out of attribute records (increase MaxNoOfAttributes)" },
+  { 1601, HA_ERR_RECORD_FILE_FULL, IS, "Out extents, tablespace full" },
+  
   /**
    * TimeoutExpired 
    */
-  { 266,  TO, "Time-out in NDB, probably caused by deadlock" },
-  { 274,  TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout */
-  { 296,  TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout */
-  { 297,  TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout, temporary!! */
-  { 237,  TO, "Transaction had timed out when trying to commit it" },
+  { 266,  HA_ERR_LOCK_WAIT_TIMEOUT, TO, "Time-out in NDB, probably caused by deadlock" },
+  { 274,  HA_ERR_LOCK_WAIT_TIMEOUT, TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout */
+  { 296,  HA_ERR_LOCK_WAIT_TIMEOUT, TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout */
+  { 297,  HA_ERR_LOCK_WAIT_TIMEOUT, TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout, temporary!! */
+  { 237,  HA_ERR_LOCK_WAIT_TIMEOUT, TO, "Transaction had timed out when trying to commit it" },
   
   /**
    * OverloadError
    */
-  { 410,  OL, "REDO log files overloaded, consult online manual (decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)" },
-  { 677,  OL, "Index UNDO buffers overloaded (increase UndoIndexBuffer)" },
-  { 891,  OL, "Data UNDO buffers overloaded (increase UndoDataBuffer)" },
-  { 1221, OL, "REDO buffers overloaded, consult online manual (increase RedoBuffer)" },
-  { 4006, OL, "Connect failure - out of connection objects (increase MaxNoOfConcurrentTransactions)" }, 
+  { 701,  DMEC, OL, "System busy with other schema operation" },
+  { 711,  DMEC, OL, "System busy with node restart, schema operations not allowed" },
+  { 410,  DMEC, OL, "REDO log files overloaded, consult online manual (decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)" },
+  { 677,  DMEC, OL, "Index UNDO buffers overloaded (increase UndoIndexBuffer)" },
+  { 891,  DMEC, OL, "Data UNDO buffers overloaded (increase UndoDataBuffer)" },
+  { 1221, DMEC, OL, "REDO buffers overloaded, consult online manual (increase RedoBuffer)" },
+  { 4006, DMEC, OL, "Connect failure - out of connection objects (increase MaxNoOfConcurrentTransactions)" }, 
 
 
+  /*
+   * Internal Temporary
+   */
+  { 702,  DMEC, IT, "Request to non-master" },
   
   /**
    * Internal errors
    */
-  { 892,  IE, "Inconsistent hash index. The index needs to be dropped and recreated" },
-  { 896,  IE, "Tuple corrupted - wrong checksum or column data in invalid format" },
-  { 901,  IE, "Inconsistent ordered index. The index needs to be dropped and recreated" },
-  { 202,  IE, "202" },
-  { 203,  IE, "203" },
-  { 207,  IE, "207" },
-  { 208,  IE, "208" },
-  { 209,  IE, "Communication problem, signal error" },
-  { 220,  IE, "220" },
-  { 230,  IE, "230" },
-  { 232,  IE, "232" },
-  { 238,  IE, "238" },
-  { 271,  IE, "Simple Read transaction without any attributes to read" },
-  { 272,  IE, "Update operation without any attributes to update" },
-  { 276,  IE, "276" },
-  { 277,  IE, "277" },
-  { 278,  IE, "278" },
-  { 287,  IE, "Index corrupted" },
-  { 290,  IE, "Corrupt key in TC, unable to xfrm" },
-  { 631,  IE, "631" },
-  { 632,  IE, "632" },
-  { 702,  IE, "Request to non-master" },
-  { 706,  IE, "Inconsistency during table creation" },
-  { 809,  IE, "809" },
-  { 812,  IE, "812" },
-  { 829,  IE, "829" },
-  { 833,  IE, "833" },
-  { 839,  IE, "Illegal null attribute" },
-  { 871,  IE, "871" },
-  { 882,  IE, "882" },
-  { 883,  IE, "883" },
-  { 887,  IE, "887" },
-  { 888,  IE, "888" },
-  { 890,  IE, "890" },
-  { 4000, IE, "MEMORY ALLOCATION ERROR" },
-  { 4001, IE, "Signal Definition Error" },
-  { 4005, IE, "Internal Error in NdbApi" },
-  { 4011, IE, "Internal Error in NdbApi" }, 
-  { 4107, IE, "Simple Transaction and Not Start" },
-  { 4108, IE, "Faulty operation type" },
-  { 4109, IE, "Faulty primary key attribute length" },
-  { 4110, IE, "Faulty length in ATTRINFO signal" },
-  { 4111, IE, "Status Error in NdbConnection" },
-  { 4113, IE, "Too many operations received" },
-  { 4320, IE, "Cannot use the same object twice to create table" },
-  { 4321, IE, "Trying to start two schema transactions" },
-  { 4344, IE, "Only DBDICT and TRIX can send requests to TRIX" },
-  { 4345, IE, "TRIX block is not available yet, probably due to node failure" },
-  { 4346, IE, "Internal error at index create/build" },
-  { 4347, IE, "Bad state at alter index" },
-  { 4348, IE, "Inconsistency detected at alter index" },
-  { 4349, IE, "Inconsistency detected at index usage" },
-  { 4350, IE, "Transaction already aborted" },
+  { 892,  DMEC, IE, "Inconsistent hash index. The index needs to be dropped and recreated" },
+  { 896,  DMEC, IE, "Tuple corrupted - wrong checksum or column data in invalid format" },
+  { 901,  DMEC, IE, "Inconsistent ordered index. The index needs to be dropped and recreated" },
+  { 202,  DMEC, IE, "202" },
+  { 203,  DMEC, IE, "203" },
+  { 207,  DMEC, IE, "207" },
+  { 208,  DMEC, IE, "208" },
+  { 209,  DMEC, IE, "Communication problem, signal error" },
+  { 220,  DMEC, IE, "220" },
+  { 230,  DMEC, IE, "230" },
+  { 232,  DMEC, IE, "232" },
+  { 238,  DMEC, IE, "238" },
+  { 271,  DMEC, IE, "Simple Read transaction without any attributes to read" },
+  { 272,  DMEC, IE, "Update operation without any attributes to update" },
+  { 276,  DMEC, IE, "276" },
+  { 277,  DMEC, IE, "277" },
+  { 278,  DMEC, IE, "278" },
+  { 287,  DMEC, IE, "Index corrupted" },
+  { 290,  DMEC, IE, "Corrupt key in TC, unable to xfrm" },
+  { 631,  DMEC, IE, "631" },
+  { 632,  DMEC, IE, "632" },
+  { 706,  DMEC, IE, "Inconsistency during table creation" },
+  { 809,  DMEC, IE, "809" },
+  { 812,  DMEC, IE, "812" },
+  { 829,  DMEC, IE, "829" },
+  { 833,  DMEC, IE, "833" },
+  { 871,  DMEC, IE, "871" },
+  { 882,  DMEC, IE, "882" },
+  { 883,  DMEC, IE, "883" },
+  { 887,  DMEC, IE, "887" },
+  { 888,  DMEC, IE, "888" },
+  { 890,  DMEC, IE, "890" },
+  { 4000, DMEC, IE, "MEMORY ALLOCATION ERROR" },
+  { 4001, DMEC, IE, "Signal Definition Error" },
+  { 4005, DMEC, IE, "Internal Error in NdbApi" },
+  { 4011, DMEC, IE, "Internal Error in NdbApi" }, 
+  { 4107, DMEC, IE, "Simple Transaction and Not Start" },
+  { 4108, DMEC, IE, "Faulty operation type" },
+  { 4109, DMEC, IE, "Faulty primary key attribute length" },
+  { 4110, DMEC, IE, "Faulty length in ATTRINFO signal" },
+  { 4111, DMEC, IE, "Status Error in NdbConnection" },
+  { 4113, DMEC, IE, "Too many operations received" },
+  { 4320, DMEC, IE, "Cannot use the same object twice to create table" },
+  { 4321, DMEC, IE, "Trying to start two schema transactions" },
+  { 4344, DMEC, IE, "Only DBDICT and TRIX can send requests to TRIX" },
+  { 4345, DMEC, IE, "TRIX block is not available yet, probably due to node failure" },
+  { 4346, DMEC, IE, "Internal error at index create/build" },
+  { 4347, DMEC, IE, "Bad state at alter index" },
+  { 4348, DMEC, IE, "Inconsistency detected at alter index" },
+  { 4349, DMEC, IE, "Inconsistency detected at index usage" },
+  { 4350, DMEC, IE, "Transaction already aborted" },
 
   /**
    * Application error
    */
-  { 763,  AE, "Alter table requires cluster nodes to have exact same version" },
-  { 823,  AE, "Too much attrinfo from application in tuple manager" },
-  { 831,  AE, "Too many nullable/bitfields in table definition" },
-  { 876,  AE, "876" },
-  { 877,  AE, "877" },
-  { 878,  AE, "878" },
-  { 879,  AE, "879" },
-  { 880,  AE, "Tried to read too much - too many getValue calls" },
-  { 884,  AE, "Stack overflow in interpreter" },
-  { 885,  AE, "Stack underflow in interpreter" },
-  { 886,  AE, "More than 65535 instructions executed in interpreter" },
-  { 897,  AE, "Update attempt of primary key via ndbcluster internal api (if this occurs via the MySQL server it is a bug, please report)" },
-  { 4256, AE, "Must call Ndb::init() before this function" },
-  { 4257, AE, "Tried to read too much - too many getValue calls" },
-  
+  { 763,  DMEC, AE, "Alter table requires cluster nodes to have exact same version" },
+  { 823,  DMEC, AE, "Too much attrinfo from application in tuple manager" },
+  { 831,  DMEC, AE, "Too many nullable/bitfields in table definition" },
+  { 876,  DMEC, AE, "876" },
+  { 877,  DMEC, AE, "877" },
+  { 878,  DMEC, AE, "878" },
+  { 879,  DMEC, AE, "879" },
+  { 880,  DMEC, AE, "Tried to read too much - too many getValue calls" },
+  { 884,  DMEC, AE, "Stack overflow in interpreter" },
+  { 885,  DMEC, AE, "Stack underflow in interpreter" },
+  { 886,  DMEC, AE, "More than 65535 instructions executed in interpreter" },
+  { 897,  DMEC, AE, "Update attempt of primary key via ndbcluster internal api (if this occurs via the MySQL server it is a bug, please report)" },
+  { 4256, DMEC, AE, "Must call Ndb::init() before this function" },
+  { 4257, DMEC, AE, "Tried to read too much - too many getValue calls" },
+
   /** 
    * Scan application errors
    */
-  { 242,  AE, "Zero concurrency in scan"},
-  { 244,  AE, "Too high concurrency in scan"},
-  { 269,  AE, "No condition and attributes to read in scan"},
-  { 4600, AE, "Transaction is already started"},
-  { 4601, AE, "Transaction is not started"},
-  { 4602, AE, "You must call getNdbOperation before executeScan" },
-  { 4603, AE, "There can only be ONE operation in a scan transaction" },
-  { 4604, AE, "takeOverScanOp, to take over a scanned row one must explicitly request keyinfo in readTuples call" },
-  { 4605, AE, "You may only call openScanRead or openScanExclusive once for each operation"},
-  { 4607, AE, "There may only be one operation in a scan transaction"},
-  { 4608, AE, "You can not takeOverScan unless you have used openScanExclusive"},
-  { 4609, AE, "You must call nextScanResult before trying to takeOverScan"},
-  { 4232, AE, "Parallelism can only be between 1 and 240" },
+  { 242,  DMEC, AE, "Zero concurrency in scan"},
+  { 244,  DMEC, AE, "Too high concurrency in scan"},
+  { 269,  DMEC, AE, "No condition and attributes to read in scan"},
+  { 4600, DMEC, AE, "Transaction is already started"},
+  { 4601, DMEC, AE, "Transaction is not started"},
+  { 4602, DMEC, AE, "You must call getNdbOperation before executeScan" },
+  { 4603, DMEC, AE, "There can only be ONE operation in a scan transaction" },
+  { 4604, DMEC, AE, "takeOverScanOp, to take over a scanned row one must explicitly request keyinfo on readTuples call" },
+  { 4605, DMEC, AE, "You may only call openScanRead or openScanExclusive once for each operation"},
+  { 4607, DMEC, AE, "There may only be one operation in a scan transaction"},
+  { 4608, DMEC, AE, "You can not takeOverScan unless you have used openScanExclusive"},
+  { 4609, DMEC, AE, "You must call nextScanResult before trying to takeOverScan"},
+  { 4232, DMEC, AE, "Parallelism can only be between 1 and 240" },
 
   /** 
    * Event schema errors
    */
 
-  { 4713,  SE, "Column defined in event does not exist in table"},
+  { 4713,  DMEC, SE, "Column defined in event does not exist in table"},
   
   /** 
    * Event application errors
    */
 
-  { 4707,  AE, "Too many event have been defined"},
-  { 4708,  AE, "Event name is too long"},
-  { 4709,  AE, "Can't accept more subscribers"},
-  {  746,  OE, "Event name already exists"},
-  { 4710,  AE, "Event not found"},
-  { 4711,  AE, "Creation of event failed"},
-  { 4712,  AE, "Stopped event operation does not exist. Already stopped?"},
+  { 4707,  DMEC, AE, "Too many event have been defined"},
+  { 4708,  DMEC, AE, "Event name is too long"},
+  { 4709,  DMEC, AE, "Can't accept more subscribers"},
+  {  746,  DMEC, OE, "Event name already exists"},
+  {  747,  DMEC, IS, "Out of event records"},
+  {  748,  DMEC, TR, "Busy during read of event table"},
+  { 4710,  DMEC, AE, "Event not found"},
+  { 4711,  DMEC, AE, "Creation of event failed"},
+  { 4712,  DMEC, AE, "Stopped event operation does not exist. Already stopped?"},
 
   /** 
    * Event internal errors
    */
 
-  { 4731,  IE, "Event not found"},
+  { 4731,  DMEC, IE, "Event not found"},
 
   /**
    * SchemaError
    */
-  { 701,  SE, "System busy with other schema operation" },
-  { 711,  SE, "System busy with node restart, schema operations not allowed" },
-  { 703,  SE, "Invalid table format" },
-  { 704,  SE, "Attribute name too long" },
-  { 705,  SE, "Table name too long" },
-  { 707,  SE, "No more table metadata records (increase MaxNoOfTables)" },  
-  { 708,  SE, "No more attribute metadata records (increase MaxNoOfAttributes)" },
-  { 709,  SE, "No such table existed" },
-  { 721,  OE, "Table or index with given name already exists" },
-  { 723,  SE, "No such table existed" },
-  { 736,  SE, "Unsupported array size" },
-  { 737,  SE, "Attribute array size too big" },
-  { 738,  SE, "Record too big" },
-  { 739,  SE, "Unsupported primary key length" },
-  { 740,  SE, "Nullable primary key not supported" },
-  { 741,  SE, "Unsupported alter table" },
-  { 743,  SE, "Unsupported character set in table or index" },
-  { 744,  SE, "Character string is invalid for given character set" },
-  { 745,  SE, "Distribution key not supported for char attribute (use binary attribute)" },
-  { 761,  SE, "Unable to drop table as backup is in progress" },
-  { 762,  SE, "Unable to alter table as backup is in progress" },
-  { 241,  SE, "Invalid schema object version" },
-  { 283,  SE, "Table is being dropped" },
-  { 284,  SE, "Table not defined in transaction coordinator" },
-  { 285,  SE, "Unknown table error in transaction coordinator" },
-  { 881,  SE, "Unable to create table, out of data pages (increase DataMemory) " },
-  { 906,  SE, "Unsupported attribute type in index" },
-  { 907,  SE, "Unsupported character set in table or index" },
-  { 908,  IS, "Invalid ordered index tree node size" },
-  { 1225, SE, "Table not defined in local query handler" },
-  { 1226, SE, "Table is being dropped" },
-  { 1228, SE, "Cannot use drop table for drop index" },
-  { 1229, SE, "Too long frm data supplied" },
-  { 1231, SE, "Invalid table or index to scan" },
-  { 1232, SE, "Invalid table or index to scan" },
-
+  { 311,  DMEC, AE, "Undefined partition used in setPartitionId" },
+  { 703,  DMEC, SE, "Invalid table format" },
+  { 704,  DMEC, SE, "Attribute name too long" },
+  { 705,  DMEC, SE, "Table name too long" },
+  { 707,  DMEC, SE, "No more table metadata records (increase MaxNoOfTables)" },  
+  { 708,  DMEC, SE, "No more attribute metadata records (increase MaxNoOfAttributes)" },
+  { 709,  HA_ERR_NO_SUCH_TABLE, SE, "No such table existed" },
+  { 710,  DMEC, SE, "Internal: Get by table name not supported, use table id." },
+  { 721,  HA_ERR_TABLE_EXIST,   OE, "Table or index with given name already exists" },
+  { 723,  HA_ERR_NO_SUCH_TABLE, SE, "No such table existed" },
+  { 736,  DMEC, SE, "Unsupported array size" },
+  { 737,  HA_WRONG_CREATE_OPTION, SE, "Attribute array size too big" },
+  { 738,  HA_WRONG_CREATE_OPTION, SE, "Record too big" },
+  { 739,  HA_WRONG_CREATE_OPTION, SE, "Unsupported primary key length" },
+  { 740,  HA_WRONG_CREATE_OPTION, SE, "Nullable primary key not supported" },
+  { 741,  DMEC, SE, "Unsupported alter table" },
+  { 743,  HA_WRONG_CREATE_OPTION, SE, "Unsupported character set in table or index" },
+  { 744,  DMEC, SE, "Character string is invalid for given character set" },
+  { 745,  HA_WRONG_CREATE_OPTION, SE, "Distribution key not supported for char attribute (use binary attribute)" },
+  { 771,  HA_WRONG_CREATE_OPTION, AE, "Given NODEGROUP doesn't exist in this cluster" },
+  { 772,  HA_WRONG_CREATE_OPTION, IE, "Given fragmentType doesn't exist" },
+  { 749,  HA_WRONG_CREATE_OPTION, IE, "Primary Table in wrong state" },
+  { 763,  HA_WRONG_CREATE_OPTION, SE, "Invalid undo buffer size" },
+  { 764,  HA_WRONG_CREATE_OPTION, SE, "Invalid extent size" },
+  { 765,  DMEC, SE, "Out of filegroup records" },
+  { 750,  IE, SE, "Invalid file type" },
+  { 751,  DMEC, SE, "Out of file records" },
+  { 752,  DMEC, SE, "Invalid file format" },
+  { 753,  IE, SE, "Invalid filegroup for file" },
+  { 754,  IE, SE, "Invalid filegroup version when creating file" },
+  { 755,  HA_WRONG_CREATE_OPTION, SE, "Invalid tablespace" },
+  { 756,  DMEC, SE, "Index on disk column is not supported" },
+  { 757,  DMEC, SE, "Varsize bitfield not supported" },
+  { 758,  DMEC, SE, "Tablespace has changed" },
+  { 759,  DMEC, SE, "Invalid tablespace version " },
+  { 760,  DMEC, SE, "File already exists", },
+  { 761,  DMEC, SE, "Unable to drop table as backup is in progress" },
+  { 762,  DMEC, SE, "Unable to alter table as backup is in progress" },
+  { 766,  DMEC, SE, "Cant drop file, no such file" },
+  { 767,  DMEC, SE, "Cant drop filegroup, no such filegroup" },
+  { 768,  DMEC, SE, "Cant drop filegroup, filegroup is used" },
+  { 769,  DMEC, SE, "Drop undofile not supported, drop logfile group instead" },
+  { 770,  DMEC, SE, "Cant drop file, file is used" },
+  { 774,  DMEC, SE, "Invalid schema object for drop" },
+  { 241,  HA_ERR_TABLE_DEF_CHANGED, SE, "Invalid schema object version" },
+  { 283,  HA_ERR_NO_SUCH_TABLE, SE, "Table is being dropped" },
+  { 284,  HA_ERR_TABLE_DEF_CHANGED, SE, "Table not defined in transaction coordinator" },
+  { 285,  DMEC, SE, "Unknown table error in transaction coordinator" },
+  { 881,  DMEC, SE, "Unable to create table, out of data pages (increase DataMemory) " },
+  { 906,  DMEC, SE, "Unsupported attribute type in index" },
+  { 907,  DMEC, SE, "Unsupported character set in table or index" },
+  { 908,  DMEC, IS, "Invalid ordered index tree node size" },
+  { 1225, DMEC, SE, "Table not defined in local query handler" },
+  { 1226, DMEC, SE, "Table is being dropped" },
+  { 1228, DMEC, SE, "Cannot use drop table for drop index" },
+  { 1229, DMEC, SE, "Too long frm data supplied" },
+  { 1231, DMEC, SE, "Invalid table or index to scan" },
+  { 1232, DMEC, SE, "Invalid table or index to scan" },
+
+  { 1502, DMEC, IE, "Filegroup already exists" },
+  { 1503, DMEC, SE, "Out of filegroup records" },
+  { 1504, DMEC, SE, "Out of logbuffer memory" },
+  { 1505, DMEC, IE, "Invalid filegroup" },
+  { 1506, DMEC, IE, "Invalid filegroup version" },
+  { 1507, DMEC, IE, "File no already inuse" },
+  { 1508, DMEC, SE, "Out of file records" },
+  { 1509, DMEC, SE, "File system error, check if path,permissions etc" },
+  { 1510, DMEC, IE, "File meta data error" },
+  { 1511, DMEC, IE, "Out of memory" },
+  { 1512, DMEC, SE, "File read error" },
+  { 1513, DMEC, IE, "Filegroup not online" },
+  { 1514, DMEC, SE, "Currently there is a limit of one logfile group" },
+  
+  { 773,  DMEC, SE, "Out of string memory, please modify StringMemory config parameter" },
+  { 775,  DMEC, SE, "Create file is not supported when Diskless=1" },
+  { 776,  DMEC, AE, "Index created on temporary table must itself be temporary" },
+  { 777,  DMEC, AE, "Cannot create a temporary index on a non-temporary table" },
+  { 778,  DMEC, AE, "A temporary table or index must be specified as not logging" },
+  
   /**
    * FunctionNotImplemented
    */
-  { 4003, NI, "Function not implemented yet" },
+  { 4003, DMEC, NI, "Function not implemented yet" },
 
   /**
    * Backup error codes
    */ 
 
-  { 1300, IE, "Undefined error" },
-  { 1301, IE, "Backup issued to not master (reissue command to master)" },
-  { 1302, IE, "Out of backup record" },
-  { 1303, IS, "Out of resources" },
-  { 1304, IE, "Sequence failure" },
-  { 1305, IE, "Backup definition not implemented" },
-  { 1306, AE, "Backup not supported in diskless mode (change Diskless)" },
-
-  { 1321, UD, "Backup aborted by user request" },
-  { 1322, IE, "Backup already completed" },
-  { 1323, IE, "1323" },
-  { 1324, IE, "Backup log buffer full" },
-  { 1325, IE, "File or scan error" },
-  { 1326, IE, "Backup abortet due to node failure" },
-  { 1327, IE, "1327" },
-  
-  { 1340, IE, "Backup undefined error" },
-  { 1342, AE, "Backup failed to allocate buffers (check configuration)" },
-  { 1343, AE, "Backup failed to setup fs buffers (check configuration)" },
-  { 1344, AE, "Backup failed to allocate tables (check configuration)" },
-  { 1345, AE, "Backup failed to insert file header (check configuration)" },
-  { 1346, AE, "Backup failed to insert table list (check configuration)" },
-  { 1347, AE, "Backup failed to allocate table memory (check configuration)" },
-  { 1348, AE, "Backup failed to allocate file record (check configuration)" },
-  { 1349, AE, "Backup failed to allocate attribute record (check configuration)" },
-  { 1329, AE, "Backup during software upgrade not supported" },
+  { 1300, DMEC, IE, "Undefined error" },
+  { 1301, DMEC, IE, "Backup issued to not master (reissue command to master)" },
+  { 1302, DMEC, IE, "Out of backup record" },
+  { 1303, DMEC, IS, "Out of resources" },
+  { 1304, DMEC, IE, "Sequence failure" },
+  { 1305, DMEC, IE, "Backup definition not implemented" },
+  { 1306, DMEC, AE, "Backup not supported in diskless mode (change Diskless)" },
+
+  { 1321, DMEC, UD, "Backup aborted by user request" },
+  { 1322, DMEC, IE, "Backup already completed" },
+  { 1323, DMEC, IE, "1323" },
+  { 1324, DMEC, IE, "Backup log buffer full" },
+  { 1325, DMEC, IE, "File or scan error" },
+  { 1326, DMEC, IE, "Backup abortet due to node failure" },
+  { 1327, DMEC, IE, "1327" },
+  
+  { 1340, DMEC, IE, "Backup undefined error" },
+  { 1342, DMEC, AE, "Backup failed to allocate buffers (check configuration)" },
+  { 1343, DMEC, AE, "Backup failed to setup fs buffers (check configuration)" },
+  { 1344, DMEC, AE, "Backup failed to allocate tables (check configuration)" },
+  { 1345, DMEC, AE, "Backup failed to insert file header (check configuration)" },
+  { 1346, DMEC, AE, "Backup failed to insert table list (check configuration)" },
+  { 1347, DMEC, AE, "Backup failed to allocate table memory (check configuration)" },
+  { 1348, DMEC, AE, "Backup failed to allocate file record (check configuration)" },
+  { 1349, DMEC, AE, "Backup failed to allocate attribute record (check configuration)" },
+  { 1329, DMEC, AE, "Backup during software upgrade not supported" },
+
+  /**
+   * Node id allocation error codes
+   */ 
+
+  { 1700, DMEC, IE, "Undefined error" },
+  { 1701, DMEC, AE, "Node already reserved" },
+  { 1702, DMEC, AE, "Node already connected" },
+  { 1703, DMEC, AE, "Node failure handling not completed" },
+  { 1704, DMEC, AE, "Node type mismatch" },
   
   /**
    * Still uncategorized
    */
-  { 720,  AE, "Attribute name reused in table definition" },
-  { 4004, AE, "Attribute name or id not found in the table" },
-  
-  { 4100, AE, "Status Error in NDB" },
-  { 4101, AE, "No connections to NDB available and connect failed" },
-  { 4102, AE, "Type in NdbTamper not correct" },
-  { 4103, AE, "No schema connections to NDB available and connect failed" },
-  { 4104, AE, "Ndb Init in wrong state, destroy Ndb object and create a new" },
-  { 4105, AE, "Too many Ndb objects" },
-  { 4106, AE, "All Not NULL attribute have not been defined" },
-  { 4114, AE, "Transaction is already completed" },
-  { 4116, AE, "Operation was not defined correctly, probably missing a key" },
-  { 4117, AE, "Could not start transporter, configuration error"}, 
-  { 4118, AE, "Parameter error in API call" },
-  { 4300, AE, "Tuple Key Type not correct" },
-  { 4301, AE, "Fragment Type not correct" },
-  { 4302, AE, "Minimum Load Factor not correct" },
-  { 4303, AE, "Maximum Load Factor not correct" },
-  { 4304, AE, "Maximum Load Factor smaller than Minimum" },
-  { 4305, AE, "K value must currently be set to 6" },
-  { 4306, AE, "Memory Type not correct" },
-  { 4307, AE, "Invalid table name" },
-  { 4308, AE, "Attribute Size not correct" },
-  { 4309, AE, "Fixed array too large, maximum 64000 bytes" },
-  { 4310, AE, "Attribute Type not correct" },
-  { 4311, AE, "Storage Mode not correct" },
-  { 4312, AE, "Null Attribute Type not correct" },
-  { 4313, AE, "Index only storage for non-key attribute" },
-  { 4314, AE, "Storage Type of attribute not correct" },
-  { 4315, AE, "No more key attributes allowed after defining variable length key attribute" },
-  { 4316, AE, "Key attributes are not allowed to be NULL attributes" },
-  { 4317, AE, "Too many primary keys defined in table" },
-  { 4318, AE, "Invalid attribute name" },
-  { 4319, AE, "createAttribute called at erroneus place" },
-  { 4322, AE, "Attempt to define distribution key when not prepared to" },
-  { 4323, AE, "Distribution Key set on table but not defined on first attribute" },
-  { 4324, AE, "Attempt to define distribution group when not prepared to" },
-  { 4325, AE, "Distribution Group set on table but not defined on first attribute" },
-  { 4326, AE, "Distribution Group with erroneus number of bits" },
-  { 4327, AE, "Distribution Group with 1 byte attribute is not allowed" },
-  { 4328, AE, "Disk memory attributes not yet supported" },
-  { 4329, AE, "Variable stored attributes not yet supported" },
-
-  { 4400, AE, "Status Error in NdbSchemaCon" },
-  { 4401, AE, "Only one schema operation per schema transaction" },
-  { 4402, AE, "No schema operation defined before calling execute" },
-
-  { 4501, AE, "Insert in hash table failed when getting table information from Ndb" },
-  { 4502, AE, "GetValue not allowed in Update operation" },
-  { 4503, AE, "GetValue not allowed in Insert operation" },
-  { 4504, AE, "SetValue not allowed in Read operation" },
-  { 4505, AE, "NULL value not allowed in primary key search" },
-  { 4506, AE, "Missing getValue/setValue when calling execute" },
-  { 4507, AE, "Missing operation request when calling execute" },
-
-  { 4200, AE, "Status Error when defining an operation" },
-  { 4201, AE, "Variable Arrays not yet supported" },
-  { 4202, AE, "Set value on tuple key attribute is not allowed" },
-  { 4203, AE, "Trying to set a NOT NULL attribute to NULL" },
-  { 4204, AE, "Set value and Read/Delete Tuple is incompatible" },
-  { 4205, AE, "No Key attribute used to define tuple" },
-  { 4206, AE, "Not allowed to equal key attribute twice" },
-  { 4207, AE, "Key size is limited to 4092 bytes" },
-  { 4208, AE, "Trying to read a non-stored attribute" },
-  { 4209, AE, "Length parameter in equal/setValue is incorrect" },
-  { 4210, AE, "Ndb sent more info than the length he specified" },
-  { 4211, AE, "Inconsistency in list of NdbRecAttr-objects" },
-  { 4212, AE, "Ndb reports NULL value on Not NULL attribute" },
-  { 4213, AE, "Not all data of an attribute has been received" },
-  { 4214, AE, "Not all attributes have been received" },
-  { 4215, AE, "More data received than reported in TCKEYCONF message" },
-  { 4216, AE, "More than 8052 bytes in setValue cannot be handled" },
-  { 4217, AE, "It is not allowed to increment any other than unsigned ints" },
-  { 4218, AE, "Currently not allowed to increment NULL-able attributes" },
-  { 4219, AE, "Maximum size of interpretative attributes are 64 bits" },
-  { 4220, AE, "Maximum size of interpretative attributes are 64 bits" },
-  { 4221, AE, "Trying to jump to a non-defined label" },
-  { 4222, AE, "Label was not found, internal error" },
-  { 4223, AE, "Not allowed to create jumps to yourself" },
-  { 4224, AE, "Not allowed to jump to a label in a different subroutine" },
-  { 4225, AE, "All primary keys defined, call setValue/getValue"},
-  { 4226, AE, "Bad number when defining a label" },
-  { 4227, AE, "Bad number when defining a subroutine" },
-  { 4228, AE, "Illegal interpreter function in scan definition" },
-  { 4229, AE, "Illegal register in interpreter function definition" },
-  { 4230, AE, "Illegal state when calling getValue, probably not a read" },
-  { 4231, AE, "Illegal state when calling interpreter routine" },
-  { 4233, AE, "Calling execute (synchronous) when already prepared asynchronous transaction exists" },
-  { 4234, AE, "Illegal to call setValue in this state" },
-  { 4235, AE, "No callback from execute" },
-  { 4236, AE, "Trigger name too long" },
-  { 4237, AE, "Too many triggers" },
-  { 4238, AE, "Trigger not found" },
-  { 4239, AE, "Trigger with given name already exists"},
-  { 4240, AE, "Unsupported trigger type"},
-  { 4241, AE, "Index name too long" },
-  { 4242, AE, "Too many indexes" },
-  { 4243, AE, "Index not found" },
-  { 4244, OE, "Index or table with given name already exists" },
-  { 4247, AE, "Illegal index/trigger create/drop/alter request" },
-  { 4248, AE, "Trigger/index name invalid" },
-  { 4249, AE, "Invalid table" },
-  { 4250, AE, "Invalid index type or index logging option" },
-  { 4251, AE, "Cannot create unique index, duplicate keys found" },
-  { 4252, AE, "Failed to allocate space for index" },
-  { 4253, AE, "Failed to create index table" },
-  { 4254, AE, "Table not an index table" },
-  { 4255, AE, "Hash index attributes must be specified in same order as table attributes" },
-  { 4258, AE, "Cannot create unique index, duplicate attributes found in definition" },
-  { 4259, AE, "Invalid set of range scan bounds" },
-  { 4260, UD, "NdbScanFilter: Operator is not defined in NdbScanFilter::Group"},
-  { 4261, UD, "NdbScanFilter: Column is NULL"},
-  { 4262, UD, "NdbScanFilter: Condition is out of bounds"},
-  { 4263, IE, "Invalid blob attributes or invalid blob parts table" },
-  { 4264, AE, "Invalid usage of blob attribute" },
-  { 4265, AE, "The blob method is not valid in current blob state" },
-  { 4266, AE, "Invalid blob seek position" },
-  { 4267, IE, "Corrupted blob value" },
-  { 4268, IE, "Error in blob head update forced rollback of transaction" },
-  { 4269, IE, "No connection to ndb management server" },
-  { 4270, IE, "Unknown blob error" },
-  { 4335, AE, "Only one autoincrement column allowed per table. Having a table without primary key uses an autoincremented hidden key, i.e. a table without a primary key can not have an autoincremented column" },
-  { 4271, AE, "Invalid index object, not retrieved via getIndex()" },
-  { 4275, AE, "The blob method is incompatible with operation type or lock mode" }
+  { 720,  DMEC, AE, "Attribute name reused in table definition" },
+  { 1405, DMEC, NR, "Subscriber manager busy with node recovery" },
+  { 1407, DMEC, SE, "Subscription not found in subscriber manager" },
+  { 1411, DMEC, TR, "Subscriber manager busy with adding/removing a subscriber" },
+  { 1412, DMEC, IS, "Can't accept more subscribers, out of space in pool" },
+  { 1413, DMEC, TR, "Subscriber manager busy with adding the subscription" },
+  { 1414, DMEC, TR, "Subscriber manager has subscribers on this subscription" },
+  { 1415, DMEC, SE, "Subscription not unique in subscriber manager" },
+  { 1416, DMEC, IS, "Can't accept more subscriptions, out of space in pool" },
+  { 1417, DMEC, SE, "Table in suscription not defined, probably dropped" },
+  { 1418, DMEC, SE, "Subscription dropped, no new subscribers allowed" },
+  { 1419, DMEC, SE, "Subscription already dropped" },
+
+  { 1420, DMEC, TR, "Subscriber manager busy with adding/removing a table" },
+
+  { 4004, DMEC, AE, "Attribute name or id not found in the table" },
+  
+  { 4100, DMEC, AE, "Status Error in NDB" },
+  { 4101, DMEC, AE, "No connections to NDB available and connect failed" },
+  { 4102, DMEC, AE, "Type in NdbTamper not correct" },
+  { 4103, DMEC, AE, "No schema connections to NDB available and connect failed" },
+  { 4104, DMEC, AE, "Ndb Init in wrong state, destroy Ndb object and create a new" },
+  { 4105, DMEC, AE, "Too many Ndb objects" },
+  { 4106, DMEC, AE, "All Not NULL attribute have not been defined" },
+  { 4114, DMEC, AE, "Transaction is already completed" },
+  { 4116, DMEC, AE, "Operation was not defined correctly, probably missing a key" },
+  { 4117, DMEC, AE, "Could not start transporter, configuration error"}, 
+  { 4118, DMEC, AE, "Parameter error in API call" },
+  { 4300, DMEC, AE, "Tuple Key Type not correct" },
+  { 4301, DMEC, AE, "Fragment Type not correct" },
+  { 4302, DMEC, AE, "Minimum Load Factor not correct" },
+  { 4303, DMEC, AE, "Maximum Load Factor not correct" },
+  { 4304, DMEC, AE, "Maximum Load Factor smaller than Minimum" },
+  { 4305, DMEC, AE, "K value must currently be set to 6" },
+  { 4306, DMEC, AE, "Memory Type not correct" },
+  { 4307, DMEC, AE, "Invalid table name" },
+  { 4308, DMEC, AE, "Attribute Size not correct" },
+  { 4309, DMEC, AE, "Fixed array too large, maximum 64000 bytes" },
+  { 4310, DMEC, AE, "Attribute Type not correct" },
+  { 4311, DMEC, AE, "Storage Mode not correct" },
+  { 4312, DMEC, AE, "Null Attribute Type not correct" },
+  { 4313, DMEC, AE, "Index only storage for non-key attribute" },
+  { 4314, DMEC, AE, "Storage Type of attribute not correct" },
+  { 4315, DMEC, AE, "No more key attributes allowed after defining variable length key attribute" },
+  { 4316, DMEC, AE, "Key attributes are not allowed to be NULL attributes" },
+  { 4317, DMEC, AE, "Too many primary keys defined in table" },
+  { 4318, DMEC, AE, "Invalid attribute name or number" },
+  { 4319, DMEC, AE, "createAttribute called at erroneus place" },
+  { 4322, DMEC, AE, "Attempt to define distribution key when not prepared to" },
+  { 4323, DMEC, AE, "Distribution Key set on table but not defined on first attribute" },
+  { 4324, DMEC, AE, "Attempt to define distribution group when not prepared to" },
+  { 4325, DMEC, AE, "Distribution Group set on table but not defined on first attribute" },
+  { 4326, DMEC, AE, "Distribution Group with erroneus number of bits" },
+  { 4327, DMEC, AE, "Distribution Group with 1 byte attribute is not allowed" },
+  { 4328, DMEC, AE, "Disk memory attributes not yet supported" },
+  { 4329, DMEC, AE, "Variable stored attributes not yet supported" },
+
+  { 4400, DMEC, AE, "Status Error in NdbSchemaCon" },
+  { 4401, DMEC, AE, "Only one schema operation per schema transaction" },
+  { 4402, DMEC, AE, "No schema operation defined before calling execute" },
+
+  { 4501, DMEC, AE, "Insert in hash table failed when getting table information from Ndb" },
+  { 4502, DMEC, AE, "GetValue not allowed in Update operation" },
+  { 4503, DMEC, AE, "GetValue not allowed in Insert operation" },
+  { 4504, DMEC, AE, "SetValue not allowed in Read operation" },
+  { 4505, DMEC, AE, "NULL value not allowed in primary key search" },
+  { 4506, DMEC, AE, "Missing getValue/setValue when calling execute" },
+  { 4507, DMEC, AE, "Missing operation request when calling execute" },
+
+  { 4200, DMEC, AE, "Status Error when defining an operation" },
+  { 4201, DMEC, AE, "Variable Arrays not yet supported" },
+  { 4202, DMEC, AE, "Set value on tuple key attribute is not allowed" },
+  { 4203, DMEC, AE, "Trying to set a NOT NULL attribute to NULL" },
+  { 4204, DMEC, AE, "Set value and Read/Delete Tuple is incompatible" },
+  { 4205, DMEC, AE, "No Key attribute used to define tuple" },
+  { 4206, DMEC, AE, "Not allowed to equal key attribute twice" },
+  { 4207, DMEC, AE, "Key size is limited to 4092 bytes" },
+  { 4208, DMEC, AE, "Trying to read a non-stored attribute" },
+  { 4209, DMEC, AE, "Length parameter in equal/setValue is incorrect" },
+  { 4210, DMEC, AE, "Ndb sent more info than the length he specified" },
+  { 4211, DMEC, AE, "Inconsistency in list of NdbRecAttr-objects" },
+  { 4212, DMEC, AE, "Ndb reports NULL value on Not NULL attribute" },
+  { 4213, DMEC, AE, "Not all data of an attribute has been received" },
+  { 4214, DMEC, AE, "Not all attributes have been received" },
+  { 4215, DMEC, AE, "More data received than reported in TCKEYCONF message" },
+  { 4216, DMEC, AE, "More than 8052 bytes in setValue cannot be handled" },
+  { 4217, DMEC, AE, "It is not allowed to increment any other than unsigned ints" },
+  { 4218, DMEC, AE, "Currently not allowed to increment NULL-able attributes" },
+  { 4219, DMEC, AE, "Maximum size of interpretative attributes are 64 bits" },
+  { 4220, DMEC, AE, "Maximum size of interpretative attributes are 64 bits" },
+  { 4221, DMEC, AE, "Trying to jump to a non-defined label" },
+  { 4222, DMEC, AE, "Label was not found, internal error" },
+  { 4223, DMEC, AE, "Not allowed to create jumps to yourself" },
+  { 4224, DMEC, AE, "Not allowed to jump to a label in a different subroutine" },
+  { 4225, DMEC, AE, "All primary keys defined, call setValue/getValue"},
+  { 4226, DMEC, AE, "Bad number when defining a label" },
+  { 4227, DMEC, AE, "Bad number when defining a subroutine" },
+  { 4228, DMEC, AE, "Illegal interpreter function in scan definition" },
+  { 4229, DMEC, AE, "Illegal register in interpreter function definition" },
+  { 4230, DMEC, AE, "Illegal state when calling getValue, probably not a read" },
+  { 4231, DMEC, AE, "Illegal state when calling interpreter routine" },
+  { 4233, DMEC, AE, "Calling execute (synchronous) when already prepared asynchronous transaction exists" },
+  { 4234, DMEC, AE, "Illegal to call setValue in this state" },
+  { 4235, DMEC, AE, "No callback from execute" },
+  { 4236, DMEC, AE, "Trigger name too long" },
+  { 4237, DMEC, AE, "Too many triggers" },
+  { 4238, DMEC, AE, "Trigger not found" },
+  { 4239, DMEC, AE, "Trigger with given name already exists"},
+  { 4240, DMEC, AE, "Unsupported trigger type"},
+  { 4241, DMEC, AE, "Index name too long" },
+  { 4242, DMEC, AE, "Too many indexes" },
+  { 4243, DMEC, AE, "Index not found" },
+  { 4244, HA_ERR_TABLE_EXIST, OE, "Index or table with given name already exists" },
+  { 4247, DMEC, AE, "Illegal index/trigger create/drop/alter request" },
+  { 4248, DMEC, AE, "Trigger/index name invalid" },
+  { 4249, DMEC, AE, "Invalid table" },
+  { 4250, DMEC, AE, "Invalid index type or index logging option" },
+  { 4251, HA_ERR_FOUND_DUPP_UNIQUE, AE, "Cannot create unique index, duplicate keys found" },
+  { 4252, DMEC, AE, "Failed to allocate space for index" },
+  { 4253, DMEC, AE, "Failed to create index table" },
+  { 4254, DMEC, AE, "Table not an index table" },
+  { 4255, DMEC, AE, "Hash index attributes must be specified in same order as table attributes" },
+  { 4258, DMEC, AE, "Cannot create unique index, duplicate attributes found in definition" },
+  { 4259, DMEC, AE, "Invalid set of range scan bounds" },
+  { 4260, DMEC, UD, "NdbScanFilter: Operator is not defined in NdbScanFilter::Group"},
+  { 4261, DMEC, UD, "NdbScanFilter: Column is NULL"},
+  { 4262, DMEC, UD, "NdbScanFilter: Condition is out of bounds"},
+  { 4263, DMEC, IE, "Invalid blob attributes or invalid blob parts table" },
+  { 4264, DMEC, AE, "Invalid usage of blob attribute" },
+  { 4265, DMEC, AE, "The method is not valid in current blob state" },
+  { 4266, DMEC, AE, "Invalid blob seek position" },
+  { 4267, DMEC, IE, "Corrupted blob value" },
+  { 4268, DMEC, IE, "Error in blob head update forced rollback of transaction" },
+  { 4269, DMEC, IE, "No connection to ndb management server" },
+  { 4270, DMEC, IE, "Unknown blob error" },
+  { 4335, DMEC, AE, "Only one autoincrement column allowed per table. Having a table without primary key uses an autoincremented hidden key, i.e. a table without a primary key can not have an autoincremented column" },
+  { 4271, DMEC, AE, "Invalid index object, not retrieved via getIndex()" },
+  { 4272, DMEC, AE, "Table definition has undefined column" },
+  { 4273, DMEC, IE, "No blob table in dict cache" },
+  { 4274, DMEC, IE, "Corrupted main table PK in blob operation" },
+  { 4275, DMEC, AE, "The blob method is incompatible with operation type or lock mode" },
 };
 
 static
@@ -574,6 +665,7 @@
   { ST_T, OL, "Overload error"},
   { ST_T, TO, "Timeout expired"},
   { ST_T, NS, "Node shutdown"},
+  { ST_T, IT, "Internal temporary"},
   
   { ST_U , UR, "Unknown result error"},
   { ST_U , UE, "Unknown error code"},
@@ -614,6 +706,7 @@
     if(ErrorCodes[i].code == error->code){
       error->classification = ErrorCodes[i].classification;
       error->message        = ErrorCodes[i].message;
+      error->mysql_code     = ErrorCodes[i].mysql_code;
       found = 1;
       break;
     }
@@ -622,6 +715,7 @@
   if(!found){
     error->classification = UE;
     error->message        = "Unknown error code";
+    error->mysql_code     = DMEC;
   }
 
   found = 0;
@@ -639,6 +733,7 @@
   error->details = 0;
 }
 
+#if CHECK_ERRORCODES
 int
 checkErrorCodes(){
   int i, j;
@@ -655,7 +750,6 @@
 
 /*static const int a = checkErrorCodes();*/
 
-#if CHECK_ERRORCODES
 int main(void){
   checkErrorCodes();
   return 0;
Thread
bk commit into 5.1 tree (lzhou:1.2317)lzhou31 Oct