List: Commits  « Previous Message | Next Message »
From: jonas    Date: December 14 2005 10:06am
Subject:bk commit into 5.1 tree (jonas:1.1962)
View as plain text  
Below is the list of changes that have just been committed into a local
5.1 repository of jonas. When jonas does a push these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet
  1.1962 05/12/14 11:06:40 jonas@stripped +7 -0
  Merge perch.ndb.mysql.com:/home/jonas/src/mysql-5.0
  into  perch.ndb.mysql.com:/home/jonas/src/mysql-5.1-new

  storage/ndb/src/ndbapi/ndberror.c
    1.45 05/12/14 11:06:38 jonas@stripped +1 -2
    merge

  storage/ndb/src/kernel/vm/SimulatedBlock.cpp
    1.26 05/12/14 11:06:38 jonas@stripped +0 -8
    merge

  storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
    1.96 05/12/14 11:02:07 jonas@stripped +0 -0
    Auto merged

  storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
    1.34 05/12/14 11:02:07 jonas@stripped +0 -0
    Auto merged

  storage/ndb/src/ndbapi/ndberror.c
    1.30.13.2 05/12/14 11:02:07 jonas@stripped +0 -0
    Merge rename: ndb/src/ndbapi/ndberror.c -> storage/ndb/src/ndbapi/ndberror.c

  storage/ndb/src/kernel/vm/SimulatedBlock.cpp
    1.17.5.2 05/12/14 11:02:07 jonas@stripped +0 -0
    Merge rename: ndb/src/kernel/vm/SimulatedBlock.cpp -> storage/ndb/src/kernel/vm/SimulatedBlock.cpp

  storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
    1.73.14.2 05/12/14 11:02:07 jonas@stripped +0 -0
    Merge rename: ndb/src/kernel/blocks/dbtc/DbtcMain.cpp -> storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp

  storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
    1.26.5.2 05/12/14 11:02:07 jonas@stripped +0 -0
    Merge rename: ndb/src/kernel/blocks/dbtc/Dbtc.hpp -> storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp

  sql/ha_ndbcluster.cc
    1.221 05/12/14 11:02:07 jonas@stripped +0 -0
    Auto merged

  mysql-test/t/ndb_basic.test
    1.33 05/12/14 11:02:07 jonas@stripped +0 -0
    Auto merged

  mysql-test/r/ndb_basic.result
    1.32 05/12/14 11:02:07 jonas@stripped +0 -0
    Auto merged

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	jonas
# Host:	perch.ndb.mysql.com
# Root:	/home/jonas/src/mysql-5.1-new/RESYNC

--- 1.31/mysql-test/r/ndb_basic.result	2005-11-28 20:07:09 +01:00
+++ 1.32/mysql-test/r/ndb_basic.result	2005-12-14 11:02:07 +01:00
@@ -677,3 +677,19 @@
 a
 2
 drop table atablewithareallylongandirritatingname;
+create table t1 (f1 varchar(50), f2 text,f3 int, primary key(f1)) engine=NDB;
+insert into t1 (f1,f2,f3)VALUES("111111","aaaaaa",1);
+insert into t1 (f1,f2,f3)VALUES("222222","bbbbbb",2);
+select * from t1 order by f1;
+f1	f2	f3
+111111	aaaaaa	1
+222222	bbbbbb	2
+select * from t1 order by f2;
+f1	f2	f3
+111111	aaaaaa	1
+222222	bbbbbb	2
+select * from t1 order by f3;
+f1	f2	f3
+111111	aaaaaa	1
+222222	bbbbbb	2
+drop table t1;

--- 1.32/mysql-test/t/ndb_basic.test	2005-11-25 11:57:07 +01:00
+++ 1.33/mysql-test/t/ndb_basic.test	2005-12-14 11:02:07 +01:00
@@ -623,3 +623,14 @@
 insert into atablewithareallylongandirritatingname values (2);
 select * from atablewithareallylongandirritatingname;
 drop table atablewithareallylongandirritatingname;
+
+#
+# Bug#15682
+#
+create table t1 (f1 varchar(50), f2 text,f3 int, primary key(f1)) engine=NDB;
+insert into t1 (f1,f2,f3)VALUES("111111","aaaaaa",1);
+insert into t1 (f1,f2,f3)VALUES("222222","bbbbbb",2);
+select * from t1 order by f1;
+select * from t1 order by f2;
+select * from t1 order by f3;
+drop table t1;

--- 1.26.5.1/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2005-12-14 10:57:06 +01:00
+++ 1.34/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2005-12-14 11:02:07 +01:00
@@ -397,6 +397,13 @@
     Uint32 fireingOperation;
 
     /**
+     * The fragment id of the firing operation. This will be appended
+     * to the Primary Key such that the record can be found even in the
+     * case of user defined partitioning.
+     */
+    Uint32 fragId;
+
+    /**
      * Used for scrapping in case of node failure
      */
     Uint32 nodeId;
@@ -874,7 +881,7 @@
     
     Uint8  distributionKeyIndicator;
     Uint8  m_special_hash; // collation or distribution key
-    Uint8  unused2;
+    Uint8  m_no_disk_flag;
     Uint8  lenAiInTckeyreq;  /* LENGTH OF ATTRIBUTE INFORMATION IN TCKEYREQ */
 
     Uint8  fragmentDistributionKey;  /* DIH generation no */
@@ -963,7 +970,8 @@
     Uint8 noOfKeyAttr;
     Uint8 hasCharAttr;
     Uint8 noOfDistrKeys;
-    
+    Uint8 hasVarKeys;
+
     bool checkTable(Uint32 schemaVersion) const {
       return enabled && !dropping && 
 	(table_version_major(schemaVersion) == table_version_major(currentSchemaVersion));

--- 1.73.14.1/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2005-12-14 10:57:06 +01:00
+++ 1.96/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2005-12-14 11:02:07 +01:00
@@ -340,7 +340,7 @@
   tabptr.p->noOfKeyAttr = desc->noOfKeyAttr;
   tabptr.p->hasCharAttr = desc->hasCharAttr;
   tabptr.p->noOfDistrKeys = desc->noOfDistrKeys;
-  
+  tabptr.p->hasVarKeys = desc->noOfVarKeys > 0;
   signal->theData[0] = tabptr.i;
   signal->theData[1] = retPtr;
   sendSignal(retRef, GSN_TC_SCHVERCONF, signal, 2, JBB);
@@ -2302,14 +2302,15 @@
 {
   Uint64 Tmp[MAX_KEY_SIZE_IN_WORDS * MAX_XFRM_MULTIPLY];
   const TableRecord* tabPtrP = &tableRecord[tabPtrI];
+  const bool hasVarKeys = tabPtrP->hasVarKeys;
   const bool hasCharAttr = tabPtrP->hasCharAttr;
-  const bool hasDistKeys = tabPtrP->noOfDistrKeys > 0;
+  const bool compute_distkey = distr && (tabPtrP->noOfDistrKeys > 0);
   
   Uint32 *dst = (Uint32*)Tmp;
   Uint32 dstPos = 0;
   Uint32 keyPartLen[MAX_ATTRIBUTES_IN_INDEX];
   Uint32 * keyPartLenPtr;
-  if(hasCharAttr)
+  if(hasCharAttr || (compute_distkey && hasVarKeys))
   {
     keyPartLenPtr = keyPartLen;
     dstPos = xfrm_key(tabPtrI, src, dst, sizeof(Tmp) >> 2, keyPartLenPtr);
@@ -2327,7 +2328,7 @@
   
   md5_hash(dstHash, (Uint64*)dst, dstPos);
   
-  if(distr && hasDistKeys)
+  if(compute_distkey)
   {
     jam();
     
@@ -2735,12 +2736,14 @@
   Uint8 TDirtyFlag          = tcKeyReq->getDirtyFlag(Treqinfo);
   Uint8 TInterpretedFlag    = tcKeyReq->getInterpretedFlag(Treqinfo);
   Uint8 TDistrKeyFlag       = tcKeyReq->getDistributionKeyFlag(Treqinfo);
+  Uint8 TNoDiskFlag         = TcKeyReq::getNoDiskFlag(Treqinfo);
   Uint8 TexecuteFlag        = TexecFlag;
   
   regCachePtr->opSimple = TSimpleFlag;
   regCachePtr->opExec   = TInterpretedFlag;
   regTcPtr->dirtyOp  = TDirtyFlag;
   regCachePtr->distributionKeyIndicator = TDistrKeyFlag;
+  regCachePtr->m_no_disk_flag = TNoDiskFlag;
 
   //-------------------------------------------------------------
   // The next step is to read the upto three conditional words.
@@ -3213,6 +3216,8 @@
   LqhKeyReq::setInterpretedFlag(Tdata10, regCachePtr->opExec);
   LqhKeyReq::setSimpleFlag(Tdata10, regCachePtr->opSimple);
   LqhKeyReq::setOperation(Tdata10, regTcPtr->operation);
+  LqhKeyReq::setNoDiskFlag(Tdata10, regCachePtr->m_no_disk_flag);
+
   /* ----------------------------------------------------------------------- 
    * Sequential Number of first LQH = 0, bit 22-23                           
    * IF ATTRIBUTE INFORMATION IS SENT IN TCKEYREQ,
@@ -3925,7 +3930,7 @@
   const UintR TopWords = (UintR)regApiPtr->tckeyrec;
   localHostptr.i = refToNode(regApiPtr->ndbapiBlockref);
   const Uint32 type = getNodeInfo(localHostptr.i).m_type;
-  const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
+  const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::MGM);
   const BlockNumber TblockNum = refToBlock(regApiPtr->ndbapiBlockref);
   const Uint32 Tmarker = (regApiPtr->commitAckMarker == RNIL) ? 0 : 1;
   ptrAss(localHostptr, hostRecord);
@@ -4607,7 +4612,8 @@
     commitConf->transId1 = regApiPtr->transid[0];
     commitConf->transId2 = regApiPtr->transid[1];
     commitConf->gci = regApiPtr->globalcheckpointid;
-    sendSignal(regApiPtr->ndbapiBlockref, GSN_TC_COMMITCONF, signal, 
+
+    sendSignal(regApiPtr->ndbapiBlockref, GSN_TC_COMMITCONF, signal,
 	       TcCommitConf::SignalLength, JBB);
   } else if (regApiPtr->returnsignal == RS_NO_RETURN) {
     jam();
@@ -5145,6 +5151,19 @@
 	return;
       }
       
+      /* Only ref in certain situations */
+      {
+	const Uint32 opType = regTcPtr->operation;
+	if (   (opType == ZDELETE && errCode != ZNOT_FOUND)
+	    || (opType == ZINSERT && errCode != ZALREADYEXIST)
+	    || (opType == ZUPDATE && errCode != ZNOT_FOUND)
+	    || (opType == ZWRITE  && errCode != 839 && errCode != 840))
+	{
+	  TCKEY_abort(signal, 49);
+	  return;
+	}
+      }
+
       /* *************** */
       /*    TCKEYREF   < */
       /* *************** */
@@ -8763,6 +8782,7 @@
   ScanFragReq::setDescendingFlag(tmp, ScanTabReq::getDescendingFlag(ri));
   ScanFragReq::setTupScanFlag(tmp, ScanTabReq::getTupScanFlag(ri));
   ScanFragReq::setAttrLen(tmp, scanTabReq->attrLenKeyLen & 0xFFFF);
+  ScanFragReq::setNoDiskFlag(tmp, ScanTabReq::getNoDiskFlag(ri));
   
   scanptr.p->scanRequestInfo = tmp;
   scanptr.p->scanStoredProcId = scanTabReq->storedProcId;
@@ -10120,6 +10140,7 @@
     tabptr.p->noOfKeyAttr = 0;
     tabptr.p->hasCharAttr = 0;
     tabptr.p->noOfDistrKeys = 0;
+    tabptr.p->hasVarKeys = 0;
   }//for
 }//Dbtc::initTable()
 
@@ -11154,7 +11175,6 @@
   ApiConnectRecordPtr transPtr;
   TcConnectRecord *localTcConnectRecord = tcConnectRecord;
   TcConnectRecordPtr opPtr;
-
   /**
    * TODO
    * Check transid,
@@ -11168,6 +11188,7 @@
     
     c_firedTriggerHash.remove(trigPtr);
 
+    trigPtr.p->fragId= fireOrd->fragId;
     bool ok = trigPtr.p->keyValues.getSize() == fireOrd->m_noPrimKeyWords;
     ok &= trigPtr.p->afterValues.getSize() == fireOrd->m_noAfterValueWords;
     ok &= trigPtr.p->beforeValues.getSize() == fireOrd->m_noBeforeValueWords;
@@ -11379,7 +11400,7 @@
   const UintR TopWords = (UintR)regApiPtr->tcindxrec;
   localHostptr.i = refToNode(regApiPtr->ndbapiBlockref);
   const Uint32 type = getNodeInfo(localHostptr.i).m_type;
-  const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::REP);
+  const bool is_api = (type >= NodeInfo::API && type <= NodeInfo::MGM);
   const BlockNumber TblockNum = refToBlock(regApiPtr->ndbapiBlockref);
   const Uint32 Tmarker = (regApiPtr->commitAckMarker == RNIL ? 0 : 1);
   ptrAss(localHostptr, hostRecord);
@@ -12067,7 +12088,11 @@
   Uint32 dataPos = 0;
   TcKeyReq * const tcIndxReq = &indexOp->tcIndxReq;
   TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
-  Uint32 * dataPtr = &tcKeyReq->scanInfo;
+  /*
+    Data points to distrGroupHashValue since scanInfo is used to send
+    fragment id of receiving fragment
+  */
+  Uint32 * dataPtr = &tcKeyReq->distrGroupHashValue;
   Uint32 tcKeyLength = TcKeyReq::StaticLength;
   Uint32 tcKeyRequestInfo = tcIndxReq->requestInfo;
   TcIndexData* indexData;
@@ -12106,11 +12131,16 @@
   regApiPtr->executingIndexOp = indexOp->indexOpId;;
   regApiPtr->noIndexOp++; // Increase count
 
-  // Filter out AttributeHeader:s since this should not be in key
+  /*
+    Filter out AttributeHeader:s since this should not be in key.
+    Also filter out fragment id from primary key and handle that
+    separately by setting it as Distribution Key and set indicator.
+  */
+
   AttributeHeader* attrHeader = (AttributeHeader *) aiIter.data;
     
   Uint32 headerSize = attrHeader->getHeaderSize();
-  Uint32 keySize = attrHeader->getDataSize();
+  Uint32 keySize = attrHeader->getDataSize() - 1;
   TcKeyReq::setKeyLength(tcKeyRequestInfo, keySize);
   // Skip header
   if (headerSize == 1) {
@@ -12120,6 +12150,9 @@
     jam();
     moreKeyData = indexOp->transIdAI.next(aiIter, headerSize - 1);
   }//if
+  tcKeyReq->scanInfo = *aiIter.data; //Fragment Id
+  moreKeyData = indexOp->transIdAI.next(aiIter);
+  TcKeyReq::setDistributionKeyFlag(tcKeyRequestInfo, 1U);
   while(// If we have not read complete key
 	(keySize != 0) &&
 	(dataPos < keyBufSize)) {
@@ -12475,7 +12508,7 @@
   AttributeBuffer::DataBufferIterator iter;
   Uint32 attrId = 0;
   Uint32 keyLength = 0;
-  Uint32 totalPrimaryKeyLength = 0;
+  Uint32 totalPrimaryKeyLength = 1; // fragment length
   Uint32 hops;
 
   indexTabPtr.i = indexData->indexId;
@@ -12528,11 +12561,12 @@
     hops = attrHeader->getHeaderSize() + attrHeader->getDataSize();
     moreAttrData = keyValues.next(iter, hops);
   }
-  AttributeHeader pkAttrHeader(attrId, totalPrimaryKeyLength);
+  AttributeHeader pkAttrHeader(attrId, totalPrimaryKeyLength << 2);
+  Uint32 attributesLength = afterValues.getSize() + 
+    pkAttrHeader.getHeaderSize() + pkAttrHeader.getDataSize();
   
   TcKeyReq::setKeyLength(tcKeyRequestInfo, keyLength);
-  tcKeyReq->attrLen = afterValues.getSize() + 
-    pkAttrHeader.getHeaderSize() + pkAttrHeader.getDataSize();
+  tcKeyReq->attrLen = attributesLength;
   tcKeyReq->tableId = indexData->indexId;
   TcKeyReq::setOperationType(tcKeyRequestInfo, ZINSERT);
   TcKeyReq::setExecutingTrigger(tcKeyRequestInfo, true);
@@ -12582,8 +12616,11 @@
   }
 
   tcKeyLength += dataPos;
-  Uint32 attributesLength = afterValues.getSize() + 
-    pkAttrHeader.getHeaderSize() + pkAttrHeader.getDataSize();
+  /*
+    Size of attrinfo is unique index attributes one by one, header for each
+    of them (all contained in the afterValues data structure), plus a header,
+    the primary key (compacted) and the fragment id before the primary key
+  */
   if (attributesLength <= attrBufSize) {
     jam();
     // ATTRINFO fits in TCKEYREQ
@@ -12600,6 +12637,10 @@
     // as one attribute
     pkAttrHeader.insertHeader(dataPtr);
     dataPtr += pkAttrHeader.getHeaderSize();
+    /*
+      Insert fragment id before primary key as part of reference to tuple
+    */
+    *dataPtr++ = firedTriggerData->fragId;
     moreAttrData = keyValues.first(iter);
     while(moreAttrData) {
       jam();
@@ -12764,6 +12805,29 @@
     pkAttrHeader.insertHeader(dataPtr);
     dataPtr += pkAttrHeader.getHeaderSize();
     attrInfoPos += pkAttrHeader.getHeaderSize();
+    /*
+      Add fragment id before primary key
+      TODO: This code really needs to be made into a long signal
+      to remove this messy code.
+    */
+    if (attrInfoPos == AttrInfo::DataLength)
+    {
+      jam();
+      // Flush ATTRINFO
+#if INTERNAL_TRIGGER_TCKEYREQ_JBA
+      sendSignal(reference(), GSN_ATTRINFO, signal, 
+                 AttrInfo::HeaderLength + AttrInfo::DataLength, JBA);
+#else
+      EXECUTE_DIRECT(DBTC, GSN_ATTRINFO, signal,
+                     AttrInfo::HeaderLength + AttrInfo::DataLength);
+      jamEntry();
+#endif
+      dataPtr = (Uint32 *) &attrInfo->attrData;	  
+      attrInfoPos = 0;
+    }
+    attrInfoPos++;
+    *dataPtr++ = firedTriggerData->fragId;
+
     moreAttrData = keyValues.first(iter);
     while(moreAttrData) {
       jam();

--- 1.17.5.1/ndb/src/kernel/vm/SimulatedBlock.cpp	2005-12-14 10:57:06 +01:00
+++ 1.26/storage/ndb/src/kernel/vm/SimulatedBlock.cpp	2005-12-14 11:06:38 +01:00
@@ -36,6 +36,8 @@
 
 #include <Properties.hpp>
 #include "Configuration.hpp"
+#include <AttributeDescriptor.hpp>
+#include <NdbSqlUtil.hpp>
 
 #define ljamEntry() jamEntryLine(30000 + __LINE__)
 #define ljam() jamLine(30000 + __LINE__)
@@ -49,12 +51,12 @@
     theNumber(blockNumber),
     theReference(numberToRef(blockNumber, globalData.ownId)),
     theConfiguration(conf),
+    m_global_page_pool(globalData.m_global_page_pool),
     c_fragmentInfoHash(c_fragmentInfoPool),
     c_linearFragmentSendList(c_fragmentSendPool),
     c_segmentedFragmentSendList(c_fragmentSendPool),
     c_mutexMgr(* this),
-    c_counterMgr(* this),
-    c_ptrMetaDataCommon(0)
+    c_counterMgr(* this)
 {
   NewVarRef = 0;
   
@@ -148,6 +150,8 @@
   a[GSN_FSREMOVEREF]  = &SimulatedBlock::execFSREMOVEREF;
   a[GSN_FSSYNCREF]    = &SimulatedBlock::execFSSYNCREF;
   a[GSN_FSAPPENDREF]  = &SimulatedBlock::execFSAPPENDREF;
+  a[GSN_NODE_START_REP] = &SimulatedBlock::execNODE_START_REP;
+  a[GSN_API_START_REP] = &SimulatedBlock::execAPI_START_REP;
 }
 
 void
@@ -913,6 +917,16 @@
   sendSignal(reference(), GSN_CONTINUE_FRAGMENTED, signal, 1, JBB);
 }
 
+void
+SimulatedBlock::execNODE_START_REP(Signal* signal)
+{
+}
+
+void
+SimulatedBlock::execAPI_START_REP(Signal* signal)
+{
+}
+
 #ifdef VM_TRACE_TIME
 void
 SimulatedBlock::clearTimes() {
@@ -1868,59 +1882,99 @@
   while (i < noOfKeyAttr) 
   {
     const KeyDescriptor::KeyAttr& keyAttr = desc->keyAttr[i];
+    Uint32 dstWords =
+      xfrm_attr(keyAttr.attributeDescriptor, keyAttr.charsetInfo,
+                src, srcPos, dst, dstPos, dstSize);
+    keyPartLen[i++] = dstWords;
+  }
+
+  if (0)
+  {
+    for(Uint32 i = 0; i<dstPos; i++)
+    {
+      printf("%.8x ", dst[i]);
+    }
+    printf("\n");
+  }
+  return dstPos;
+}
+
+Uint32
+SimulatedBlock::xfrm_attr(Uint32 attrDesc, CHARSET_INFO* cs,
+                          const Uint32* src, Uint32 & srcPos,
+                          Uint32* dst, Uint32 & dstPos, Uint32 dstSize) const
+{
+  Uint32 array = 
+    AttributeDescriptor::getArrayType(attrDesc);
+  Uint32 srcBytes = 
+    AttributeDescriptor::getSizeInBytes(attrDesc);
+
+  Uint32 srcWords = ~0;
+  Uint32 dstWords = ~0;
+  uchar* dstPtr = (uchar*)&dst[dstPos];
+  const uchar* srcPtr = (const uchar*)&src[srcPos];
+  
+  if (cs == NULL)
+  {
+    jam();
+    Uint32 len;
+    switch(array){
+    case NDB_ARRAYTYPE_SHORT_VAR:
+      len = 1 + srcPtr[0];
+      break;
+    case NDB_ARRAYTYPE_MEDIUM_VAR:
+      len = 2 + srcPtr[0] + (srcPtr[1] << 8);
+      break;
+#ifndef VM_TRACE
+    default:
+#endif
+    case NDB_ARRAYTYPE_FIXED:
+      len = srcBytes;
+    }
+    srcWords = (len + 3) >> 2;
+    dstWords = srcWords;
+    memcpy(dstPtr, srcPtr, dstWords << 2);
     
-    Uint32 srcBytes = 
-      AttributeDescriptor::getSizeInBytes(keyAttr.attributeDescriptor);
-    Uint32 srcWords = (srcBytes + 3) / 4;
-    Uint32 dstWords = ~0;
-    uchar* dstPtr = (uchar*)&dst[dstPos];
-    const uchar* srcPtr = (const uchar*)&src[srcPos];
-    CHARSET_INFO* cs = keyAttr.charsetInfo;
-    
-    if (cs == NULL) 
+    if (0)
     {
-      jam();
-      memcpy(dstPtr, srcPtr, srcWords << 2);
-      dstWords = srcWords;
-    } 
-    else 
+      ndbout_c("srcPos: %d dstPos: %d len: %d srcWords: %d dstWords: %d",
+               srcPos, dstPos, len, srcWords, dstWords);
+      
+      for(Uint32 i = 0; i<srcWords; i++)
+        printf("%.8x ", src[srcPos + i]);
+      printf("\n");
+    }
+  } 
+  else
+  {
+    jam();
+    Uint32 typeId =
+      AttributeDescriptor::getType(attrDesc);
+    Uint32 lb, len;
+    bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len);
+    ndbrequire(ok);
+    Uint32 xmul = cs->strxfrm_multiply;
+    if (xmul == 0)
+      xmul = 1;
+    /*
+     * Varchar end-spaces are ignored in comparisons.  To get same hash
+     * we blank-pad to maximum length via strnxfrm.
+     */
+    Uint32 dstLen = xmul * (srcBytes - lb);
+    ndbrequire(dstLen <= ((dstSize - dstPos) << 2));
+    int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
+    ndbrequire(n != -1);
+    while ((n & 3) != 0) 
     {
-      jam();
-      Uint32 typeId =
-	AttributeDescriptor::getType(keyAttr.attributeDescriptor);
-      Uint32 lb, len;
-      bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len);
-      if (unlikely(!ok))
-      {
-	return 0;
-      }
-      Uint32 xmul = cs->strxfrm_multiply;
-      if (xmul == 0)
-	xmul = 1;
-      /*
-       * Varchar is really Char.  End spaces do not matter.  To get
-       * same hash we blank-pad to maximum length via strnxfrm.
-       * TODO use MySQL charset-aware hash function instead
-       */
-      Uint32 dstLen = xmul * (srcBytes - lb);
-      ndbrequire(dstLen <= ((dstSize - dstPos) << 2));
-      int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
-      if (unlikely(n == -1))
-      {
-	return 0;
-      }
-      while ((n & 3) != 0) 
-      {
-	dstPtr[n++] = 0;
-      }
-      dstWords = (n >> 2);
+      dstPtr[n++] = 0;
     }
-    dstPos += dstWords;
-    srcPos += srcWords;
-    keyPartLen[i++] = dstWords;
+    dstWords = (n >> 2);
+    srcWords = (lb + len + 3) >> 2; 
   }
 
-  return dstPos;
+  dstPos += dstWords;
+  srcPos += srcWords;
+  return dstWords;
 }
 
 Uint32
@@ -1960,6 +2014,7 @@
     {
       Uint32 attr = desc->keyAttr[i].attributeDescriptor;
       Uint32 len = AttributeDescriptor::getSizeInWords(attr);
+      ndbrequire(AttributeDescriptor::getArrayType(attr) == NDB_ARRAYTYPE_FIXED);
       if(AttributeDescriptor::getDKey(attr))
       {
 	noOfDistrKeys--;

--- 1.220/sql/ha_ndbcluster.cc	2005-11-28 20:14:03 +01:00
+++ 1.221/sql/ha_ndbcluster.cc	2005-12-14 11:02:07 +01:00
@@ -3014,8 +3014,26 @@
         }      
         *buff++= 0;
       }
-      memcpy(buff, record + key_part->offset, key_part->length);
-      buff += key_part->length;
+
+      size_t len = key_part->length;
+      const byte * ptr = record + key_part->offset;
+      Field *field = key_part->field;
+      if ((field->type() ==  MYSQL_TYPE_VARCHAR) &&
+	  ((Field_varstring*)field)->length_bytes == 1)
+      {
+	/** 
+	 * Keys always use 2 bytes length
+	 */
+	buff[0] = ptr[0];
+	buff[1] = 0;
+	memcpy(buff+2, ptr + 1, len);	
+	len += 2;
+      }
+      else
+      {
+	memcpy(buff, ptr, len);
+      }
+      buff += len;
     }
   } 
   else 

--- 1.30.13.1/ndb/src/ndbapi/ndberror.c	2005-12-14 10:57:06 +01:00
+++ 1.45/storage/ndb/src/ndbapi/ndberror.c	2005-12-14 11:06:38 +01:00
@@ -16,11 +16,13 @@
 
 
 #include <ndb_global.h>
+#include <my_base.h>
 #include <ndberror.h>
 #include <m_string.h>
 
 typedef struct ErrorBundle {
   int code;
+  int mysql_code;
   ndberror_classification classification;
   const char * message;
 } ErrorBundle;
@@ -57,6 +59,9 @@
 
 #define OE ndberror_cl_schema_object_already_exists
 
+/* default mysql error code for unmapped codes */
+#define DMEC -1
+
 static const char* empty_string = "";
 
 /*
@@ -71,6 +76,8 @@
  *  900 - TUX
  * 1200 - LQH
  * 1300 - BACKUP
+ * 1400 - SUMA
+ * 1500 - LGMAN
  * 4000 - API
  * 4100 - ""
  * 4200 - ""
@@ -88,439 +95,475 @@
   /**
    * No error
    */
-  { 0,    NE, "No error" },
+  { 0,    0, NE, "No error" },
   
   /**
    * NoDataFound
    */
-  { 626,  ND, "Tuple did not exist" },
+  { 626,  HA_ERR_KEY_NOT_FOUND, ND, "Tuple did not exist" },
 
   /**
    * ConstraintViolation 
    */
-  { 630,  CV, "Tuple already existed when attempting to insert" },
-  { 840,  CV, "Trying to set a NOT NULL attribute to NULL" },
-  { 893,  CV, "Constraint violation e.g. duplicate value in unique index" },
+  { 630,  HA_ERR_FOUND_DUPP_KEY, CV, "Tuple already existed when attempting to insert" },
+  { 839,  DMEC, CV, "Illegal null attribute" },
+  { 840,  DMEC, CV, "Trying to set a NOT NULL attribute to NULL" },
+  { 893,  HA_ERR_FOUND_DUPP_KEY, CV, "Constraint violation e.g. duplicate value in unique index" },
 
   /**
    * Node recovery errors
    */
-  {  286, NR, "Node failure caused abort of transaction" }, 
-  {  250, NR, "Node where lock was held crashed, restart scan transaction" },
-  {  499, NR, "Scan take over error, restart scan transaction" },  
-  { 1204, NR, "Temporary failure, distribution changed" },
-  { 4002, NR, "Send to NDB failed" },
-  { 4010, NR, "Node failure caused abort of transaction" }, 
-  { 4025, NR, "Node failure caused abort of transaction" }, 
-  { 4027, NR, "Node failure caused abort of transaction" },
-  { 4028, NR, "Node failure caused abort of transaction" },
-  { 4029, NR, "Node failure caused abort of transaction" },
-  { 4031, NR, "Node failure caused abort of transaction" },
-  { 4033, NR, "Send to NDB failed" },
-  { 4115, NR, 
+  {  286, DMEC, NR, "Node failure caused abort of transaction" }, 
+  {  250, DMEC, NR, "Node where lock was held crashed, restart scan transaction" },
+  {  499, DMEC, NR, "Scan take over error, restart scan transaction" },  
+  { 1204, DMEC, NR, "Temporary failure, distribution changed" },
+  { 4002, DMEC, NR, "Send to NDB failed" },
+  { 4010, DMEC, NR, "Node failure caused abort of transaction" }, 
+  { 4025, DMEC, NR, "Node failure caused abort of transaction" }, 
+  { 4027, DMEC, NR, "Node failure caused abort of transaction" },
+  { 4028, DMEC, NR, "Node failure caused abort of transaction" },
+  { 4029, DMEC, NR, "Node failure caused abort of transaction" },
+  { 4031, DMEC, NR, "Node failure caused abort of transaction" },
+  { 4033, DMEC, NR, "Send to NDB failed" },
+  { 4115, DMEC, NR, 
     "Transaction was committed but all read information was not "
     "received due to node crash" },
-  { 4119, NR, "Simple/dirty read failed due to node failure" },
+  { 4119, DMEC, NR, "Simple/dirty read failed due to node failure" },
   
   /**
    * Node shutdown
    */
-  {  280, NS, "Transaction aborted due to node shutdown" },
+  {  280, DMEC, NS, "Transaction aborted due to node shutdown" },
   /* This scan trans had an active fragment scan in a LQH which have crashed */
-  {  270, NS, "Transaction aborted due to node shutdown" }, 
-  { 1223, NS, "Read operation aborted due to node shutdown" },
-  { 4023, NS, "Transaction aborted due to node shutdown" },
-  { 4030, NS, "Transaction aborted due to node shutdown" },
-  { 4034, NS, "Transaction aborted due to node shutdown" },
+  {  270, DMEC, NS, "Transaction aborted due to node shutdown" }, 
+  { 1223, DMEC, NS, "Read operation aborted due to node shutdown" },
+  { 4023, DMEC, NS, "Transaction aborted due to node shutdown" },
+  { 4030, DMEC, NS, "Transaction aborted due to node shutdown" },
+  { 4034, DMEC, NS, "Transaction aborted due to node shutdown" },
 
 
   
   /**
    * Unknown result
    */
-  { 4008, UR, "Receive from NDB failed" },
-  { 4009, UR, "Cluster Failure" },
-  { 4012, UR, 
+  { 4008, DMEC, UR, "Receive from NDB failed" },
+  { 4009, DMEC, UR, "Cluster Failure" },
+  { 4012, DMEC, UR, 
     "Request ndbd time-out, maybe due to high load or communication problems"}, 
-  { 4024, UR, 
+  { 4024, DMEC, UR, 
     "Time-out, most likely caused by simple read or cluster failure" }, 
   
   /**
    * TemporaryResourceError
    */
-  { 217,  TR, "217" },
-  { 218,  TR, "218" },
-  { 219,  TR, "219" },
-  { 233,  TR,
+  { 217,  DMEC, TR, "217" },
+  { 218,  DMEC, TR, "218" },
+  { 219,  DMEC, TR, "219" },
+  { 233,  DMEC, TR,
     "Out of operation records in transaction coordinator (increase MaxNoOfConcurrentOperations)" },
-  { 275,  TR, "275" },
-  { 279,  TR, "Out of transaction markers in transaction coordinator" },
-  { 414,  TR, "414" },
-  { 418,  TR, "Out of transaction buffers in LQH" },
-  { 419,  TR, "419" },
-  { 245,  TR, "Too many active scans" },
-  { 488,  TR, "Too many active scans" },
-  { 490,  TR, "Too many active scans" },
-  { 805,  TR, "Out of attrinfo records in tuple manager" },
-  { 830,  TR, "Out of add fragment operation records" },
-  { 873,  TR, "Out of attrinfo records for scan in tuple manager" },
-  { 1217, TR, "Out of operation records in local data manager (increase MaxNoOfLocalOperations)" },
-  { 1220, TR, "REDO log files overloaded, consult online manual (decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)" },
-  { 1222, TR, "Out of transaction markers in LQH" },
-  { 1224, TR, "Out of Send Buffer space in LQH" },
-  { 4021, TR, "Out of Send Buffer space in NDB API" },
-  { 4022, TR, "Out of Send Buffer space in NDB API" },
-  { 4032, TR, "Out of Send Buffer space in NDB API" },
-  {  288, TR, "Out of index operations in transaction coordinator (increase MaxNoOfConcurrentIndexOperations)" },
+  { 275,  DMEC, TR, "275" },
+  { 279,  DMEC, TR, "Out of transaction markers in transaction coordinator" },
+  { 414,  DMEC, TR, "414" },
+  { 418,  DMEC, TR, "Out of transaction buffers in LQH" },
+  { 419,  DMEC, TR, "419" },
+  { 245,  DMEC, TR, "Too many active scans" },
+  { 488,  DMEC, TR, "Too many active scans" },
+  { 490,  DMEC, TR, "Too many active scans" },
+  { 805,  DMEC, TR, "Out of attrinfo records in tuple manager" },
+  { 830,  DMEC, TR, "Out of add fragment operation records" },
+  { 873,  DMEC, TR, "Out of attrinfo records for scan in tuple manager" },
+  { 1217, DMEC, TR, "Out of operation records in local data manager (increase MaxNoOfLocalOperations)" },
+  { 1220, DMEC, TR, "REDO log files overloaded, consult online manual (decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)" },
+  { 1222, DMEC, TR, "Out of transaction markers in LQH" },
+  { 4021, DMEC, TR, "Out of Send Buffer space in NDB API" },
+  { 4022, DMEC, TR, "Out of Send Buffer space in NDB API" },
+  { 4032, DMEC, TR, "Out of Send Buffer space in NDB API" },
+  { 1501, DMEC, TR, "Out of undo space" },
+  {  288, DMEC, TR, "Out of index operations in transaction coordinator (increase MaxNoOfConcurrentIndexOperations)" },
+
   /**
    * InsufficientSpace
    */
-  { 623,  IS, "623" },
-  { 624,  IS, "624" },
-  { 625,  IS, "Out of memory in Ndb Kernel, hash index part (increase IndexMemory)" },
-  { 640,  IS, "Too many hash indexes (should not happen)" },
-  { 826,  IS, "Too many tables and attributes (increase MaxNoOfAttributes or MaxNoOfTables)" },
-  { 827,  IS, "Out of memory in Ndb Kernel, table data (increase DataMemory)" },
-  { 902,  IS, "Out of memory in Ndb Kernel, ordered index data (increase DataMemory)" },
-  { 903,  IS, "Too many ordered indexes (increase MaxNoOfOrderedIndexes)" },
-  { 904,  IS, "Out of fragment records (increase MaxNoOfOrderedIndexes)" },
-  { 905,  IS, "Out of attribute records (increase MaxNoOfAttributes)" },
+  { 623,  HA_ERR_RECORD_FILE_FULL, IS, "623" },
+  { 624,  HA_ERR_RECORD_FILE_FULL, IS, "624" },
+  { 625,  HA_ERR_INDEX_FILE_FULL, IS, "Out of memory in Ndb Kernel, hash index part (increase IndexMemory)" },
+  { 640,  DMEC, IS, "Too many hash indexes (should not happen)" },
+  { 826,  HA_ERR_RECORD_FILE_FULL, IS, "Too many tables and attributes (increase MaxNoOfAttributes or MaxNoOfTables)" },
+  { 827,  HA_ERR_RECORD_FILE_FULL, IS, "Out of memory in Ndb Kernel, table data (increase DataMemory)" },
+  { 902,  HA_ERR_RECORD_FILE_FULL, IS, "Out of memory in Ndb Kernel, ordered index data (increase DataMemory)" },
+  { 903,  HA_ERR_INDEX_FILE_FULL, IS, "Too many ordered indexes (increase MaxNoOfOrderedIndexes)" },
+  { 904,  HA_ERR_INDEX_FILE_FULL, IS, "Out of fragment records (increase MaxNoOfOrderedIndexes)" },
+  { 905,  DMEC, IS, "Out of attribute records (increase MaxNoOfAttributes)" },
 
   /**
    * TimeoutExpired 
    */
-  { 266,  TO, "Time-out in NDB, probably caused by deadlock" },
-  { 274,  TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout */
-  { 296,  TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout */
-  { 297,  TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout, temporary!! */
-  { 237,  TO, "Transaction had timed out when trying to commit it" },
+  { 266,  HA_ERR_LOCK_WAIT_TIMEOUT, TO, "Time-out in NDB, probably caused by deadlock" },
+  { 274,  HA_ERR_LOCK_WAIT_TIMEOUT, TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout */
+  { 296,  HA_ERR_LOCK_WAIT_TIMEOUT, TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout */
+  { 297,  HA_ERR_LOCK_WAIT_TIMEOUT, TO, "Time-out in NDB, probably caused by deadlock" }, /* Scan trans timeout, temporary!! */
+  { 237,  HA_ERR_LOCK_WAIT_TIMEOUT, TO, "Transaction had timed out when trying to commit it" },
   
   /**
    * OverloadError
    */
-  { 410,  OL, "REDO log files overloaded, consult online manual (decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)" },
-  { 677,  OL, "Index UNDO buffers overloaded (increase UndoIndexBuffer)" },
-  { 891,  OL, "Data UNDO buffers overloaded (increase UndoDataBuffer)" },
-  { 1221, OL, "REDO buffers overloaded, consult online manual (increase RedoBuffer)" },
-  { 4006, OL, "Connect failure - out of connection objects (increase MaxNoOfConcurrentTransactions)" }, 
+  { 410,  DMEC, OL, "REDO log files overloaded, consult online manual (decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)" },
+  { 677,  DMEC, OL, "Index UNDO buffers overloaded (increase UndoIndexBuffer)" },
+  { 891,  DMEC, OL, "Data UNDO buffers overloaded (increase UndoDataBuffer)" },
+  { 1221, DMEC, OL, "REDO buffers overloaded, consult online manual (increase RedoBuffer)" },
+  { 4006, DMEC, OL, "Connect failure - out of connection objects (increase MaxNoOfConcurrentTransactions)" }, 
 
 
   
   /**
    * Internal errors
    */
-  { 892,  IE, "Inconsistent hash index. The index needs to be dropped and recreated" },
-  { 896,  IE, "Tuple corrupted - wrong checksum or column data in invalid format" },
-  { 901,  IE, "Inconsistent ordered index. The index needs to be dropped and recreated" },
-  { 202,  IE, "202" },
-  { 203,  IE, "203" },
-  { 207,  IE, "207" },
-  { 208,  IE, "208" },
-  { 209,  IE, "Communication problem, signal error" },
-  { 220,  IE, "220" },
-  { 230,  IE, "230" },
-  { 232,  IE, "232" },
-  { 238,  IE, "238" },
-  { 271,  IE, "Simple Read transaction without any attributes to read" },
-  { 272,  IE, "Update operation without any attributes to update" },
-  { 276,  IE, "276" },
-  { 277,  IE, "277" },
-  { 278,  IE, "278" },
-  { 287,  IE, "Index corrupted" },
-  { 290,  IE, "Corrupt key in TC, unable to xfrm" },
-  { 631,  IE, "631" },
-  { 632,  IE, "632" },
-  { 702,  IE, "Request to non-master" },
-  { 706,  IE, "Inconsistency during table creation" },
-  { 809,  IE, "809" },
-  { 812,  IE, "812" },
-  { 829,  IE, "829" },
-  { 833,  IE, "833" },
-  { 839,  IE, "Illegal null attribute" },
-  { 871,  IE, "871" },
-  { 882,  IE, "882" },
-  { 883,  IE, "883" },
-  { 887,  IE, "887" },
-  { 888,  IE, "888" },
-  { 890,  IE, "890" },
-  { 4000, IE, "MEMORY ALLOCATION ERROR" },
-  { 4001, IE, "Signal Definition Error" },
-  { 4005, IE, "Internal Error in NdbApi" },
-  { 4011, IE, "Internal Error in NdbApi" }, 
-  { 4107, IE, "Simple Transaction and Not Start" },
-  { 4108, IE, "Faulty operation type" },
-  { 4109, IE, "Faulty primary key attribute length" },
-  { 4110, IE, "Faulty length in ATTRINFO signal" },
-  { 4111, IE, "Status Error in NdbConnection" },
-  { 4113, IE, "Too many operations received" },
-  { 4320, IE, "Cannot use the same object twice to create table" },
-  { 4321, IE, "Trying to start two schema transactions" },
-  { 4344, IE, "Only DBDICT and TRIX can send requests to TRIX" },
-  { 4345, IE, "TRIX block is not available yet, probably due to node failure" },
-  { 4346, IE, "Internal error at index create/build" },
-  { 4347, IE, "Bad state at alter index" },
-  { 4348, IE, "Inconsistency detected at alter index" },
-  { 4349, IE, "Inconsistency detected at index usage" },
-  { 4350, IE, "Transaction already aborted" },
+  { 892,  DMEC, IE, "Inconsistent hash index. The index needs to be dropped and recreated" },
+  { 896,  DMEC, IE, "Tuple corrupted - wrong checksum or column data in invalid format" },
+  { 901,  DMEC, IE, "Inconsistent ordered index. The index needs to be dropped and recreated" },
+  { 202,  DMEC, IE, "202" },
+  { 203,  DMEC, IE, "203" },
+  { 207,  DMEC, IE, "207" },
+  { 208,  DMEC, IE, "208" },
+  { 209,  DMEC, IE, "Communication problem, signal error" },
+  { 220,  DMEC, IE, "220" },
+  { 230,  DMEC, IE, "230" },
+  { 232,  DMEC, IE, "232" },
+  { 238,  DMEC, IE, "238" },
+  { 271,  DMEC, IE, "Simple Read transaction without any attributes to read" },
+  { 272,  DMEC, IE, "Update operation without any attributes to update" },
+  { 276,  DMEC, IE, "276" },
+  { 277,  DMEC, IE, "277" },
+  { 278,  DMEC, IE, "278" },
+  { 287,  DMEC, IE, "Index corrupted" },
+  { 290,  DMEC, IE, "Corrupt key in TC, unable to xfrm" },
+  { 631,  DMEC, IE, "631" },
+  { 632,  DMEC, IE, "632" },
+  { 702,  DMEC, IE, "Request to non-master" },
+  { 706,  DMEC, IE, "Inconsistency during table creation" },
+  { 809,  DMEC, IE, "809" },
+  { 812,  DMEC, IE, "812" },
+  { 829,  DMEC, IE, "829" },
+  { 833,  DMEC, IE, "833" },
+  { 871,  DMEC, IE, "871" },
+  { 882,  DMEC, IE, "882" },
+  { 883,  DMEC, IE, "883" },
+  { 887,  DMEC, IE, "887" },
+  { 888,  DMEC, IE, "888" },
+  { 890,  DMEC, IE, "890" },
+  { 4000, DMEC, IE, "MEMORY ALLOCATION ERROR" },
+  { 4001, DMEC, IE, "Signal Definition Error" },
+  { 4005, DMEC, IE, "Internal Error in NdbApi" },
+  { 4011, DMEC, IE, "Internal Error in NdbApi" }, 
+  { 4107, DMEC, IE, "Simple Transaction and Not Start" },
+  { 4108, DMEC, IE, "Faulty operation type" },
+  { 4109, DMEC, IE, "Faulty primary key attribute length" },
+  { 4110, DMEC, IE, "Faulty length in ATTRINFO signal" },
+  { 4111, DMEC, IE, "Status Error in NdbConnection" },
+  { 4113, DMEC, IE, "Too many operations received" },
+  { 4320, DMEC, IE, "Cannot use the same object twice to create table" },
+  { 4321, DMEC, IE, "Trying to start two schema transactions" },
+  { 4344, DMEC, IE, "Only DBDICT and TRIX can send requests to TRIX" },
+  { 4345, DMEC, IE, "TRIX block is not available yet, probably due to node failure" },
+  { 4346, DMEC, IE, "Internal error at index create/build" },
+  { 4347, DMEC, IE, "Bad state at alter index" },
+  { 4348, DMEC, IE, "Inconsistency detected at alter index" },
+  { 4349, DMEC, IE, "Inconsistency detected at index usage" },
+  { 4350, DMEC, IE, "Transaction already aborted" },
 
   /**
    * Application error
    */
-  { 823,  AE, "Too much attrinfo from application in tuple manager" },
-  { 831,  AE, "Too many nullable/bitfields in table definition" },
-  { 876,  AE, "876" },
-  { 877,  AE, "877" },
-  { 878,  AE, "878" },
-  { 879,  AE, "879" },
-  { 880,  AE, "Tried to read too much - too many getValue calls" },
-  { 884,  AE, "Stack overflow in interpreter" },
-  { 885,  AE, "Stack underflow in interpreter" },
-  { 886,  AE, "More than 65535 instructions executed in interpreter" },
-  { 897,  AE, "Update attempt of primary key via ndbcluster internal api (if this occurs via the MySQL server it is a bug, please report)" },
-  { 4256, AE, "Must call Ndb::init() before this function" },
-  { 4257, AE, "Tried to read too much - too many getValue calls" },
-  
+  { 823,  DMEC, AE, "Too much attrinfo from application in tuple manager" },
+  { 831,  DMEC, AE, "Too many nullable/bitfields in table definition" },
+  { 876,  DMEC, AE, "876" },
+  { 877,  DMEC, AE, "877" },
+  { 878,  DMEC, AE, "878" },
+  { 879,  DMEC, AE, "879" },
+  { 880,  DMEC, AE, "Tried to read too much - too many getValue calls" },
+  { 884,  DMEC, AE, "Stack overflow in interpreter" },
+  { 885,  DMEC, AE, "Stack underflow in interpreter" },
+  { 886,  DMEC, AE, "More than 65535 instructions executed in interpreter" },
+  { 897,  DMEC, AE, "Update attempt of primary key via ndbcluster internal api (if this occurs via the MySQL server it is a bug, please report)" },
+  { 4256, DMEC, AE, "Must call Ndb::init() before this function" },
+  { 4257, DMEC, AE, "Tried to read too much - too many getValue calls" },
+
   /** 
    * Scan application errors
    */
-  { 242,  AE, "Zero concurrency in scan"},
-  { 244,  AE, "Too high concurrency in scan"},
-  { 269,  AE, "No condition and attributes to read in scan"},
-  { 4600, AE, "Transaction is already started"},
-  { 4601, AE, "Transaction is not started"},
-  { 4602, AE, "You must call getNdbOperation before executeScan" },
-  { 4603, AE, "There can only be ONE operation in a scan transaction" },
-  { 4604, AE, "takeOverScanOp, opType must be UpdateRequest or DeleteRequest" },
-  { 4605, AE, "You may only call openScanRead or openScanExclusive once for each operation"},
-  { 4607, AE, "There may only be one operation in a scan transaction"},
-  { 4608, AE, "You can not takeOverScan unless you have used openScanExclusive"},
-  { 4609, AE, "You must call nextScanResult before trying to takeOverScan"},
-  { 4232, AE, "Parallelism can only be between 1 and 240" },
+  { 242,  DMEC, AE, "Zero concurrency in scan"},
+  { 244,  DMEC, AE, "Too high concurrency in scan"},
+  { 269,  DMEC, AE, "No condition and attributes to read in scan"},
+  { 4600, DMEC, AE, "Transaction is already started"},
+  { 4601, DMEC, AE, "Transaction is not started"},
+  { 4602, DMEC, AE, "You must call getNdbOperation before executeScan" },
+  { 4603, DMEC, AE, "There can only be ONE operation in a scan transaction" },
+  { 4604, DMEC, AE, "takeOverScanOp, opType must be UpdateRequest or DeleteRequest" },
+  { 4605, DMEC, AE, "You may only call openScanRead or openScanExclusive once for each operation"},
+  { 4607, DMEC, AE, "There may only be one operation in a scan transaction"},
+  { 4608, DMEC, AE, "You can not takeOverScan unless you have used openScanExclusive"},
+  { 4609, DMEC, AE, "You must call nextScanResult before trying to takeOverScan"},
+  { 4232, DMEC, AE, "Parallelism can only be between 1 and 240" },
 
   /** 
    * Event schema errors
    */
 
-  { 4713,  SE, "Column defined in event does not exist in table"},
+  { 4713,  DMEC, SE, "Column defined in event does not exist in table"},
   
   /** 
    * Event application errors
    */
 
-  { 4707,  AE, "Too many event have been defined"},
-  { 4708,  AE, "Event name is too long"},
-  { 4709,  AE, "Can't accept more subscribers"},
-  {  746,  OE, "Event name already exists"},
-  { 4710,  AE, "Event not found"},
-  { 4711,  AE, "Creation of event failed"},
-  { 4712,  AE, "Stopped event operation does not exist. Already stopped?"},
+  { 4707,  DMEC, AE, "Too many events have been defined"},
+  { 4708,  DMEC, AE, "Event name is too long"},
+  { 4709,  DMEC, AE, "Can't accept more subscribers"},
+  {  746,  DMEC, OE, "Event name already exists"},
+  {  747,  DMEC, IS, "Out of event records"},
+  {  748,  DMEC, TR, "Busy during read of event table"},
+  { 4710,  DMEC, AE, "Event not found"},
+  { 4711,  DMEC, AE, "Creation of event failed"},
+  { 4712,  DMEC, AE, "Stopped event operation does not exist. Already stopped?"},
 
   /** 
    * Event internal errors
    */
 
-  { 4731,  IE, "Event not found"},
+  { 4731,  DMEC, IE, "Event not found"},
 
   /**
    * SchemaError
    */
-  { 701,  SE, "System busy with other schema operation" },
-  { 703,  SE, "Invalid table format" },
-  { 704,  SE, "Attribute name too long" },
-  { 705,  SE, "Table name too long" },
-  { 707,  SE, "No more table metadata records (increase MaxNoOfTables)" },  
-  { 708,  SE, "No more attribute metadata records (increase MaxNoOfAttributes)" },
-  { 709,  SE, "No such table existed" },
-  { 721,  OE, "Table or index with given name already exists" },
-  { 723,  SE, "No such table existed" },
-  { 736,  SE, "Unsupported array size" },
-  { 737,  SE, "Attribute array size too big" },
-  { 738,  SE, "Record too big" },
-  { 739,  SE, "Unsupported primary key length" },
-  { 740,  SE, "Nullable primary key not supported" },
-  { 741,  SE, "Unsupported alter table" },
-  { 743,  SE, "Unsupported character set in table or index" },
-  { 744,  SE, "Character string is invalid for given character set" },
-  { 745,  SE, "Distribution key not supported for char attribute (use binary attribute)" },
-  { 761,  SE, "Unable to drop table as backup is in progress" },
-  { 762,  SE, "Unable to alter table as backup is in progress" },
-  { 241,  SE, "Invalid schema object version" },
-  { 283,  SE, "Table is being dropped" },
-  { 284,  SE, "Table not defined in transaction coordinator" },
-  { 285,  SE, "Unknown table error in transaction coordinator" },
-  { 881,  SE, "Unable to create table, out of data pages (increase DataMemory) " },
-  { 906,  SE, "Unsupported attribute type in index" },
-  { 907,  SE, "Unsupported character set in table or index" },
-  { 908,  IS, "Invalid ordered index tree node size" },
-  { 1225, SE, "Table not defined in local query handler" },
-  { 1226, SE, "Table is being dropped" },
-  { 1228, SE, "Cannot use drop table for drop index" },
-  { 1229, SE, "Too long frm data supplied" },
-  { 1231, SE, "Invalid table or index to scan" },
-  { 1232, SE, "Invalid table or index to scan" },
+  { 701,  DMEC, SE, "System busy with other schema operation" },
+  { 703,  DMEC, SE, "Invalid table format" },
+  { 704,  DMEC, SE, "Attribute name too long" },
+  { 705,  DMEC, SE, "Table name too long" },
+  { 707,  DMEC, SE, "No more table metadata records (increase MaxNoOfTables)" },  
+  { 708,  DMEC, SE, "No more attribute metadata records (increase MaxNoOfAttributes)" },
+  { 709,  HA_ERR_NO_SUCH_TABLE, SE, "No such table existed" },
+  { 710,  DMEC, SE, "Internal: Get by table name not supported, use table id." },
+  { 721,  HA_ERR_TABLE_EXIST,   OE, "Table or index with given name already exists" },
+  { 723,  HA_ERR_NO_SUCH_TABLE, SE, "No such table existed" },
+  { 736,  DMEC, SE, "Unsupported array size" },
+  { 737,  HA_WRONG_CREATE_OPTION, SE, "Attribute array size too big" },
+  { 738,  HA_WRONG_CREATE_OPTION, SE, "Record too big" },
+  { 739,  HA_WRONG_CREATE_OPTION, SE, "Unsupported primary key length" },
+  { 740,  HA_WRONG_CREATE_OPTION, SE, "Nullable primary key not supported" },
+  { 741,  DMEC, SE, "Unsupported alter table" },
+  { 743,  HA_WRONG_CREATE_OPTION, SE, "Unsupported character set in table or index" },
+  { 744,  DMEC, SE, "Character string is invalid for given character set" },
+  { 745,  HA_WRONG_CREATE_OPTION, SE, "Distribution key not supported for char attribute (use binary attribute)" },
+  { 771,  HA_WRONG_CREATE_OPTION, AE, "Given NODEGROUP doesn't exist in this cluster" },
+  { 772,  HA_WRONG_CREATE_OPTION, IE, "Given fragmentType doesn't exist" },
+  { 749,  HA_WRONG_CREATE_OPTION, IE, "Primary Table in wrong state" },
+  { 763,  HA_WRONG_CREATE_OPTION, SE, "Invalid undo buffer size" },
+  { 764,  HA_WRONG_CREATE_OPTION, SE, "Invalid extent size" },
+  { 765,  DMEC, SE, "Out of filegroup records" },
+  { 750,  IE, SE, "Invalid file type" },
+  { 751,  DMEC, SE, "Out of file records" },
+  { 752,  DMEC, SE, "Invalid file format" },
+  { 753,  IE, SE, "Invalid filegroup for file" },
+  { 754,  IE, SE, "Invalid filegroup version when creating file" },
+  { 755,  DMEC, SE, "Invalid tablespace" },
+  { 756,  DMEC, SE, "Index on disk column is not supported" },
+  { 757,  DMEC, SE, "Varsize bitfield not supported" },
+  { 758,  DMEC, SE, "Tablespace has changed" },
+  { 759,  DMEC, SE, "Invalid tablespace version " },
+  { 760,  DMEC, SE, "File already exists", },
+  { 761,  DMEC, SE, "Unable to drop table as backup is in progress" },
+  { 762,  DMEC, SE, "Unable to alter table as backup is in progress" },
+  { 766,  DMEC, SE, "Can't drop file, no such file" },
+  { 767,  DMEC, SE, "Can't drop filegroup, no such filegroup" },
+  { 768,  DMEC, SE, "Can't drop filegroup, filegroup is used" },
+  { 769,  DMEC, SE, "Drop undofile not supported, drop logfile group instead" },
+  { 770,  DMEC, SE, "Can't drop file, file is used" },
+  { 241,  HA_ERR_TABLE_DEF_CHANGED, SE, "Invalid schema object version" },
+  { 283,  HA_ERR_NO_SUCH_TABLE, SE, "Table is being dropped" },
+  { 284,  HA_ERR_TABLE_DEF_CHANGED, SE, "Table not defined in transaction coordinator" },
+  { 285,  DMEC, SE, "Unknown table error in transaction coordinator" },
+  { 881,  DMEC, SE, "Unable to create table, out of data pages (increase DataMemory) " },
+  { 906,  DMEC, SE, "Unsupported attribute type in index" },
+  { 907,  DMEC, SE, "Unsupported character set in table or index" },
+  { 908,  DMEC, IS, "Invalid ordered index tree node size" },
+  { 1225, DMEC, SE, "Table not defined in local query handler" },
+  { 1226, DMEC, SE, "Table is being dropped" },
+  { 1228, DMEC, SE, "Cannot use drop table for drop index" },
+  { 1229, DMEC, SE, "Too long frm data supplied" },
+  { 1231, DMEC, SE, "Invalid table or index to scan" },
+  { 1232, DMEC, SE, "Invalid table or index to scan" },
 
   /**
    * FunctionNotImplemented
    */
-  { 4003, NI, "Function not implemented yet" },
+  { 4003, DMEC, NI, "Function not implemented yet" },
 
   /**
    * Backup error codes
    */ 
 
-  { 1300, IE, "Undefined error" },
-  { 1301, IE, "Backup issued to not master (reissue command to master)" },
-  { 1302, IE, "Out of backup record" },
-  { 1303, IS, "Out of resources" },
-  { 1304, IE, "Sequence failure" },
-  { 1305, IE, "Backup definition not implemented" },
-  { 1306, AE, "Backup not supported in diskless mode (change Diskless)" },
-
-  { 1321, IE, "Backup aborted by application" },
-  { 1322, IE, "Backup already completed" },
-  { 1323, IE, "1323" },
-  { 1324, IE, "Backup log buffer full" },
-  { 1325, IE, "File or scan error" },
-  { 1326, IE, "Backup abortet due to node failure" },
-  { 1327, IE, "1327" },
-  
-  { 1340, IE, "Backup undefined error" },
-  { 1342, AE, "Backup failed to allocate buffers (check configuration)" },
-  { 1343, AE, "Backup failed to setup fs buffers (check configuration)" },
-  { 1344, AE, "Backup failed to allocate tables (check configuration)" },
-  { 1345, AE, "Backup failed to insert file header (check configuration)" },
-  { 1346, AE, "Backup failed to insert table list (check configuration)" },
-  { 1347, AE, "Backup failed to allocate table memory (check configuration)" },
-  { 1348, AE, "Backup failed to allocate file record (check configuration)" },
-  { 1349, AE, "Backup failed to allocate attribute record (check configuration)" },
-  { 1329, AE, "Backup during software upgrade not supported" },
+  { 1300, DMEC, IE, "Undefined error" },
+  { 1301, DMEC, IE, "Backup issued to not master (reissue command to master)" },
+  { 1302, DMEC, IE, "Out of backup record" },
+  { 1303, DMEC, IS, "Out of resources" },
+  { 1304, DMEC, IE, "Sequence failure" },
+  { 1305, DMEC, IE, "Backup definition not implemented" },
+  { 1306, DMEC, AE, "Backup not supported in diskless mode (change Diskless)" },
+
+  { 1321, DMEC, IE, "Backup aborted by application" },
+  { 1322, DMEC, IE, "Backup already completed" },
+  { 1323, DMEC, IE, "1323" },
+  { 1324, DMEC, IE, "Backup log buffer full" },
+  { 1325, DMEC, IE, "File or scan error" },
+  { 1326, DMEC, IE, "Backup aborted due to node failure" },
+  { 1327, DMEC, IE, "1327" },
+  
+  { 1340, DMEC, IE, "Backup undefined error" },
+  { 1342, DMEC, AE, "Backup failed to allocate buffers (check configuration)" },
+  { 1343, DMEC, AE, "Backup failed to setup fs buffers (check configuration)" },
+  { 1344, DMEC, AE, "Backup failed to allocate tables (check configuration)" },
+  { 1345, DMEC, AE, "Backup failed to insert file header (check configuration)" },
+  { 1346, DMEC, AE, "Backup failed to insert table list (check configuration)" },
+  { 1347, DMEC, AE, "Backup failed to allocate table memory (check configuration)" },
+  { 1348, DMEC, AE, "Backup failed to allocate file record (check configuration)" },
+  { 1349, DMEC, AE, "Backup failed to allocate attribute record (check configuration)" },
+  { 1329, DMEC, AE, "Backup during software upgrade not supported" },
   
   /**
    * Still uncategorized
    */
-  { 720,  AE, "Attribute name reused in table definition" },
-  { 4004, AE, "Attribute name not found in the Table" },
-  
-  { 4100, AE, "Status Error in NDB" },
-  { 4101, AE, "No connections to NDB available and connect failed" },
-  { 4102, AE, "Type in NdbTamper not correct" },
-  { 4103, AE, "No schema connections to NDB available and connect failed" },
-  { 4104, AE, "Ndb Init in wrong state, destroy Ndb object and create a new" },
-  { 4105, AE, "Too many Ndb objects" },
-  { 4106, AE, "All Not NULL attribute have not been defined" },
-  { 4114, AE, "Transaction is already completed" },
-  { 4116, AE, "Operation was not defined correctly, probably missing a key" },
-  { 4117, AE, "Could not start transporter, configuration error"}, 
-  { 4118, AE, "Parameter error in API call" },
-  { 4300, AE, "Tuple Key Type not correct" },
-  { 4301, AE, "Fragment Type not correct" },
-  { 4302, AE, "Minimum Load Factor not correct" },
-  { 4303, AE, "Maximum Load Factor not correct" },
-  { 4304, AE, "Maximum Load Factor smaller than Minimum" },
-  { 4305, AE, "K value must currently be set to 6" },
-  { 4306, AE, "Memory Type not correct" },
-  { 4307, AE, "Invalid table name" },
-  { 4308, AE, "Attribute Size not correct" },
-  { 4309, AE, "Fixed array too large, maximum 64000 bytes" },
-  { 4310, AE, "Attribute Type not correct" },
-  { 4311, AE, "Storage Mode not correct" },
-  { 4312, AE, "Null Attribute Type not correct" },
-  { 4313, AE, "Index only storage for non-key attribute" },
-  { 4314, AE, "Storage Type of attribute not correct" },
-  { 4315, AE, "No more key attributes allowed after defining variable length key attribute" },
-  { 4316, AE, "Key attributes are not allowed to be NULL attributes" },
-  { 4317, AE, "Too many primary keys defined in table" },
-  { 4318, AE, "Invalid attribute name" },
-  { 4319, AE, "createAttribute called at erroneus place" },
-  { 4322, AE, "Attempt to define distribution key when not prepared to" },
-  { 4323, AE, "Distribution Key set on table but not defined on first attribute" },
-  { 4324, AE, "Attempt to define distribution group when not prepared to" },
-  { 4325, AE, "Distribution Group set on table but not defined on first attribute" },
-  { 4326, AE, "Distribution Group with erroneus number of bits" },
-  { 4327, AE, "Distribution Group with 1 byte attribute is not allowed" },
-  { 4328, AE, "Disk memory attributes not yet supported" },
-  { 4329, AE, "Variable stored attributes not yet supported" },
-
-  { 4400, AE, "Status Error in NdbSchemaCon" },
-  { 4401, AE, "Only one schema operation per schema transaction" },
-  { 4402, AE, "No schema operation defined before calling execute" },
-
-  { 4501, AE, "Insert in hash table failed when getting table information from Ndb" },
-  { 4502, AE, "GetValue not allowed in Update operation" },
-  { 4503, AE, "GetValue not allowed in Insert operation" },
-  { 4504, AE, "SetValue not allowed in Read operation" },
-  { 4505, AE, "NULL value not allowed in primary key search" },
-  { 4506, AE, "Missing getValue/setValue when calling execute" },
-  { 4507, AE, "Missing operation request when calling execute" },
-
-  { 4200, AE, "Status Error when defining an operation" },
-  { 4201, AE, "Variable Arrays not yet supported" },
-  { 4202, AE, "Set value on tuple key attribute is not allowed" },
-  { 4203, AE, "Trying to set a NOT NULL attribute to NULL" },
-  { 4204, AE, "Set value and Read/Delete Tuple is incompatible" },
-  { 4205, AE, "No Key attribute used to define tuple" },
-  { 4206, AE, "Not allowed to equal key attribute twice" },
-  { 4207, AE, "Key size is limited to 4092 bytes" },
-  { 4208, AE, "Trying to read a non-stored attribute" },
-  { 4209, AE, "Length parameter in equal/setValue is incorrect" },
-  { 4210, AE, "Ndb sent more info than the length he specified" },
-  { 4211, AE, "Inconsistency in list of NdbRecAttr-objects" },
-  { 4212, AE, "Ndb reports NULL value on Not NULL attribute" },
-  { 4213, AE, "Not all data of an attribute has been received" },
-  { 4214, AE, "Not all attributes have been received" },
-  { 4215, AE, "More data received than reported in TCKEYCONF message" },
-  { 4216, AE, "More than 8052 bytes in setValue cannot be handled" },
-  { 4217, AE, "It is not allowed to increment any other than unsigned ints" },
-  { 4218, AE, "Currently not allowed to increment NULL-able attributes" },
-  { 4219, AE, "Maximum size of interpretative attributes are 64 bits" },
-  { 4220, AE, "Maximum size of interpretative attributes are 64 bits" },
-  { 4221, AE, "Trying to jump to a non-defined label" },
-  { 4222, AE, "Label was not found, internal error" },
-  { 4223, AE, "Not allowed to create jumps to yourself" },
-  { 4224, AE, "Not allowed to jump to a label in a different subroutine" },
-  { 4225, AE, "All primary keys defined, call setValue/getValue"},
-  { 4226, AE, "Bad number when defining a label" },
-  { 4227, AE, "Bad number when defining a subroutine" },
-  { 4228, AE, "Illegal interpreter function in scan definition" },
-  { 4229, AE, "Illegal register in interpreter function definition" },
-  { 4230, AE, "Illegal state when calling getValue, probably not a read" },
-  { 4231, AE, "Illegal state when calling interpreter routine" },
-  { 4233, AE, "Calling execute (synchronous) when already prepared asynchronous transaction exists" },
-  { 4234, AE, "Illegal to call setValue in this state" },
-  { 4235, AE, "No callback from execute" },
-  { 4236, AE, "Trigger name too long" },
-  { 4237, AE, "Too many triggers" },
-  { 4238, AE, "Trigger not found" },
-  { 4239, AE, "Trigger with given name already exists"},
-  { 4240, AE, "Unsupported trigger type"},
-  { 4241, AE, "Index name too long" },
-  { 4242, AE, "Too many indexes" },
-  { 4243, AE, "Index not found" },
-  { 4244, OE, "Index or table with given name already exists" },
-  { 4247, AE, "Illegal index/trigger create/drop/alter request" },
-  { 4248, AE, "Trigger/index name invalid" },
-  { 4249, AE, "Invalid table" },
-  { 4250, AE, "Invalid index type or index logging option" },
-  { 4251, AE, "Cannot create unique index, duplicate keys found" },
-  { 4252, AE, "Failed to allocate space for index" },
-  { 4253, AE, "Failed to create index table" },
-  { 4254, AE, "Table not an index table" },
-  { 4255, AE, "Hash index attributes must be specified in same order as table attributes" },
-  { 4258, AE, "Cannot create unique index, duplicate attributes found in definition" },
-  { 4259, AE, "Invalid set of range scan bounds" },
-  { 4260, UD, "NdbScanFilter: Operator is not defined in NdbScanFilter::Group"},
-  { 4261, UD, "NdbScanFilter: Column is NULL"},
-  { 4262, UD, "NdbScanFilter: Condition is out of bounds"},
-  { 4263, IE, "Invalid blob attributes or invalid blob parts table" },
-  { 4264, AE, "Invalid usage of blob attribute" },
-  { 4265, AE, "Method is not valid in current blob state" },
-  { 4266, AE, "Invalid blob seek position" },
-  { 4267, IE, "Corrupted blob value" },
-  { 4268, IE, "Error in blob head update forced rollback of transaction" },
-  { 4269, IE, "No connection to ndb management server" },
-  { 4270, IE, "Unknown blob error" },
-  { 4335, AE, "Only one autoincrement column allowed per table. Having a table without primary key uses an autoincremented hidden key, i.e. a table without a primary key can not have an autoincremented column" },
-  { 4271, AE, "Invalid index object, not retrieved via getIndex()" }
+  { 720,  DMEC, AE, "Attribute name reused in table definition" },
+  { 1405, DMEC, NR, "Subscriber manager busy with node recovery" },
+  { 1407, DMEC, SE, "Subscription not found in subscriber manager" },
+  { 1411, DMEC, TR, "Subscriber manager busy with adding/removing a subscriber" },
+  { 1412, DMEC, IS, "Can't accept more subscribers, out of space in pool" },
+  { 1413, DMEC, TR, "Subscriber manager busy with adding the subscription" },
+  { 1414, DMEC, TR, "Subscriber manager has subscribers on this subscription" },
+  { 1415, DMEC, SE, "Subscription not unique in subscriber manager" },
+  { 1416, DMEC, IS, "Can't accept more subscriptions, out of space in pool" },
+  { 1417, DMEC, SE, "Table in subscription not defined, probably dropped" },
+
+  { 4004, DMEC, AE, "Attribute name not found in the Table" },
+  
+  { 4100, DMEC, AE, "Status Error in NDB" },
+  { 4101, DMEC, AE, "No connections to NDB available and connect failed" },
+  { 4102, DMEC, AE, "Type in NdbTamper not correct" },
+  { 4103, DMEC, AE, "No schema connections to NDB available and connect failed" },
+  { 4104, DMEC, AE, "Ndb Init in wrong state, destroy Ndb object and create a new" },
+  { 4105, DMEC, AE, "Too many Ndb objects" },
+  { 4106, DMEC, AE, "All Not NULL attribute have not been defined" },
+  { 4114, DMEC, AE, "Transaction is already completed" },
+  { 4116, DMEC, AE, "Operation was not defined correctly, probably missing a key" },
+  { 4117, DMEC, AE, "Could not start transporter, configuration error"}, 
+  { 4118, DMEC, AE, "Parameter error in API call" },
+  { 4300, DMEC, AE, "Tuple Key Type not correct" },
+  { 4301, DMEC, AE, "Fragment Type not correct" },
+  { 4302, DMEC, AE, "Minimum Load Factor not correct" },
+  { 4303, DMEC, AE, "Maximum Load Factor not correct" },
+  { 4304, DMEC, AE, "Maximum Load Factor smaller than Minimum" },
+  { 4305, DMEC, AE, "K value must currently be set to 6" },
+  { 4306, DMEC, AE, "Memory Type not correct" },
+  { 4307, DMEC, AE, "Invalid table name" },
+  { 4308, DMEC, AE, "Attribute Size not correct" },
+  { 4309, DMEC, AE, "Fixed array too large, maximum 64000 bytes" },
+  { 4310, DMEC, AE, "Attribute Type not correct" },
+  { 4311, DMEC, AE, "Storage Mode not correct" },
+  { 4312, DMEC, AE, "Null Attribute Type not correct" },
+  { 4313, DMEC, AE, "Index only storage for non-key attribute" },
+  { 4314, DMEC, AE, "Storage Type of attribute not correct" },
+  { 4315, DMEC, AE, "No more key attributes allowed after defining variable length key attribute" },
+  { 4316, DMEC, AE, "Key attributes are not allowed to be NULL attributes" },
+  { 4317, DMEC, AE, "Too many primary keys defined in table" },
+  { 4318, DMEC, AE, "Invalid attribute name" },
+  { 4319, DMEC, AE, "createAttribute called at erroneous place" },
+  { 4322, DMEC, AE, "Attempt to define distribution key when not prepared to" },
+  { 4323, DMEC, AE, "Distribution Key set on table but not defined on first attribute" },
+  { 4324, DMEC, AE, "Attempt to define distribution group when not prepared to" },
+  { 4325, DMEC, AE, "Distribution Group set on table but not defined on first attribute" },
+  { 4326, DMEC, AE, "Distribution Group with erroneous number of bits" },
+  { 4327, DMEC, AE, "Distribution Group with 1 byte attribute is not allowed" },
+  { 4328, DMEC, AE, "Disk memory attributes not yet supported" },
+  { 4329, DMEC, AE, "Variable stored attributes not yet supported" },
+
+  { 4400, DMEC, AE, "Status Error in NdbSchemaCon" },
+  { 4401, DMEC, AE, "Only one schema operation per schema transaction" },
+  { 4402, DMEC, AE, "No schema operation defined before calling execute" },
+
+  { 4501, DMEC, AE, "Insert in hash table failed when getting table information from Ndb" },
+  { 4502, DMEC, AE, "GetValue not allowed in Update operation" },
+  { 4503, DMEC, AE, "GetValue not allowed in Insert operation" },
+  { 4504, DMEC, AE, "SetValue not allowed in Read operation" },
+  { 4505, DMEC, AE, "NULL value not allowed in primary key search" },
+  { 4506, DMEC, AE, "Missing getValue/setValue when calling execute" },
+  { 4507, DMEC, AE, "Missing operation request when calling execute" },
+
+  { 4200, DMEC, AE, "Status Error when defining an operation" },
+  { 4201, DMEC, AE, "Variable Arrays not yet supported" },
+  { 4202, DMEC, AE, "Set value on tuple key attribute is not allowed" },
+  { 4203, DMEC, AE, "Trying to set a NOT NULL attribute to NULL" },
+  { 4204, DMEC, AE, "Set value and Read/Delete Tuple is incompatible" },
+  { 4205, DMEC, AE, "No Key attribute used to define tuple" },
+  { 4206, DMEC, AE, "Not allowed to equal key attribute twice" },
+  { 4207, DMEC, AE, "Key size is limited to 4092 bytes" },
+  { 4208, DMEC, AE, "Trying to read a non-stored attribute" },
+  { 4209, DMEC, AE, "Length parameter in equal/setValue is incorrect" },
+  { 4210, DMEC, AE, "Ndb sent more info than the length he specified" },
+  { 4211, DMEC, AE, "Inconsistency in list of NdbRecAttr-objects" },
+  { 4212, DMEC, AE, "Ndb reports NULL value on Not NULL attribute" },
+  { 4213, DMEC, AE, "Not all data of an attribute has been received" },
+  { 4214, DMEC, AE, "Not all attributes have been received" },
+  { 4215, DMEC, AE, "More data received than reported in TCKEYCONF message" },
+  { 4216, DMEC, AE, "More than 8052 bytes in setValue cannot be handled" },
+  { 4217, DMEC, AE, "It is not allowed to increment any other than unsigned ints" },
+  { 4218, DMEC, AE, "Currently not allowed to increment NULL-able attributes" },
+  { 4219, DMEC, AE, "Maximum size of interpretative attributes are 64 bits" },
+  { 4220, DMEC, AE, "Maximum size of interpretative attributes are 64 bits" },
+  { 4221, DMEC, AE, "Trying to jump to a non-defined label" },
+  { 4222, DMEC, AE, "Label was not found, internal error" },
+  { 4223, DMEC, AE, "Not allowed to create jumps to yourself" },
+  { 4224, DMEC, AE, "Not allowed to jump to a label in a different subroutine" },
+  { 4225, DMEC, AE, "All primary keys defined, call setValue/getValue"},
+  { 4226, DMEC, AE, "Bad number when defining a label" },
+  { 4227, DMEC, AE, "Bad number when defining a subroutine" },
+  { 4228, DMEC, AE, "Illegal interpreter function in scan definition" },
+  { 4229, DMEC, AE, "Illegal register in interpreter function definition" },
+  { 4230, DMEC, AE, "Illegal state when calling getValue, probably not a read" },
+  { 4231, DMEC, AE, "Illegal state when calling interpreter routine" },
+  { 4233, DMEC, AE, "Calling execute (synchronous) when already prepared asynchronous transaction exists" },
+  { 4234, DMEC, AE, "Illegal to call setValue in this state" },
+  { 4235, DMEC, AE, "No callback from execute" },
+  { 4236, DMEC, AE, "Trigger name too long" },
+  { 4237, DMEC, AE, "Too many triggers" },
+  { 4238, DMEC, AE, "Trigger not found" },
+  { 4239, DMEC, AE, "Trigger with given name already exists"},
+  { 4240, DMEC, AE, "Unsupported trigger type"},
+  { 4241, DMEC, AE, "Index name too long" },
+  { 4242, DMEC, AE, "Too many indexes" },
+  { 4243, DMEC, AE, "Index not found" },
+  { 4244, HA_ERR_TABLE_EXIST, OE, "Index or table with given name already exists" },
+  { 4247, DMEC, AE, "Illegal index/trigger create/drop/alter request" },
+  { 4248, DMEC, AE, "Trigger/index name invalid" },
+  { 4249, DMEC, AE, "Invalid table" },
+  { 4250, DMEC, AE, "Invalid index type or index logging option" },
+  { 4251, DMEC, AE, "Cannot create unique index, duplicate keys found" },
+  { 4252, DMEC, AE, "Failed to allocate space for index" },
+  { 4253, DMEC, AE, "Failed to create index table" },
+  { 4254, DMEC, AE, "Table not an index table" },
+  { 4255, DMEC, AE, "Hash index attributes must be specified in same order as table attributes" },
+  { 4258, DMEC, AE, "Cannot create unique index, duplicate attributes found in definition" },
+  { 4259, DMEC, AE, "Invalid set of range scan bounds" },
+  { 4260, DMEC, UD, "NdbScanFilter: Operator is not defined in NdbScanFilter::Group"},
+  { 4261, DMEC, UD, "NdbScanFilter: Column is NULL"},
+  { 4262, DMEC, UD, "NdbScanFilter: Condition is out of bounds"},
+  { 4263, DMEC, IE, "Invalid blob attributes or invalid blob parts table" },
+  { 4264, DMEC, AE, "Invalid usage of blob attribute" },
+  { 4265, DMEC, AE, "Method is not valid in current blob state" },
+  { 4266, DMEC, AE, "Invalid blob seek position" },
+  { 4267, DMEC, IE, "Corrupted blob value" },
+  { 4268, DMEC, IE, "Error in blob head update forced rollback of transaction" },
+  { 4269, DMEC, IE, "No connection to ndb management server" },
+  { 4270, DMEC, IE, "Unknown blob error" },
+  { 4335, DMEC, AE, "Only one autoincrement column allowed per table. Having a table without primary key uses an autoincremented hidden key, i.e. a table without a primary key can not have an autoincremented column" },
+  { 4271, DMEC, AE, "Invalid index object, not retrieved via getIndex()" }
 };
 
 static
@@ -611,6 +654,7 @@
     if(ErrorCodes[i].code == error->code){
       error->classification = ErrorCodes[i].classification;
       error->message        = ErrorCodes[i].message;
+      error->mysql_code     = ErrorCodes[i].mysql_code;
       found = 1;
       break;
     }
@@ -619,6 +663,7 @@
   if(!found){
     error->classification = UE;
     error->message        = "Unknown error code";
+    error->mysql_code     = DMEC;
   }
 
   found = 0;
@@ -636,6 +681,7 @@
   error->details = 0;
 }
 
+#if CHECK_ERRORCODES
 int
 checkErrorCodes(){
   int i, j;
@@ -652,7 +698,6 @@
 
 /*static const int a = checkErrorCodes();*/
 
-#if CHECK_ERRORCODES
 int main(void){
   checkErrorCodes();
   return 0;
Thread
bk commit into 5.1 tree (jonas:1.1962)jonas14 Dec