MySQL Lists are EOL. Please join:

List: Commits « Previous Message | Next Message »
From: Frazer Clement  Date: March 20 2009 3:12pm
Subject:bzr push into mysql-5.1-telco-7.0 branch (frazer:2978 to 2979)
View as plain text  
 2979 Frazer Clement	2009-03-20
      Fixes to testLimits testcase
      
       - Remove code exercising old NdbAPI to get 'short' signal 
         trains as mysql-5.1-telco-7.0 no longer generates short
         signal trains
       - Add trans->close() calls on error paths to avoid testcase
         timeouts
       - Modify mechanism used to simulate SegmentedSection exhaustion
         to deal with MT 2-layer/chunk segment pooling mechanism.
       - Fix issue with ExhaustSegmentedSectionIx and TransIdAI arriving
         after transaction abort.
      
      Remaining to fix : DropSignalFragments and ndbmtd.
      modified:
        storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
        storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp
        storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
        storage/ndb/src/kernel/vm/LongSignal.cpp
        storage/ndb/src/kernel/vm/TransporterCallback.cpp
        storage/ndb/test/ndbapi/testLimits.cpp

 2978 Tomas Ulin	2009-03-20 [merge]
      revert to run mt

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2009-03-02 09:28:26 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/Dbtc.hpp	2009-03-20 15:10:06 +0000
@@ -246,10 +246,11 @@ public:
    * from index table lookup
    */
   enum IndexTransIdAIState {
-    ITAS_WAIT_HEADER = 0,     // Initial state
-    ITAS_WAIT_FRAGID = 1,     // Waiting for fragment id word
-    ITAS_WAIT_KEY = 2,        // Waiting for (more) key information
-    ITAS_ALL_RECEIVED = 3     // All TransIdAI info received
+    ITAS_WAIT_HEADER   = 0,     // Initial state
+    ITAS_WAIT_FRAGID   = 1,     // Waiting for fragment id word
+    ITAS_WAIT_KEY      = 2,     // Waiting for (more) key information
+    ITAS_ALL_RECEIVED  = 3,     // All TransIdAI info received
+    ITAS_WAIT_KEY_FAIL = 4     // Failed collecting key
   };
   
 
@@ -1876,9 +1877,6 @@ private:
   bool testFragmentDrop(Signal* signal);
 #endif
 
-  // For Error inserts
-  Uint32 errorInsertHoardedSegments;
-
   /************************** API CONNECT RECORD ***********************/
   /* *******************************************************************/
   /* THE API CONNECT RECORD CONTAINS THE CONNECTION RECORD TO WHICH THE*/

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp	2008-12-03 19:51:33 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcInit.cpp	2009-03-20 15:10:06 +0000
@@ -331,8 +331,6 @@ Dbtc::Dbtc(Block_context& ctx):
   tcFailRecord = 0;
   c_apiConTimer = 0;
   c_apiConTimer_line = 0;
-
-  errorInsertHoardedSegments= RNIL;
 }//Dbtc::Dbtc()
 
 Dbtc::~Dbtc() 

=== modified file 'storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2009-03-02 09:28:26 +0000
+++ b/storage/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp	2009-03-20 15:10:06 +0000
@@ -161,6 +161,11 @@ operator<<(NdbOut& out, Dbtc::ScanFragRe
 }
 #endif
 
+#ifdef ERROR_INSERT
+extern int ErrorSignalReceive;
+extern int ErrorMaxSegmentsToSeize;
+#endif
+
 void
 Dbtc::updateBuddyTimer(ApiConnectRecordPtr apiPtr)
 {
@@ -2597,44 +2602,33 @@ void Dbtc::execTCKEYREQ(Signal* signal) 
   UintR TapiConnectptrIndex = apiConnectptr.i;
   UintR TsenderData = tcKeyReq->senderData;
 
-  if (ERROR_INSERTED(8065) || 
-      ERROR_INSERTED(8066) ||
-      ERROR_INSERTED(8067))
-  {
-    /* Consume all but 10(8065) or all but 1 (8066) or all (8067) of 
-     * the SegmentedSection buffers to allow testing of what happens 
-     * when they're exhausted, either in this signal or one to follow
-     * 8068 frees all 'hoarded' segments
-     */
-    Uint32 segmentsToLeave= ERROR_INSERTED(8065)? 
-      10 : 
-      ERROR_INSERTED(8066) ? 
-      1 :
-      0;
-    Uint32 freeSegments= g_sectionSegmentPool.getNoOfFree();
-    DEBUG("Hoarding all but " << segmentsToLeave << 
-          " of " << freeSegments << " free segments");
-    if (freeSegments >= segmentsToLeave)
-    {
-      Uint32 numToAlloc= (g_sectionSegmentPool.getNoOfFree() - segmentsToLeave);
-      Uint32 segmentsIVal= errorInsertHoardedSegments;
-      Uint32 space[SectionSegment::DataLength];
-      
-      while (numToAlloc-- > 0)
-        appendToSection(segmentsIVal, space, SectionSegment::DataLength);
-      
-      errorInsertHoardedSegments= segmentsIVal;
-    }
+  if (ERROR_INSERTED(8065))
+  {
+    ErrorSignalReceive= 1;
+    ErrorMaxSegmentsToSeize= 10;
   }
-
-  if (ERROR_INSERTED(8068) && 
-      (errorInsertHoardedSegments != RNIL))
-  {
-    /* Free the SegmentedSection buffers taken previously */
-    DEBUG("Freeing hoarded segments");
-    releaseSection(errorInsertHoardedSegments);
-    errorInsertHoardedSegments= RNIL;
-  }   
+  if (ERROR_INSERTED(8066))
+  {
+    ErrorSignalReceive= 1;
+    ErrorMaxSegmentsToSeize= 5;
+  }
+  if (ERROR_INSERTED(8067))
+  {
+    ErrorSignalReceive= 1;
+    ErrorMaxSegmentsToSeize= 0;
+  }
+  if (ERROR_INSERTED(8068))
+  {
+    ErrorSignalReceive= 0;
+    ErrorMaxSegmentsToSeize= 0;
+    CLEAR_ERROR_INSERT_VALUE;
+    DEBUG("Max segments to seize cleared");
+  }
+#ifdef ERROR_INSERT
+  if (ErrorSignalReceive)
+    DEBUG("Max segments to seize : " 
+          << ErrorMaxSegmentsToSeize);
+#endif
 
   /* Key and attribute lengths are passed in the header for 
    * short TCKEYREQ and  passed as section lengths for long 
@@ -13424,12 +13418,18 @@ bool  Dbtc::saveTRANSID_AI(Signal* signa
    *                                   -> ITAS_WAIT_KEY
    *
    *   [2..N]   Base table primary    ITAS_WAIT_KEY
-   *            key info               -> [ ITAS_WAIT_KEY ]
+   *            key info               -> [ ITAS_WAIT_KEY |
+   *                                        ITAS_WAIT_KEY_FAIL ]
    *                                   -> ITAS_ALL_RECEIVED
    *
    * The outgoing KeyInfo section contains the base
    * table primary key info, with the fragment id passed
    * as the distribution key.
+   * ITAS_WAIT_KEY_FAIL state is entered when there is no 
+   * space to store received TRANSID_AI information and
+   * key collection must fail.  Transaction abort is performed
+   * once all TRANSID_AI is received, and the system waits in
+   * ITAS_WAIT_KEY_FAIL state until then.
    *
    */
   Uint32 remain= len;
@@ -13474,19 +13474,41 @@ bool  Dbtc::saveTRANSID_AI(Signal* signa
                            remain))
       {
         jam();
+        remain= 0;
+        break;
+      }
+      else
+      {
+        jam();
 #ifdef VM_TRACE
-        ndbout_c("Dbtc::saveTRANSID_AI: Failed to seize beffer for TRANSID_AI\n");
+        ndbout_c("Dbtc::saveTRANSID_AI: Failed to seize buffer for TRANSID_AI\n");
 #endif
-        apiConnectptr.i = indexOp->connectionIndex;
-        ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
-        releaseIndexOperation(apiConnectptr.p, indexOp);
-        terrorCode = ZGET_DATAREC_ERROR;
-        abortErrorLab(signal);
-        return false;
+        indexOp->transIdAIState= ITAS_WAIT_KEY_FAIL;
+        /* Fall through to ITAS_WAIT_KEY_FAIL state handling */
       }
+    }
 
-      remain= 0;
-      break;
+    case ITAS_WAIT_KEY_FAIL:
+    {
+      /* Failed when collecting key previously - if we have all the
+       * TRANSID_AI now then we abort
+       */
+      if (indexOp->pendingTransIdAI > len)
+      {
+        /* Still some TransIdAI to arrive, keep waiting as if we had
+         * stored it
+         */
+        remain= 0;
+        break;
+      }
+
+      /* All TransIdAI has arrived, abort */
+      apiConnectptr.i = indexOp->connectionIndex;
+      ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
+      releaseIndexOperation(apiConnectptr.p, indexOp);
+      terrorCode = ZGET_DATAREC_ERROR;
+      abortErrorLab(signal);
+      return false;
     }
 
     case ITAS_ALL_RECEIVED:
@@ -13789,7 +13811,7 @@ void Dbtc::execTRANSID_AI(Signal* signal
       tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
       tcIndxRef->transId[0] = regApiPtr->transid[0];
       tcIndxRef->transId[1] = regApiPtr->transid[1];
-      tcIndxRef->errorCode = 4000;
+      tcIndxRef->errorCode = ZGET_DATAREC_ERROR;
       tcIndxRef->errorData = 0;
       sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
                  TcKeyRef::SignalLength, JBB);

=== modified file 'storage/ndb/src/kernel/vm/LongSignal.cpp'
--- a/storage/ndb/src/kernel/vm/LongSignal.cpp	2008-10-08 19:09:05 +0000
+++ b/storage/ndb/src/kernel/vm/LongSignal.cpp	2009-03-20 15:10:06 +0000
@@ -179,6 +179,12 @@ dupSection(SPC_ARG Uint32& copyFirstIVal
   return true;
 }
 
+#ifdef ERROR_INSERT
+extern int ErrorSignalReceive;
+extern int ErrorMaxSegmentsToSeize;
+#endif
+
+
 /**
  * appendToSection
  * Append supplied words to the chain of section segments
@@ -211,6 +217,15 @@ appendToSection(SPC_ARG Uint32& firstSeg
 
   if (firstSegmentIVal == RNIL)
   {
+#ifdef ERROR_INSERT
+    /* Simulate running out of segments */
+    if ((ErrorSignalReceive == 1) && 
+        (ErrorMaxSegmentsToSeize == 0))
+    {
+      ndbout_c("append exhausted on first segment");
+      return false;
+    }
+#endif
     /* First data to be added to this section */
     bool result= g_sectionSegmentPool.seize(SPC_SEIZE_ARG firstPtr);
 
@@ -245,6 +260,10 @@ appendToSection(SPC_ARG Uint32& firstSeg
 
   firstPtr.p->m_sz+= len;
 
+#ifdef ERROR_INSERT
+  Uint32 remainSegs= (Uint32) ErrorMaxSegmentsToSeize - 1;
+#endif
+
   while(len > remain) {
     /* Fill this segment, and link in another one
      * Note that we can memcpy to a bad address with size 0
@@ -253,6 +272,18 @@ appendToSection(SPC_ARG Uint32& firstSeg
     src += remain;
     len -= remain;
     Ptr<SectionSegment> prevPtr= currPtr;
+
+#ifdef ERROR_INSERT
+    /* Simulate running out of segments */
+    if ((ErrorSignalReceive == 1) && 
+        (0 == remainSegs--))
+    {
+      ndbout_c("Append exhausted on segment %d", ErrorMaxSegmentsToSeize);
+      firstPtr.p->m_lastSegment= prevPtr.i;
+      firstPtr.p->m_sz-= len;
+      return false;
+    }
+#endif
     bool result = g_sectionSegmentPool.seize(SPC_SEIZE_ARG currPtr);
     if (!result)
     {
@@ -282,6 +313,16 @@ appendToSection(SPC_ARG Uint32& firstSeg
 bool
 import(SPC_ARG Ptr<SectionSegment> & first, const Uint32 * src, Uint32 len){
 
+#ifdef ERROR_INSERT
+  /* Simulate running out of segments */
+  if ((ErrorSignalReceive == 1) &&
+      (ErrorMaxSegmentsToSeize == 0))
+  {
+    ndbout_c("Import exhausted on first segment");
+    return false;
+  }
+#endif
+
   first.p = 0;
   if(g_sectionSegmentPool.seize(SPC_SEIZE_ARG first)){
     ;
@@ -295,11 +336,30 @@ import(SPC_ARG Ptr<SectionSegment> & fir
 
   Ptr<SectionSegment> currPtr = first;
 
+#ifdef ERROR_INSERT
+  Uint32 remainSegs= (Uint32) ErrorMaxSegmentsToSeize - 1;
+#endif
+
   while(len > SectionSegment::DataLength){
     memcpy(&currPtr.p->theData[0], src, 4 * SectionSegment::DataLength);
     src += SectionSegment::DataLength;
     len -= SectionSegment::DataLength;
     Ptr<SectionSegment> prevPtr = currPtr;
+
+#ifdef ERROR_INSERT
+    /* Simulate running out of segments */
+    if ((ErrorSignalReceive == 1) &&
+        (0 == remainSegs--))
+    {
+      ndbout_c("Import exhausted on segment %d", 
+               ErrorMaxSegmentsToSeize);
+      first.p->m_lastSegment= prevPtr.i;
+      first.p->m_sz-= len;
+      prevPtr.p->m_nextSegment = RNIL;
+      return false;
+    }
+#endif
+
     if(g_sectionSegmentPool.seize(SPC_SEIZE_ARG currPtr)){
       prevPtr.p->m_nextSegment = currPtr.i;
       ;

=== modified file 'storage/ndb/src/kernel/vm/TransporterCallback.cpp'
--- a/storage/ndb/src/kernel/vm/TransporterCallback.cpp	2009-03-14 20:42:04 +0000
+++ b/storage/ndb/src/kernel/vm/TransporterCallback.cpp	2009-03-20 15:10:06 +0000
@@ -39,6 +39,14 @@
  */
 SectionSegmentPool g_sectionSegmentPool;
 
+/* Instance debugging vars
+ * Set from DBTC
+ */
+#ifdef ERROR_INSERT
+int ErrorSignalReceive= 0;
+int ErrorMaxSegmentsToSeize= 0;
+#endif
+
 struct ConnectionError
 {
   enum TransporterError err;

=== modified file 'storage/ndb/test/ndbapi/testLimits.cpp'
--- a/storage/ndb/test/ndbapi/testLimits.cpp	2008-12-09 17:15:12 +0000
+++ b/storage/ndb/test/ndbapi/testLimits.cpp	2009-03-20 15:10:06 +0000
@@ -20,12 +20,14 @@
 #define CHECKNOTNULL(p) if ((p) == NULL) {          \
     ndbout << "Error at line " << __LINE__ << endl; \
     ERR(trans->getNdbError());                      \
+    trans->close();                                 \
     return NDBT_FAILED; }
 
 #define CHECKEQUAL(v, e) if ((e) != (v)) {            \
     ndbout << "Error at line " << __LINE__ <<         \
       " expected " << v << endl;                      \
     ERR(trans->getNdbError());                        \
+    trans->close();                                   \
     return NDBT_FAILED; }
 
 
@@ -83,7 +85,6 @@ int activateErrorInsert(NdbTransaction* 
     
 /* Test for correct behaviour using primary key operations
  * when an NDBD node's SegmentedSection pool is exhausted.
- * Long and Short TCKEYREQ variants are tested
  */
 int testSegmentedSectionPk(NDBT_Context* ctx, NDBT_Step* step){
   /*
@@ -94,11 +95,8 @@ int testSegmentedSectionPk(NDBT_Context*
    *                     TCKEYREQ in batch      Consume + send
    * Long TCKEYREQ     Initial import, not last
    *                     TCKEYREQ in batch      Consume + send
-   * Short TCKEYREQ    KeyInfo accumulate       Consume + send long
-   *                     (TCKEYREQ + KEYINFO)
-   * Short TCKEYREQ    AttrInfo accumulate      Consume + send short key
-   *                                             + long AI
-   *                      (TCKEYREQ + ATTRINFO)
+   * No testing of short TCKEYREQ variants as they cannot be
+   * generated in mysql-5.1-telco-6.4+
    */
 
   /* We just run on one table */
@@ -309,75 +307,6 @@ int testSegmentedSectionPk(NDBT_Context*
 
   trans->close();
 
-  /* Change error insert so that next TCKEYREQ will grab
-   * all but one SegmentedSection so that we can then test SegmentedSection
-   * exhaustion when importing the Key/AttrInfo words from the
-   * TCKEYREQ signal itself.
-   */
-  restarter.insertErrorInAllNodes(8066);
-
-
-  /* Now a 'short' TCKEYREQ, there will be space to import the
-   * short key, but not the AttrInfo
-   */
-  /* Start transaction on same node */
-  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
-                                             &smallKey[0],
-                                             smallKeySize));
-  
-  CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));
-  
-  CHECKEQUAL(0, bigInsertOldApi->insertTuple());
-  CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0, 
-                                       NdbDictionary::getValuePtr
-                                       (record,
-                                        smallRowBuf,
-                                        0)));
-  CHECKEQUAL(0, bigInsertOldApi->setValue(1, NdbDictionary::getValuePtr
-                                          (record,
-                                           smallRowBuf,
-                                           1)));
-
-  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
-
-  /* ZGET_DATABUF_ERR expected */
-  CHECKEQUAL(218, trans->getNdbError().code)
-
-  trans->close();
-
-  /* Change error insert so that there are no SectionSegments 
-   * This will cause failure when attempting to import the
-   * KeyInfo from the TCKEYREQ
-   */
-  restarter.insertErrorInAllNodes(8067);
-
-  /* Now a 'short' TCKEYREQ - there will be no space to import the key */
-  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
-                                             &smallKey[0],
-                                             smallKeySize));
-  
-  CHECKNOTNULL(bigInsertOldApi= trans->getNdbOperation(ctx->getTab()));
-  
-  CHECKEQUAL(0, bigInsertOldApi->insertTuple());
-  CHECKEQUAL(0, bigInsertOldApi->equal((Uint32)0, 
-                                       NdbDictionary::getValuePtr
-                                       (record,
-                                        smallRowBuf,
-                                        0)));
-  CHECKEQUAL(0, bigInsertOldApi->setValue(1, 
-                                          NdbDictionary::getValuePtr
-                                          (record,
-                                           smallRowBuf,
-                                           1)));
-
-  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
-
-  /* ZGET_DATABUF_ERR expected */
-  CHECKEQUAL(218, trans->getNdbError().code)
-
-  trans->close();  
-
-
   /* Finished with error insert, cleanup the error insertion
    * Error insert 8068 will free the hoarded segments
    */
@@ -403,7 +332,6 @@ int testSegmentedSectionPk(NDBT_Context*
   
 /* Test for correct behaviour using unique key operations
  * when an NDBD node's SegmentedSection pool is exhausted.
- * Long and Short TCKEYREQ variants are tested
  */
 int testSegmentedSectionIx(NDBT_Context* ctx, NDBT_Step* step){
   /* 
@@ -412,23 +340,13 @@ int testSegmentedSectionIx(NDBT_Context*
    * Long TCINDXREQ    Initial import           Consume + send 
    * Long TCINDXREQ    Build second TCKEYREQ    Consume + send short
    *                                             w. long base key
-   * Short TCINDXREQ   KeyInfo accumulate       Consume + send long
-   *                     (TCINDXREQ + KEYINFO)
-   * Short TCINDXREQ   AttrInfo accumulate      Consume + send short key
-   *                                             + long AI
-   *                     (TCINDXREQ + ATTRINFO)
    */
   /* We will generate : 
    *   10 SS left : 
    *     Long IndexReq with too long Key/AttrInfo
+   *    5 SS left :
    *     Long IndexReq read with short Key + Attrinfo to long 
    *       base table Key
-   *     Short IndexReq with long Keyinfo
-   *     Short IndexReq with long AttrInfo
-   *   1 SS left
-   *     Short IndexReq with any AttrInfo
-   *   0 SS left
-   *     Short IndexReq with any key info 
    */
   /* We just run on one table */
   if (strcmp(ctx->getTab()->getName(), "WIDE_2COL_IX") != 0)
@@ -643,6 +561,17 @@ int testSegmentedSectionIx(NDBT_Context*
                                              &smallKey[0],
                                              smallKeySize));
 
+  /* Activate error insert 8066 in this transaction, consumes
+   * all but 5 SectionSegments
+   */
+  CHECKEQUAL(NDBT_OK, activateErrorInsert(trans, 
+                                          baseRecord, 
+                                          ctx->getTab(),
+                                          smallRowBuf, 
+                                          &restarter, 
+                                          8066));
+  CHECKEQUAL(0, trans->execute(NdbTransaction::NoCommit));
+  
   CHECKNOTNULL(bigRead= trans->readTuple(ixRecord,
                                          bigAttrIxBuf,
                                          baseRecord,
@@ -655,137 +584,6 @@ int testSegmentedSectionIx(NDBT_Context*
 
   trans->close();
 
-  /* Now try with a 'short' TCINDXREQ, generated using the old Api 
-   * with a big index key value
-   */
-  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
-                                             &smallKey[0],
-                                             smallKeySize));
-  
-  const NdbDictionary::Index* index;
-  CHECKNOTNULL(index= pNdb->getDictionary()->
-               getIndex(indexName,
-                        ctx->getTab()->getName()));
-
-  NdbIndexOperation* bigReadOldApi;
-  CHECKNOTNULL(bigReadOldApi= trans->getNdbIndexOperation(index));
-
-  CHECKEQUAL(0, bigReadOldApi->readTuple());
-  /* We use the attribute id of the index, not the base table here */
-  CHECKEQUAL(0, bigReadOldApi->equal((Uint32)0, 
-                                     NdbDictionary::getValuePtr
-                                     (ixRecord,
-                                      bigKeyIxBuf,
-                                      1)));
-
-  CHECKNOTNULL(bigReadOldApi->getValue((Uint32)1));
-
-  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
-
-  /* ZGET_DATABUF_ERR expected */
-  CHECKEQUAL(218, trans->getNdbError().code)
-
-  trans->close();
-
-  /* Now try with a 'short' TCINDXREQ, generated using the old Api 
-   * with a big attrinfo value
-   */
-  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
-                                             &smallKey[0],
-                                             smallKeySize));
-  
-  NdbIndexOperation* bigUpdateOldApi;
-  CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));
-
-  CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
-  /* We use the attribute id of the index, not the base table here */
-  CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0, 
-                                       NdbDictionary::getValuePtr
-                                       (baseRecord,
-                                        smallRowBuf,
-                                        1)));
-
-  CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
-                                          NdbDictionary::getValuePtr
-                                          (baseRecord,
-                                           bigAttrIxBuf,
-                                           1)));
-  
-  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
-
-  /* ZGET_DATABUF_ERR expected */
-  CHECKEQUAL(218, trans->getNdbError().code)
-
-  trans->close();
-
-  /* Change error insert so that next TCINDXREQ will grab
-   * all but one SegmentedSection
-   */
-  restarter.insertErrorInAllNodes(8066);
-
-  /* Now a short TCINDXREQ where the KeyInfo from the TCINDXREQ
-   * can be imported, but the ATTRINFO can't
-   */
-  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
-                                             &smallKey[0],
-                                             smallKeySize));
-  
-  CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));
-
-  CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
-  /* We use the attribute id of the index, not the base table here */
-  CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0, 
-                                       NdbDictionary::getValuePtr
-                                       (baseRecord,
-                                        smallRowBuf,
-                                        1)));
-
-  CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
-                                          NdbDictionary::getValuePtr
-                                          (baseRecord,
-                                           bigAttrIxBuf,
-                                           1)));
-  
-  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
-
-  /* ZGET_DATABUF_ERR expected */
-  CHECKEQUAL(218, trans->getNdbError().code)
-
-  trans->close();
-
-  /* Change error insert so that there are no SectionSegments */
-  restarter.insertErrorInAllNodes(8067);
-
-  /* Now a short TCINDXREQ where the KeyInfo from the TCINDXREQ
-   * can't be imported
-   */
-  CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
-                                             &smallKey[0],
-                                             smallKeySize));
-
-  CHECKNOTNULL(bigUpdateOldApi= trans->getNdbIndexOperation(index));
-
-  CHECKEQUAL(0, bigUpdateOldApi->updateTuple());
-  /* We use the attribute id of the index, not the base table here */
-  CHECKEQUAL(0, bigUpdateOldApi->equal((Uint32)0, 
-                                       NdbDictionary::getValuePtr
-                                       (baseRecord,
-                                        smallRowBuf,
-                                        1)));
-
-  CHECKEQUAL(0, bigUpdateOldApi->setValue((Uint32)1,
-                                          NdbDictionary::getValuePtr
-                                          (baseRecord,
-                                           bigAttrIxBuf,
-                                           1)));
-  
-  CHECKEQUAL(-1, trans->execute(NdbTransaction::NoCommit));
-
-  /* ZGET_DATABUF_ERR expected */
-  CHECKEQUAL(218, trans->getNdbError().code)
-
-  trans->close();
-
   /* Finished with error insert, cleanup the error insertion */
   CHECKNOTNULL(trans= pNdb->startTransaction(ctx->getTab(),
                                              &smallKey[0],
@@ -854,18 +652,18 @@ int testSegmentedSectionScan(NDBT_Contex
                                                 smallKeySize);
   CHECKNOTNULL(trans);
 
-  /* Activate error insert 8065 in this transaction, consumes
-   * all but 10 SectionSegments
+  /* Activate error insert 8066 in this transaction, consumes
+   * all but 5 SectionSegments
    */
   CHECKEQUAL(NDBT_OK, activateErrorInsert(trans, 
                                           record, 
                                           ctx->getTab(),
                                           smallRowBuf, 
                                           &restarter, 
-                                          8065));
+                                          8066));
 
-  /* Ok, now the chosen TC's node should have only 10 
-   * SegmentedSection buffers = ~ 60 words * 10 = 2400 bytes
+  /* Ok, now the chosen TC's node should have only 5 
+   * SegmentedSection buffers = ~ 60 words * 5 = 1200 bytes
    * A scan will always send 2 long sections (Receiver Ids
    * + AttrInfo), so let's start a scan with > 2400 bytes of
    * ATTRINFO and see what happens

Thread
bzr push into mysql-5.1-telco-7.0 branch (frazer:2978 to 2979) — Frazer Clement, 20 Mar