List: Commits
From: Ole John Aske  Date: July 4 2012 12:44pm
Subject: bzr push into mysql-5.5-cluster-7.2 branch (ole.john.aske:3957 to 3958)
 3958 Ole John Aske	2012-07-04 [merge]
      Merged various 'SPJ resource leakage fixes' from 7.2-spj -> mysql-5.5-cluster.

    modified:
      storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/ndbapi/ndberror.c
      storage/ndb/test/include/HugoQueries.hpp
      storage/ndb/test/ndbapi/testSpj.cpp
      storage/ndb/test/src/HugoQueries.cpp
      storage/ndb/test/tools/hugoJoin.cpp
 3957 John David Duncan	2012-06-27 [merge]
      local merge

    modified:
      mysql-test/suite/ndb_memcache/r/ttls_flags.result
      storage/ndb/memcache/unit/extvals.pl
=== modified file 'storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp	2012-05-08 08:03:29 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp	2012-06-19 13:29:32 +0000
@@ -69,6 +69,8 @@ private:
   void execDIH_SCAN_GET_NODES_REF(Signal*);
   void execDIH_SCAN_GET_NODES_CONF(Signal*);
 
+  void execSIGNAL_DROPPED_REP(Signal*);
+
   /**
    * Signals from LQH
    */
@@ -296,7 +298,6 @@ public:
     Uint32 m_senderRef;  // TC (used for routing)
     Uint32 m_scan_cnt;
     Signal* m_start_signal; // Argument to first node in tree
-    SegmentedSectionPtr m_keyPtr;
 
     TreeNodeBitMask m_scans; // TreeNodes doing scans
 
@@ -1262,6 +1263,13 @@ private:
   SLList<RowPage>::Head m_free_page_list;
   ArrayPool<RowPage> m_page_pool;
 
+  /* Random fault injection */
+
+#ifdef ERROR_INSERT
+  bool appendToSection(Uint32& firstSegmentIVal,
+                       const Uint32* src, Uint32 len);
+#endif
+
   /**
    * Scratch buffers...
    */

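The ERROR_INSERT-only appendToSection() declared above relies on C++ name hiding: unqualified calls inside Dbspj now resolve to this wrapper (implemented in DbspjMain.cpp below), which either simulates an allocation failure or forwards to the base-class version. A minimal sketch of the idiom follows; everything except the appendToSection() name is hypothetical:

  typedef unsigned int Uint32;                  // as in the NDB headers

  struct BaseBlock {
    bool appendToSection(Uint32& firstSegmentIVal, const Uint32* src, Uint32 len);
  };

  struct Block : public BaseBlock {
  #ifdef ERROR_INSERT
    // Hides BaseBlock::appendToSection for unqualified calls inside Block,
    // so every section append in this block can be made to fail on demand.
    bool appendToSection(Uint32& firstSegmentIVal, const Uint32* src, Uint32 len)
    {
      if (faultArmed())                         // hypothetical fault predicate
        return false;                           // looks like out-of-section-memory
      return BaseBlock::appendToSection(firstSegmentIVal, src, len);
    }
  #endif
  };
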
=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2012-05-08 08:03:29 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2012-06-19 13:29:32 +0000
@@ -37,6 +37,7 @@
 
 #include <signaldata/NodeFailRep.hpp>
 #include <signaldata/ReadNodesConf.hpp>
+#include <signaldata/SignalDroppedRep.hpp>
 
 // Use DEBUG to print messages that should be
 // seen only when we debug the product
@@ -65,6 +66,50 @@
 const Ptr<Dbspj::TreeNode> Dbspj::NullTreeNodePtr = { 0, RNIL };
 const Dbspj::RowRef Dbspj::NullRowRef = { RNIL, GLOBAL_PAGE_SIZE_WORDS, { 0 } };
 
+
+void Dbspj::execSIGNAL_DROPPED_REP(Signal* signal)
+{
+  /* An incoming signal was dropped, handle it.
+   * Dropped signal really means that we ran out of
+   * long signal buffering to store its sections.
+   */
+  jamEntry();
+
+  if (!assembleDroppedFragments(signal))
+  {
+    jam();
+    return;
+  }
+
+  const SignalDroppedRep* rep = (SignalDroppedRep*) &signal->theData[0];
+  Uint32 originalGSN= rep->originalGsn;
+
+  DEBUG("SignalDroppedRep received for GSN " << originalGSN);
+
+  switch(originalGSN) {
+  case GSN_SCAN_FRAGREQ:
+  {
+    jam();
+    /* Get information necessary to send SCAN_FRAGREF back to TC */
+    // TODO : Handle dropped signal fragments
+
+    const ScanFragReq * const truncatedScanFragReq = 
+      (ScanFragReq *) &rep->originalData[0];
+
+    handle_early_scanfrag_ref(signal, truncatedScanFragReq,
+                              DbspjErr::OutOfSectionMemory);
+    break;
+  }
+  default:
+    jam();
+    /* Don't expect dropped signals for other GSNs
+     */
+    SimulatedBlock::execSIGNAL_DROPPED_REP(signal);
+  };
+
+  return;
+}
+
 /** A noop for now.*/
 void Dbspj::execREAD_CONFIG_REQ(Signal* signal)
 {
@@ -381,9 +426,10 @@ void Dbspj::execLQHKEYREQ(Signal* signal
    *      (unless StoredProcId is set, when only paramters are sent,
    *       but this is not yet implemented)
    */
+  SegmentedSectionPtr attrPtr;
   SectionHandle handle = SectionHandle(this, signal);
-  SegmentedSectionPtr ssPtr;
-  handle.getSection(ssPtr, LqhKeyReq::AttrInfoSectionNum);
+  handle.getSection(attrPtr, LqhKeyReq::AttrInfoSectionNum);
+  const Uint32 keyPtrI = handle.m_ptr[LqhKeyReq::KeyInfoSectionNum].i;
 
   Uint32 err;
   Ptr<Request> requestPtr = { 0, RNIL };
@@ -412,7 +458,7 @@ void Dbspj::execLQHKEYREQ(Signal* signal
     Uint32 len_cnt;
 
     {
-      SectionReader r0(ssPtr, getSectionSegmentPool());
+      SectionReader r0(attrPtr, getSectionSegmentPool());
 
       err = DbspjErr::ZeroLengthQueryTree;
       if (unlikely(!r0.getWord(&len_cnt)))
@@ -423,8 +469,8 @@ void Dbspj::execLQHKEYREQ(Signal* signal
     Uint32 cnt = QueryTree::getNodeCnt(len_cnt);
 
     {
-      SectionReader treeReader(ssPtr, getSectionSegmentPool());
-      SectionReader paramReader(ssPtr, getSectionSegmentPool());
+      SectionReader treeReader(attrPtr, getSectionSegmentPool());
+      SectionReader paramReader(attrPtr, getSectionSegmentPool());
       paramReader.step(len); // skip over tree to parameters
 
       Build_context ctx;
@@ -432,31 +478,39 @@ void Dbspj::execLQHKEYREQ(Signal* signal
       ctx.m_savepointId = req->savePointId;
       ctx.m_scanPrio = 1;
       ctx.m_start_signal = signal;
-      ctx.m_keyPtr.i = handle.m_ptr[LqhKeyReq::KeyInfoSectionNum].i;
       ctx.m_senderRef = signal->getSendersBlockRef();
 
       err = build(ctx, requestPtr, treeReader, paramReader);
       if (unlikely(err != 0))
         break;
-    }
 
-    /**
-     * a query being shipped as a LQHKEYREQ may only return finite rows
-     *   i.e be a (multi-)lookup
-     */
-    ndbassert(requestPtr.p->isLookup());
-    ndbassert(requestPtr.p->m_node_cnt == cnt);
-    err = DbspjErr::InvalidRequest;
-    if (unlikely(!requestPtr.p->isLookup() || requestPtr.p->m_node_cnt != cnt))
-      break;
+      /**
+       * Root TreeNode in Request takes ownership of keyPtr
+       * section when build has completed.
+       * We are done with attrPtr which is now released.
+       */
+      Ptr<TreeNode> rootNodePtr = ctx.m_node_list[0];
+      rootNodePtr.p->m_send.m_keyInfoPtrI = keyPtrI;
+      release(attrPtr);
+      handle.clear();
+    }
 
     /**
      * Store request in list(s)/hash(es)
      */
     store_lookup(requestPtr);
 
-    release(ssPtr);
-    handle.clear();
+    /**
+     * A query being shipped as a LQHKEYREQ may return at most a row
+     * per operation i.e be a (multi-)lookup 
+     */
+    if (ERROR_INSERTED_CLEAR(17013) ||
+        unlikely(!requestPtr.p->isLookup() || requestPtr.p->m_node_cnt != cnt))
+    {
+      jam();
+      err = DbspjErr::InvalidRequest;
+      break;
+    }
 
     start(signal, requestPtr);
     return;
@@ -469,9 +523,9 @@ void Dbspj::execLQHKEYREQ(Signal* signal
   if (!requestPtr.isNull())
   {
     jam();
-    m_request_pool.release(requestPtr);
+    cleanup(requestPtr);
   }
-  releaseSections(handle);
+  releaseSections(handle);  // a NOOP, if we reached 'handle.clear()' above
   handle_early_lqhkey_ref(signal, req, err);
 }
 
@@ -684,8 +738,8 @@ Dbspj::execSCAN_FRAGREQ(Signal* signal)
    *              if first op is lookup - contains keyinfo for lookup
    */
   SectionHandle handle = SectionHandle(this, signal);
-  SegmentedSectionPtr ssPtr;
-  handle.getSection(ssPtr, ScanFragReq::AttrInfoSectionNum);
+  SegmentedSectionPtr attrPtr;
+  handle.getSection(attrPtr, ScanFragReq::AttrInfoSectionNum);
 
   Uint32 err;
   Ptr<Request> requestPtr = { 0, RNIL };
@@ -713,7 +767,7 @@ Dbspj::execSCAN_FRAGREQ(Signal* signal)
 
     Uint32 len_cnt;
     {
-      SectionReader r0(ssPtr, getSectionSegmentPool());
+      SectionReader r0(attrPtr, getSectionSegmentPool());
       err = DbspjErr::ZeroLengthQueryTree;
       if (unlikely(!r0.getWord(&len_cnt)))
         break;
@@ -723,8 +777,8 @@ Dbspj::execSCAN_FRAGREQ(Signal* signal)
     Uint32 cnt = QueryTree::getNodeCnt(len_cnt);
 
     {
-      SectionReader treeReader(ssPtr, getSectionSegmentPool());
-      SectionReader paramReader(ssPtr, getSectionSegmentPool());
+      SectionReader treeReader(attrPtr, getSectionSegmentPool());
+      SectionReader paramReader(attrPtr, getSectionSegmentPool());
       paramReader.step(len); // skip over tree to parameters
 
       Build_context ctx;
@@ -735,35 +789,38 @@ Dbspj::execSCAN_FRAGREQ(Signal* signal)
       ctx.m_start_signal = signal;
       ctx.m_senderRef = signal->getSendersBlockRef();
 
+      err = build(ctx, requestPtr, treeReader, paramReader);
+      if (unlikely(err != 0))
+        break;
+
+      /**
+       * Root TreeNode in Request takes ownership of keyPtr
+       * section when build has completed.
+       * We are done with attrPtr which is now released.
+       */
+      Ptr<TreeNode> rootNodePtr = ctx.m_node_list[0];
       if (handle.m_cnt > 1)
       {
         jam();
-        ctx.m_keyPtr.i = handle.m_ptr[ScanFragReq::KeyInfoSectionNum].i;
-      }
-      else
-      {
-        jam();
-        ctx.m_keyPtr.i = RNIL;
+        const Uint32 keyPtrI = handle.m_ptr[ScanFragReq::KeyInfoSectionNum].i;
+        rootNodePtr.p->m_send.m_keyInfoPtrI = keyPtrI;
       }
-
-      err = build(ctx, requestPtr, treeReader, paramReader);
-      if (unlikely(err != 0))
-        break;
+      release(attrPtr);
+      handle.clear();
     }
 
-    ndbassert(requestPtr.p->isScan());
-    ndbassert(requestPtr.p->m_node_cnt == cnt);
-    err = DbspjErr::InvalidRequest;
-    if (unlikely(!requestPtr.p->isScan() || requestPtr.p->m_node_cnt != cnt))
-      break;
-
     /**
      * Store request in list(s)/hash(es)
      */
     store_scan(requestPtr);
 
-    release(ssPtr);
-    handle.clear();
+    if (ERROR_INSERTED_CLEAR(17013) ||
+        unlikely(!requestPtr.p->isScan() || requestPtr.p->m_node_cnt != cnt))
+    {
+      jam();
+      err = DbspjErr::InvalidRequest;
+      break;
+    }
 
     start(signal, requestPtr);
     return;
@@ -772,9 +829,9 @@ Dbspj::execSCAN_FRAGREQ(Signal* signal)
   if (!requestPtr.isNull())
   {
     jam();
-    m_request_pool.release(requestPtr);
+    cleanup(requestPtr);
   }
-  releaseSections(handle);
+  releaseSections(handle);  // a NOOP, if we reached 'handle.clear()' above
   handle_early_scanfrag_ref(signal, req, err);
 }
 
@@ -1855,33 +1912,12 @@ Dbspj::cleanup(Ptr<Request> requestPtr)
       requestPtr.p->m_state = Request::RS_ABORTED;
       return;
     }
-
-#ifdef VM_TRACE
-    {
-      Request key;
-      key.m_transId[0] = requestPtr.p->m_transId[0];
-      key.m_transId[1] = requestPtr.p->m_transId[1];
-      key.m_senderData = requestPtr.p->m_senderData;
-      Ptr<Request> tmp;
-      ndbrequire(m_scan_request_hash.find(tmp, key));
-    }
-#endif
-    m_scan_request_hash.remove(requestPtr);
+    m_scan_request_hash.remove(requestPtr, *requestPtr.p);
   }
   else
   {
     jam();
-#ifdef VM_TRACE
-    {
-      Request key;
-      key.m_transId[0] = requestPtr.p->m_transId[0];
-      key.m_transId[1] = requestPtr.p->m_transId[1];
-      key.m_senderData = requestPtr.p->m_senderData;
-      Ptr<Request> tmp;
-      ndbrequire(m_lookup_request_hash.find(tmp, key));
-    }
-#endif
-    m_lookup_request_hash.remove(requestPtr);
+    m_lookup_request_hash.remove(requestPtr, *requestPtr.p);
   }
   releaseRequestBuffers(requestPtr, false);
   ArenaHead ah = requestPtr.p->m_arena;
@@ -3002,7 +3038,6 @@ Dbspj::lookup_build(Build_context& ctx,
       dst->fragmentData = fragId;
       dst->attrLen = attrLen; // fragdist is in here
 
-      treeNodePtr.p->m_send.m_keyInfoPtrI = ctx.m_keyPtr.i;
       treeNodePtr.p->m_bits |= TreeNode::T_ONE_SHOT;
     }
     return 0;
@@ -3094,6 +3129,7 @@ Dbspj::lookup_send(Signal* signal,
         if (!dupSection(tmp, keyInfoPtrI))
         {
           jam();
+          ndbassert(tmp == RNIL);  // Guard for memleak
           err = DbspjErr::OutOfSectionMemory;
           break;
         }
@@ -3126,8 +3162,7 @@ Dbspj::lookup_send(Signal* signal,
           jam();
           ndbout_c("Injecting OutOfSectionMemory error at line %d file %s",
                    __LINE__,  __FILE__);
-          if (keyInfoPtrI != RNIL)
-            releaseSection(keyInfoPtrI);
+          releaseSection(keyInfoPtrI);
           err = DbspjErr::OutOfSectionMemory;
           break;
         }
@@ -3135,8 +3170,8 @@ Dbspj::lookup_send(Signal* signal,
         if (!dupSection(tmp, attrInfoPtrI))
         {
           jam();
-          if (keyInfoPtrI != RNIL)
-            releaseSection(keyInfoPtrI);
+          ndbassert(tmp == RNIL);  // Guard for memleak
+          releaseSection(keyInfoPtrI);
           err = DbspjErr::OutOfSectionMemory;
           break;
         }
@@ -3523,7 +3558,11 @@ Dbspj::lookup_parent_row(Signal* signal,
       bool keyIsNull;
       err = expand(ptrI, pattern, rowRef, keyIsNull);
       if (unlikely(err != 0))
+      {
+        jam();
+        releaseSection(ptrI);
         break;
+      }
 
       if (keyIsNull)
       {
@@ -3545,10 +3584,7 @@ Dbspj::lookup_parent_row(Signal* signal,
            */
           jam();
           DEBUG("..Ignore impossible KEYREQ");
-          if (ptrI != RNIL)
-          {
-            releaseSection(ptrI);
-          }
+          releaseSection(ptrI);
           return;  // Bailout, KEYREQ would have returned KEYREF(626) anyway
         }
         else  // isLookup()
@@ -3633,6 +3669,7 @@ Dbspj::lookup_parent_row(Signal* signal,
       if (!dupSection(tmp, attrInfoPtrI))
       {
         jam();
+        ndbassert(tmp == RNIL);  // Guard for memleak
         err = DbspjErr::OutOfSectionMemory;
         break;
       }
@@ -3649,7 +3686,11 @@ Dbspj::lookup_parent_row(Signal* signal,
       Local_pattern_store pattern(pool, treeNodePtr.p->m_attrParamPattern);
       err = expand(tmp, pattern, rowRef, hasNull);
       if (unlikely(err != 0))
+      {
+        jam();
+        releaseSection(tmp);
         break;
+      }
 //    ndbrequire(!hasNull);
 
       /**
@@ -4058,6 +4099,7 @@ Dbspj::scanFrag_build(Build_context& ctx
       break;
     }
 
+    treeNodePtr.p->m_info = &g_ScanFragOpInfo;
     treeNodePtr.p->m_scanfrag_data.m_scanFragHandlePtrI = RNIL;
     Ptr<ScanFragHandle> scanFragHandlePtr;
     if (ERROR_INSERTED_CLEAR(17004))
@@ -4081,7 +4123,6 @@ Dbspj::scanFrag_build(Build_context& ctx
     treeNodePtr.p->m_scanfrag_data.m_scanFragHandlePtrI = scanFragHandlePtr.i;
 
     requestPtr.p->m_bits |= Request::RT_SCAN;
-    treeNodePtr.p->m_info = &g_ScanFragOpInfo;
     treeNodePtr.p->m_bits |= TreeNode::T_ATTR_INTERPRETED;
     treeNodePtr.p->m_batch_size = ctx.m_batch_size_rows;
 
@@ -4201,7 +4242,6 @@ Dbspj::scanFrag_build(Build_context& ctx
       ndbassert(dst->transId2 == transId2);
 #endif
 
-      treeNodePtr.p->m_send.m_keyInfoPtrI = ctx.m_keyPtr.i;
       treeNodePtr.p->m_bits |= TreeNode::T_ONE_SHOT;
 
       if (rangeScanFlag)
@@ -4792,8 +4832,10 @@ Dbspj::parseScanIndex(Build_context& ctx
          */
         err = expand(pattern, treeNodePtr, tree, len, origParam, cnt);
         if (unlikely(err != 0))
+        {
+          jam();
           break;
-
+        }
         treeNodePtr.p->m_bits |= TreeNode::T_PRUNE_PATTERN;
         c_Counters.incr_counter(CI_PRUNED_RANGE_SCANS_RECEIVED, 1);
       }
@@ -4811,13 +4853,18 @@ Dbspj::parseScanIndex(Build_context& ctx
         bool hasNull;
         err = expand(prunePtrI, tree, len, origParam, cnt, hasNull);
         if (unlikely(err != 0))
+        {
+          jam();
+          releaseSection(prunePtrI);
           break;
+        }
 
         if (unlikely(hasNull))
         {
           /* API should have elliminated requests w/ const-NULL keys */
           jam();
           DEBUG("BEWARE: T_CONST_PRUNE-key contain NULL values");
+          releaseSection(prunePtrI);
 //        treeNodePtr.p->m_bits |= TreeNode::T_NULL_PRUNE;
 //        break;
           ndbrequire(false);
@@ -5160,6 +5207,7 @@ Dbspj::scanIndex_parent_row(Signal* sign
       if (unlikely(err != 0))
       {
         jam();
+        releaseSection(pruneKeyPtrI);
         break;
       }
 
@@ -5169,10 +5217,7 @@ Dbspj::scanIndex_parent_row(Signal* sign
         DEBUG("T_PRUNE_PATTERN-key contain NULL values");
 
         // Ignore this request as 'NULL == <column>' will never give a match
-        if (pruneKeyPtrI != RNIL)
-        {
-          releaseSection(pruneKeyPtrI);
-        }
+        releaseSection(pruneKeyPtrI);
         return;  // Bailout, SCANREQ would have returned 0 rows anyway
       }
 
@@ -5223,7 +5268,6 @@ Dbspj::scanIndex_parent_row(Signal* sign
       list.first(fragPtr);
     }
 
-    Uint32 ptrI = fragPtr.p->m_rangePtrI;
     bool hasNull;
     if (treeNodePtr.p->m_bits & TreeNode::T_KEYINFO_CONSTRUCTED)
     {
@@ -5248,7 +5292,7 @@ Dbspj::scanIndex_parent_row(Signal* sign
         break;
       }
 
-      err = expand(ptrI, pattern, rowRef, hasNull);
+      err = expand(fragPtr.p->m_rangePtrI, pattern, rowRef, hasNull);
       if (unlikely(err != 0))
       {
         jam();
@@ -5262,8 +5306,7 @@ Dbspj::scanIndex_parent_row(Signal* sign
       ndbrequire(false);
     }
 //  ndbrequire(!hasNull);  // FIXME, can't ignore request as we already added it to keyPattern
-    fragPtr.p->m_rangePtrI = ptrI;
-    scanIndex_fixupBound(fragPtr, ptrI, rowRef.m_src_correlation);
+    scanIndex_fixupBound(fragPtr, fragPtr.p->m_rangePtrI, rowRef.m_src_correlation);
 
     if (treeNodePtr.p->m_bits & TreeNode::T_ONE_SHOT)
     {
@@ -5693,6 +5736,7 @@ Dbspj::scanIndex_send(Signal* signal,
           if (!dupSection(tmp, attrInfoPtrI))
           {
             jam();
+            ndbassert(tmp == RNIL);  // Guard for memleak
             err = DbspjErr::OutOfSectionMemory;
             break;
           }
@@ -6686,6 +6730,26 @@ Dbspj::appendParamToPattern(Local_patter
   return dst.append(&info,1) && dst.append(ptr,len) ? 0 : DbspjErr::OutOfQueryMemory;
 }
 
+#ifdef ERROR_INSERT
+static int fi_cnt = 0;
+bool
+Dbspj::appendToSection(Uint32& firstSegmentIVal,
+                         const Uint32* src, Uint32 len)
+{
+  if (fi_cnt++ % 13 == 0 && ERROR_INSERTED(17510))
+  {
+    jam();
+    ndbout_c("Injecting appendToSection error 17510 at line %d file %s",
+             __LINE__,  __FILE__);
+    return false;
+  }
+  else
+  {
+    return SimulatedBlock::appendToSection(firstSegmentIVal, src, len);
+  }
+}
+#endif
+
 Uint32
 Dbspj::appendParamHeadToPattern(Local_pattern_store& dst,
                                 const RowPtr::Linear & row, Uint32 col)
@@ -7138,6 +7202,7 @@ Dbspj::expandS(Uint32 & _dst, Local_patt
     if (unlikely(err != 0))
     {
       jam();
+      _dst = dst;
       return err;
     }
   }
@@ -7199,6 +7264,7 @@ Dbspj::expandL(Uint32 & _dst, Local_patt
     if (unlikely(err != 0))
     {
       jam();
+      _dst = dst;
       return err;
     }
   }
@@ -7513,7 +7579,11 @@ Dbspj::parseDA(Build_context& ctx,
          * Expand pattern into a new pattern (with linked values)
          */
         err = expand(pattern, treeNodePtr, tree, len, param, cnt);
-
+        if (unlikely(err != 0))
+        {
+          jam();
+          break;
+        }
         /**
          * This node constructs a new key for each send
          */
@@ -7530,23 +7600,25 @@ Dbspj::parseDA(Build_context& ctx,
         bool hasNull;
         Uint32 keyInfoPtrI = RNIL;
         err = expand(keyInfoPtrI, tree, len, param, cnt, hasNull);
+        if (unlikely(err != 0))
+        {
+          jam();
+          releaseSection(keyInfoPtrI);
+          break;
+        }
         if (unlikely(hasNull))
         {
           /* API should have elliminated requests w/ const-NULL keys */
           jam();
           DEBUG("BEWARE: FIXED-key contain NULL values");
+          releaseSection(keyInfoPtrI);
 //        treeNodePtr.p->m_bits |= TreeNode::T_NULL_PRUNE;
 //        break;
           ndbrequire(false);
         }
         treeNodePtr.p->m_send.m_keyInfoPtrI = keyInfoPtrI;
       }
-
-      if (unlikely(err != 0))
-      {
-        jam();
-        break;
-      }
+      ndbassert(err == 0); // All errors should have been handled
     } // DABits::NI_KEY_...
 
     const Uint32 mask =
@@ -7837,18 +7909,20 @@ Dbspj::parseDA(Build_context& ctx,
           {
             SectionReader r0(ptr, getSectionSegmentPool());
             err = appendTreeToSection(attrInfoPtrI, r0, ptr.sz);
-            sectionptrs[4] = ptr.sz;
             if (unlikely(err != 0))
             {
               jam();
               break;
             }
+            sectionptrs[4] = ptr.sz;
           }
           releaseSection(attrParamPtrI);
+          attrParamPtrI = RNIL;
         }
       }
 
       treeNodePtr.p->m_send.m_attrInfoPtrI = attrInfoPtrI;
+      attrInfoPtrI = RNIL;
     } // if (((treeBits & mask) | (paramBits & DABits::PI_ATTR_LIST)) != 0)
 
     // Empty attrinfo would cause node crash.
@@ -7869,6 +7943,18 @@ Dbspj::parseDA(Build_context& ctx,
     return 0;
   } while (0);
 
+  if (attrInfoPtrI != RNIL)
+  {
+    jam();
+    releaseSection(attrInfoPtrI);
+  }
+
+  if (attrParamPtrI != RNIL)
+  {
+    jam();
+    releaseSection(attrParamPtrI);
+  }
+
   return err;
 }
 

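Most of the DbspjMain.cpp hunks above apply one idiom: a segmented section still owned by the local code is released on every error exit, and the local i-value is reset to RNIL as soon as ownership is handed over (to m_send or to the SectionHandle), so the shared cleanup at the end of the function cannot release it twice. A condensed sketch of that idiom, using names from the patch but omitting the surrounding kernel context:

  Uint32 attrInfoPtrI = RNIL;          // RNIL: no section allocated yet
  Uint32 err = 0;
  do
  {
    if (!appendToSection(attrInfoPtrI, src, len))   // may allocate, then fail
    {
      jam();
      err = DbspjErr::OutOfSectionMemory;
      break;                           // fall through to the cleanup below
    }

    treeNodePtr.p->m_send.m_attrInfoPtrI = attrInfoPtrI;  // ownership handed over
    attrInfoPtrI = RNIL;               // ...so the cleanup below must not touch it
    return 0;
  } while (0);

  if (attrInfoPtrI != RNIL)
  {
    jam();
    releaseSection(attrInfoPtrI);      // still ours: release to avoid the leak
  }
  return err;
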
=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2012-06-26 11:05:35 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2012-07-04 12:44:30 +0000
@@ -1,5 +1,5 @@
 /*
-   Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+   Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -146,22 +146,22 @@ ErrorBundle ErrorCodes[] = {
    * SPJ error codes
    */ 
 
-  { 20000, DMEC, IS, "Query aborted due out of operation records" },
+  { 20000, DMEC, TR, "Query aborted due out of operation records" },
   { 20001, DMEC, IE, "Query aborted due to empty query tree" },
   { 20002, DMEC, IE, "Query aborted due to invalid request" },
   { 20003, DMEC, IE, "Query aborted due to  unknown query operation" },
   { 20004, DMEC, IE, "Query aborted due to invalid tree node specification" },
   { 20005, DMEC, IE, "Query aborted due to invalid tree parameter specification" },
-  { 20006, DMEC, IS, "Query aborted due to out of section memory" },
+  { 20006, DMEC, TR, "Query aborted due to out of LongMessageBuffer" },
   { 20007, DMEC, IE, "Query aborted due to invalid pattern" },
-  { 20008, DMEC, IS, "Query aborted due to out of query memory" },
+  { 20008, DMEC, TR, "Query aborted due to out of query memory" },
   { 20009, DMEC, IE, "Query aborted due to query node too big" },
   { 20010, DMEC, IE, "Query aborted due to query node parameters too big" },
   { 20011, DMEC, IE, "Query aborted due to both tree and parameters contain interpreted program" },
   { 20012, DMEC, IE, "Query aborted due to invalid tree parameter specification: Key parameter bits mismatch" },
   { 20013, DMEC, IE, "Query aborted due to invalid tree parameter specification: Incorrect key parameter count" },
   { 20014, DMEC, IE, "Query aborted due to internal error" },
-  { 20015, DMEC, IS, "Query aborted due to out of row memory" },
+  { 20015, DMEC, TR, "Query aborted due to out of row memory" },
   { 20016, DMEC, NR, "Query aborted due to node failure" },
   { 20017, DMEC, IE, "Query aborted due to invalid node count" },
   { 20018, DMEC, IE, "Query aborted due to index fragment not found" },

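The reclassification above (IS -> TR) makes the out-of-resource SPJ errors temporary-resource errors, so they should reach NDB API clients with a temporary rather than a permanent status. A minimal client-side sketch of the intended reaction, assuming a failed NdbTransaction* trans and the standard NdbError fields:

  const NdbError& err = trans->getNdbError();
  if (err.status == NdbError::TemporaryError)
  {
    // e.g. 20000, 20006, 20008, 20015: transient resource shortage,
    // so back off briefly and retry the query instead of giving up.
    NdbSleep_MilliSleep(50);           // from NdbSleep.h
    // ... re-execute the query/transaction here
  }
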
=== modified file 'storage/ndb/test/include/HugoQueries.hpp'
--- a/storage/ndb/test/include/HugoQueries.hpp	2011-07-05 12:46:07 +0000
+++ b/storage/ndb/test/include/HugoQueries.hpp	2012-05-02 11:31:46 +0000
@@ -42,7 +42,9 @@ public:
   // Rows for for each of the operations
   Vector<Uint32> m_rows_found;
 
-  int runLookupQuery(Ndb*, int records, int batchsize = 1);
+  int runLookupQuery(Ndb*,
+                     int queries = 100,
+                     int batchsize = 1);
   int runScanQuery(Ndb*,
                    int abort = 4,
                    int parallelism = 0,

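The second parameter now counts queries rather than records (default 100), so the testSpj.cpp and hugoJoin.cpp callers below derive the query count from the record count and the join depth. A usage sketch, assuming hq is a HugoQueries instance built from an NdbQueryDef and pNdb is a connected Ndb object:

  // Each lookup query touches roughly 'joinlevel' records, so issue
  // records/joinlevel queries to keep the total work comparable.
  const int queries = records / joinlevel;
  if (hq.runLookupQuery(pNdb, queries, /*batchsize=*/1) != 0)
    return NDBT_FAILED;                // NDBT test framework failure code
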
=== modified file 'storage/ndb/test/ndbapi/testSpj.cpp'
--- a/storage/ndb/test/ndbapi/testSpj.cpp	2012-03-28 15:02:39 +0000
+++ b/storage/ndb/test/ndbapi/testSpj.cpp	2012-06-19 11:04:55 +0000
@@ -31,7 +31,7 @@ static int faultToInject = 0;
 
 enum faultsToInject {
   FI_START = 17001,
-  FI_END = 17121
+  FI_END = 17510
 };
 
 int
@@ -75,6 +75,7 @@ runLookupJoin(NDBT_Context* ctx, NDBT_St
   int loops = ctx->getNumLoops();
   int joinlevel = ctx->getProperty("JoinLevel", 3);
   int records = ctx->getNumRecords();
+  int queries = records/joinlevel;
   int until_stopped = ctx->getProperty("UntilStopped", (Uint32)0);
   Uint32 stepNo = step->getStepNo();
 
@@ -86,7 +87,7 @@ runLookupJoin(NDBT_Context* ctx, NDBT_St
   while ((i<loops || until_stopped) && !ctx->isTestStopped())
   {
     g_info << i << ": ";
-    if (hugoTrans.runLookupQuery(GETNDB(step), records))
+    if (hugoTrans.runLookupQuery(GETNDB(step), queries))
     {
       g_info << endl;
       return NDBT_FAILED;
@@ -103,6 +104,7 @@ runLookupJoinError(NDBT_Context* ctx, ND
   int loops = ctx->getNumLoops();
   int joinlevel = ctx->getProperty("JoinLevel", 8);
   int records = ctx->getNumRecords();
+  int queries = records/joinlevel;
   int until_stopped = ctx->getProperty("UntilStopped", (Uint32)0);
   Uint32 stepNo = step->getStepNo();
 
@@ -116,6 +118,7 @@ runLookupJoinError(NDBT_Context* ctx, ND
   int lookupFaults[] = {
       17001, 17005, 17006, 17008,
       17012, // testing abort in :execDIH_SCAN_TAB_CONF
+      17013, // Simulate DbspjErr::InvalidRequest
       17020, 17021, 17022, // lookup_send() encounter dead node -> NodeFailure
       17030, 17031, 17032, // LQHKEYREQ reply is LQHKEYREF('Invalid..')
       17040, 17041, 17042, // lookup_parent_row -> OutOfQueryMemory
@@ -123,7 +126,8 @@ runLookupJoinError(NDBT_Context* ctx, ND
       17060, 17061, 17062, 17063, // scanIndex_parent_row -> outOfSectionMem
       17070, 17071, 17072, // lookup_send.dupsec -> outOfSectionMem
       17080, 17081, 17082, // lookup_parent_row -> OutOfQueryMemory
-      17120, 17121 // execTRANSID_AI -> OutOfRowMemory
+      17120, 17121, // execTRANSID_AI -> OutOfRowMemory
+      17510 // random failure when allocating section memory
   }; 
   loops =  faultToInject ? 1 : sizeof(lookupFaults)/sizeof(int);
 
@@ -148,7 +152,7 @@ runLookupJoinError(NDBT_Context* ctx, ND
     // It'd be better if test could differentiates failures from
     // fault injection and others.
     // We expect to fail, and it's a failure if we don't
-    if (!hugoTrans.runLookupQuery(GETNDB(step), records))
+    if (!hugoTrans.runLookupQuery(GETNDB(step), queries))
     {
       g_info << "LookUpJoinError didn't fail as expected."<< endl;
       // return NDBT_FAILED;
@@ -205,6 +209,7 @@ runScanJoinError(NDBT_Context* ctx, NDBT
   int scanFaults[] = {
       17002, 17004, 17005, 17006, 17008,
       17012, // testing abort in :execDIH_SCAN_TAB_CONF
+      17013, // Simulate DbspjErr::InvalidRequest
       17020, 17021, 17022, // lookup_send() encounter dead node -> NodeFailure
       17030, 17031, 17032, // LQHKEYREQ reply is LQHKEYREF('Invalid..')
       17040, 17041, 17042, // lookup_parent_row -> OutOfQueryMemory
@@ -215,7 +220,8 @@ runScanJoinError(NDBT_Context* ctx, NDBT
       17090, 17091, 17092, 17093, // scanIndex_send -> OutOfQueryMemory
       17100, // scanFrag_sends invalid schema version, to get a SCAN_FRAGREF
       17110, 17111, 17112, // scanIndex_sends invalid schema version, to get a SCAN_FRAGREF
-      17120, 17121 // execTRANSID_AI -> OutOfRowMemory
+      17120, 17121, // execTRANSID_AI -> OutOfRowMemory
+      17510 // random failure when allocating section memory
   }; 
   loops =  faultToInject ? 1 : sizeof(scanFaults)/sizeof(int);
 
@@ -258,6 +264,7 @@ runJoin(NDBT_Context* ctx, NDBT_Step* st
   int loops = ctx->getNumLoops();
   int joinlevel = ctx->getProperty("JoinLevel", 3);
   int records = ctx->getNumRecords();
+  int queries = records/joinlevel;
   int until_stopped = ctx->getProperty("UntilStopped", (Uint32)0);
   Uint32 stepNo = step->getStepNo();
 
@@ -278,7 +285,7 @@ runJoin(NDBT_Context* ctx, NDBT_Step* st
       g_info << endl;
       return NDBT_FAILED;
     }
-    if (hugoTrans2.runLookupQuery(GETNDB(step), records))
+    if (hugoTrans2.runLookupQuery(GETNDB(step), queries))
     {
       g_info << endl;
       return NDBT_FAILED;
@@ -351,7 +358,7 @@ runRestarter(NDBT_Context* ctx, NDBT_Ste
 
     if (waitprogress)
     {
-      Uint32 maxwait = 30;
+      Uint32 maxwait = 60;
       ndbout_c("running: 0x%.8x", running);
       for (Uint32 checks = 0; checks < 3 && !ctx->isTestStopped(); checks++)
       {
@@ -391,7 +398,7 @@ runRestarter(NDBT_Context* ctx, NDBT_Ste
 
     if (waitprogress)
     {
-      Uint32 maxwait = 30;
+      Uint32 maxwait = 60;
       ndbout_c("running: 0x%.8x", running);
       for (Uint32 checks = 0; checks < 3 && !ctx->isTestStopped(); checks++)
       {

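The new fault codes (17013 and FI_END = 17510) are exercised the same way as the existing ones: arm the error insert in the data nodes, run the join, and expect it to be aborted. A sketch of that loop body, assuming the NdbRestarter helper these tests already use:

  NdbRestarter restarter;
  const int fault = 17510;                      // random appendToSection failure
  if (restarter.insertErrorInAllNodes(fault) != 0)
    return NDBT_FAILED;

  // The query is expected to fail while the fault is armed.
  if (hugoTrans.runLookupQuery(GETNDB(step), queries) == 0)
    g_info << "Fault " << fault << " did not cause a failure" << endl;

  restarter.insertErrorInAllNodes(0);           // disarm the error insert
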
=== modified file 'storage/ndb/test/src/HugoQueries.cpp'
--- a/storage/ndb/test/src/HugoQueries.cpp	2011-11-03 17:22:01 +0000
+++ b/storage/ndb/test/src/HugoQueries.cpp	2012-05-02 11:31:46 +0000
@@ -135,10 +135,10 @@ HugoQueries::getValueForQueryOp(NdbQuery
 
 int
 HugoQueries::runLookupQuery(Ndb* pNdb,
-                            int records,
+                            int queries,
                             int batch)
 {
-  int r = 0;
+  int q = 0;
   int retryAttempt = 0;
 
   m_rows_found.clear();
@@ -153,10 +153,10 @@ HugoQueries::runLookupQuery(Ndb* pNdb,
 
   allocRows(batch);
 
-  while (r < records)
+  while (q < queries)
   {
-    if(r + batch > records)
-      batch = records - r;
+    if (q + batch > queries)
+      batch = queries - q;
 
     if (retryAttempt >= m_retryMax)
     {
@@ -188,7 +188,7 @@ HugoQueries::runLookupQuery(Ndb* pNdb,
     {
       char buf[NDB_MAX_TUPLE_SIZE];
       NdbQueryParamValue params[NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY];
-      equalForParameters(buf, m_ops[0], params, b + r);
+      equalForParameters(buf, m_ops[0], params, b + q);
 
       NdbQuery * query = pTrans->createQuery(m_query_def, params);
       if (query == 0)
@@ -301,7 +301,7 @@ HugoQueries::runLookupQuery(Ndb* pNdb,
     }
 
     pTrans->close();
-    r += batch;
+    q += batch;
 
     for (unsigned i = 0; i<batch_rows_found.size(); i++)
       m_rows_found[i] += batch_rows_found[i];

=== modified file 'storage/ndb/test/tools/hugoJoin.cpp'
--- a/storage/ndb/test/tools/hugoJoin.cpp	2011-09-28 09:54:05 +0000
+++ b/storage/ndb/test/tools/hugoJoin.cpp	2012-05-02 11:31:46 +0000
@@ -209,7 +209,7 @@ int main(int argc, char** argv){
       }
       else
       {
-        res = hq.runLookupQuery(&MyNdb, _records, _batch);
+        res = hq.runLookupQuery(&MyNdb, _records/_depth, _batch);
       }
       if (res != 0)
       {

No bundle (reason: useless for push emails).