List: Commits
From: Jonas Oreland
Date: October 3 2011 7:12am
Subject: bzr push into mysql-5.1-telco-7.1 branch (jonas.oreland:4296 to 4297)
 4297 Jonas Oreland	2011-10-03 [merge]
      ndb - merge 70 to 71

    modified:
      sql/ha_ndbcluster_binlog.cc
      storage/ndb/src/common/debugger/signaldata/ScanTab.cpp
      storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
      storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilder.cpp
      storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp
      storage/ndb/test/ndbapi/testNdbApi.cpp
      storage/ndb/test/ndbapi/testRestartGci.cpp
      storage/ndb/test/src/NDBT_Find.cpp
 4296 Craig L Russell	2011-10-02
      Add isNull and isNotNull to clusterj query

    added:
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/IsNotNullPredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/IsNullPredicateImpl.java
    modified:
      storage/ndb/clusterj/clusterj-api/src/main/java/com/mysql/clusterj/query/PredicateOperand.java
      storage/ndb/clusterj/clusterj-core/Makefile.am
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/metadata/AbstractDomainFieldHandlerImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/ParameterImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PredicateImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/query/PropertyImpl.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/spi/DomainFieldHandler.java
      storage/ndb/clusterj/clusterj-core/src/main/java/com/mysql/clusterj/core/store/ScanFilter.java
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/AbstractQueryTest.java
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/QueryNotNullTest.java
      storage/ndb/clusterj/clusterj-test/src/main/java/testsuite/clusterj/QueryNullTest.java
      storage/ndb/clusterj/clusterj-tie/src/main/java/com/mysql/clusterj/tie/ScanFilterImpl.java
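
For context on revision 4296: the new isNull() and isNotNull() predicates hang off PredicateOperand and are combined with where() just like the existing equal()/greaterThan() operands. A minimal usage sketch (plain Java; the helper method, its name and the field name are illustrative only, not part of the change):

import java.util.List;

import com.mysql.clusterj.Query;
import com.mysql.clusterj.Session;
import com.mysql.clusterj.query.Predicate;
import com.mysql.clusterj.query.QueryBuilder;
import com.mysql.clusterj.query.QueryDomainType;

public class IsNullQuerySketch {
    /** Fetch all instances of 'type' whose persistent field 'field' is NULL. */
    public static <T> List<T> findWhereNull(Session session, Class<T> type, String field) {
        QueryBuilder builder = session.getQueryBuilder();
        QueryDomainType<T> domain = builder.createQueryDefinition(type);
        // isNull() is one of the two PredicateOperand methods added in 4296;
        // isNotNull() gives the complementary filter.
        Predicate fieldIsNull = domain.get(field).isNull();
        domain.where(fieldIsNull);
        Query<T> query = session.createQuery(domain);
        return query.getResultList();
    }
}

Obtaining the Session from a SessionFactory and annotating the domain class are the usual ClusterJ setup and are not touched by this revision.
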
=== modified file 'sql/ha_ndbcluster_binlog.cc'
--- a/sql/ha_ndbcluster_binlog.cc	2011-09-21 10:48:01 +0000
+++ b/sql/ha_ndbcluster_binlog.cc	2011-10-03 07:08:19 +0000
@@ -3508,7 +3508,7 @@ ndb_binlog_index_table__open(THD *thd,
   if (simple_open_n_lock_tables(thd, tables))
   {
     if (thd->killed)
-      sql_print_error("NDB Binlog: Opening ndb_binlog_index: killed");
+      DBUG_PRINT("error", ("NDB Binlog: Opening ndb_binlog_index: killed"));
     else
       sql_print_error("NDB Binlog: Opening ndb_binlog_index: %d, '%s'",
                       thd_stmt_da(thd)->sql_errno(),
@@ -3543,7 +3543,10 @@ ndb_binlog_index_table__write_rows(THD *
 
   if (ndb_binlog_index_table__open(thd, &binlog_tables, &ndb_binlog_index))
   {
-    sql_print_error("NDB Binlog: Unable to lock table ndb_binlog_index");
+    if (thd->killed)
+      DBUG_PRINT("error", ("NDB Binlog: Unable to lock table ndb_binlog_index, killed"));
+    else
+      sql_print_error("NDB Binlog: Unable to lock table ndb_binlog_index");
     error= -1;
     goto add_ndb_binlog_index_err;
   }
@@ -7492,6 +7495,7 @@ restart_cluster_failure:
               */
               if (thd->killed)
               {
+                DBUG_PRINT("error", ("Failed to write to ndb_binlog_index at shutdown, retrying"));
                 (void) mysql_mutex_lock(&LOCK_thread_count);
                 volatile THD::killed_state killed= thd->killed;
                 /* We are cleaning up, allow for flushing last epoch */

=== modified file 'storage/ndb/src/common/debugger/signaldata/ScanTab.cpp'
--- a/storage/ndb/src/common/debugger/signaldata/ScanTab.cpp	2011-06-30 15:59:25 +0000
+++ b/storage/ndb/src/common/debugger/signaldata/ScanTab.cpp	2011-09-29 11:31:28 +0000
@@ -78,9 +78,9 @@ printSCANTABCONF(FILE * output, const Ui
   size_t op_count= requestInfo & (~ScanTabConf::EndOfData);
   if (op_count)
   {
-    fprintf(output, " Operation(s) [api tc rows len]:\n");
     if (len == ScanTabConf::SignalLength + 4 * op_count)
     {
+      fprintf(output, " Operation(s) [api tc rows len]:\n");
       ScanTabConf::OpData * op = (ScanTabConf::OpData*)
         (theData + ScanTabConf::SignalLength);
       for(size_t i = 0; i<op_count; i++)
@@ -91,9 +91,9 @@ printSCANTABCONF(FILE * output, const Ui
         op++;
       }
     }
-    else
+    else if (len == ScanTabConf::SignalLength + 3 * op_count)
     {
-      assert(len == ScanTabConf::SignalLength + 3 * op_count);
+      fprintf(output, " Operation(s) [api tc rows len]:\n");      
       for(size_t i = 0; i<op_count; i++)
       {
         ScanTabConf::OpData * op = (ScanTabConf::OpData*)
@@ -104,6 +104,12 @@ printSCANTABCONF(FILE * output, const Ui
                 ScanTabConf::getLength(op->rows));
       }
     }
+    else
+    {
+      // ScanTabConf::OpData stored in section 0 of signal.
+      assert(len == ScanTabConf::SignalLength);
+      fprintf(output, " Long signal. Cannot print operations.");
+    }
     fprintf(output, "\n");
   }
   return false;

=== modified file 'storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp'
--- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-07-04 16:30:34 +0000
+++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp	2011-10-03 07:08:19 +0000
@@ -442,6 +442,12 @@ void Dbdict::execDBINFO_SCANREQ(Signal *
         c_opRecordPool.getEntrySize(),
         c_opRecordPool.getUsedHi(),
         { 0,0,0,0 }},
+      { "Operation Data",
+        c_opSectionBufferPool.getUsed(),
+        c_opSectionBufferPool.getSize(),
+        c_opSectionBufferPool.getEntrySize(),
+        c_opSectionBufferPool.getUsedHi(),
+        { 0,0,0,0 }},
       { NULL, 0,0,0,0,{0,0,0,0}}
     };
 

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp	2011-08-25 06:30:20 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/Dbspj.hpp	2011-10-03 07:08:19 +0000
@@ -580,6 +580,8 @@ public:
     Uint32 m_fragCount;
     // The number of fragments that we scan in parallel.
     Uint32 m_parallelism;
+    // True if we are still receiving the first batch for this operation.
+    bool   m_firstBatch;
     /**
      * True if this is the first instantiation of this operation. A child
      * operation will be instantiated once for each batch of its parent.
@@ -1229,7 +1231,6 @@ private:
   void scanIndex_execSCAN_FRAGCONF(Signal*, Ptr<Request>, Ptr<TreeNode>, Ptr<ScanFragHandle>);
   void scanIndex_parent_row(Signal*,Ptr<Request>,Ptr<TreeNode>, const RowPtr&);
   void scanIndex_fixupBound(Ptr<ScanFragHandle> fragPtr, Uint32 ptrI, Uint32);
-  void scanIndex_send(Signal*,Ptr<Request>,Ptr<TreeNode>);
   void scanIndex_send(Signal* signal,
                       Ptr<Request> requestPtr,
                       Ptr<TreeNode> treeNodePtr,

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-09-26 08:02:13 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-10-03 07:08:19 +0000
@@ -5023,6 +5023,7 @@ Dbspj::scanIndex_parent_batch_complete(S
   const ScanFragReq * org = (const ScanFragReq*)data.m_scanFragReq;
   ndbrequire(org->batch_size_rows > 0);
 
+  data.m_firstBatch = true;
   if (treeNodePtr.p->m_bits & TreeNode::T_SCAN_PARALLEL)
   {
     jam();
@@ -5171,6 +5172,9 @@ Dbspj::scanIndex_send(Signal* signal,
                       Uint32 bs_rows,
                       Uint32& batchRange)
 {
+  jam();
+  ndbassert(bs_bytes > 0);
+  ndbassert(bs_rows > 0);
   /**
    * if (m_bits & prunemask):
    * - Range keys sliced out to each ScanFragHandle
@@ -5451,6 +5455,9 @@ Dbspj::scanIndex_execSCAN_FRAGCONF(Signa
 
   if (data.m_frags_outstanding == 0)
   {
+    const bool isFirstBatch = data.m_firstBatch;
+    data.m_firstBatch = false;
+
     const ScanFragReq * const org
       = reinterpret_cast<const ScanFragReq*>(data.m_scanFragReq);
 
@@ -5486,24 +5493,78 @@ Dbspj::scanIndex_execSCAN_FRAGCONF(Signa
     {
       jam();
       ndbrequire((requestPtr.p->m_state & Request::RS_ABORTING) != 0);
-    }
-    else if (! (data.m_rows_received == data.m_rows_expecting))
-    {
-      jam();
+      checkBatchComplete(signal, requestPtr, 1);
       return;
     }
-    else
+
+    if (isFirstBatch && data.m_frags_not_started > 0)
     {
-      if (treeNodePtr.p->m_bits & TreeNode::T_REPORT_BATCH_COMPLETE)
+      /**
+       * Check if we can expect to be able to fetch the entire result set by
+       * asking for more fragments within the same batch. This may improve 
+       * performance for bushy scans, as subsequent bushy branches must be
+       * re-executed for each batch of this scan.
+       */
+      
+      /**
+       * Find the maximal correlation value that we may have seen so far.
+       * Correlation value must be unique within batch and smaller than 
+       * org->batch_size_rows.
+       */
+      const Uint32 maxCorrVal = (data.m_totalRows) == 0 ? 0 :
+        org->batch_size_rows / data.m_parallelism * (data.m_parallelism - 1)
+        + data.m_totalRows;
+      
+      // Number of rows that we can still fetch in this batch.
+      const Int32 remainingRows 
+        = static_cast<Int32>(org->batch_size_rows - maxCorrVal);
+      
+      if (remainingRows >= data.m_frags_not_started &&
+          /**
+           * Check that (remaining row capacity)/(remaining fragments) is 
+           * greater or equal to (rows read so far)/(finished fragments).
+           */
+          remainingRows * static_cast<Int32>(data.m_parallelism) >=
+          static_cast<Int32>(data.m_totalRows * data.m_frags_not_started) &&
+          (org->batch_size_bytes - data.m_totalBytes) * data.m_parallelism >=
+          data.m_totalBytes * data.m_frags_not_started)
       {
         jam();
-        reportBatchComplete(signal, requestPtr, treeNodePtr);
+        Uint32 batchRange = maxCorrVal;
+        DEBUG("::scanIndex_execSCAN_FRAGCONF() first batch was not full."
+              " Asking for new batches from " << data.m_frags_not_started <<
+              " fragments with " << 
+              remainingRows / data.m_frags_not_started 
+              <<" rows and " << 
+              (org->batch_size_bytes - data.m_totalBytes)
+              / data.m_frags_not_started 
+              << " bytes.");
+        scanIndex_send(signal,
+                       requestPtr,
+                       treeNodePtr,
+                       data.m_frags_not_started,
+                       (org->batch_size_bytes - data.m_totalBytes)
+                       / data.m_frags_not_started,
+                       remainingRows / data.m_frags_not_started,
+                       batchRange);
+        return;
       }
     }
+    
+    if (data.m_rows_received != data.m_rows_expecting)
+    {
+      jam();
+      return;
+    }
+    
+    if (treeNodePtr.p->m_bits & TreeNode::T_REPORT_BATCH_COMPLETE)
+    {
+      jam();
+      reportBatchComplete(signal, requestPtr, treeNodePtr);
+    }
 
     checkBatchComplete(signal, requestPtr, 1);
-    return;
-  }
+  } // if (data.m_frags_outstanding == 0)
 }
 
 void
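
A stand-alone illustration of the batch-refill arithmetic in the SCAN_FRAGCONF hunk above (plain Java, made-up figures; none of this is NDB code). With a 1000-row / 32768-byte batch, parallelism 4, 120 rows and 6000 bytes received in the first batch, and 4 fragments not yet started, every check passes and each idle fragment is asked for 32 rows and 6692 bytes:

public class BatchRefillSketch {
    public static void main(String[] args) {
        // Made-up stand-ins for org->batch_size_rows, data.m_totalRows etc.
        int batchSizeRows = 1000, batchSizeBytes = 32768;
        int parallelism = 4;                     // data.m_parallelism
        int totalRows = 120, totalBytes = 6000;  // received in the first batch
        int fragsNotStarted = 4;                 // data.m_frags_not_started

        // Maximal correlation value that may have been seen so far (see patch).
        int maxCorrVal = (totalRows == 0) ? 0
            : batchSizeRows / parallelism * (parallelism - 1) + totalRows;   // 870
        int remainingRows = batchSizeRows - maxCorrVal;                      // 130

        boolean refill =
            remainingRows >= fragsNotStarted
            // (remaining row capacity)/(remaining fragments) must be at least
            // (rows read so far)/(finished fragments), for rows and for bytes.
            && remainingRows * parallelism >= totalRows * fragsNotStarted
            && (batchSizeBytes - totalBytes) * parallelism >= totalBytes * fragsNotStarted;

        if (refill) {
            System.out.println("Ask " + fragsNotStarted + " idle fragments for "
                + remainingRows / fragsNotStarted + " rows and "
                + (batchSizeBytes - totalBytes) / fragsNotStarted + " bytes each");
        } else {
            System.out.println("Wait for the expected rows and complete the batch");
        }
    }
}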

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilder.cpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-09-14 10:30:08 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilder.cpp	2011-09-29 11:35:02 +0000
@@ -343,7 +343,8 @@ NdbQueryDef::destroy() const
 void
 NdbQueryDef::print() const
 {
-  m_impl.getQueryOperation(0U).printTree(0, Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32>());
+  m_impl.getQueryOperation(0U)
+    .printTree(0, NdbQueryOperationDefImpl::SiblingMask());
 }
 
 /*************************************************************************
@@ -1188,7 +1189,8 @@ NdbQueryBuilderImpl::prepare()
   if (doPrintQueryTree)
   {
     ndbout << "Query tree:" << endl;
-    def->getQueryOperation(0U).printTree(0, Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32>());
+    def->getQueryOperation(0U)
+      .printTree(0, NdbQueryOperationDefImpl::SiblingMask());
   }
 
   return def;
@@ -2159,7 +2161,8 @@ NdbQueryOperationDefImpl::appendChildPro
  * that connect the tree nodes.
  */
 static void printMargin(Uint32 depth, 
-                        Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32> hasMoreSiblingsMask, 
+                        NdbQueryOperationDefImpl::SiblingMask 
+                        hasMoreSiblingsMask, 
                         bool header)
 {
   if (depth > 0)
@@ -2193,11 +2196,10 @@ static void printMargin(Uint32 depth,
 
 void 
 NdbQueryOperationDefImpl::printTree(Uint32 depth, 
-                                    Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32> 
-                                    hasMoreSiblingsMask) const
+                                    SiblingMask hasMoreSiblingsMask) const
 {
   // Print vertical line leading down to this node.
-  Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32> firstLineMask = hasMoreSiblingsMask;
+  SiblingMask firstLineMask = hasMoreSiblingsMask;
   firstLineMask.set(depth);
   printMargin(depth, firstLineMask, false);
   ndbout << endl;
@@ -2214,22 +2216,24 @@ NdbQueryOperationDefImpl::printTree(Uint
     printMargin(depth, hasMoreSiblingsMask, false);
     ndbout << " index: " << getIndex()->getName() << endl; 
   }
-  /* For each child but the last one, use a mask with an extra bit set to
-   * indicate that there are more siblings.
-   */
-  hasMoreSiblingsMask.set(depth+1);
+
   for (int childNo = 0; 
-       childNo < static_cast<int>(getNoOfChildOperations()) - 1; 
+       childNo < static_cast<int>(getNoOfChildOperations()); 
        childNo++)
   {
-    getChildOperation(childNo).printTree(depth+1, hasMoreSiblingsMask);
-  }
-  if (getNoOfChildOperations() > 0)
-  {
-    // The last child has no more siblings.
-    hasMoreSiblingsMask.clear(depth+1);
-    getChildOperation(getNoOfChildOperations() - 1)
-      .printTree(depth+1, hasMoreSiblingsMask);
+    if (childNo == 0)
+    {
+      /* For each child but the last one, use a mask with an extra bit set to
+       * indicate that there are more siblings.
+       */
+      hasMoreSiblingsMask.set(depth+1);
+    }
+    if (childNo == static_cast<int>(getNoOfChildOperations()) - 1)
+    {
+      // The last child has no more siblings.
+      hasMoreSiblingsMask.clear(depth+1);
+    }
+    getChildOperation(childNo).printTree(depth+1, hasMoreSiblingsMask); 
   }
 } // NdbQueryOperationDefImpl::printTree()
 

=== modified file 'storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp'
--- a/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-09-14 10:30:08 +0000
+++ b/storage/ndb/src/ndbapi/NdbQueryBuilderImpl.hpp	2011-09-29 11:35:02 +0000
@@ -429,6 +429,12 @@ public:
   // Get type of query operation
   virtual NdbQueryOperationDef::Type getType() const = 0;
 
+  /**
+   * Used for telling if parent at depth n has more siblings. (In that case
+   * we need to draw a horizontal line leading to that sibling.)
+   */
+  typedef Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32> SiblingMask;
+
   /** Print query tree graph to trace file (using recursion).
    * @param depth Number of ancestor nodes that this node has.
    * @param hasMoreSiblingsMask The n'th bit should be set if the n'th ancestor
@@ -436,7 +442,7 @@ public:
    */
   void printTree(
            Uint32 depth, 
-           Bitmask<(NDB_SPJ_MAX_TREE_NODES+31)/32> hasMoreSiblingsMask) const;
+           SiblingMask hasMoreSiblingsMask) const;
 
 protected:
   // QueryTree building:
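
The SiblingMask typedef above is a per-depth bit set: bit n stays set while the ancestor at depth n still has unprinted siblings, which tells the margin printer where to keep drawing connector lines. A minimal stand-in sketch of the same bookkeeping (plain Java with java.util.BitSet and a hypothetical Node type, not the NDB Bitmask template):

import java.util.BitSet;
import java.util.List;

public class SiblingMaskSketch {
    static class Node {
        final String name;
        final List<Node> children;
        Node(String name, List<Node> children) { this.name = name; this.children = children; }

        /** Bit d of 'moreSiblings' is set if the ancestor at depth d has siblings left to print. */
        void printTree(int depth, BitSet moreSiblings) {
            StringBuilder line = new StringBuilder();
            for (int d = 1; d < depth; d++) {
                line.append(moreSiblings.get(d) ? "|  " : "   ");  // connector to a pending sibling
            }
            if (depth > 0) {
                line.append("+- ");
            }
            System.out.println(line + name);
            for (int childNo = 0; childNo < children.size(); childNo++) {
                // Every child except the last leaves bit depth+1 set for its younger siblings,
                // mirroring the set()/clear() calls in the rewritten printTree() loop.
                moreSiblings.set(depth + 1, childNo < children.size() - 1);
                children.get(childNo).printTree(depth + 1, moreSiblings);
            }
        }
    }

    public static void main(String[] args) {
        Node tree = new Node("lookup_t1", List.of(
            new Node("scan_t2", List.of(new Node("lookup_t3", List.of()))),
            new Node("lookup_t4", List.of())));
        tree.printTree(0, new BitSet());
    }
}

The rewritten loop in NdbQueryBuilder.cpp above folds the old special case for the last child into the loop body in the same way: set the bit for the first child, clear it again for the last one.
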

=== modified file 'storage/ndb/test/ndbapi/testNdbApi.cpp'
--- a/storage/ndb/test/ndbapi/testNdbApi.cpp	2011-09-27 05:37:30 +0000
+++ b/storage/ndb/test/ndbapi/testNdbApi.cpp	2011-09-29 06:48:39 +0000
@@ -4826,8 +4826,8 @@ runRestarts(NDBT_Context* ctx, NDBT_Step
       }
 
       ndbout << "Restart cluster" << endl;
-      if (restarter.restartAll(NdbRestarter::NRRF_NOSTART |
-                               NdbRestarter::NRRF_ABORT) != 0)
+      if (restarter.restartAll2(Uint32(NdbRestarter::NRRF_NOSTART |
+                                       NdbRestarter::NRRF_ABORT)) != 0)
       {
         g_err << "Failed to restartAll" << endl;
         result = NDBT_FAILED;

=== modified file 'storage/ndb/test/ndbapi/testRestartGci.cpp'
--- a/storage/ndb/test/ndbapi/testRestartGci.cpp	2011-09-28 10:04:03 +0000
+++ b/storage/ndb/test/ndbapi/testRestartGci.cpp	2011-09-29 06:48:39 +0000
@@ -486,7 +486,7 @@ int runNodeInitialRestarts(NDBT_Context*
     int nodeId = restarter.getNode(NdbRestarter::NS_RANDOM);
     ndbout_c("Restarting node %u", nodeId);
 
-    if (restarter.restartOneDbNode(nodeId, NdbRestarter::NRRF_INITIAL) != 0)
+    if (restarter.restartOneDbNode2(nodeId, NdbRestarter::NRRF_INITIAL) != 0)
     {
       ndbout_c("Error restarting node");
       ctx->stopTest();

=== modified file 'storage/ndb/test/src/NDBT_Find.cpp'
--- a/storage/ndb/test/src/NDBT_Find.cpp	2011-07-04 16:30:34 +0000
+++ b/storage/ndb/test/src/NDBT_Find.cpp	2011-10-03 07:08:19 +0000
@@ -81,7 +81,10 @@ NDBT_find_ndb_mgmd(BaseString& path)
 {
   NDBT_find_binary(path, "ndb_mgmd",
                    "../../src/mgmsrv",
-                   "../storage/ndb/src/mgmsrv/",
+                   "../storage/ndb/src/mgmsrv",
+                   "../libexec",
+                   "../sbin",
+                   "../bin",
                    NULL);
 }
 

No bundle (reason: useless for push emails).