List: Commits
From: Jonas Oreland    Date: June 13 2011 9:17am
Subject: bzr commit into mysql-5.1-telco-7.0-wl4124-new0 branch (jonas:4398)
#At file:///home/jonas/src/mysql-5.1-telco-7.0-wl4124-new0/ based on revid:pekka.nousiainen@stripped

 4398 Jonas Oreland	2011-06-13 [merge]
      ndb merge 70 into wl4124-new0

    modified:
      mysql-test/suite/ndb_rpl/my.cnf
      mysql-test/suite/ndb_rpl/t/ndb_rpl_break_3_chain.cnf
      mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.cnf
      mysql-test/suite/ndb_team/my.cnf
      mysql-test/suite/rpl_ndb/my.cnf
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster.h
      sql/sql_class.cc
      sql/sql_class.h
      storage/ndb/src/common/portlib/CMakeLists.txt
      storage/ndb/src/common/util/ndbzio.c
      storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp
      storage/ndb/src/kernel/vm/NdbSeqLock.hpp
      storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp
      storage/ndb/test/ndbapi/testSystemRestart.cpp
      storage/ndb/test/src/NdbBackup.cpp
=== modified file 'mysql-test/suite/ndb_rpl/my.cnf'
--- a/mysql-test/suite/ndb_rpl/my.cnf	2011-05-13 07:40:50 +0000
+++ b/mysql-test/suite/ndb_rpl/my.cnf	2011-06-08 19:25:29 +0000
@@ -60,7 +60,6 @@ relay-log=                    slave-rela
 # Cluster only supports row format
 binlog-format=                 row
 
-init-rpl-role=                slave
 log-slave-updates
 master-retry-count=           10
 
@@ -83,8 +82,6 @@ skip-slave-start
 # test results will vary, thus a relative path is used.
 slave-load-tmpdir=            ../../../tmp
 
-rpl-recovery-rank=            @mysqld.1.slave.server-id
-
 [ENV]
 NDB_CONNECTSTRING=            @mysql_cluster.1.ndb_connectstring
 MASTER_MYPORT=                @mysqld.1.1.port

=== modified file 'mysql-test/suite/ndb_rpl/t/ndb_rpl_break_3_chain.cnf'
--- a/mysql-test/suite/ndb_rpl/t/ndb_rpl_break_3_chain.cnf	2011-05-13 07:40:50 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_break_3_chain.cnf	2011-06-08 19:25:29 +0000
@@ -66,8 +66,6 @@ default-storage-engine=myisam
 # test results will vary, thus a relative path is used.
 slave-load-tmpdir=            ../../../tmp
 
-rpl-recovery-rank=            @mysqld.1.cluster2.server-id
-
 [mysqld.1.cluster3]
 log-bin=                      cluster3-bin
 relay-log=                    cluster3-relay-bin
@@ -83,8 +81,6 @@ default-storage-engine=myisam
 # test results will vary, thus a relative path is used.
 slave-load-tmpdir=            ../../../tmp
 
-rpl-recovery-rank=            @mysqld.1.cluster3.server-id
-
 [ENV]
 SERVER_MYPORT_1=              @mysqld.1.cluster1.port
 SERVER_MYPORT_2=              @mysqld.1.cluster2.port

=== modified file 'mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.cnf'
--- a/mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.cnf	2011-05-13 07:40:50 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.cnf	2011-06-08 19:25:29 +0000
@@ -58,7 +58,6 @@ binlog_format=row
 [mysqld.1.slave]
 # Note no binlog on this slave
 server-id= 4
-init-rpl-role= slave
 skip-slave-start
 loose-skip-innodb
 slave-load-tmpdir= ../../../tmp
@@ -69,7 +68,6 @@ ndb_connectstring= @mysql_cluster.slave.
 [mysqld.2.slave]
 # Note binlog on this slave, but not logging slave updates
 server-id= 5
-init-rpl-role= slave
 skip-slave-start
 loose-skip-innodb
 slave-load-tmpdir= ../../../tmp
@@ -82,7 +80,6 @@ binlog_format=row
 [mysqld.3.slave]
 # Note binlog on this slave, with slave updates logged
 server-id= 6
-init-rpl-role= slave
 skip-slave-start
 loose-skip-innodb
 slave-load-tmpdir= ../../../tmp

=== modified file 'mysql-test/suite/ndb_team/my.cnf'
--- a/mysql-test/suite/ndb_team/my.cnf	2011-04-15 09:31:03 +0000
+++ b/mysql-test/suite/ndb_team/my.cnf	2011-06-08 19:25:29 +0000
@@ -50,7 +50,6 @@ master-connect-retry=         1
 log-bin=                      slave-bin
 relay-log=                    slave-relay-bin
 
-init-rpl-role=                slave
 log-slave-updates
 master-retry-count=           10
 
@@ -68,9 +67,6 @@ skip-slave-start
 # test results will vary, thus a relative path is used.
 slave-load-tmpdir=            ../../../tmp
 
-rpl-recovery-rank=            @mysqld.1.slave.server-id
-
-
 [ENV]
 NDB_CONNECTSTRING=            @mysql_cluster.1.ndb_connectstring
 MASTER_MYPORT=                @mysqld.1.1.port

=== modified file 'mysql-test/suite/rpl_ndb/my.cnf'
--- a/mysql-test/suite/rpl_ndb/my.cnf	2011-04-26 09:28:41 +0000
+++ b/mysql-test/suite/rpl_ndb/my.cnf	2011-06-08 19:25:29 +0000
@@ -60,7 +60,6 @@ relay-log=                    slave-rela
 # Cluster only supports row format
 binlog-format=                 row
 
-init-rpl-role=                slave
 log-slave-updates
 master-retry-count=           10
 
@@ -83,8 +82,6 @@ skip-slave-start
 # test results will vary, thus a relative path is used.
 slave-load-tmpdir=            ../../../tmp
 
-rpl-recovery-rank=            @mysqld.1.slave.server-id
-
 [ENV]
 NDB_CONNECTSTRING=            @mysql_cluster.1.ndb_connectstring
 MASTER_MYPORT=                @mysqld.1.1.port

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-06-12 16:54:59 +0000
+++ b/sql/ha_ndbcluster.cc	2011-06-13 09:17:39 +0000
@@ -4664,26 +4664,79 @@ int ha_ndbcluster::bulk_update_row(const
 
 int ha_ndbcluster::exec_bulk_update(uint *dup_key_found)
 {
+  NdbTransaction* trans= m_thd_ndb->trans;
   DBUG_ENTER("ha_ndbcluster::exec_bulk_update");
   *dup_key_found= 0;
-  if (m_thd_ndb->m_unsent_bytes &&
-      !thd_allow_batch(table->in_use) &&
-      (!m_thd_ndb->m_handler ||
-       m_blobs_pending))
+
+  // m_handler must be NULL or point to _this_ handler instance
+  assert(m_thd_ndb->m_handler == NULL || m_thd_ndb->m_handler == this);
+
+  if (m_thd_ndb->m_handler &&
+      m_read_before_write_removal_possible)
   {
+    /*
+      This is an autocommit involving only one table and rbwr is on
+
+      Commit the autocommit transaction early(before the usual place
+      in ndbcluster_commit) in order to:
+      1) save one round trip, "no-commit+commit" converted to "commit"
+      2) return the correct number of updated and affected rows
+         to the update loop(which will ask handler in rbwr mode)
+    */
+    DBUG_PRINT("info", ("committing auto-commit+rbwr early"));
     uint ignore_count= 0;
-    if (execute_no_commit(m_thd_ndb, m_thd_ndb->trans,
-                          m_ignore_no_key || m_read_before_write_removal_used,
-                          &ignore_count) != 0)
+    const int ignore_error= 1;
+    if (execute_commit(m_thd_ndb, trans,
+                       m_thd_ndb->m_force_send, ignore_error,
+                       &ignore_count) != 0)
     {
       no_uncommitted_rows_execute_failure();
-      DBUG_RETURN(ndb_err(m_thd_ndb->trans));
+      DBUG_RETURN(ndb_err(trans));
     }
+    DBUG_PRINT("info", ("ignore_count: %u", ignore_count));
     assert(m_rows_changed >= ignore_count);
     assert(m_rows_updated >= ignore_count);
     m_rows_changed-= ignore_count;
     m_rows_updated-= ignore_count;
+    DBUG_RETURN(0);
+  }
+
+  if (m_thd_ndb->m_unsent_bytes == 0)
+  {
+    DBUG_PRINT("exit", ("skip execute - no unsent bytes"));
+    DBUG_RETURN(0);
+  }
+
+  if (thd_allow_batch(table->in_use))
+  {
+    /*
+      Turned on by @@transaction_allow_batching=ON
+      or implicitly by slave exec thread
+    */
+    DBUG_PRINT("exit", ("skip execute - transaction_allow_batching is ON"));
+    DBUG_RETURN(0);
+  }
+
+  if (m_thd_ndb->m_handler &&
+      !m_blobs_pending)
+  {
+    // Execute at commit time(in 'ndbcluster_commit') to save a round trip
+    DBUG_PRINT("exit", ("skip execute - simple autocommit"));
+    DBUG_RETURN(0);
   }
+
+  uint ignore_count= 0;
+  if (execute_no_commit(m_thd_ndb, trans,
+                        m_ignore_no_key || m_read_before_write_removal_used,
+                        &ignore_count) != 0)
+  {
+    no_uncommitted_rows_execute_failure();
+    DBUG_RETURN(ndb_err(trans));
+  }
+  assert(m_rows_changed >= ignore_count);
+  assert(m_rows_updated >= ignore_count);
+  m_rows_changed-= ignore_count;
+  m_rows_updated-= ignore_count;
   DBUG_RETURN(0);
 }
 
@@ -5019,25 +5072,76 @@ bool ha_ndbcluster::start_bulk_delete()
 
 int ha_ndbcluster::end_bulk_delete()
 {
+  NdbTransaction* trans= m_thd_ndb->trans;
   DBUG_ENTER("end_bulk_delete");
   assert(m_is_bulk_delete); // Don't allow end() without start()
-  if (m_thd_ndb->m_unsent_bytes &&
-      !thd_allow_batch(table->in_use) &&
-      !m_thd_ndb->m_handler)
+  m_is_bulk_delete = false;
+
+  // m_handler must be NULL or point to _this_ handler instance
+  assert(m_thd_ndb->m_handler == NULL || m_thd_ndb->m_handler == this);
+
+  if (m_thd_ndb->m_handler &&
+      m_read_before_write_removal_possible)
   {
+    /*
+      This is an autocommit involving only one table and rbwr is on
+
+      Commit the autocommit transaction early(before the usual place
+      in ndbcluster_commit) in order to:
+      1) save one round trip, "no-commit+commit" converted to "commit"
+      2) return the correct number of updated and affected rows
+         to the delete loop(which will ask handler in rbwr mode)
+    */
+    DBUG_PRINT("info", ("committing auto-commit+rbwr early"));
     uint ignore_count= 0;
-    if (execute_no_commit(m_thd_ndb, m_thd_ndb->trans,
-                          m_ignore_no_key || m_read_before_write_removal_used,
-                          &ignore_count) != 0)
+    const int ignore_error= 1;
+    if (execute_commit(m_thd_ndb, trans,
+                       m_thd_ndb->m_force_send, ignore_error,
+                       &ignore_count) != 0)
     {
       no_uncommitted_rows_execute_failure();
-      m_is_bulk_delete = false;
-      DBUG_RETURN(ndb_err(m_thd_ndb->trans));
+      DBUG_RETURN(ndb_err(trans));
     }
+    DBUG_PRINT("info", ("ignore_count: %u", ignore_count));
     assert(m_rows_deleted >= ignore_count);
     m_rows_deleted-= ignore_count;
+    DBUG_RETURN(0);
   }
-  m_is_bulk_delete = false;
+
+  if (m_thd_ndb->m_unsent_bytes == 0)
+  {
+    DBUG_PRINT("exit", ("skip execute - no unsent bytes"));
+    DBUG_RETURN(0);
+  }
+
+  if (thd_allow_batch(table->in_use))
+  {
+    /*
+      Turned on by @@transaction_allow_batching=ON
+      or implicitly by slave exec thread
+    */
+    DBUG_PRINT("exit", ("skip execute - transaction_allow_batching is ON"));
+    DBUG_RETURN(0);
+  }
+
+  if (m_thd_ndb->m_handler)
+  {
+    // Execute at commit time(in 'ndbcluster_commit') to save a round trip
+    DBUG_PRINT("exit", ("skip execute - simple autocommit"));
+    DBUG_RETURN(0);
+  }
+
+  uint ignore_count= 0;
+  if (execute_no_commit(m_thd_ndb, trans,
+                        m_ignore_no_key || m_read_before_write_removal_used,
+                        &ignore_count) != 0)
+  {
+    no_uncommitted_rows_execute_failure();
+    DBUG_RETURN(ndb_err(trans));
+  }
+
+  assert(m_rows_deleted >= ignore_count);
+  m_rows_deleted-= ignore_count;
   DBUG_RETURN(0);
 }
 
@@ -7113,58 +7217,18 @@ int ndbcluster_commit(handlerton *hton,
     if (thd_ndb->m_handler &&
         thd_ndb->m_handler->m_read_before_write_removal_possible)
     {
-#ifndef NDB_WITHOUT_READ_BEFORE_WRITE_REMOVAL
-      /* Autocommit with read-before-write removal
-       * Some operations in this autocommitted statement have not
-       * yet been executed
-       * They will be executed here as part of commit, and the results
-       * (rowcount, message) sent back to the client will then be modified 
-       * according to how the execution went.
-       * This saves a single roundtrip in the autocommit case
-       */
-      uint ignore_count= 0;
-      res= execute_commit(thd_ndb, trans, THDVAR(thd, force_send),
-                          TRUE, &ignore_count);
-      if (!res && ignore_count)
-      {
-        DBUG_PRINT("info", ("AutoCommit + RBW removal, ignore_count=%u",
-                            ignore_count));
-        /* We have some rows to ignore, modify recorded results,
-         * regenerate result message as required.
-         */
-        thd->row_count_func-= ignore_count;
-
-        ha_rows affected= 0;
-        char buff[ STRING_BUFFER_USUAL_SIZE ];
-        const char* msg= NULL;
-        if (thd->lex->sql_command == SQLCOM_DELETE)
-        {
-          assert(thd_ndb->m_handler->m_rows_deleted >= ignore_count);
-          affected= (thd_ndb->m_handler->m_rows_deleted-= ignore_count);
-        }
-        else
-        {
-          DBUG_PRINT("info", ("Update : message was %s", 
-                              thd->main_da.message()));
-          assert(thd_ndb->m_handler->m_rows_updated >= ignore_count);
-          affected= (thd_ndb->m_handler->m_rows_updated-= ignore_count);
-          /* For update in this scenario, we set found and changed to be 
-           * the same as affected
-           * Regenerate the update message
-           */
-          sprintf(buff, ER(ER_UPDATE_INFO), (ulong)affected, (ulong)affected,
-                  (ulong) thd->cuted_fields);
-          msg= buff;
-          DBUG_PRINT("info", ("Update : message changed to %s",
-                              msg));
-        }
-
-        /* Modify execution result + optionally message */
-        thd->main_da.modify_affected_rows(affected, msg);
+      /*
+        This is an autocommit involving only one table and
+        rbwr is on, thus the transaction has already been
+        committed in exec_bulk_update() or end_bulk_delete()
+      */
+      DBUG_PRINT("info", ("autocommit+rbwr, transaction already committed"));
+      if (trans->commitStatus() != NdbTransaction::Committed)
+      {
+        sql_print_error("found uncommitted autocommit+rbwr transaction, "
+                        "commit status: %d", trans->commitStatus());
+        abort();
       }
-#else
-      abort(); // Should never come here without rbwr support
-#endif
     }
     else
       res= execute_commit(thd_ndb, trans, THDVAR(thd, force_send), FALSE);
@@ -8453,6 +8517,20 @@ int ha_ndbcluster::create(const char *na
     goto abort_return;
   }
 
+  // Save the table level storage media setting
+  switch(create_info->storage_media)
+  {
+    case HA_SM_DISK:
+      tab.setStorageType(NdbDictionary::Column::StorageTypeDisk);
+      break;
+    case HA_SM_DEFAULT:
+      tab.setStorageType(NdbDictionary::Column::StorageTypeDefault);
+      break;
+    case HA_SM_MEMORY:
+      tab.setStorageType(NdbDictionary::Column::StorageTypeMemory);
+      break;
+  }
+
   DBUG_PRINT("info", ("Table %s is %s stored with tablespace %s",
                       m_tabname,
                       (use_disk) ? "disk" : "memory",
@@ -10593,10 +10671,11 @@ int ndbcluster_find_all_files(THD *thd)
   DBUG_RETURN(-(skipped + unhandled));
 }
 
-int ndbcluster_find_files(handlerton *hton, THD *thd,
-                          const char *db,
-                          const char *path,
-                          const char *wild, bool dir, List<LEX_STRING> *files)
+
+static int
+ndbcluster_find_files(handlerton *hton, THD *thd,
+                      const char *db, const char *path,
+                      const char *wild, bool dir, List<LEX_STRING> *files)
 {
   DBUG_ENTER("ndbcluster_find_files");
   DBUG_PRINT("enter", ("db: %s", db));
@@ -10970,7 +11049,7 @@ static int ndbcluster_init(void *p)
     ndbcluster_binlog_init_handlerton();
     h->flags=            HTON_CAN_RECREATE | HTON_TEMPORARY_NOT_SUPPORTED;
     h->discover=         ndbcluster_discover;
-    h->find_files= ndbcluster_find_files;
+    h->find_files=       ndbcluster_find_files;
     h->table_exists_in_engine= ndbcluster_table_exists_in_engine;
   }
 

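For reference while reading the ha_ndbcluster.cc hunks above: the rewritten exec_bulk_update()/end_bulk_delete() replace one negated compound condition with a sequence of guard clauses, each matching one of the DBUG_PRINT exit reasons. A minimal sketch of that decision order follows; all names here are illustrative stand-ins for the real handler members (end_bulk_delete() is the same minus the blob check), not code from this tree.

  /* Sketch only: the guard-clause ordering of the rewritten bulk flush. */
  enum BulkFlushAction { COMMIT_EARLY, SKIP_EXECUTE, EXECUTE_NO_COMMIT };

  static BulkFlushAction
  decide_bulk_flush(bool single_table_autocommit, /* m_thd_ndb->m_handler != NULL */
                    bool rbwr_possible,           /* m_read_before_write_removal_possible */
                    unsigned unsent_bytes,        /* m_thd_ndb->m_unsent_bytes */
                    bool allow_batch,             /* thd_allow_batch(table->in_use) */
                    bool blobs_pending)           /* m_blobs_pending */
  {
    if (single_table_autocommit && rbwr_possible)
      return COMMIT_EARLY;      /* commit now: saves a round trip, fixes affected-row count */
    if (unsent_bytes == 0)
      return SKIP_EXECUTE;      /* nothing buffered, nothing to send */
    if (allow_batch)
      return SKIP_EXECUTE;      /* @@transaction_allow_batching=ON or slave exec thread */
    if (single_table_autocommit && !blobs_pending)
      return SKIP_EXECUTE;      /* defer the execute to ndbcluster_commit() */
    return EXECUTE_NO_COMMIT;   /* flush the batched operations without committing */
  }
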
=== modified file 'sql/ha_ndbcluster.h'
--- a/sql/ha_ndbcluster.h	2011-06-12 16:54:59 +0000
+++ b/sql/ha_ndbcluster.h	2011-06-13 09:17:39 +0000
@@ -944,8 +944,6 @@ private:
 
 int ndbcluster_discover(THD* thd, const char* dbname, const char* name,
                         const void** frmblob, uint* frmlen);
-int ndbcluster_find_files(THD *thd,const char *db,const char *path,
-                          const char *wild, bool dir, List<LEX_STRING> *files);
 int ndbcluster_table_exists_in_engine(THD* thd,
                                       const char *db, const char *name);
 void ndbcluster_print_error(int error, const NdbOperation *error_op);

=== modified file 'sql/sql_class.cc'
--- a/sql/sql_class.cc	2011-04-08 11:06:53 +0000
+++ b/sql/sql_class.cc	2011-06-09 09:20:47 +0000
@@ -583,24 +583,6 @@ Diagnostics_area::set_error_status(THD *
   m_status= DA_ERROR;
 }
 
-/**
- * modify_affected_rows
- * Modify the number of affected rows, and optionally the 
- * message in the Diagnostics area
- */
-void
-Diagnostics_area::modify_affected_rows(ha_rows new_affected_rows,
-                                       const char* new_message)
-{
-  DBUG_ASSERT(is_set());
-  DBUG_ASSERT(m_status == DA_OK);
-  DBUG_ASSERT(can_overwrite_status);
-
-  m_affected_rows= new_affected_rows;
-  if (new_message)
-    strmake(m_message, new_message, sizeof(m_message) - 1);
-}
-
 
 /**
   Mark the diagnostics area as 'DISABLED'.

=== modified file 'sql/sql_class.h'
--- a/sql/sql_class.h	2011-04-08 13:59:44 +0000
+++ b/sql/sql_class.h	2011-06-09 09:20:47 +0000
@@ -1163,9 +1163,6 @@ public:
   void set_eof_status(THD *thd);
   void set_error_status(THD *thd, uint sql_errno_arg, const char *message_arg);
 
-  /* Modify affected rows count and optionally message */
-  void modify_affected_rows(ha_rows new_affected_rows, const char *new_message= 0);
-
   void disable_status();
 
   void reset_diagnostics_area();

=== modified file 'storage/ndb/src/common/portlib/CMakeLists.txt'
--- a/storage/ndb/src/common/portlib/CMakeLists.txt	2011-05-11 12:23:24 +0000
+++ b/storage/ndb/src/common/portlib/CMakeLists.txt	2011-05-25 06:52:33 +0000
@@ -40,5 +40,7 @@ TARGET_LINK_LIBRARIES(NdbDir-t ndbportli
 ADD_EXECUTABLE(NdbGetInAddr-t NdbTCP.cpp)
 SET_TARGET_PROPERTIES(NdbGetInAddr-t
                       PROPERTIES COMPILE_FLAGS "-DTEST_NDBGETINADDR")
+TARGET_LINK_LIBRARIES(NdbGetInAddr-t ${LIBSOCKET} ${LIBNSL})
+
 
 

=== modified file 'storage/ndb/src/common/util/ndbzio.c'
--- a/storage/ndb/src/common/util/ndbzio.c	2011-04-18 14:15:23 +0000
+++ b/storage/ndb/src/common/util/ndbzio.c	2011-05-24 14:34:41 +0000
@@ -167,9 +167,11 @@ void ndbz_free(voidpf opaque, voidpf add
 }
 
 #ifdef _WIN32
-/* Windows doesn't define ENOTSUP, define it same as Solaris */
+#ifndef ENOTSUP
+/* If Windows doesn't define ENOTSUP, define it same as Solaris */
 #define ENOTSUP 48
 #endif
+#endif
 
 #ifndef HAVE_POSIX_MEMALIGN
 static inline int posix_memalign(void **memptr, size_t alignment, size_t size)

=== modified file 'storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-05-25 09:31:27 +0000
+++ b/storage/ndb/src/kernel/blocks/dbspj/DbspjMain.cpp	2011-06-07 12:08:29 +0000
@@ -4879,12 +4879,6 @@ Dbspj::scanIndex_parent_row(Signal* sign
     }
 
     Uint32 ptrI = fragPtr.p->m_rangePtrI;
-    if (ptrI == RNIL)
-    {
-      jam();
-      data.m_frags_not_complete++;
-    }
-
     bool hasNull;
     if (treeNodePtr.p->m_bits & TreeNode::T_KEYINFO_CONSTRUCTED)
     {
@@ -4972,6 +4966,39 @@ Dbspj::scanIndex_parent_batch_complete(S
   data.m_rows_received = 0;
   data.m_rows_expecting = 0;
   ndbassert(data.m_frags_outstanding == 0);
+  ndbassert(data.m_frags_not_complete == 0);
+
+  Ptr<ScanFragHandle> fragPtr;
+  {
+    Local_ScanFragHandle_list list(m_scanfraghandle_pool, data.m_fragments);
+    list.first(fragPtr);
+
+    if ((treeNodePtr.p->m_bits & TreeNode::T_PRUNE_PATTERN) == 0)
+    {
+      if (fragPtr.p->m_rangePtrI != RNIL)
+      {
+        // No pruning, so we must scan all fragments.
+        jam();
+        data.m_frags_not_complete = data.m_fragCount;
+      }
+    }
+    else
+    {
+      while(!fragPtr.isNull())
+      {
+        if (fragPtr.p->m_rangePtrI != RNIL)
+        {
+          jam();
+          /**
+           * This is a pruned scan, so we must scan those fragments that
+           * some distribution key hashed to.
+           */
+          data.m_frags_not_complete++;
+        }
+        list.next(fragPtr);
+      }
+    }
+  }
 
   if (data.m_frags_not_complete == 0)
   {
@@ -4981,11 +5008,6 @@ Dbspj::scanIndex_parent_batch_complete(S
      */
     return;
   }
-  else if ((treeNodePtr.p->m_bits & TreeNode::T_PRUNE_PATTERN) == 0)
-  {
-    jam();
-    data.m_frags_not_complete = data.m_fragCount;
-  }
 
   /**
    * When parent's batch is complete, we send our batch
@@ -5014,7 +5036,6 @@ Dbspj::scanIndex_parent_batch_repeat(Sig
     DEBUG("Register TreeNode for restart, m_node_no: " << treeNodePtr.p->m_node_no);
     ndbrequire(treeNodePtr.p->m_state != TreeNode::TN_ACTIVE);
     registerActiveCursor(requestPtr, treeNodePtr);
-    data.m_frags_not_complete = 1;
     data.m_batch_chunks = 0;
   }
 }

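The DBSPJ hunks above move the m_frags_not_complete bookkeeping out of scanIndex_parent_row() and recompute it once in scanIndex_parent_batch_complete(). A hedged sketch of that count, using a plain std::vector in place of the block's fragment list (names are simplified, not the actual block code):

  #include <cstddef>
  #include <vector>

  /* Sketch only: has_range stands in for "fragPtr.p->m_rangePtrI != RNIL". */
  struct FragHandle { bool has_range; };

  static unsigned
  frags_not_complete(const std::vector<FragHandle>& frags, bool prune_pattern)
  {
    if (!prune_pattern)
    {
      /* No pruning: if the first fragment got a range, every fragment must be scanned. */
      if (!frags.empty() && frags[0].has_range)
        return (unsigned)frags.size();
      return 0;
    }
    /* Pruned scan: count only the fragments some distribution key hashed to. */
    unsigned count = 0;
    for (std::size_t i = 0; i < frags.size(); i++)
      if (frags[i].has_range)
        count++;
    return count;
  }
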
=== modified file 'storage/ndb/src/kernel/vm/NdbSeqLock.hpp'
--- a/storage/ndb/src/kernel/vm/NdbSeqLock.hpp	2011-05-17 07:06:30 +0000
+++ b/storage/ndb/src/kernel/vm/NdbSeqLock.hpp	2011-06-10 12:17:51 +0000
@@ -86,7 +86,7 @@ struct NdbSeqLock
   void write_lock() {}
   void write_unlock() {}
 
-  Uint32 read_lock() {}
+  Uint32 read_lock() { return 0; }
   bool read_unlock(Uint32 val) const { return true;}
 };
 

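The NdbSeqLock change gives the single-threaded stub's read_lock() the return value its callers rely on: the usual seqlock read pattern loops on the returned sequence value until read_unlock() reports a consistent read. Illustrative usage only, assuming NdbSeqLock.hpp (and its Uint32 typedef) is on the include path; the Counters type and globals are placeholders, not code from this tree.

  #include "NdbSeqLock.hpp"

  /* Placeholder data guarded by the lock. */
  struct Counters { Uint32 reads; Uint32 writes; };

  static NdbSeqLock g_lock;
  static Counters   g_counters;

  static Counters
  read_counters()
  {
    Counters copy;
    Uint32 seq;
    do
    {
      seq = g_lock.read_lock();         /* snapshot the sequence count (hence 'return 0' in the stub) */
      copy = g_counters;                /* read the protected data */
    } while (!g_lock.read_unlock(seq)); /* retry if a writer got in between */
    return copy;
  }
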
=== modified file 'storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp	2011-06-12 16:54:32 +0000
+++ b/storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp	2011-06-13 09:17:39 +0000
@@ -2263,7 +2263,7 @@ NdbIndexStatImpl::MemDefault::mem_alloc(
   {
     size += 4 - size % 4;
   }
-  Item* item = (Item*)my_malloc(sizeof(Item) + size, MYF(0));
+  Item* item = (Item*)malloc(sizeof(Item) + size);
   if (item != 0)
   {
     item->m_magic = MemMagic;
@@ -2282,9 +2282,9 @@ NdbIndexStatImpl::MemDefault::mem_free(v
   {
     Item* item = (Item*)ptr - 1;
     assert(item->m_magic == MemMagic);
-    Uint32 size = item->m_size;
+    size_t size = item->m_size;
     item->m_magic = 0;
-    my_free(item, MYF(0));
+    free(item);
     assert(m_used >= size);
     m_used -= size;
   }

=== modified file 'storage/ndb/test/ndbapi/testSystemRestart.cpp'
--- a/storage/ndb/test/ndbapi/testSystemRestart.cpp	2011-02-18 18:40:25 +0000
+++ b/storage/ndb/test/ndbapi/testSystemRestart.cpp	2011-06-10 12:50:28 +0000
@@ -35,6 +35,14 @@ int runLoadTable(NDBT_Context* ctx, NDBT
   return NDBT_OK;
 }
 
+int
+clearOldBackups(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NdbBackup backup(GETNDB(step)->getNodeId());
+  backup.clearOldBackups();
+  return NDBT_OK;
+}
+
 #define CHECK(b) if (!(b)) { \
   g_err << "ERR: "<< step->getName() \
          << " failed on line " << __LINE__ << endl; \
@@ -2594,6 +2602,7 @@ TESTCASE("SR_DD_1", "")
 {
   TC_PROPERTY("ALL", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runStopper);
   STEP(runSR_DD_1);
   FINALIZER(runClearTable);
@@ -2601,6 +2610,7 @@ TESTCASE("SR_DD_1", "")
 TESTCASE("SR_DD_1b", "")
 {
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runSR_DD_1);
   FINALIZER(runClearTable);
 }
@@ -2609,6 +2619,7 @@ TESTCASE("SR_DD_1_LCP", "")
   TC_PROPERTY("ALL", 1);
   TC_PROPERTY("LCP", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runStopper);
   STEP(runSR_DD_1);
   FINALIZER(runClearTable);
@@ -2617,6 +2628,7 @@ TESTCASE("SR_DD_1b_LCP", "")
 {
   TC_PROPERTY("LCP", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runSR_DD_1);
   FINALIZER(runClearTable);
 }
@@ -2624,6 +2636,7 @@ TESTCASE("SR_DD_2", "")
 {
   TC_PROPERTY("ALL", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runStopper);
   STEP(runSR_DD_2);
   FINALIZER(runClearTable);
@@ -2631,6 +2644,7 @@ TESTCASE("SR_DD_2", "")
 TESTCASE("SR_DD_2b", "")
 {
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runSR_DD_2);
   FINALIZER(runClearTable);
 }
@@ -2639,6 +2653,7 @@ TESTCASE("SR_DD_2_LCP", "")
   TC_PROPERTY("ALL", 1);
   TC_PROPERTY("LCP", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runStopper);
   STEP(runSR_DD_2);
   FINALIZER(runClearTable);
@@ -2647,6 +2662,7 @@ TESTCASE("SR_DD_2b_LCP", "")
 {
   TC_PROPERTY("LCP", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runSR_DD_2);
   FINALIZER(runClearTable);
 }
@@ -2654,6 +2670,7 @@ TESTCASE("SR_DD_3", "")
 {
   TC_PROPERTY("ALL", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runStopper);
   STEP(runSR_DD_3);
   FINALIZER(runClearTable);
@@ -2661,6 +2678,7 @@ TESTCASE("SR_DD_3", "")
 TESTCASE("SR_DD_3b", "")
 {
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runSR_DD_3);
   FINALIZER(runClearTable);
 }
@@ -2669,6 +2687,7 @@ TESTCASE("SR_DD_3_LCP", "")
   TC_PROPERTY("ALL", 1);
   TC_PROPERTY("LCP", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runStopper);
   STEP(runSR_DD_3);
   FINALIZER(runClearTable);
@@ -2677,6 +2696,7 @@ TESTCASE("SR_DD_3b_LCP", "")
 {
   TC_PROPERTY("LCP", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runSR_DD_3);
   FINALIZER(runClearTable);
 }

=== modified file 'storage/ndb/test/src/NdbBackup.cpp'
--- a/storage/ndb/test/src/NdbBackup.cpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/src/NdbBackup.cpp	2011-06-10 12:50:28 +0000
@@ -64,7 +64,7 @@ NdbBackup::clearOldBackups()
      * Clear old backup files
      */ 
     BaseString tmp;
-    tmp.assfmt("ssh -v %s rm -rf %s/BACKUP", host, path);
+    tmp.assfmt("ssh %s rm -rf %s/BACKUP", host, path);
   
     ndbout << "buf: "<< tmp.c_str() <<endl;
     int res = system(tmp.c_str());  
@@ -107,6 +107,7 @@ loop:
     {
       NdbSleep_SecSleep(3);
       _backup_id += 100;
+      user_backup_id += 100;
       goto loop;
     }
     

No bundle (reason: revision is a merge (you can force generation of a bundle with env var BZR_FORCE_BUNDLE=1)).