List: Commits
From: jonas oreland    Date: June 14 2011 6:27am
Subject: bzr commit into mysql-5.1-telco-7.0-llcp branch (jonas:3690)
#At file:///home/jonas/src/70-jonas/ based on revid:jonas@stripped

 3690 jonas oreland	2011-06-14 [merge]
      ndb - merge 70 to 70-lcp

    modified:
      mysql-test/suite/ndb_rpl/my.cnf
      mysql-test/suite/ndb_rpl/t/ndb_rpl_break_3_chain.cnf
      mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.cnf
      mysql-test/suite/ndb_team/my.cnf
      mysql-test/suite/rpl_ndb/my.cnf
      sql/ha_ndbcluster.cc
      sql/sql_class.cc
      sql/sql_class.h
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
      storage/ndb/src/kernel/vm/NdbSeqLock.hpp
      storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
      storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp
      storage/ndb/src/ndbapi/ndberror.c
      storage/ndb/test/ndbapi/testSystemRestart.cpp
      storage/ndb/test/run-test/autotest-boot.sh
      storage/ndb/test/src/NdbBackup.cpp
=== modified file 'mysql-test/suite/ndb_rpl/my.cnf'
--- a/mysql-test/suite/ndb_rpl/my.cnf	2011-05-13 07:40:50 +0000
+++ b/mysql-test/suite/ndb_rpl/my.cnf	2011-06-08 19:25:29 +0000
@@ -60,7 +60,6 @@ relay-log=                    slave-rela
 # Cluster only supports row format
 binlog-format=                 row
 
-init-rpl-role=                slave
 log-slave-updates
 master-retry-count=           10
 
@@ -83,8 +82,6 @@ skip-slave-start
 # test results will vary, thus a relative path is used.
 slave-load-tmpdir=            ../../../tmp
 
-rpl-recovery-rank=            @mysqld.1.slave.server-id
-
 [ENV]
 NDB_CONNECTSTRING=            @mysql_cluster.1.ndb_connectstring
 MASTER_MYPORT=                @mysqld.1.1.port

=== modified file 'mysql-test/suite/ndb_rpl/t/ndb_rpl_break_3_chain.cnf'
--- a/mysql-test/suite/ndb_rpl/t/ndb_rpl_break_3_chain.cnf	2011-05-13 07:40:50 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_break_3_chain.cnf	2011-06-08 19:25:29 +0000
@@ -66,8 +66,6 @@ default-storage-engine=myisam
 # test results will vary, thus a relative path is used.
 slave-load-tmpdir=            ../../../tmp
 
-rpl-recovery-rank=            @mysqld.1.cluster2.server-id
-
 [mysqld.1.cluster3]
 log-bin=                      cluster3-bin
 relay-log=                    cluster3-relay-bin
@@ -83,8 +81,6 @@ default-storage-engine=myisam
 # test results will vary, thus a relative path is used.
 slave-load-tmpdir=            ../../../tmp
 
-rpl-recovery-rank=            @mysqld.1.cluster3.server-id
-
 [ENV]
 SERVER_MYPORT_1=              @mysqld.1.cluster1.port
 SERVER_MYPORT_2=              @mysqld.1.cluster2.port

=== modified file 'mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.cnf'
--- a/mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.cnf	2011-05-13 07:40:50 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.cnf	2011-06-08 19:25:29 +0000
@@ -58,7 +58,6 @@ binlog_format=row
 [mysqld.1.slave]
 # Note no binlog on this slave
 server-id= 4
-init-rpl-role= slave
 skip-slave-start
 loose-skip-innodb
 slave-load-tmpdir= ../../../tmp
@@ -69,7 +68,6 @@ ndb_connectstring= @mysql_cluster.slave.
 [mysqld.2.slave]
 # Note binlog on this slave, but not logging slave updates
 server-id= 5
-init-rpl-role= slave
 skip-slave-start
 loose-skip-innodb
 slave-load-tmpdir= ../../../tmp
@@ -82,7 +80,6 @@ binlog_format=row
 [mysqld.3.slave]
 # Note binlog on this slave, with slave updates logged
 server-id= 6
-init-rpl-role= slave
 skip-slave-start
 loose-skip-innodb
 slave-load-tmpdir= ../../../tmp

=== modified file 'mysql-test/suite/ndb_team/my.cnf'
--- a/mysql-test/suite/ndb_team/my.cnf	2011-04-15 09:31:03 +0000
+++ b/mysql-test/suite/ndb_team/my.cnf	2011-06-08 19:25:29 +0000
@@ -50,7 +50,6 @@ master-connect-retry=         1
 log-bin=                      slave-bin
 relay-log=                    slave-relay-bin
 
-init-rpl-role=                slave
 log-slave-updates
 master-retry-count=           10
 
@@ -68,9 +67,6 @@ skip-slave-start
 # test results will vary, thus a relative path is used.
 slave-load-tmpdir=            ../../../tmp
 
-rpl-recovery-rank=            @mysqld.1.slave.server-id
-
-
 [ENV]
 NDB_CONNECTSTRING=            @mysql_cluster.1.ndb_connectstring
 MASTER_MYPORT=                @mysqld.1.1.port

=== modified file 'mysql-test/suite/rpl_ndb/my.cnf'
--- a/mysql-test/suite/rpl_ndb/my.cnf	2011-04-26 09:28:41 +0000
+++ b/mysql-test/suite/rpl_ndb/my.cnf	2011-06-08 19:25:29 +0000
@@ -60,7 +60,6 @@ relay-log=                    slave-rela
 # Cluster only supports row format
 binlog-format=                 row
 
-init-rpl-role=                slave
 log-slave-updates
 master-retry-count=           10
 
@@ -83,8 +82,6 @@ skip-slave-start
 # test results will vary, thus a relative path is used.
 slave-load-tmpdir=            ../../../tmp
 
-rpl-recovery-rank=            @mysqld.1.slave.server-id
-
 [ENV]
 NDB_CONNECTSTRING=            @mysql_cluster.1.ndb_connectstring
 MASTER_MYPORT=                @mysqld.1.1.port

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-06-07 12:36:05 +0000
+++ b/sql/ha_ndbcluster.cc	2011-06-09 09:20:47 +0000
@@ -4650,26 +4650,79 @@ int ha_ndbcluster::bulk_update_row(const
 
 int ha_ndbcluster::exec_bulk_update(uint *dup_key_found)
 {
+  NdbTransaction* trans= m_thd_ndb->trans;
   DBUG_ENTER("ha_ndbcluster::exec_bulk_update");
   *dup_key_found= 0;
-  if (m_thd_ndb->m_unsent_bytes &&
-      !thd_allow_batch(table->in_use) &&
-      (!m_thd_ndb->m_handler ||
-       m_blobs_pending))
+
+  // m_handler must be NULL or point to _this_ handler instance
+  assert(m_thd_ndb->m_handler == NULL || m_thd_ndb->m_handler == this);
+
+  if (m_thd_ndb->m_handler &&
+      m_read_before_write_removal_possible)
   {
+    /*
+      This is an autocommit involving only one table and rbwr is on
+
+      Commit the autocommit transaction early(before the usual place
+      in ndbcluster_commit) in order to:
+      1) save one round trip, "no-commit+commit" converted to "commit"
+      2) return the correct number of updated and affected rows
+         to the update loop(which will ask handler in rbwr mode)
+    */
+    DBUG_PRINT("info", ("committing auto-commit+rbwr early"));
     uint ignore_count= 0;
-    if (execute_no_commit(m_thd_ndb, m_thd_ndb->trans,
-                          m_ignore_no_key || m_read_before_write_removal_used,
-                          &ignore_count) != 0)
+    const int ignore_error= 1;
+    if (execute_commit(m_thd_ndb, trans,
+                       m_thd_ndb->m_force_send, ignore_error,
+                       &ignore_count) != 0)
     {
       no_uncommitted_rows_execute_failure();
-      DBUG_RETURN(ndb_err(m_thd_ndb->trans));
+      DBUG_RETURN(ndb_err(trans));
     }
+    DBUG_PRINT("info", ("ignore_count: %u", ignore_count));
     assert(m_rows_changed >= ignore_count);
     assert(m_rows_updated >= ignore_count);
     m_rows_changed-= ignore_count;
     m_rows_updated-= ignore_count;
+    DBUG_RETURN(0);
+  }
+
+  if (m_thd_ndb->m_unsent_bytes == 0)
+  {
+    DBUG_PRINT("exit", ("skip execute - no unsent bytes"));
+    DBUG_RETURN(0);
+  }
+
+  if (thd_allow_batch(table->in_use))
+  {
+    /*
+      Turned on by @@transaction_allow_batching=ON
+      or implicitly by slave exec thread
+    */
+    DBUG_PRINT("exit", ("skip execute - transaction_allow_batching is ON"));
+    DBUG_RETURN(0);
+  }
+
+  if (m_thd_ndb->m_handler &&
+      !m_blobs_pending)
+  {
+    // Execute at commit time(in 'ndbcluster_commit') to save a round trip
+    DBUG_PRINT("exit", ("skip execute - simple autocommit"));
+    DBUG_RETURN(0);
+  }
+
+  uint ignore_count= 0;
+  if (execute_no_commit(m_thd_ndb, trans,
+                        m_ignore_no_key || m_read_before_write_removal_used,
+                        &ignore_count) != 0)
+  {
+    no_uncommitted_rows_execute_failure();
+    DBUG_RETURN(ndb_err(trans));
   }
+  assert(m_rows_changed >= ignore_count);
+  assert(m_rows_updated >= ignore_count);
+  m_rows_changed-= ignore_count;
+  m_rows_updated-= ignore_count;
   DBUG_RETURN(0);
 }
 
@@ -5005,25 +5058,76 @@ bool ha_ndbcluster::start_bulk_delete()
 
 int ha_ndbcluster::end_bulk_delete()
 {
+  NdbTransaction* trans= m_thd_ndb->trans;
   DBUG_ENTER("end_bulk_delete");
   assert(m_is_bulk_delete); // Don't allow end() without start()
   m_is_bulk_delete = false;
 
-  if (m_thd_ndb->m_unsent_bytes &&
-      !thd_allow_batch(table->in_use) &&
-      !m_thd_ndb->m_handler)
+  // m_handler must be NULL or point to _this_ handler instance
+  assert(m_thd_ndb->m_handler == NULL || m_thd_ndb->m_handler == this);
+
+  if (m_thd_ndb->m_handler &&
+      m_read_before_write_removal_possible)
   {
+    /*
+      This is an autocommit involving only one table and rbwr is on
+
+      Commit the autocommit transaction early(before the usual place
+      in ndbcluster_commit) in order to:
+      1) save one round trip, "no-commit+commit" converted to "commit"
+      2) return the correct number of updated and affected rows
+         to the delete loop(which will ask handler in rbwr mode)
+    */
+    DBUG_PRINT("info", ("committing auto-commit+rbwr early"));
     uint ignore_count= 0;
-    if (execute_no_commit(m_thd_ndb, m_thd_ndb->trans,
-                          m_ignore_no_key || m_read_before_write_removal_used,
-                          &ignore_count) != 0)
+    const int ignore_error= 1;
+    if (execute_commit(m_thd_ndb, trans,
+                       m_thd_ndb->m_force_send, ignore_error,
+                       &ignore_count) != 0)
     {
       no_uncommitted_rows_execute_failure();
-      DBUG_RETURN(ndb_err(m_thd_ndb->trans));
+      DBUG_RETURN(ndb_err(trans));
     }
+    DBUG_PRINT("info", ("ignore_count: %u", ignore_count));
     assert(m_rows_deleted >= ignore_count);
     m_rows_deleted-= ignore_count;
+    DBUG_RETURN(0);
+  }
+
+  if (m_thd_ndb->m_unsent_bytes == 0)
+  {
+    DBUG_PRINT("exit", ("skip execute - no unsent bytes"));
+    DBUG_RETURN(0);
   }
+
+  if (thd_allow_batch(table->in_use))
+  {
+    /*
+      Turned on by @@transaction_allow_batching=ON
+      or implicitly by slave exec thread
+    */
+    DBUG_PRINT("exit", ("skip execute - transaction_allow_batching is ON"));
+    DBUG_RETURN(0);
+  }
+
+  if (m_thd_ndb->m_handler)
+  {
+    // Execute at commit time(in 'ndbcluster_commit') to save a round trip
+    DBUG_PRINT("exit", ("skip execute - simple autocommit"));
+    DBUG_RETURN(0);
+  }
+
+  uint ignore_count= 0;
+  if (execute_no_commit(m_thd_ndb, trans,
+                        m_ignore_no_key || m_read_before_write_removal_used,
+                        &ignore_count) != 0)
+  {
+    no_uncommitted_rows_execute_failure();
+    DBUG_RETURN(ndb_err(trans));
+  }
+
+  assert(m_rows_deleted >= ignore_count);
+  m_rows_deleted-= ignore_count;
   DBUG_RETURN(0);
 }
 
@@ -7097,58 +7201,18 @@ int ndbcluster_commit(handlerton *hton,
     if (thd_ndb->m_handler &&
         thd_ndb->m_handler->m_read_before_write_removal_possible)
     {
-#ifndef NDB_WITHOUT_READ_BEFORE_WRITE_REMOVAL
-      /* Autocommit with read-before-write removal
-       * Some operations in this autocommitted statement have not
-       * yet been executed
-       * They will be executed here as part of commit, and the results
-       * (rowcount, message) sent back to the client will then be modified 
-       * according to how the execution went.
-       * This saves a single roundtrip in the autocommit case
-       */
-      uint ignore_count= 0;
-      res= execute_commit(thd_ndb, trans, THDVAR(thd, force_send),
-                          TRUE, &ignore_count);
-      if (!res && ignore_count)
-      {
-        DBUG_PRINT("info", ("AutoCommit + RBW removal, ignore_count=%u",
-                            ignore_count));
-        /* We have some rows to ignore, modify recorded results,
-         * regenerate result message as required.
-         */
-        thd->row_count_func-= ignore_count;
-
-        ha_rows affected= 0;
-        char buff[ STRING_BUFFER_USUAL_SIZE ];
-        const char* msg= NULL;
-        if (thd->lex->sql_command == SQLCOM_DELETE)
-        {
-          assert(thd_ndb->m_handler->m_rows_deleted >= ignore_count);
-          affected= (thd_ndb->m_handler->m_rows_deleted-= ignore_count);
-        }
-        else
-        {
-          DBUG_PRINT("info", ("Update : message was %s", 
-                              thd->main_da.message()));
-          assert(thd_ndb->m_handler->m_rows_updated >= ignore_count);
-          affected= (thd_ndb->m_handler->m_rows_updated-= ignore_count);
-          /* For update in this scenario, we set found and changed to be 
-           * the same as affected
-           * Regenerate the update message
-           */
-          sprintf(buff, ER(ER_UPDATE_INFO), (ulong)affected, (ulong)affected,
-                  (ulong) thd->cuted_fields);
-          msg= buff;
-          DBUG_PRINT("info", ("Update : message changed to %s",
-                              msg));
-        }
-
-        /* Modify execution result + optionally message */
-        thd->main_da.modify_affected_rows(affected, msg);
+      /*
+        This is an autocommit involving only one table and
+        rbwr is on, thus the transaction has already been
+        committed in exec_bulk_update() or end_bulk_delete()
+      */
+      DBUG_PRINT("info", ("autocommit+rbwr, transaction already committed"));
+      if (trans->commitStatus() != NdbTransaction::Committed)
+      {
+        sql_print_error("found uncommitted autocommit+rbwr transaction, "
+                        "commit status: %d", trans->commitStatus());
+        abort();
       }
-#else
-      abort(); // Should never come here without rbwr support
-#endif
     }
     else
       res= execute_commit(thd_ndb, trans, THDVAR(thd, force_send), FALSE);
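
The restructured exec_bulk_update()/end_bulk_delete() above reduces to a small
decision ladder: commit early for a single-table autocommit with
read-before-write removal, skip execution when nothing is buffered, when
batching is allowed, or when a simple autocommit can be flushed later in
ndbcluster_commit(), and otherwise execute without committing. The standalone
restatement below is a sketch only; the parameter comments map to the handler
member state used in the hunks above, and end_bulk_delete() applies the same
ladder minus the blobs_pending check.

  // Sketch: the decision made by exec_bulk_update() after this patch.
  enum BulkExecAction {
    COMMIT_EARLY,      // autocommit + rbwr: commit here, saving one round trip
    SKIP_EXECUTE,      // nothing to do now, or deferred to ndbcluster_commit()
    EXECUTE_NO_COMMIT  // flush the batched operations, commit later
  };

  BulkExecAction chooseBulkExecAction(bool single_table_autocommit, // m_thd_ndb->m_handler != NULL
                                      bool rbwr_possible,           // m_read_before_write_removal_possible
                                      unsigned unsent_bytes,        // m_thd_ndb->m_unsent_bytes
                                      bool allow_batch,             // thd_allow_batch(table->in_use)
                                      bool blobs_pending)           // m_blobs_pending
  {
    if (single_table_autocommit && rbwr_possible)
      return COMMIT_EARLY;
    if (unsent_bytes == 0)
      return SKIP_EXECUTE;
    if (allow_batch)
      return SKIP_EXECUTE;
    if (single_table_autocommit && !blobs_pending)
      return SKIP_EXECUTE;    // simple autocommit: executed in ndbcluster_commit() instead
    return EXECUTE_NO_COMMIT;
  }

Because the COMMIT_EARLY branch has already committed the transaction by the
time ndbcluster_commit() runs, that function now only verifies
trans->commitStatus() == NdbTransaction::Committed in the autocommit+rbwr case
instead of regenerating the affected-rows message, which is why
Diagnostics_area::modify_affected_rows() can be removed below.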

=== modified file 'sql/sql_class.cc'
--- a/sql/sql_class.cc	2011-04-08 11:06:53 +0000
+++ b/sql/sql_class.cc	2011-06-09 09:20:47 +0000
@@ -583,24 +583,6 @@ Diagnostics_area::set_error_status(THD *
   m_status= DA_ERROR;
 }
 
-/**
- * modify_affected_rows
- * Modify the number of affected rows, and optionally the 
- * message in the Diagnostics area
- */
-void
-Diagnostics_area::modify_affected_rows(ha_rows new_affected_rows,
-                                       const char* new_message)
-{
-  DBUG_ASSERT(is_set());
-  DBUG_ASSERT(m_status == DA_OK);
-  DBUG_ASSERT(can_overwrite_status);
-
-  m_affected_rows= new_affected_rows;
-  if (new_message)
-    strmake(m_message, new_message, sizeof(m_message) - 1);
-}
-
 
 /**
   Mark the diagnostics area as 'DISABLED'.

=== modified file 'sql/sql_class.h'
--- a/sql/sql_class.h	2011-04-08 13:59:44 +0000
+++ b/sql/sql_class.h	2011-06-09 09:20:47 +0000
@@ -1163,9 +1163,6 @@ public:
   void set_eof_status(THD *thd);
   void set_error_status(THD *thd, uint sql_errno_arg, const char *message_arg);
 
-  /* Modify affected rows count and optionally message */
-  void modify_affected_rows(ha_rows new_affected_rows, const char *new_message= 0);
-
   void disable_status();
 
   void reset_diagnostics_area();

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-06-08 20:28:33 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-06-14 06:27:27 +0000
@@ -18956,6 +18956,30 @@ stepNext_2:
     {
       jam();
       logPartPtr.p->invalidatePageNo = logPartPtr.p->headPageNo;
+
+      if (! ((cstartType == NodeState::ST_INITIAL_START) ||
+             (cstartType == NodeState::ST_INITIAL_NODE_RESTART)))
+      {
+        jam();
+        if (logFilePtr.i == logPartPtr.p->lastLogfile)
+        {
+          jam();
+          Uint32 lastMbytePageNo =
+            logPartPtr.p->lastMbyte << ZTWOLOG_NO_PAGES_IN_MBYTE;
+          if (logPartPtr.p->invalidatePageNo < lastMbytePageNo)
+          {
+            jam();
+            if (DEBUG_REDO)
+            {
+              ndbout_c("readFileInInvalidate part: %u step: %u moving invalidatePageNo from %u to %u (lastMbyte)",
+                       logPartPtr.p->logPartNo, stepNext,
+                       logPartPtr.p->invalidatePageNo,
+                       lastMbytePageNo);
+            }
+            logPartPtr.p->invalidatePageNo = lastMbytePageNo;
+          }
+        }
+      }
       readFileInInvalidate(signal, 1);
       return;
     }
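
For restarts that are not initial starts, the block added above adjusts where
invalidation of the REDO log begins on the last log file: invalidatePageNo
starts out as headPageNo, and if that is lower than the first page of the last
written megabyte it is moved up to that megabyte boundary (as the DEBUG_REDO
trace records). The megabyte-to-page conversion is a plain shift; the sketch
below is illustration only and assumes ZTWOLOG_NO_PAGES_IN_MBYTE is 5
(32 pages per MByte), while the real constant is defined by Dblqh.

  typedef unsigned int Uint32;                 // stand-in for NDB's Uint32
  const Uint32 ZTWOLOG_NO_PAGES_IN_MBYTE = 5;  // assumed value, example only

  // The clamp performed by the new code, shown in isolation.
  void clampInvalidatePageNo(Uint32& invalidatePageNo, Uint32 lastMbyte)
  {
    const Uint32 lastMbytePageNo = lastMbyte << ZTWOLOG_NO_PAGES_IN_MBYTE;
    if (invalidatePageNo < lastMbytePageNo)
      invalidatePageNo = lastMbytePageNo;  // e.g. lastMbyte=3 -> never below page 96
  }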

=== modified file 'storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-05-31 12:28:59 +0000
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-06-13 06:14:32 +0000
@@ -3909,7 +3909,7 @@ void Qmgr::failReportLab(Signal* signal,
       msg = "Start timeout";
       break;
     case FailRep::ZHEARTBEAT_FAILURE:
-      msg = "Hearbeat failure";
+      msg = "Heartbeat failure";
       break;
     case FailRep::ZLINK_FAILURE:
       msg = "Connection failure";

=== modified file 'storage/ndb/src/kernel/vm/NdbSeqLock.hpp'
--- a/storage/ndb/src/kernel/vm/NdbSeqLock.hpp	2011-05-17 07:06:30 +0000
+++ b/storage/ndb/src/kernel/vm/NdbSeqLock.hpp	2011-06-10 12:17:51 +0000
@@ -86,7 +86,7 @@ struct NdbSeqLock
   void write_lock() {}
   void write_unlock() {}
 
-  Uint32 read_lock() {}
+  Uint32 read_lock() { return 0; }
   bool read_unlock(Uint32 val) const { return true;}
 };
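
The one-line NdbSeqLock change matters because, in this no-op variant,
read_lock() was declared to return Uint32 but had an empty body, so any caller
using its result relied on undefined behaviour; returning 0 gives
read_unlock(val) a well-defined argument. Readers of a seqlock-style API are
normally paired as in the sketch below; this is an assumed usage pattern, not
code from this patch.

  typedef unsigned int Uint32;          // stand-in for NDB's Uint32

  // Assumed optimistic-read pattern for a seqlock-style lock:
  // remember the sequence value, read the data, retry if a writer interfered.
  template <class Lock, class T>
  T seqlock_read(Lock& lock, const T& shared)
  {
    T copy;
    Uint32 val;
    do {
      val = lock.read_lock();            // the no-op variant now returns 0 here
      copy = shared;                     // read the protected data
    } while (!lock.read_unlock(val));    // the no-op variant always returns true
    return copy;
  }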
 

=== modified file 'storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp	2011-05-25 14:31:47 +0000
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp	2011-06-13 10:38:49 +0000
@@ -596,6 +596,7 @@ NdbEventOperationImpl::execute_nolock()
     {
       switch(myDict->getNdbError().code){
       case 711:
+      case 763:
         // ignore;
         break;
       default:

=== modified file 'storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp	2011-06-07 12:48:01 +0000
+++ b/storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp	2011-06-07 13:28:40 +0000
@@ -2193,7 +2193,7 @@ NdbIndexStatImpl::MemDefault::mem_alloc(
   {
     size += 4 - size % 4;
   }
-  Item* item = (Item*)my_malloc(sizeof(Item) + size, MYF(0));
+  Item* item = (Item*)malloc(sizeof(Item) + size);
   if (item != 0)
   {
     item->m_magic = MemMagic;
@@ -2214,7 +2214,7 @@ NdbIndexStatImpl::MemDefault::mem_free(v
     assert(item->m_magic == MemMagic);
     size_t size = item->m_size;
     item->m_magic = 0;
-    my_free(item, MYF(0));
+    free(item);
     assert(m_used >= size);
     m_used -= size;
   }
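
The allocator hunks above swap MySQL's my_malloc()/my_free() for plain
malloc()/free() in NdbIndexStatImpl's MemDefault, presumably so this NDB API
code does not depend on the mysys allocator; the surrounding magic/size
book-keeping is unchanged. The size adjustment just before the allocation is
ordinary round-up-to-4 alignment, shown in isolation below (the real code is
presumably guarded so the adjustment only applies when size is not already a
multiple of 4).

  #include <cstddef>

  // Round a requested size up to the next multiple of 4, as mem_alloc() does.
  static size_t round_up_to_4(size_t size)
  {
    if (size % 4 != 0)
      size += 4 - size % 4;   // e.g. 10 -> 12, while 12 stays 12
    return size;
  }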

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2011-06-06 12:18:27 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2011-06-13 10:38:49 +0000
@@ -317,7 +317,7 @@ ErrorBundle ErrorCodes[] = {
    */
   { 281,  HA_ERR_NO_CONNECTION, AE, "Operation not allowed due to cluster shutdown in progress" },
   { 299,  DMEC, AE, "Operation not allowed or aborted due to single user mode" },
-  { 763,  DMEC, AE, "Alter table requires cluster nodes to have exact same version" },
+  { 763,  DMEC, AE, "DDL is not supported with mixed data-node versions" },
   { 823,  DMEC, AE, "Too much attrinfo from application in tuple manager" },
   { 829,  DMEC, AE, "Corrupt data received for insert/update" },
   { 831,  DMEC, AE, "Too many nullable/bitfields in table definition" },

=== modified file 'storage/ndb/test/ndbapi/testSystemRestart.cpp'
--- a/storage/ndb/test/ndbapi/testSystemRestart.cpp	2011-02-18 18:40:25 +0000
+++ b/storage/ndb/test/ndbapi/testSystemRestart.cpp	2011-06-10 12:50:28 +0000
@@ -35,6 +35,14 @@ int runLoadTable(NDBT_Context* ctx, NDBT
   return NDBT_OK;
 }
 
+int
+clearOldBackups(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NdbBackup backup(GETNDB(step)->getNodeId());
+  backup.clearOldBackups();
+  return NDBT_OK;
+}
+
 #define CHECK(b) if (!(b)) { \
   g_err << "ERR: "<< step->getName() \
          << " failed on line " << __LINE__ << endl; \
@@ -2594,6 +2602,7 @@ TESTCASE("SR_DD_1", "")
 {
   TC_PROPERTY("ALL", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runStopper);
   STEP(runSR_DD_1);
   FINALIZER(runClearTable);
@@ -2601,6 +2610,7 @@ TESTCASE("SR_DD_1", "")
 TESTCASE("SR_DD_1b", "")
 {
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runSR_DD_1);
   FINALIZER(runClearTable);
 }
@@ -2609,6 +2619,7 @@ TESTCASE("SR_DD_1_LCP", "")
   TC_PROPERTY("ALL", 1);
   TC_PROPERTY("LCP", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runStopper);
   STEP(runSR_DD_1);
   FINALIZER(runClearTable);
@@ -2617,6 +2628,7 @@ TESTCASE("SR_DD_1b_LCP", "")
 {
   TC_PROPERTY("LCP", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runSR_DD_1);
   FINALIZER(runClearTable);
 }
@@ -2624,6 +2636,7 @@ TESTCASE("SR_DD_2", "")
 {
   TC_PROPERTY("ALL", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runStopper);
   STEP(runSR_DD_2);
   FINALIZER(runClearTable);
@@ -2631,6 +2644,7 @@ TESTCASE("SR_DD_2", "")
 TESTCASE("SR_DD_2b", "")
 {
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runSR_DD_2);
   FINALIZER(runClearTable);
 }
@@ -2639,6 +2653,7 @@ TESTCASE("SR_DD_2_LCP", "")
   TC_PROPERTY("ALL", 1);
   TC_PROPERTY("LCP", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runStopper);
   STEP(runSR_DD_2);
   FINALIZER(runClearTable);
@@ -2647,6 +2662,7 @@ TESTCASE("SR_DD_2b_LCP", "")
 {
   TC_PROPERTY("LCP", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runSR_DD_2);
   FINALIZER(runClearTable);
 }
@@ -2654,6 +2670,7 @@ TESTCASE("SR_DD_3", "")
 {
   TC_PROPERTY("ALL", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runStopper);
   STEP(runSR_DD_3);
   FINALIZER(runClearTable);
@@ -2661,6 +2678,7 @@ TESTCASE("SR_DD_3", "")
 TESTCASE("SR_DD_3b", "")
 {
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runSR_DD_3);
   FINALIZER(runClearTable);
 }
@@ -2669,6 +2687,7 @@ TESTCASE("SR_DD_3_LCP", "")
   TC_PROPERTY("ALL", 1);
   TC_PROPERTY("LCP", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runStopper);
   STEP(runSR_DD_3);
   FINALIZER(runClearTable);
@@ -2677,6 +2696,7 @@ TESTCASE("SR_DD_3b_LCP", "")
 {
   TC_PROPERTY("LCP", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runSR_DD_3);
   FINALIZER(runClearTable);
 }

=== modified file 'storage/ndb/test/run-test/autotest-boot.sh'
--- a/storage/ndb/test/run-test/autotest-boot.sh	2011-06-01 08:55:03 +0000
+++ b/storage/ndb/test/run-test/autotest-boot.sh	2011-06-10 15:38:50 +0000
@@ -243,9 +243,9 @@ fi
 if [ "$build" ]
 then
     rm -rf $install_dir
-    
-	if [ -z "$clone1" ]
-	then
+    p=`pwd`
+    if [ -z "$clone1" ]
+    then
         cd $dst_place0
         if [ `uname | grep -ic cygwin || true` -ne 0 ]
         then
@@ -255,18 +255,19 @@ then
             cmd /c devenv.com MySql.sln /Build RelWithDebInfo
             cmd /c devenv.com MySql.sln /Project INSTALL /Build
         else
-	        BUILD/compile-ndb-autotest --prefix=$install_dir0
-	        make install
-        fi
-	else
-	    cd $dst_place0
 	    BUILD/compile-ndb-autotest --prefix=$install_dir0
 	    make install
-	    
-	    cd $dst_place1
-	    BUILD/compile-ndb-autotest --prefix=$install_dir1
-	    make install
-	fi
+        fi
+    else
+	cd $dst_place0
+	BUILD/compile-ndb-autotest --prefix=$install_dir0
+	make install
+	
+	cd $dst_place1
+	BUILD/compile-ndb-autotest --prefix=$install_dir1
+	make install
+    fi
+    cd $p
 fi
 
 

=== modified file 'storage/ndb/test/src/NdbBackup.cpp'
--- a/storage/ndb/test/src/NdbBackup.cpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/src/NdbBackup.cpp	2011-06-10 12:50:28 +0000
@@ -64,7 +64,7 @@ NdbBackup::clearOldBackups()
      * Clear old backup files
      */ 
     BaseString tmp;
-    tmp.assfmt("ssh -v %s rm -rf %s/BACKUP", host, path);
+    tmp.assfmt("ssh %s rm -rf %s/BACKUP", host, path);
   
     ndbout << "buf: "<< tmp.c_str() <<endl;
     int res = system(tmp.c_str());  
@@ -107,6 +107,7 @@ loop:
     {
       NdbSleep_SecSleep(3);
       _backup_id += 100;
+      user_backup_id += 100;
       goto loop;
     }
     

No bundle (reason: revision is a merge (you can force generation of a bundle with env var BZR_FORCE_BUNDLE=1)).