List:Commits« Previous MessageNext Message »
From:Ole John Aske Date:June 15 2011 10:40am
Subject:bzr commit into mysql-5.1-telco-7.0-spj-scan-vs-scan branch
(ole.john.aske:3503)
View as plain text  
#At file:///net/fimafeng09/export/home/tmp/oleja/mysql/mysql-5.1-telco-7.0-spj-scan-scan/ based on revid:ole.john.aske@stripped

 3503 Ole John Aske	2011-06-15 [merge]
      merge from telco-7.0 -> spj-scan-scan

    modified:
      mysql-test/suite/ndb_rpl/my.cnf
      mysql-test/suite/ndb_rpl/t/ndb_rpl_break_3_chain.cnf
      mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.cnf
      mysql-test/suite/ndb_team/my.cnf
      mysql-test/suite/rpl_ndb/my.cnf
      sql/ha_ndbcluster.cc
      sql/ha_ndbcluster.h
      sql/sql_class.cc
      sql/sql_class.h
      storage/ndb/src/common/portlib/CMakeLists.txt
      storage/ndb/src/common/util/ndbzio.c
      storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
      storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
      storage/ndb/src/kernel/vm/NdbSeqLock.hpp
      storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
      storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp
      storage/ndb/src/ndbapi/ndberror.c
      storage/ndb/test/ndbapi/testSystemRestart.cpp
      storage/ndb/test/run-test/autotest-boot.sh
      storage/ndb/test/src/NdbBackup.cpp
=== modified file 'mysql-test/suite/ndb_rpl/my.cnf'
--- a/mysql-test/suite/ndb_rpl/my.cnf	2011-05-13 07:40:50 +0000
+++ b/mysql-test/suite/ndb_rpl/my.cnf	2011-06-08 19:25:29 +0000
@@ -60,7 +60,6 @@ relay-log=                    slave-rela
 # Cluster only supports row format
 binlog-format=                 row
 
-init-rpl-role=                slave
 log-slave-updates
 master-retry-count=           10
 
@@ -83,8 +82,6 @@ skip-slave-start
 # test results will vary, thus a relative path is used.
 slave-load-tmpdir=            ../../../tmp
 
-rpl-recovery-rank=            @mysqld.1.slave.server-id
-
 [ENV]
 NDB_CONNECTSTRING=            @mysql_cluster.1.ndb_connectstring
 MASTER_MYPORT=                @mysqld.1.1.port

=== modified file 'mysql-test/suite/ndb_rpl/t/ndb_rpl_break_3_chain.cnf'
--- a/mysql-test/suite/ndb_rpl/t/ndb_rpl_break_3_chain.cnf	2011-05-13 07:40:50 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_break_3_chain.cnf	2011-06-08 19:25:29 +0000
@@ -66,8 +66,6 @@ default-storage-engine=myisam
 # test results will vary, thus a relative path is used.
 slave-load-tmpdir=            ../../../tmp
 
-rpl-recovery-rank=            @mysqld.1.cluster2.server-id
-
 [mysqld.1.cluster3]
 log-bin=                      cluster3-bin
 relay-log=                    cluster3-relay-bin
@@ -83,8 +81,6 @@ default-storage-engine=myisam
 # test results will vary, thus a relative path is used.
 slave-load-tmpdir=            ../../../tmp
 
-rpl-recovery-rank=            @mysqld.1.cluster3.server-id
-
 [ENV]
 SERVER_MYPORT_1=              @mysqld.1.cluster1.port
 SERVER_MYPORT_2=              @mysqld.1.cluster2.port

=== modified file 'mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.cnf'
--- a/mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.cnf	2011-05-13 07:40:50 +0000
+++ b/mysql-test/suite/ndb_rpl/t/ndb_rpl_multi_binlog_update.cnf	2011-06-08 19:25:29 +0000
@@ -58,7 +58,6 @@ binlog_format=row
 [mysqld.1.slave]
 # Note no binlog on this slave
 server-id= 4
-init-rpl-role= slave
 skip-slave-start
 loose-skip-innodb
 slave-load-tmpdir= ../../../tmp
@@ -69,7 +68,6 @@ ndb_connectstring= @mysql_cluster.slave.
 [mysqld.2.slave]
 # Note binlog on this slave, but not logging slave updates
 server-id= 5
-init-rpl-role= slave
 skip-slave-start
 loose-skip-innodb
 slave-load-tmpdir= ../../../tmp
@@ -82,7 +80,6 @@ binlog_format=row
 [mysqld.3.slave]
 # Note binlog on this slave, with slave updates logged
 server-id= 6
-init-rpl-role= slave
 skip-slave-start
 loose-skip-innodb
 slave-load-tmpdir= ../../../tmp

=== modified file 'mysql-test/suite/ndb_team/my.cnf'
--- a/mysql-test/suite/ndb_team/my.cnf	2011-04-15 09:31:03 +0000
+++ b/mysql-test/suite/ndb_team/my.cnf	2011-06-08 19:25:29 +0000
@@ -50,7 +50,6 @@ master-connect-retry=         1
 log-bin=                      slave-bin
 relay-log=                    slave-relay-bin
 
-init-rpl-role=                slave
 log-slave-updates
 master-retry-count=           10
 
@@ -68,9 +67,6 @@ skip-slave-start
 # test results will vary, thus a relative path is used.
 slave-load-tmpdir=            ../../../tmp
 
-rpl-recovery-rank=            @mysqld.1.slave.server-id
-
-
 [ENV]
 NDB_CONNECTSTRING=            @mysql_cluster.1.ndb_connectstring
 MASTER_MYPORT=                @mysqld.1.1.port

=== modified file 'mysql-test/suite/rpl_ndb/my.cnf'
--- a/mysql-test/suite/rpl_ndb/my.cnf	2011-04-26 09:28:41 +0000
+++ b/mysql-test/suite/rpl_ndb/my.cnf	2011-06-08 19:25:29 +0000
@@ -60,7 +60,6 @@ relay-log=                    slave-rela
 # Cluster only supports row format
 binlog-format=                 row
 
-init-rpl-role=                slave
 log-slave-updates
 master-retry-count=           10
 
@@ -83,8 +82,6 @@ skip-slave-start
 # test results will vary, thus a relative path is used.
 slave-load-tmpdir=            ../../../tmp
 
-rpl-recovery-rank=            @mysqld.1.slave.server-id
-
 [ENV]
 NDB_CONNECTSTRING=            @mysql_cluster.1.ndb_connectstring
 MASTER_MYPORT=                @mysqld.1.1.port

=== modified file 'sql/ha_ndbcluster.cc'
--- a/sql/ha_ndbcluster.cc	2011-06-07 12:19:47 +0000
+++ b/sql/ha_ndbcluster.cc	2011-06-15 10:40:26 +0000
@@ -5587,26 +5587,79 @@ int ha_ndbcluster::bulk_update_row(const
 
 int ha_ndbcluster::exec_bulk_update(uint *dup_key_found)
 {
+  NdbTransaction* trans= m_thd_ndb->trans;
   DBUG_ENTER("ha_ndbcluster::exec_bulk_update");
   *dup_key_found= 0;
-  if (m_thd_ndb->m_unsent_bytes &&
-      !thd_allow_batch(table->in_use) &&
-      (!m_thd_ndb->m_handler ||
-       m_blobs_pending))
+
+  // m_handler must be NULL or point to _this_ handler instance
+  assert(m_thd_ndb->m_handler == NULL || m_thd_ndb->m_handler == this);
+
+  if (m_thd_ndb->m_handler &&
+      m_read_before_write_removal_possible)
   {
+    /*
+      This is an autocommit involving only one table and rbwr is on
+
+      Commit the autocommit transaction early(before the usual place
+      in ndbcluster_commit) in order to:
+      1) save one round trip, "no-commit+commit" converted to "commit"
+      2) return the correct number of updated and affected rows
+         to the update loop(which will ask handler in rbwr mode)
+    */
+    DBUG_PRINT("info", ("committing auto-commit+rbwr early"));
     uint ignore_count= 0;
-    if (execute_no_commit(m_thd_ndb, m_thd_ndb->trans,
-                          m_ignore_no_key || m_read_before_write_removal_used,
-                          &ignore_count) != 0)
+    const int ignore_error= 1;
+    if (execute_commit(m_thd_ndb, trans,
+                       m_thd_ndb->m_force_send, ignore_error,
+                       &ignore_count) != 0)
     {
       no_uncommitted_rows_execute_failure();
-      DBUG_RETURN(ndb_err(m_thd_ndb->trans));
+      DBUG_RETURN(ndb_err(trans));
     }
+    DBUG_PRINT("info", ("ignore_count: %u", ignore_count));
     assert(m_rows_changed >= ignore_count);
     assert(m_rows_updated >= ignore_count);
     m_rows_changed-= ignore_count;
     m_rows_updated-= ignore_count;
+    DBUG_RETURN(0);
+  }
+
+  if (m_thd_ndb->m_unsent_bytes == 0)
+  {
+    DBUG_PRINT("exit", ("skip execute - no unsent bytes"));
+    DBUG_RETURN(0);
+  }
+
+  if (thd_allow_batch(table->in_use))
+  {
+    /*
+      Turned on by @@transaction_allow_batching=ON
+      or implicitly by slave exec thread
+    */
+    DBUG_PRINT("exit", ("skip execute - transaction_allow_batching is ON"));
+    DBUG_RETURN(0);
+  }
+
+  if (m_thd_ndb->m_handler &&
+      !m_blobs_pending)
+  {
+    // Execute at commit time(in 'ndbcluster_commit') to save a round trip
+    DBUG_PRINT("exit", ("skip execute - simple autocommit"));
+    DBUG_RETURN(0);
   }
+
+  uint ignore_count= 0;
+  if (execute_no_commit(m_thd_ndb, trans,
+                        m_ignore_no_key || m_read_before_write_removal_used,
+                        &ignore_count) != 0)
+  {
+    no_uncommitted_rows_execute_failure();
+    DBUG_RETURN(ndb_err(trans));
+  }
+  assert(m_rows_changed >= ignore_count);
+  assert(m_rows_updated >= ignore_count);
+  m_rows_changed-= ignore_count;
+  m_rows_updated-= ignore_count;
   DBUG_RETURN(0);
 }
 
@@ -5942,25 +5995,76 @@ bool ha_ndbcluster::start_bulk_delete()
 
 int ha_ndbcluster::end_bulk_delete()
 {
+  NdbTransaction* trans= m_thd_ndb->trans;
   DBUG_ENTER("end_bulk_delete");
   assert(m_is_bulk_delete); // Don't allow end() without start()
-  if (m_thd_ndb->m_unsent_bytes &&
-      !thd_allow_batch(table->in_use) &&
-      !m_thd_ndb->m_handler)
+  m_is_bulk_delete = false;
+
+  // m_handler must be NULL or point to _this_ handler instance
+  assert(m_thd_ndb->m_handler == NULL || m_thd_ndb->m_handler == this);
+
+  if (m_thd_ndb->m_handler &&
+      m_read_before_write_removal_possible)
   {
+    /*
+      This is an autocommit involving only one table and rbwr is on
+
+      Commit the autocommit transaction early(before the usual place
+      in ndbcluster_commit) in order to:
+      1) save one round trip, "no-commit+commit" converted to "commit"
+      2) return the correct number of updated and affected rows
+         to the delete loop(which will ask handler in rbwr mode)
+    */
+    DBUG_PRINT("info", ("committing auto-commit+rbwr early"));
     uint ignore_count= 0;
-    if (execute_no_commit(m_thd_ndb, m_thd_ndb->trans,
-                          m_ignore_no_key || m_read_before_write_removal_used,
-                          &ignore_count) != 0)
+    const int ignore_error= 1;
+    if (execute_commit(m_thd_ndb, trans,
+                       m_thd_ndb->m_force_send, ignore_error,
+                       &ignore_count) != 0)
     {
       no_uncommitted_rows_execute_failure();
-      m_is_bulk_delete = false;
-      DBUG_RETURN(ndb_err(m_thd_ndb->trans));
+      DBUG_RETURN(ndb_err(trans));
     }
+    DBUG_PRINT("info", ("ignore_count: %u", ignore_count));
     assert(m_rows_deleted >= ignore_count);
     m_rows_deleted-= ignore_count;
+    DBUG_RETURN(0);
   }
-  m_is_bulk_delete = false;
+
+  if (m_thd_ndb->m_unsent_bytes == 0)
+  {
+    DBUG_PRINT("exit", ("skip execute - no unsent bytes"));
+    DBUG_RETURN(0);
+  }
+
+  if (thd_allow_batch(table->in_use))
+  {
+    /*
+      Turned on by @@transaction_allow_batching=ON
+      or implicitly by slave exec thread
+    */
+    DBUG_PRINT("exit", ("skip execute - transaction_allow_batching is ON"));
+    DBUG_RETURN(0);
+  }
+
+  if (m_thd_ndb->m_handler)
+  {
+    // Execute at commit time(in 'ndbcluster_commit') to save a round trip
+    DBUG_PRINT("exit", ("skip execute - simple autocommit"));
+    DBUG_RETURN(0);
+  }
+
+  uint ignore_count= 0;
+  if (execute_no_commit(m_thd_ndb, trans,
+                        m_ignore_no_key || m_read_before_write_removal_used,
+                        &ignore_count) != 0)
+  {
+    no_uncommitted_rows_execute_failure();
+    DBUG_RETURN(ndb_err(trans));
+  }
+
+  assert(m_rows_deleted >= ignore_count);
+  m_rows_deleted-= ignore_count;
   DBUG_RETURN(0);
 }
 
@@ -8121,58 +8225,18 @@ int ndbcluster_commit(handlerton *hton, 
     if (thd_ndb->m_handler &&
         thd_ndb->m_handler->m_read_before_write_removal_possible)
     {
-#ifndef NDB_WITHOUT_READ_BEFORE_WRITE_REMOVAL
-      /* Autocommit with read-before-write removal
-       * Some operations in this autocommitted statement have not
-       * yet been executed
-       * They will be executed here as part of commit, and the results
-       * (rowcount, message) sent back to the client will then be modified 
-       * according to how the execution went.
-       * This saves a single roundtrip in the autocommit case
-       */
-      uint ignore_count= 0;
-      res= execute_commit(thd_ndb, trans, THDVAR(thd, force_send),
-                          TRUE, &ignore_count);
-      if (!res && ignore_count)
-      {
-        DBUG_PRINT("info", ("AutoCommit + RBW removal, ignore_count=%u",
-                            ignore_count));
-        /* We have some rows to ignore, modify recorded results,
-         * regenerate result message as required.
-         */
-        thd->row_count_func-= ignore_count;
-
-        ha_rows affected= 0;
-        char buff[ STRING_BUFFER_USUAL_SIZE ];
-        const char* msg= NULL;
-        if (thd->lex->sql_command == SQLCOM_DELETE)
-        {
-          assert(thd_ndb->m_handler->m_rows_deleted >= ignore_count);
-          affected= (thd_ndb->m_handler->m_rows_deleted-= ignore_count);
-        }
-        else
-        {
-          DBUG_PRINT("info", ("Update : message was %s", 
-                              thd->main_da.message()));
-          assert(thd_ndb->m_handler->m_rows_updated >= ignore_count);
-          affected= (thd_ndb->m_handler->m_rows_updated-= ignore_count);
-          /* For update in this scenario, we set found and changed to be 
-           * the same as affected
-           * Regenerate the update message
-           */
-          sprintf(buff, ER(ER_UPDATE_INFO), (ulong)affected, (ulong)affected,
-                  (ulong) thd->cuted_fields);
-          msg= buff;
-          DBUG_PRINT("info", ("Update : message changed to %s",
-                              msg));
-        }
-
-        /* Modify execution result + optionally message */
-        thd->main_da.modify_affected_rows(affected, msg);
+      /*
+        This is an autocommit involving only one table and
+        rbwr is on, thus the transaction has already been
+        committed in exec_bulk_update() or end_bulk_delete()
+      */
+      DBUG_PRINT("info", ("autocommit+rbwr, transaction already committed"));
+      if (trans->commitStatus() != NdbTransaction::Committed)
+      {
+        sql_print_error("found uncommitted autocommit+rbwr transaction, "
+                        "commit status: %d", trans->commitStatus());
+        abort();
       }
-#else
-      abort(); // Should never come here without rbwr support
-#endif
     }
     else
       res= execute_commit(thd_ndb, trans, THDVAR(thd, force_send), FALSE);
@@ -9463,6 +9527,20 @@ int ha_ndbcluster::create(const char *na
     goto abort_return;
   }
 
+  // Save the table level storage media setting
+  switch(create_info->storage_media)
+  {
+    case HA_SM_DISK:
+      tab.setStorageType(NdbDictionary::Column::StorageTypeDisk);
+      break;
+    case HA_SM_DEFAULT:
+      tab.setStorageType(NdbDictionary::Column::StorageTypeDefault);
+      break;
+    case HA_SM_MEMORY:
+      tab.setStorageType(NdbDictionary::Column::StorageTypeMemory);
+      break;
+  }
+
   DBUG_PRINT("info", ("Table %s is %s stored with tablespace %s",
                       m_tabname,
                       (use_disk) ? "disk" : "memory",
@@ -11571,10 +11649,11 @@ int ndbcluster_find_all_files(THD *thd)
   DBUG_RETURN(-(skipped + unhandled));
 }
 
-int ndbcluster_find_files(handlerton *hton, THD *thd,
-                          const char *db,
-                          const char *path,
-                          const char *wild, bool dir, List<LEX_STRING> *files)
+
+static int
+ndbcluster_find_files(handlerton *hton, THD *thd,
+                      const char *db, const char *path,
+                      const char *wild, bool dir, List<LEX_STRING> *files)
 {
   DBUG_ENTER("ndbcluster_find_files");
   DBUG_PRINT("enter", ("db: %s", db));
@@ -11940,7 +12019,7 @@ static int ndbcluster_init(void *p)
     ndbcluster_binlog_init_handlerton();
     h->flags=            HTON_CAN_RECREATE | HTON_TEMPORARY_NOT_SUPPORTED;
     h->discover=         ndbcluster_discover;
-    h->find_files= ndbcluster_find_files;
+    h->find_files=       ndbcluster_find_files;
     h->table_exists_in_engine= ndbcluster_table_exists_in_engine;
     h->make_pushed_join= ndbcluster_make_pushed_join;
   }

=== modified file 'sql/ha_ndbcluster.h'
--- a/sql/ha_ndbcluster.h	2011-05-04 05:33:14 +0000
+++ b/sql/ha_ndbcluster.h	2011-06-15 10:40:26 +0000
@@ -964,8 +964,6 @@ private:
 
 int ndbcluster_discover(THD* thd, const char* dbname, const char* name,
                         const void** frmblob, uint* frmlen);
-int ndbcluster_find_files(THD *thd,const char *db,const char *path,
-                          const char *wild, bool dir, List<LEX_STRING> *files);
 int ndbcluster_table_exists_in_engine(THD* thd,
                                       const char *db, const char *name);
 void ndbcluster_print_error(int error, const NdbOperation *error_op);

=== modified file 'sql/sql_class.cc'
--- a/sql/sql_class.cc	2011-04-08 11:06:53 +0000
+++ b/sql/sql_class.cc	2011-06-09 09:20:47 +0000
@@ -583,24 +583,6 @@ Diagnostics_area::set_error_status(THD *
   m_status= DA_ERROR;
 }
 
-/**
- * modify_affected_rows
- * Modify the number of affected rows, and optionally the 
- * message in the Diagnostics area
- */
-void
-Diagnostics_area::modify_affected_rows(ha_rows new_affected_rows,
-                                       const char* new_message)
-{
-  DBUG_ASSERT(is_set());
-  DBUG_ASSERT(m_status == DA_OK);
-  DBUG_ASSERT(can_overwrite_status);
-
-  m_affected_rows= new_affected_rows;
-  if (new_message)
-    strmake(m_message, new_message, sizeof(m_message) - 1);
-}
-
 
 /**
   Mark the diagnostics area as 'DISABLED'.

=== modified file 'sql/sql_class.h'
--- a/sql/sql_class.h	2011-04-10 17:32:41 +0000
+++ b/sql/sql_class.h	2011-06-15 10:40:26 +0000
@@ -1163,9 +1163,6 @@ public:
   void set_eof_status(THD *thd);
   void set_error_status(THD *thd, uint sql_errno_arg, const char *message_arg);
 
-  /* Modify affected rows count and optionally message */
-  void modify_affected_rows(ha_rows new_affected_rows, const char *new_message= 0);
-
   void disable_status();
 
   void reset_diagnostics_area();

=== modified file 'storage/ndb/src/common/portlib/CMakeLists.txt'
--- a/storage/ndb/src/common/portlib/CMakeLists.txt	2011-05-11 12:23:24 +0000
+++ b/storage/ndb/src/common/portlib/CMakeLists.txt	2011-05-25 06:52:33 +0000
@@ -40,5 +40,7 @@ TARGET_LINK_LIBRARIES(NdbDir-t ndbportli
 ADD_EXECUTABLE(NdbGetInAddr-t NdbTCP.cpp)
 SET_TARGET_PROPERTIES(NdbGetInAddr-t
                       PROPERTIES COMPILE_FLAGS "-DTEST_NDBGETINADDR")
+TARGET_LINK_LIBRARIES(NdbGetInAddr-t ${LIBSOCKET} ${LIBNSL})
+
 
 

=== modified file 'storage/ndb/src/common/util/ndbzio.c'
--- a/storage/ndb/src/common/util/ndbzio.c	2011-04-18 14:15:23 +0000
+++ b/storage/ndb/src/common/util/ndbzio.c	2011-05-24 14:34:41 +0000
@@ -167,9 +167,11 @@ void ndbz_free(voidpf opaque, voidpf add
 }
 
 #ifdef _WIN32
-/* Windows doesn't define ENOTSUP, define it same as Solaris */
+#ifndef ENOTSUP
+/* If Windows doesn't define ENOTSUP, define it same as Solaris */
 #define ENOTSUP 48
 #endif
+#endif
 
 #ifndef HAVE_POSIX_MEMALIGN
 static inline int posix_memalign(void **memptr, size_t alignment, size_t size)

=== modified file 'storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp'
--- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-06-07 12:19:47 +0000
+++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp	2011-06-15 10:40:26 +0000
@@ -14238,6 +14238,15 @@ retry:
       sltLogPartPtr.p->logTailMbyte = 
         sltLogFilePtr.p->logLastPrepRef[tsltMbyte] & 65535;
 
+      if (DEBUG_REDO)
+      {
+        ndbout_c("part: %u setLogTail(gci: %u): file: %u mb: %u",
+                 sltLogPartPtr.p->logPartNo, 
+                 keepGci,
+                 sltLogPartPtr.p->logTailFileNo,
+                 sltLogPartPtr.p->logTailMbyte);
+      }
+
       bool tailmoved = !(ToldTailFileNo == sltLogPartPtr.p->logTailFileNo &&
                          ToldTailMByte == sltLogPartPtr.p->logTailMbyte);
 
@@ -16169,11 +16178,28 @@ void Dblqh::writeFileDescriptor(Signal* 
   arrGuard(logFilePtr.p->currentMbyte, clogFileSize);
   if (DEBUG_REDO)
   {
-    ndbout_c("part: %u file: %u setting logMaxGciCompleted[%u] = %u",
-             logPartPtr.p->logPartNo,
-             logFilePtr.p->fileNo,
-             logFilePtr.p->currentMbyte,
-             logPartPtr.p->logPartNewestCompletedGCI);
+    printf("part: %u file: %u setting logMaxGciCompleted[%u] = %u logMaxGciStarted[%u]: %u lastPrepRef[%u]: ",
+           logPartPtr.p->logPartNo,
+           logFilePtr.p->fileNo,
+           logFilePtr.p->currentMbyte,
+           logPartPtr.p->logPartNewestCompletedGCI,
+           logFilePtr.p->currentMbyte,
+           cnewestGci,
+           logFilePtr.p->currentMbyte);
+    if (logPartPtr.p->firstLogTcrec == RNIL)
+    {
+      ndbout_c("file: %u mb: %u (RNIL)",
+               logFilePtr.p->fileNo,
+               logFilePtr.p->currentMbyte);
+    }
+    else
+    {
+      wfdTcConnectptr.i = logPartPtr.p->firstLogTcrec;
+      ptrCheckGuard(wfdTcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+      ndbout_c("file: %u mb: %u",
+               wfdTcConnectptr.p->logStartFileNo,
+               wfdTcConnectptr.p->logStartPageNo >> ZTWOLOG_NO_PAGES_IN_MBYTE);
+    }
   }
   logFilePtr.p->logMaxGciCompleted[logFilePtr.p->currentMbyte] = 
     logPartPtr.p->logPartNewestCompletedGCI;
@@ -16346,10 +16372,11 @@ void Dblqh::writeSinglePage(Signal* sign
 
   if (DEBUG_REDO)
   {
-    ndbout_c("writeSingle 1 page at part: %u file: %u pos: %u",
+    ndbout_c("writeSingle 1 page at part: %u file: %u page: %u (mb: %u)",
              logPartPtr.p->logPartNo,
              logFilePtr.p->fileNo,
-             pageNo);
+             pageNo,
+             pageNo >> ZTWOLOG_NO_PAGES_IN_MBYTE);
   }
 }//Dblqh::writeSinglePage()
 
@@ -16452,8 +16479,10 @@ void Dblqh::readSrLastMbyteLab(Signal* s
       logPartPtr.p->lastMbyte = logFilePtr.p->currentMbyte - 1;
       if (DEBUG_REDO)
       {
-        ndbout_c("readSrLastMbyteLab part: %u lastMbyte: %u",
-                 logPartPtr.p->logPartNo, logPartPtr.p->lastMbyte);
+        ndbout_c("readSrLastMbyteLab part: %u file: %u lastMbyte: %u",
+                 logPartPtr.p->logPartNo, 
+                 logFilePtr.p->fileNo,
+                 logPartPtr.p->lastMbyte);
       }
     }//if
   }//if
@@ -17503,6 +17532,17 @@ void Dblqh::execSrCompletedLab(Signal* s
         systemErrorLab(signal, __LINE__);
         return;
       }//if
+
+      if (DEBUG_REDO)
+      {
+        ndbout_c("part: %u srLogLimits SR_FOURTH_PHASE %u-%u (file: %u mb: %u)",
+                 logPartPtr.p->logPartNo, 
+                 logPartPtr.p->logStartGci,
+                 logPartPtr.p->logLastGci,
+                 logPartPtr.p->lastLogfile,
+                 logPartPtr.p->lastMbyte);
+      }
+
       signal->theData[0] = ZSR_LOG_LIMITS;
       signal->theData[1] = logPartPtr.i;
       signal->theData[2] = logPartPtr.p->lastLogfile;
@@ -17689,6 +17729,15 @@ void Dblqh::srGciLimits(Signal* signal) 
     jam();
     ptrAss(logPartPtr, logPartRecord);
     logPartPtr.p->logExecState = LogPartRecord::LES_SEARCH_STOP;
+    if (DEBUG_REDO)
+    {
+      ndbout_c("part: %u srLogLimits (srGciLimits) %u-%u (file: %u mb: %u)",
+               logPartPtr.p->logPartNo, 
+               logPartPtr.p->logStartGci,
+               logPartPtr.p->logLastGci,
+               logPartPtr.p->lastLogfile,
+               logPartPtr.p->lastMbyte);
+    }
     signal->theData[0] = ZSR_LOG_LIMITS;
     signal->theData[1] = logPartPtr.i;
     signal->theData[2] = logPartPtr.p->lastLogfile;
@@ -17720,22 +17769,34 @@ void Dblqh::srLogLimits(Signal* signal) 
    * ----------------------------------------------------------------------- */
   while(true) {
     ndbrequire(tmbyte < clogFileSize);
-    if (logPartPtr.p->logExecState == LogPartRecord::LES_SEARCH_STOP) {
-      if (logFilePtr.p->logMaxGciCompleted[tmbyte] <= logPartPtr.p->logLastGci) {
+    if (logPartPtr.p->logExecState == LogPartRecord::LES_SEARCH_STOP)
+    {
+      if (logFilePtr.p->logMaxGciCompleted[tmbyte] <= logPartPtr.p->logLastGci)
+      {
         jam();
-      /* --------------------------------------------------------------------
-       *  WE ARE STEPPING BACKWARDS FROM MBYTE TO MBYTE. THIS IS THE FIRST 
-       *  MBYTE WHICH IS TO BE INCLUDED IN THE LOG EXECUTION. THE STOP GCI 
-       *  HAS NOT BEEN COMPLETED BEFORE THIS MBYTE. THUS THIS MBYTE HAVE 
-       *  TO BE EXECUTED.
-       * ------------------------------------------------------------------- */
+        /* --------------------------------------------------------------------
+         *  WE ARE STEPPING BACKWARDS FROM MBYTE TO MBYTE. THIS IS THE FIRST 
+         *  MBYTE WHICH IS TO BE INCLUDED IN THE LOG EXECUTION. THE STOP GCI 
+         *  HAS NOT BEEN COMPLETED BEFORE THIS MBYTE. THUS THIS MBYTE HAVE 
+         *  TO BE EXECUTED.
+         * ------------------------------------------------------------------ */
         logPartPtr.p->stopLogfile = logFilePtr.i;
         logPartPtr.p->stopMbyte = tmbyte;
         logPartPtr.p->logExecState = LogPartRecord::LES_SEARCH_START;
+        if (DEBUG_REDO)
+        {
+          ndbout_c("part: %u srLogLimits found stop pos file: %u mb: %u logMaxGciCompleted[tmbyte]: %u (lastGci: %u)",
+                   logPartPtr.p->logPartNo,
+                   logFilePtr.p->fileNo,
+                   tmbyte,
+                   logFilePtr.p->logMaxGciCompleted[tmbyte],
+                   logPartPtr.p->logLastGci);
+        }
       }//if
       else if (DEBUG_REDO)
       {
-        ndbout_c("SKIP part: %u file: %u mb: %u logMaxGciCompleted: %u >= %u",
+        ndbout_c("SEARCH STOP SKIP part: %u file: %u mb: %u "
+                 "logMaxGciCompleted: %u > %u",
                  logPartPtr.p->logPartNo,
                  logFilePtr.p->fileNo,
                  tmbyte,
@@ -17743,28 +17804,53 @@ void Dblqh::srLogLimits(Signal* signal) 
                  logPartPtr.p->logLastGci);
       }
     }//if
-  /* ------------------------------------------------------------------------
-   *  WHEN WE HAVEN'T FOUND THE STOP MBYTE IT IS NOT NECESSARY TO LOOK FOR THE
-   *  START MBYTE. THE REASON IS THE FOLLOWING LOGIC CHAIN: 
-   *    MAX_GCI_STARTED >= MAX_GCI_COMPLETED >= LAST_GCI >= START_GCI
-   *  THUS MAX_GCI_STARTED >= START_GCI. THUS MAX_GCI_STARTED < START_GCI CAN
-   *  NOT BE TRUE AS WE WILL CHECK OTHERWISE.
-   * ----------------------------------------------------------------------- */
-    if (logPartPtr.p->logExecState == LogPartRecord::LES_SEARCH_START) {
-      if (logFilePtr.p->logMaxGciStarted[tmbyte] < logPartPtr.p->logStartGci) {
+    /* ------------------------------------------------------------------------
+     *  WHEN WE HAVEN'T FOUND THE STOP MBYTE IT IS NOT NECESSARY TO LOOK FOR THE
+     *  START MBYTE. THE REASON IS THE FOLLOWING LOGIC CHAIN: 
+     *    MAX_GCI_STARTED >= MAX_GCI_COMPLETED >= LAST_GCI >= START_GCI
+     *  THUS MAX_GCI_STARTED >= START_GCI. THUS MAX_GCI_STARTED < START_GCI CAN
+     *  NOT BE TRUE AS WE WILL CHECK OTHERWISE.
+     * ---------------------------------------------------------------------- */
+    if (logPartPtr.p->logExecState == LogPartRecord::LES_SEARCH_START)
+    {
+      if (logFilePtr.p->logMaxGciStarted[tmbyte] < logPartPtr.p->logStartGci)
+      {
         jam();
-      /* --------------------------------------------------------------------
-       *  WE HAVE NOW FOUND THE START OF THE EXECUTION OF THE LOG. 
-       *  WE STILL HAVE TO MOVE IT BACKWARDS TO ALSO INCLUDE THE 
-       *  PREPARE RECORDS WHICH WERE STARTED IN A PREVIOUS MBYTE.
-       * ------------------------------------------------------------------- */
+        /* --------------------------------------------------------------------
+         *  WE HAVE NOW FOUND THE START OF THE EXECUTION OF THE LOG. 
+         *  WE STILL HAVE TO MOVE IT BACKWARDS TO ALSO INCLUDE THE 
+         *  PREPARE RECORDS WHICH WERE STARTED IN A PREVIOUS MBYTE.
+         * ------------------------------------------------------------------ */
+        if (DEBUG_REDO)
+        {
+          ndbout_c("part: %u srLogLimits found start pos file: %u mb: %u logMaxGciStarted[tmbyte]: %u (startGci: %u)",
+                   logPartPtr.p->logPartNo,
+                   logFilePtr.p->fileNo,
+                   tmbyte,
+                   logFilePtr.p->logMaxGciStarted[tmbyte],
+                   logPartPtr.p->logStartGci);
+          ndbout_c("part: %u srLogLimits lastPrepRef => file: %u mb: %u",
+                   logPartPtr.p->logPartNo, 
+                   logFilePtr.p->logLastPrepRef[tmbyte] >> 16,
+                   logFilePtr.p->logLastPrepRef[tmbyte] & 65535);
+        }
         tlastPrepRef = logFilePtr.p->logLastPrepRef[tmbyte];
         logPartPtr.p->startMbyte = tlastPrepRef & 65535;
         LogFileRecordPtr locLogFilePtr;
         findLogfile(signal, tlastPrepRef >> 16, logPartPtr, &locLogFilePtr);
         logPartPtr.p->startLogfile = locLogFilePtr.i;
         logPartPtr.p->logExecState = LogPartRecord::LES_EXEC_LOG;
-      }//if
+      }
+      else if (DEBUG_REDO)
+      {
+        ndbout_c("SEARCH START SKIP part: %u file: %u mb: %u "
+                 "logMaxGciStarted: %u >= %u",
+                 logPartPtr.p->logPartNo,
+                 logFilePtr.p->fileNo,
+                 tmbyte,
+                 logFilePtr.p->logMaxGciStarted[tmbyte],
+                 logPartPtr.p->logStartGci);
+      }
     }//if
     if (logPartPtr.p->logExecState != LogPartRecord::LES_EXEC_LOG) {
       if (tmbyte == 0) {
@@ -18250,11 +18336,12 @@ void Dblqh::execSr(Signal* signal) 
       logWord = readLogword(signal);
       if (DEBUG_REDO)
       {
-        ndbout_c("found gci: %u part: %u file: %u page: %u",
+        ndbout_c("found gci: %u part: %u file: %u page: %u (mb: %u)",
                  logWord,
                  logPartPtr.p->logPartNo,
                  logFilePtr.p->fileNo,
-                 logFilePtr.p->currentFilepage);
+                 logFilePtr.p->currentFilepage,
+                 logFilePtr.p->currentFilepage >> ZTWOLOG_NO_PAGES_IN_MBYTE);
       }
       if (logWord == logPartPtr.p->logLastGci)
       {
@@ -18868,6 +18955,30 @@ stepNext_2:
     {
       jam();
       logPartPtr.p->invalidatePageNo = logPartPtr.p->headPageNo;
+
+      if (! ((cstartType == NodeState::ST_INITIAL_START) ||
+             (cstartType == NodeState::ST_INITIAL_NODE_RESTART)))
+      {
+        jam();
+        if (logFilePtr.i == logPartPtr.p->lastLogfile)
+        {
+          jam();
+          Uint32 lastMbytePageNo =
+            logPartPtr.p->lastMbyte << ZTWOLOG_NO_PAGES_IN_MBYTE;
+          if (logPartPtr.p->invalidatePageNo < lastMbytePageNo)
+          {
+            jam();
+            if (DEBUG_REDO)
+            {
+              ndbout_c("readFileInInvalidate part: %u step: %u moving invalidatePageNo from %u to %u (lastMbyte)",
+                       logPartPtr.p->logPartNo, stepNext,
+                       logPartPtr.p->invalidatePageNo,
+                       lastMbytePageNo);
+            }
+            logPartPtr.p->invalidatePageNo = lastMbytePageNo;
+          }
+        }
+      }
       readFileInInvalidate(signal, 1);
       return;
     }
@@ -19879,11 +19990,12 @@ void Dblqh::completedLogPage(Signal* sig
 
   if (DEBUG_REDO)
   {
-    ndbout_c("writing %d pages at part: %u file: %u pos: %u",
+    ndbout_c("writing %d pages at part: %u file: %u page: %u (mb: %u)",
              twlpNoPages,
              logPartPtr.p->logPartNo,
              logFilePtr.p->fileNo,
-             logFilePtr.p->filePosition);
+             logFilePtr.p->filePosition,
+             logFilePtr.p->filePosition >> ZTWOLOG_NO_PAGES_IN_MBYTE);
   }
 
   if (twlpType == ZNORMAL) {
@@ -21089,11 +21201,12 @@ void Dblqh::readExecLog(Signal* signal) 
 
   if (DEBUG_REDO)
   {
-    ndbout_c("readExecLog %u page at part: %u file: %u pos: %u",
+    ndbout_c("readExecLog %u page at part: %u file: %u page: %u (mb: %u)",
              lfoPtr.p->noPagesRw,
              logPartPtr.p->logPartNo,
              logFilePtr.p->fileNo,
-             logPartPtr.p->execSrStartPageNo);
+             logPartPtr.p->execSrStartPageNo,
+             logPartPtr.p->execSrStartPageNo >> ZTWOLOG_NO_PAGES_IN_MBYTE);
   }
 }//Dblqh::readExecLog()
 
@@ -21160,11 +21273,12 @@ void Dblqh::readExecSr(Signal* signal) 
 
   if (DEBUG_REDO)
   {
-    ndbout_c("readExecSr %u page at part: %u file: %u pos: %u",
+    ndbout_c("readExecSr %u page at part: %u file: %u page: %u (mb: %u)",
              8,
              logPartPtr.p->logPartNo,
              logFilePtr.p->fileNo,
-             tresPageid);
+             tresPageid,
+             tresPageid >> ZTWOLOG_NO_PAGES_IN_MBYTE);
   }
 }//Dblqh::readExecSr()
 
@@ -21322,10 +21436,11 @@ void Dblqh::readSinglePage(Signal* signa
 
   if (DEBUG_REDO)
   {
-    ndbout_c("readSinglePage 1 page at part: %u file: %u pos: %u",
+    ndbout_c("readSinglePage 1 page at part: %u file: %u page: %u (mb: %u)",
              logPartPtr.p->logPartNo,
              logFilePtr.p->fileNo,
-             pageNo);
+             pageNo,
+             pageNo >> ZTWOLOG_NO_PAGES_IN_MBYTE);
   }
 }//Dblqh::readSinglePage()
 
@@ -21854,11 +21969,12 @@ void Dblqh::writeCompletedGciLog(Signal*
 
   if (DEBUG_REDO)
   {
-    ndbout_c("writeCompletedGciLog gci: %u part: %u file: %u page: %u",
+    ndbout_c("writeCompletedGciLog gci: %u part: %u file: %u page: %u (mb: %u)",
              cnewestCompletedGci,
              logPartPtr.p->logPartNo,
              logFilePtr.p->fileNo,
-             logFilePtr.p->currentFilepage);
+             logFilePtr.p->currentFilepage,
+             logFilePtr.p->currentFilepage >> ZTWOLOG_NO_PAGES_IN_MBYTE);
   }
 
   writeLogWord(signal, ZCOMPLETED_GCI_TYPE);
@@ -21904,10 +22020,11 @@ void Dblqh::writeDirty(Signal* signal, U
 
   if (DEBUG_REDO)
   {
-    ndbout_c("writeDirty 1 page at part: %u file: %u pos: %u",
+    ndbout_c("writeDirty 1 page at part: %u file: %u page: %u (mb: %u)",
              logPartPtr.p->logPartNo,
              logFilePtr.p->fileNo,
-             logPartPtr.p->prevFilepage);
+             logPartPtr.p->prevFilepage,
+             logPartPtr.p->prevFilepage >> ZTWOLOG_NO_PAGES_IN_MBYTE);
   }
 }//Dblqh::writeDirty()
 

=== modified file 'storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp'
--- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-06-07 12:19:47 +0000
+++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp	2011-06-15 10:40:26 +0000
@@ -3909,7 +3909,7 @@ void Qmgr::failReportLab(Signal* signal,
       msg = "Start timeout";
       break;
     case FailRep::ZHEARTBEAT_FAILURE:
-      msg = "Hearbeat failure";
+      msg = "Heartbeat failure";
       break;
     case FailRep::ZLINK_FAILURE:
       msg = "Connection failure";

=== modified file 'storage/ndb/src/kernel/vm/NdbSeqLock.hpp'
--- a/storage/ndb/src/kernel/vm/NdbSeqLock.hpp	2011-05-17 07:06:30 +0000
+++ b/storage/ndb/src/kernel/vm/NdbSeqLock.hpp	2011-06-10 12:17:51 +0000
@@ -86,7 +86,7 @@ struct NdbSeqLock
   void write_lock() {}
   void write_unlock() {}
 
-  Uint32 read_lock() {}
+  Uint32 read_lock() { return 0; }
   bool read_unlock(Uint32 val) const { return true;}
 };
 

=== modified file 'storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp	2011-05-25 14:31:47 +0000
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp	2011-06-13 10:38:49 +0000
@@ -596,6 +596,7 @@ NdbEventOperationImpl::execute_nolock()
     {
       switch(myDict->getNdbError().code){
       case 711:
+      case 763:
         // ignore;
         break;
       default:

=== modified file 'storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp'
--- a/storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp	2011-06-07 10:03:02 +0000
+++ b/storage/ndb/src/ndbapi/NdbIndexStatImpl.cpp	2011-06-07 13:28:40 +0000
@@ -2193,7 +2193,7 @@ NdbIndexStatImpl::MemDefault::mem_alloc(
   {
     size += 4 - size % 4;
   }
-  Item* item = (Item*)my_malloc(sizeof(Item) + size, MYF(0));
+  Item* item = (Item*)malloc(sizeof(Item) + size);
   if (item != 0)
   {
     item->m_magic = MemMagic;
@@ -2212,9 +2212,9 @@ NdbIndexStatImpl::MemDefault::mem_free(v
   {
     Item* item = (Item*)ptr - 1;
     assert(item->m_magic == MemMagic);
-    Uint32 size = item->m_size;
+    size_t size = item->m_size;
     item->m_magic = 0;
-    my_free(item, MYF(0));
+    free(item);
     assert(m_used >= size);
     m_used -= size;
   }

=== modified file 'storage/ndb/src/ndbapi/ndberror.c'
--- a/storage/ndb/src/ndbapi/ndberror.c	2011-06-07 12:19:47 +0000
+++ b/storage/ndb/src/ndbapi/ndberror.c	2011-06-15 10:40:26 +0000
@@ -317,7 +317,7 @@ ErrorBundle ErrorCodes[] = {
    */
   { 281,  HA_ERR_NO_CONNECTION, AE, "Operation not allowed due to cluster shutdown in progress" },
   { 299,  DMEC, AE, "Operation not allowed or aborted due to single user mode" },
-  { 763,  DMEC, AE, "Alter table requires cluster nodes to have exact same version" },
+  { 763,  DMEC, AE, "DDL is not supported with mixed data-node versions" },
   { 823,  DMEC, AE, "Too much attrinfo from application in tuple manager" },
   { 829,  DMEC, AE, "Corrupt data received for insert/update" },
   { 831,  DMEC, AE, "Too many nullable/bitfields in table definition" },

=== modified file 'storage/ndb/test/ndbapi/testSystemRestart.cpp'
--- a/storage/ndb/test/ndbapi/testSystemRestart.cpp	2011-02-18 18:40:25 +0000
+++ b/storage/ndb/test/ndbapi/testSystemRestart.cpp	2011-06-10 12:50:28 +0000
@@ -35,6 +35,14 @@ int runLoadTable(NDBT_Context* ctx, NDBT
   return NDBT_OK;
 }
 
+int
+clearOldBackups(NDBT_Context* ctx, NDBT_Step* step)
+{
+  NdbBackup backup(GETNDB(step)->getNodeId());
+  backup.clearOldBackups();
+  return NDBT_OK;
+}
+
 #define CHECK(b) if (!(b)) { \
   g_err << "ERR: "<< step->getName() \
          << " failed on line " << __LINE__ << endl; \
@@ -2594,6 +2602,7 @@ TESTCASE("SR_DD_1", "")
 {
   TC_PROPERTY("ALL", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runStopper);
   STEP(runSR_DD_1);
   FINALIZER(runClearTable);
@@ -2601,6 +2610,7 @@ TESTCASE("SR_DD_1", "")
 TESTCASE("SR_DD_1b", "")
 {
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runSR_DD_1);
   FINALIZER(runClearTable);
 }
@@ -2609,6 +2619,7 @@ TESTCASE("SR_DD_1_LCP", "")
   TC_PROPERTY("ALL", 1);
   TC_PROPERTY("LCP", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runStopper);
   STEP(runSR_DD_1);
   FINALIZER(runClearTable);
@@ -2617,6 +2628,7 @@ TESTCASE("SR_DD_1b_LCP", "")
 {
   TC_PROPERTY("LCP", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runSR_DD_1);
   FINALIZER(runClearTable);
 }
@@ -2624,6 +2636,7 @@ TESTCASE("SR_DD_2", "")
 {
   TC_PROPERTY("ALL", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runStopper);
   STEP(runSR_DD_2);
   FINALIZER(runClearTable);
@@ -2631,6 +2644,7 @@ TESTCASE("SR_DD_2", "")
 TESTCASE("SR_DD_2b", "")
 {
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runSR_DD_2);
   FINALIZER(runClearTable);
 }
@@ -2639,6 +2653,7 @@ TESTCASE("SR_DD_2_LCP", "")
   TC_PROPERTY("ALL", 1);
   TC_PROPERTY("LCP", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runStopper);
   STEP(runSR_DD_2);
   FINALIZER(runClearTable);
@@ -2647,6 +2662,7 @@ TESTCASE("SR_DD_2b_LCP", "")
 {
   TC_PROPERTY("LCP", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runSR_DD_2);
   FINALIZER(runClearTable);
 }
@@ -2654,6 +2670,7 @@ TESTCASE("SR_DD_3", "")
 {
   TC_PROPERTY("ALL", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runStopper);
   STEP(runSR_DD_3);
   FINALIZER(runClearTable);
@@ -2661,6 +2678,7 @@ TESTCASE("SR_DD_3", "")
 TESTCASE("SR_DD_3b", "")
 {
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runSR_DD_3);
   FINALIZER(runClearTable);
 }
@@ -2669,6 +2687,7 @@ TESTCASE("SR_DD_3_LCP", "")
   TC_PROPERTY("ALL", 1);
   TC_PROPERTY("LCP", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runStopper);
   STEP(runSR_DD_3);
   FINALIZER(runClearTable);
@@ -2677,6 +2696,7 @@ TESTCASE("SR_DD_3b_LCP", "")
 {
   TC_PROPERTY("LCP", 1);
   INITIALIZER(runWaitStarted);
+  INITIALIZER(clearOldBackups);
   STEP(runSR_DD_3);
   FINALIZER(runClearTable);
 }

=== modified file 'storage/ndb/test/run-test/autotest-boot.sh'
--- a/storage/ndb/test/run-test/autotest-boot.sh	2011-06-01 08:55:03 +0000
+++ b/storage/ndb/test/run-test/autotest-boot.sh	2011-06-10 15:38:50 +0000
@@ -243,9 +243,9 @@ fi
 if [ "$build" ]
 then
     rm -rf $install_dir
-    
-	if [ -z "$clone1" ]
-	then
+    p=`pwd`
+    if [ -z "$clone1" ]
+    then
         cd $dst_place0
         if [ `uname | grep -ic cygwin || true` -ne 0 ]
         then
@@ -255,18 +255,19 @@ then
             cmd /c devenv.com MySql.sln /Build RelWithDebInfo
             cmd /c devenv.com MySql.sln /Project INSTALL /Build
         else
-	        BUILD/compile-ndb-autotest --prefix=$install_dir0
-	        make install
-        fi
-	else
-	    cd $dst_place0
 	    BUILD/compile-ndb-autotest --prefix=$install_dir0
 	    make install
-	    
-	    cd $dst_place1
-	    BUILD/compile-ndb-autotest --prefix=$install_dir1
-	    make install
-	fi
+        fi
+    else
+	cd $dst_place0
+	BUILD/compile-ndb-autotest --prefix=$install_dir0
+	make install
+	
+	cd $dst_place1
+	BUILD/compile-ndb-autotest --prefix=$install_dir1
+	make install
+    fi
+    cd $p
 fi
 
 

=== modified file 'storage/ndb/test/src/NdbBackup.cpp'
--- a/storage/ndb/test/src/NdbBackup.cpp	2011-02-02 00:40:07 +0000
+++ b/storage/ndb/test/src/NdbBackup.cpp	2011-06-10 12:50:28 +0000
@@ -64,7 +64,7 @@ NdbBackup::clearOldBackups()
      * Clear old backup files
      */ 
     BaseString tmp;
-    tmp.assfmt("ssh -v %s rm -rf %s/BACKUP", host, path);
+    tmp.assfmt("ssh %s rm -rf %s/BACKUP", host, path);
   
     ndbout << "buf: "<< tmp.c_str() <<endl;
     int res = system(tmp.c_str());  
@@ -107,6 +107,7 @@ loop:
     {
       NdbSleep_SecSleep(3);
       _backup_id += 100;
+      user_backup_id += 100;
       goto loop;
     }
     

No bundle (reason: revision is a merge).
Thread
bzr commit into mysql-5.1-telco-7.0-spj-scan-vs-scan branch (ole.john.aske:3503) — Ole John Aske, 16 Jun