MySQL Lists are EOL. Please join:

List: Commits « Previous Message | Next Message »
From: tomas  Date: April 23 2007 6:50pm
Subject:bk commit into 5.1 tree (tomas:1.2546)
View as plain text  
Below is the list of changes that have just been committed into a local
5.1 repository of tomas. When tomas does a push these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet@stripped, 2007-04-23 20:50:11+02:00, tomas@stripped +22 -0
  Merge whalegate.ndb.mysql.com:/home/tomas/mysql-5.1-single-user
  into  whalegate.ndb.mysql.com:/home/tomas/mysql-5.1-telco-merge
  MERGE: 1.2494.1.83

  BitKeeper/etc/ignore@stripped, 2007-04-23 20:45:17+02:00, tomas@stripped +0 -2
    auto-union
    MERGE: 1.276.1.13

  configure.in@stripped, 2007-04-23 20:50:08+02:00, tomas@stripped +0 -0
    SCCS merged
    MERGE: 1.434.1.22

  mysql-test/mysql-test-run.pl@stripped, 2007-04-23 20:45:22+02:00, tomas@stripped +0 -0
    Auto merged
    MERGE: 1.270.1.26

  mysql-test/r/rpl_ndb_basic.result@stripped, 2007-04-23 20:45:22+02:00, tomas@stripped +0 -0
    Auto merged
    MERGE: 1.7.1.3

  mysql-test/r/rpl_ndb_log.result@stripped, 2007-04-23 20:45:22+02:00, tomas@stripped +0 -0
    Auto merged
    MERGE: 1.18.1.7

  mysql-test/t/disabled.def@stripped, 2007-04-23 20:45:22+02:00, tomas@stripped +0 -0
    Auto merged
    MERGE: 1.243.1.4

  sql/ha_ndbcluster.cc@stripped, 2007-04-23 20:45:22+02:00, tomas@stripped +0 -0
    Auto merged
    MERGE: 1.408.1.31

  sql/ha_ndbcluster.h@stripped, 2007-04-23 20:45:22+02:00, tomas@stripped +0 -0
    Auto merged
    MERGE: 1.170.3.5

  sql/ha_ndbcluster_binlog.cc@stripped, 2007-04-23 20:45:22+02:00, tomas@stripped +0 -0
    Auto merged
    MERGE: 1.104.3.8

  sql/ha_ndbcluster_binlog.h@stripped, 2007-04-23 20:45:22+02:00, tomas@stripped +0 -0
    Auto merged
    MERGE: 1.22.2.2

  sql/item_func.cc@stripped, 2007-04-23 20:45:22+02:00, tomas@stripped +0 -0
    Auto merged
    MERGE: 1.353.1.26

  sql/log_event.cc@stripped, 2007-04-23 20:45:22+02:00, tomas@stripped +0 -0
    Auto merged
    MERGE: 1.271.1.5

  sql/mysqld.cc@stripped, 2007-04-23 20:45:23+02:00, tomas@stripped +0 -0
    Auto merged
    MERGE: 1.621.1.10

  sql/protocol.h@stripped, 2007-04-23 20:45:23+02:00, tomas@stripped +0 -0
    Auto merged
    MERGE: 1.39.1.1

  sql/slave.cc@stripped, 2007-04-23 20:45:23+02:00, tomas@stripped +0 -0
    Auto merged
    MERGE: 1.301.1.4

  storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp@stripped, 2007-04-23 20:45:23+02:00, tomas@stripped +0 -0
    Auto merged
    MERGE: 1.50.1.10

  storage/ndb/src/mgmclient/CommandInterpreter.cpp@stripped, 2007-04-23 20:45:23+02:00, tomas@stripped +0 -0
    Auto merged
    MERGE: 1.79.1.12

  storage/ndb/test/include/NdbRestarter.hpp@stripped, 2007-04-23 20:45:23+02:00, tomas@stripped +0 -0
    Auto merged
    MERGE: 1.11.1.3

  storage/ndb/test/ndbapi/testNodeRestart.cpp@stripped, 2007-04-23 20:45:23+02:00, tomas@stripped +0 -3
    Auto merged
    MERGE: 1.36.1.19

  storage/ndb/test/run-test/daily-basic-tests.txt@stripped, 2007-04-23 20:45:23+02:00, tomas@stripped +0 -4
    Auto merged
    MERGE: 1.61.1.19

  storage/ndb/test/tools/Makefile.am@stripped, 2007-04-23 20:45:23+02:00, tomas@stripped +0 -0
    Auto merged
    MERGE: 1.17.1.1

  storage/ndb/test/tools/listen.cpp@stripped, 2007-04-23 20:45:23+02:00, tomas@stripped +0 -0
    Auto merged
    MERGE: 1.6.1.1

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	tomas
# Host:	whalegate.ndb.mysql.com
# Root:	/home/tomas/mysql-5.1-telco-merge/RESYNC

--- 1.444/configure.in	2007-04-16 19:17:19 +02:00
+++ 1.445/configure.in	2007-04-23 20:50:08 +02:00
@@ -7,6 +7,9 @@
 AC_CANONICAL_SYSTEM
 # The Docs Makefile.am parses this line!
 # remember to also update version.c in ndb
+#
+# When changing major version number please also check switch statement
+# in mysqlbinlog::check_master_version().
 AM_INIT_AUTOMAKE(mysql, 5.1.18-ndb-6.2.1)
 AM_CONFIG_HEADER(config.h)
 
@@ -28,6 +31,9 @@
 MYSQL_BASE_VERSION=`echo $MYSQL_NO_DASH_VERSION | sed -e "s|\.[[^.]]*$||"`
 MYSQL_VERSION_ID=`echo $MYSQL_NO_DASH_VERSION | sed -e 's|[[^0-9.]].*$||;s|$|.|' | sed -e 's/[[^0-9.]]//g; s/\./  /g; s/ \([[0-9]]\) / 0\\1 /g; s/ //g'`
 
+# Add previous major version for debian package upgrade path
+MYSQL_PREVIOUS_BASE_VERSION=5.0
+
 # The port should be constant for a LONG time
 MYSQL_TCP_PORT_DEFAULT=3306
 MYSQL_UNIX_ADDR_DEFAULT="/tmp/mysql.sock"
@@ -57,6 +63,7 @@
 AC_SUBST(MYSQL_NO_DASH_VERSION)
 AC_SUBST(MYSQL_BASE_VERSION)
 AC_SUBST(MYSQL_VERSION_ID)
+AC_SUBST(MYSQL_PREVIOUS_BASE_VERSION)
 AC_SUBST(PROTOCOL_VERSION)
 AC_DEFINE_UNQUOTED([PROTOCOL_VERSION], [$PROTOCOL_VERSION],
                    [mysql client protocol version])
@@ -2553,7 +2560,7 @@
  tests/Makefile Docs/Makefile support-files/Makefile dnl
  support-files/MacOSX/Makefile support-files/RHEL4-SElinux/Makefile dnl
  mysql-test/Makefile dnl
- debian/Makefile dnl
+ debian/Makefile debian/defs.mk debian/control dnl
  mysql-test/ndb/Makefile netware/Makefile sql-bench/Makefile dnl
  include/mysql_version.h plugin/Makefile win/Makefile)
 

--- 1.364/sql/item_func.cc	2007-04-16 19:17:19 +02:00
+++ 1.365/sql/item_func.cc	2007-04-23 20:45:22 +02:00
@@ -22,6 +22,7 @@
 
 #include "mysql_priv.h"
 #include "slave.h"				// for wait_for_master_pos
+#include "rpl_mi.h"
 #include <m_ctype.h>
 #include <hash.h>
 #include <time.h>
@@ -5351,7 +5352,7 @@
     Security_context *save_secutiry_ctx;
     res= set_routine_security_ctx(thd, m_sp, false, &save_secutiry_ctx);
     if (!res)
-      sp_restore_security_context(thd, save_secutiry_ctx);
+      m_sp->m_security_ctx.restore_security_context(thd, save_secutiry_ctx);
     
 #endif /* ! NO_EMBEDDED_ACCESS_CHECKS */
   }

--- 1.274/sql/log_event.cc	2007-04-05 15:00:13 +02:00
+++ 1.275/sql/log_event.cc	2007-04-23 20:45:22 +02:00
@@ -22,8 +22,11 @@
 
 #include "mysql_priv.h"
 #include "slave.h"
+#include "rpl_rli.h"
+#include "rpl_mi.h"
 #include "rpl_filter.h"
 #include "rpl_utility.h"
+#include "rpl_record.h"
 #include <my_dir.h>
 #endif /* MYSQL_CLIENT */
 #include <base64.h>
@@ -31,6 +34,8 @@
 
 #define log_cs	&my_charset_latin1
 
+#define FLAGSTR(V,F) ((V)&(F)?#F" ":"")
+
 /*
   Cache that will automatically be written to a dedicated file on
   destruction.
@@ -547,49 +552,7 @@
     Matz: I don't think we will need this check with this refactoring.
   */
   if (rli)
-  {
-    /*
-      If in a transaction, and if the slave supports transactions, just
-      inc_event_relay_log_pos(). We only have to check for OPTION_BEGIN
-      (not OPTION_NOT_AUTOCOMMIT) as transactions are logged with
-      BEGIN/COMMIT, not with SET AUTOCOMMIT= .
-
-      CAUTION: opt_using_transactions means
-      innodb || bdb ; suppose the master supports InnoDB and BDB,
-      but the slave supports only BDB, problems
-      will arise:
-      - suppose an InnoDB table is created on the master,
-      - then it will be MyISAM on the slave
-      - but as opt_using_transactions is true, the slave will believe he
-      is transactional with the MyISAM table. And problems will come
-      when one does START SLAVE; STOP SLAVE; START SLAVE; (the slave
-      will resume at BEGIN whereas there has not been any rollback).
-      This is the problem of using opt_using_transactions instead of a
-      finer "does the slave support
-      _the_transactional_handler_used_on_the_master_".
-
-      More generally, we'll have problems when a query mixes a
-      transactional handler and MyISAM and STOP SLAVE is issued in the
-      middle of the "transaction". START SLAVE will resume at BEGIN
-      while the MyISAM table has already been updated.
-    */
-    if ((thd->options & OPTION_BEGIN) && opt_using_transactions)
-      rli->inc_event_relay_log_pos();
-    else
-    {
-      rli->inc_group_relay_log_pos(log_pos);
-      flush_relay_log_info(rli);
-      /*
-         Note that Rotate_log_event::do_apply_event() does not call
-         this function, so there is no chance that a fake rotate event
-         resets last_master_timestamp.  Note that we update without
-         mutex (probably ok - except in some very rare cases, only
-         consequence is that value may take some time to display in
-         Seconds_Behind_Master - not critical).
-      */
-      rli->last_master_timestamp= when;
-    }
-  }
+    rli->stmt_done(log_pos, when);
 
   return 0;                                   // Cannot fail currently
 }
@@ -1010,6 +973,15 @@
     ev = new Format_description_log_event(buf, event_len, description_event); 
     break;
 #if defined(HAVE_REPLICATION) 
+  case PRE_GA_WRITE_ROWS_EVENT:
+    ev = new Write_rows_log_event_old(buf, event_len, description_event);
+    break;
+  case PRE_GA_UPDATE_ROWS_EVENT:
+    ev = new Update_rows_log_event_old(buf, event_len, description_event);
+    break;
+  case PRE_GA_DELETE_ROWS_EVENT:
+    ev = new Delete_rows_log_event_old(buf, event_len, description_event);
+    break;
   case WRITE_ROWS_EVENT:
     ev = new Write_rows_log_event(buf, event_len, description_event);
     break;
@@ -1039,6 +1011,10 @@
     break;
   }
 
+  DBUG_PRINT("read_event", ("%s(type_code: %d; event_len: %d)",
+                            ev ? ev->get_type_str() : "<unknown>",
+                            buf[EVENT_TYPE_OFFSET],
+                            event_len));
   /*
     is_valid() are small event-specific sanity tests which are
     important; for example there are some my_malloc() in constructors
@@ -3593,17 +3569,6 @@
 }
 #endif
 
-/**
-   Helper function to detect if the event is inside a group.
- */
-#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
-static bool is_in_group(THD *const thd, RELAY_LOG_INFO *const rli)
-{
-  return (thd->options & OPTION_BEGIN) != 0 ||
-         (rli->last_event_start_time > 0);
-}
-#endif
-
 
 /*
   Rotate_log_event::do_apply_event()
@@ -3654,7 +3619,7 @@
     relay log, which shall not change the group positions.
   */
   if ((server_id != ::server_id || rli->replicate_same_server_id) &&
-      !is_in_group(thd, rli))
+      !rli->is_in_group())
   {
     DBUG_PRINT("info", ("old group_master_log_name: '%s'  "
                         "old group_master_log_pos: %lu",
@@ -3663,6 +3628,9 @@
     memcpy(rli->group_master_log_name, new_log_ident, ident_len+1);
     rli->notify_group_master_log_name_update();
     rli->group_master_log_pos= pos;
+    strmake(rli->group_relay_log_name, rli->event_relay_log_name,
+            sizeof(rli->group_relay_log_name) - 1);
+    rli->notify_group_relay_log_name_update();
     rli->group_relay_log_pos= rli->event_relay_log_pos;
     DBUG_PRINT("info", ("new group_master_log_name: '%s'  "
                         "new group_master_log_pos: %lu",
@@ -3818,6 +3786,12 @@
 #if defined(HAVE_REPLICATION)&& !defined(MYSQL_CLIENT)
 int Intvar_log_event::do_apply_event(RELAY_LOG_INFO const *rli)
 {
+  /*
+    We are now in a statement until the associated query log event has
+    been processed.
+   */
+  const_cast<RELAY_LOG_INFO*>(rli)->set_flag(RELAY_LOG_INFO::IN_STMT);
+
   switch (type) {
   case LAST_INSERT_ID_EVENT:
     thd->stmt_depends_on_first_successful_insert_id_in_prev_stmt= 1;
@@ -3918,6 +3892,12 @@
 #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
 int Rand_log_event::do_apply_event(RELAY_LOG_INFO const *rli)
 {
+  /*
+    We are now in a statement until the associated query log event has
+    been processed.
+   */
+  const_cast<RELAY_LOG_INFO*>(rli)->set_flag(RELAY_LOG_INFO::IN_STMT);
+
   thd->rand.seed1= (ulong) seed1;
   thd->rand.seed2= (ulong) seed2;
   return 0;
@@ -4312,6 +4292,12 @@
   double real_val;
   longlong int_val;
 
+  /*
+    We are now in a statement until the associated query log event has
+    been processed.
+   */
+  const_cast<RELAY_LOG_INFO*>(rli)->set_flag(RELAY_LOG_INFO::IN_STMT);
+
   if (is_null)
   {
     it= new Item_null();
@@ -5767,8 +5753,7 @@
 
 
 #ifndef MYSQL_CLIENT
-int Rows_log_event::do_add_row_data(byte *const row_data,
-                                    my_size_t const length)
+int Rows_log_event::do_add_row_data(byte *row_data, my_size_t length)
 {
   /*
     When the table has a primary key, we would probably want, by default, to
@@ -5826,163 +5811,6 @@
 #endif
 
 #if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
-/*
-  Unpack a row into table->record[0].
-  
-  SYNOPSIS
-    unpack_row()
-    rli     Relay log info
-    table   Table to unpack into
-    colcnt  Number of columns to read from record
-    row     Packed row data
-    cols    Pointer to columns data to fill in
-    row_end Pointer to variable that will hold the value of the
-            one-after-end position for the row
-    master_reclength
-             Pointer to variable that will be set to the length of the
-             record on the master side
-    rw_set   Pointer to bitmap that holds either the read_set or the
-             write_set of the table
-
-  DESCRIPTION
-
-      The function will always unpack into the table->record[0]
-      record.  This is because there are too many dependencies on
-      where the various member functions of Field and subclasses
-      expect to write.
-
-      The row is assumed to only consist of the fields for which the
-      bitset represented by 'arr' and 'bits'; the other parts of the
-      record are left alone.
-
-      At most 'colcnt' columns are read: if the table is larger than
-      that, the remaining fields are not filled in.
-
-  RETURN VALUE
-
-      Error code, or zero if no error. The following error codes can
-      be returned:
-
-      ER_NO_DEFAULT_FOR_FIELD
-        Returned if one of the fields existing on the slave but not on
-        the master does not have a default value (and isn't nullable)
- */
-static int
-unpack_row(RELAY_LOG_INFO const *rli,
-           TABLE *table, uint const colcnt,
-           char const *const row_data, MY_BITMAP const *cols,
-           char const **const row_end, ulong *const master_reclength,
-           MY_BITMAP* const rw_set, Log_event_type const event_type)
-{
-  DBUG_ENTER("unpack_row");
-  DBUG_ASSERT(row_data);
-  my_size_t const master_null_byte_count= (bitmap_bits_set(cols) + 7) / 8;
-  int error= 0;
-
-  char const *null_ptr= row_data;
-  char const *pack_ptr= row_data + master_null_byte_count;
-
-  bitmap_clear_all(rw_set);
-
-  empty_record(table);
-
-  Field **const begin_ptr = table->field;
-  Field **field_ptr;
-  Field **const end_ptr= begin_ptr + colcnt;
-
-  DBUG_ASSERT(null_ptr < row_data + master_null_byte_count);
-
-  // Mask to mask out the correct bit among the null bits
-  unsigned int null_mask= 1U;
-  // The "current" null bits
-  unsigned int null_bits= *null_ptr++;
-  for (field_ptr= begin_ptr ; field_ptr < end_ptr ; ++field_ptr)
-  {
-    Field *const f= *field_ptr;
-
-    /*
-      No need to bother about columns that does not exist: they have
-      gotten default values when being emptied above.
-     */
-    if (bitmap_is_set(cols, field_ptr -  begin_ptr))
-    {
-      if ((null_mask & 0xFF) == 0)
-      {
-        DBUG_ASSERT(null_ptr < row_data + master_null_byte_count);
-        null_mask= 1U;
-        null_bits= *null_ptr++;
-      }
-
-      DBUG_ASSERT(null_mask & 0xFF); // One of the 8 LSB should be set
-
-      /* Field...::unpack() cannot return 0 */
-      DBUG_ASSERT(pack_ptr != NULL);
-
-      if ((null_bits & null_mask) && f->maybe_null())
-        f->set_null();
-      else
-      {
-        f->set_notnull();
-
-        /*
-          We only unpack the field if it was non-null
-        */
-        pack_ptr= f->unpack(f->ptr, pack_ptr);
-      }
-
-      bitmap_set_bit(rw_set, f->field_index);
-      null_mask <<= 1;
-    }
-  }
-
-  /*
-    We should now have read all the null bytes, otherwise something is
-    really wrong.
-   */
-  DBUG_ASSERT(null_ptr == row_data + master_null_byte_count);
-
-  *row_end = pack_ptr;
-  if (master_reclength)
-  {
-    if (*field_ptr)
-      *master_reclength = (*field_ptr)->ptr - (char*) table->record[0];
-    else
-      *master_reclength = table->s->reclength;
-  }
-
-  /*
-    Set properties for remaining columns, if there are any. We let the
-    corresponding bit in the write_set be set, to write the value if
-    it was not there already. We iterate over all remaining columns,
-    even if there were an error, to get as many error messages as
-    possible.  We are still able to return a pointer to the next row,
-    so redo that.
-
-    This generation of error messages is only relevant when inserting
-    new rows.
-   */
-  for ( ; *field_ptr ; ++field_ptr)
-  {
-    uint32 const mask= NOT_NULL_FLAG | NO_DEFAULT_VALUE_FLAG;
-    Field *const f= *field_ptr;
-
-    if (event_type == WRITE_ROWS_EVENT &&
-        ((*field_ptr)->flags & mask) == mask)
-    {
-      slave_print_msg(ERROR_LEVEL, rli, ER_NO_DEFAULT_FOR_FIELD,
-                      "Field `%s` of table `%s`.`%s` "
-                      "has no default value and cannot be NULL",
-                      (*field_ptr)->field_name, table->s->db.str,
-                      table->s->table_name.str);
-      error = ER_NO_DEFAULT_FOR_FIELD;
-    }
-    else
-      f->set_default();
-  }
-
-  DBUG_RETURN(error);
-}
-
 int Rows_log_event::do_apply_event(RELAY_LOG_INFO const *rli)
 {
   DBUG_ENTER("Rows_log_event::do_apply_event(st_relay_log_info*)");
@@ -6186,6 +6014,17 @@
     /* A small test to verify that objects have consistent types */
     DBUG_ASSERT(sizeof(thd->options) == sizeof(OPTION_RELAXED_UNIQUE_CHECKS));
 
+    /*
+      Now we are in a statement and will stay in a statement until we
+      see a STMT_END_F.
+
+      We set this flag here, before actually applying any rows, in
+      case the SQL thread is stopped and we need to detect that we're
+      inside a statement and halting abruptly might cause problems
+      when restarting.
+     */
+    const_cast<RELAY_LOG_INFO*>(rli)->set_flag(RELAY_LOG_INFO::IN_STMT);
+
     error= do_before_row_operations(table);
     while (error == 0 && row_start < (const char*) m_rows_end)
     {
@@ -6258,6 +6097,45 @@
     DBUG_RETURN(error);
   }
 
+  /*
+    This code would ideally be placed in do_update_pos() instead, but
+    since we have no access to table there, we do the setting of
+    last_event_start_time here instead.
+  */
+  if (table && (table->s->primary_key == MAX_KEY) &&
+      !cache_stmt && get_flags(STMT_END_F) == RLE_NO_FLAGS)
+  {
+    /*
+      ------------ Temporary fix until WL#2975 is implemented ---------
+
+      This event is not the last one (no STMT_END_F). If we stop now
+      (in case of terminate_slave_thread()), how will we restart? We
+      have to restart from Table_map_log_event, but as this table is
+      not transactional, the rows already inserted will still be
+      present, and idempotency is not guaranteed (no PK) so we risk
+      that repeating leads to double insert. So we desperately try to
+      continue, hope we'll eventually leave this buggy situation (by
+      executing the final Rows_log_event). If we are in a hopeless
+      wait (reached end of last relay log and nothing gets appended
+      there), we timeout after one minute, and notify DBA about the
+      problem.  When WL#2975 is implemented, just remove the member
+      st_relay_log_info::last_event_start_time and all its occurences.
+    */
+    const_cast<RELAY_LOG_INFO*>(rli)->last_event_start_time= time(0);
+  }
+
+  DBUG_RETURN(0);
+}
+
+int
+Rows_log_event::do_update_pos(RELAY_LOG_INFO *rli)
+{
+  DBUG_ENTER("Rows_log_event::do_update_pos");
+  int error= 0;
+
+  DBUG_PRINT("info", ("flags: %s",
+                      get_flags(STMT_END_F) ? "STMT_END_F " : ""));
+
   if (get_flags(STMT_END_F))
   {
     /*
@@ -6276,6 +6154,7 @@
       replicate-ignore rules).
     */
     thd->binlog_flush_pending_rows_event(true);
+
     /*
       If this event is not in a transaction, the call below will, if some
       transactional storage engines are involved, commit the statement into
@@ -6286,6 +6165,7 @@
       binlog.
     */
     error= ha_autocommit_or_rollback(thd, 0);
+
     /*
       Now what if this is not a transactional engine? we still need to
       flush the pending event to the binlog; we did it with
@@ -6297,11 +6177,17 @@
     */
 
     thd->reset_current_stmt_binlog_row_based();
-    const_cast<RELAY_LOG_INFO*>(rli)->cleanup_context(thd, 0);
-
+    rli->cleanup_context(thd, 0);
     if (error == 0)
     {
       /*
+        Indicate that a statement is finished.
+        Step the group log position if we are not in a transaction,
+        otherwise increase the event log position.
+       */
+      rli->stmt_done(log_pos, when);
+
+      /*
         Clear any errors pushed in thd->net.last_err* if for example "no key
         found" (as this is allowed). This is a safety measure; apparently
         those errors (e.g. when executing a Delete_rows_log_event of a
@@ -6313,38 +6199,15 @@
     }
     else
       slave_print_msg(ERROR_LEVEL, rli, error,
-                      "Error in %s event: commit of row events failed, "
-                      "table `%s`.`%s`",
-                      get_type_str(), table->s->db.str, 
-                      table->s->table_name.str);
-    DBUG_RETURN(error);
+                      "Error in %s event: commit of row events failed",
+                      get_type_str());
   }
-
-  if (table && (table->s->primary_key == MAX_KEY) && !cache_stmt)
+  else
   {
-    /*
-      ------------ Temporary fix until WL#2975 is implemented ---------
-
-      This event is not the last one (no STMT_END_F). If we stop now
-      (in case of terminate_slave_thread()), how will we restart? We
-      have to restart from Table_map_log_event, but as this table is
-      not transactional, the rows already inserted will still be
-      present, and idempotency is not guaranteed (no PK) so we risk
-      that repeating leads to double insert. So we desperately try to
-      continue, hope we'll eventually leave this buggy situation (by
-      executing the final Rows_log_event). If we are in a hopeless
-      wait (reached end of last relay log and nothing gets appended
-      there), we timeout after one minute, and notify DBA about the
-      problem.  When WL#2975 is implemented, just remove the member
-      st_relay_log_info::last_event_start_time and all its occurences.
-    */
-    const_cast<RELAY_LOG_INFO*>(rli)->last_event_start_time= time(0);
+    rli->inc_event_relay_log_pos();
   }
 
-  DBUG_ASSERT(error == 0);
-  thd->clear_error();
-
-  DBUG_RETURN(0);
+  DBUG_RETURN(error);
 }
 
 #endif /* !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION) */
@@ -6428,7 +6291,9 @@
   {
     bool const last_stmt_event= get_flags(STMT_END_F);
     print_header(head, print_event_info, !last_stmt_event);
-    my_b_printf(head, "\t%s: table id %lu\n", name, m_table_id);
+    my_b_printf(head, "\t%s: table id %lu%s\n",
+                name, m_table_id,
+                last_stmt_event ? " flags: STMT_END_F" : "");
     print_base64(body, print_event_info, !last_stmt_event);
   }
 

--- 1.634/sql/mysqld.cc	2007-04-23 17:00:57 +02:00
+++ 1.635/sql/mysqld.cc	2007-04-23 20:45:23 +02:00
@@ -421,7 +421,8 @@
 char opt_ndb_constrbuf[1024];
 unsigned opt_ndb_constrbuf_len= 0;
 my_bool	opt_ndb_shm, opt_ndb_optimized_node_selection;
-ulong opt_ndb_cache_check_time;
+ulong opt_ndb_cache_check_time, opt_ndb_wait_connected;
+ulong opt_ndb_cluster_connection_pool;
 const char *opt_ndb_mgmd;
 ulong opt_ndb_nodeid;
 ulong ndb_extra_logging;
@@ -4917,6 +4918,8 @@
   OPT_NDB_USE_EXACT_COUNT, OPT_NDB_USE_TRANSACTIONS,
   OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ,
   OPT_NDB_SHM, OPT_NDB_OPTIMIZED_NODE_SELECTION, OPT_NDB_CACHE_CHECK_TIME,
+  OPT_NDB_WAIT_CONNECTED,
+  OPT_NDB_CLUSTER_CONNECTION_POOL,
   OPT_NDB_MGMD, OPT_NDB_NODEID,
   OPT_NDB_DISTRIBUTION,
   OPT_NDB_INDEX_STAT_ENABLE,
@@ -5646,13 +5649,22 @@
    (gptr*) &global_system_variables.ndb_index_stat_enable,
    (gptr*) &max_system_variables.ndb_index_stat_enable,
    0, GET_BOOL, OPT_ARG, 0, 0, 1, 0, 0, 0},
-#endif
   {"ndb-use-copying-alter-table",
    OPT_NDB_USE_COPYING_ALTER_TABLE,
    "Force ndbcluster to always copy tables at alter table (should only be used if on-line alter table fails).",
    (gptr*) &global_system_variables.ndb_use_copying_alter_table,
    (gptr*) &global_system_variables.ndb_use_copying_alter_table,
    0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},  
+  { "ndb-wait-connected", OPT_NDB_WAIT_CONNECTED,
+    "Time (in seconds) for mysqld to wait for connection to cluster management and data nodes.",
+    (gptr*) &opt_ndb_wait_connected, (gptr*) &opt_ndb_wait_connected,
+    0, GET_ULONG, REQUIRED_ARG, 0, 0, LONG_TIMEOUT, 0, 0, 0},
+  { "ndb-cluster-connection-pool", OPT_NDB_CLUSTER_CONNECTION_POOL,
+    "Pool of cluster connections to cluster to be used by mysql server.",
+    (gptr*) &opt_ndb_cluster_connection_pool,
+    (gptr*) &opt_ndb_cluster_connection_pool,
+    0, GET_ULONG, REQUIRED_ARG, 1, 1, 63, 0, 0, 0},
+#endif
   {"new", 'n', "Use very new possible 'unsafe' functions.",
    (gptr*) &global_system_variables.new_mode,
    (gptr*) &max_system_variables.new_mode,

--- 1.302/sql/slave.cc	2007-04-05 15:00:14 +02:00
+++ 1.303/sql/slave.cc	2007-04-23 20:45:23 +02:00
@@ -17,8 +17,9 @@
 
 #include <mysql.h>
 #include <myisam.h>
-#include "rpl_rli.h"
 #include "slave.h"
+#include "rpl_mi.h"
+#include "rpl_rli.h"
 #include "sql_repl.h"
 #include "rpl_filter.h"
 #include "repl_failsafe.h"
@@ -794,8 +795,10 @@
   else
   {
     mi->clock_diff_with_master= 0; /* The "most sensible" value */
-    sql_print_warning("\"SELECT UNIX_TIMESTAMP()\" failed on master, \
-do not trust column Seconds_Behind_Master of SHOW SLAVE STATUS");
+    sql_print_warning("\"SELECT UNIX_TIMESTAMP()\" failed on master, "
+                      "do not trust column Seconds_Behind_Master of SHOW "
+                      "SLAVE STATUS. Error: %s (%d)",
+                      mysql_error(mysql), mysql_errno(mysql));
   }
   if (master_res)
     mysql_free_result(master_res);
@@ -1333,8 +1336,7 @@
     if ((mi->slave_running == MYSQL_SLAVE_RUN_CONNECT) &&
         mi->rli.slave_running)
     {
-      long time_diff= ((long)((time_t)time((time_t*) 0)
-                              - mi->rli.last_master_timestamp)
+      long time_diff= ((long)(time(0) - mi->rli.last_master_timestamp)
                        - mi->clock_diff_with_master);
       /*
         Apparently on some systems time_diff can be <0. Here are possible
@@ -1632,6 +1634,7 @@
   switch (expected_error) {
   case ER_NET_READ_ERROR:
   case ER_NET_ERROR_ON_WRITE:
+  case ER_QUERY_INTERRUPTED:
   case ER_SERVER_SHUTDOWN:
   case ER_NEW_ABORTING_CONNECTION:
     DBUG_RETURN(1);
@@ -1736,11 +1739,12 @@
     /*
     */
 
-    DBUG_PRINT("info",("type_code=%d (%s), server_id=%d",
-                       type_code, ev->get_type_str(), ev->server_id));
-    DBUG_PRINT("info", ("thd->options={ %s%s}",
+    DBUG_PRINT("exec_event",("%s(type_code: %d; server_id: %d)",
+                       ev->get_type_str(), type_code, ev->server_id));
+    DBUG_PRINT("info", ("thd->options: %s%s; rli->last_event_start_time: %lu",
                         FLAGSTR(thd->options, OPTION_NOT_AUTOCOMMIT),
-                        FLAGSTR(thd->options, OPTION_BEGIN)));
+                        FLAGSTR(thd->options, OPTION_BEGIN),
+                        rli->last_event_start_time));
 
 
 
@@ -1782,21 +1786,21 @@
     if (reason == Log_event::EVENT_SKIP_NOT)
       exec_res= ev->apply_event(rli);
 #ifndef DBUG_OFF
-    else
-    {
-      /*
-        This only prints information to the debug trace.
+    /*
+      This only prints information to the debug trace.
 
-        TODO: Print an informational message to the error log?
-       */
-      static const char *const explain[] = {
-        "event was not skipped",                  // EVENT_SKIP_NOT,
-        "event originated from this server",      // EVENT_SKIP_IGNORE,
-        "event skip counter was non-zero"         // EVENT_SKIP_COUNT
-      };
-      DBUG_PRINT("info", ("%s was skipped because %s",
-                          ev->get_type_str(), explain[reason]));
-    }
+      TODO: Print an informational message to the error log?
+    */
+    static const char *const explain[] = {
+      // EVENT_SKIP_NOT,
+      "not skipped",
+      // EVENT_SKIP_IGNORE,
+      "skipped because event originated from this server",
+      // EVENT_SKIP_COUNT
+      "skipped because event skip counter was non-zero"
+    };
+    DBUG_PRINT("skip_event", ("%s event was %s",
+                              ev->get_type_str(), explain[reason]));
 #endif
 
     DBUG_PRINT("info", ("apply_event error = %d", exec_res));

--- 1.22/mysql-test/r/rpl_ndb_log.result	2007-04-05 15:00:13 +02:00
+++ 1.23/mysql-test/r/rpl_ndb_log.result	2007-04-23 20:45:22 +02:00
@@ -99,33 +99,33 @@
 Log_name	Pos	Event_type	Server_id	End_log_pos	Info
 slave-bin.000001	#	Format_desc	2	#	Server ver: VERSION, Binlog ver: 4
 slave-bin.000001	#	Query	1	#	use `test`; create table t1(n int not null auto_increment primary key)ENGINE=NDB
-slave-bin.000001	#	Query	1	#	BEGIN
+slave-bin.000001	#	Query	2	#	BEGIN
 slave-bin.000001	#	Table_map	2	#	table_id: # (test.t1)
 slave-bin.000001	#	Table_map	2	#	table_id: # (mysql.ndb_apply_status)
 slave-bin.000001	#	Write_rows	2	#	table_id: #
 slave-bin.000001	#	Write_rows	1	#	table_id: # flags: STMT_END_F
-slave-bin.000001	#	Query	1	#	COMMIT
+slave-bin.000001	#	Query	2	#	COMMIT
 slave-bin.000001	#	Query	1	#	use `test`; drop table t1
 slave-bin.000001	#	Query	1	#	use `test`; create table t1 (word char(20) not null)ENGINE=NDB
-slave-bin.000001	#	Query	1	#	BEGIN
+slave-bin.000001	#	Query	2	#	BEGIN
 slave-bin.000001	#	Table_map	2	#	table_id: # (test.t1)
 slave-bin.000001	#	Table_map	2	#	table_id: # (mysql.ndb_apply_status)
 slave-bin.000001	#	Write_rows	2	#	table_id: #
 slave-bin.000001	#	Write_rows	1	#	table_id: #
 slave-bin.000001	#	Write_rows	1	#	table_id: # flags: STMT_END_F
-slave-bin.000001	#	Query	1	#	COMMIT
+slave-bin.000001	#	Query	2	#	COMMIT
 slave-bin.000001	#	Query	1	#	use `test`; create table t3 (a int)ENGINE=NDB
 slave-bin.000001	#	Rotate	2	#	slave-bin.000002;pos=4
 show binlog events in 'slave-bin.000002' from 4;
 Log_name	Pos	Event_type	Server_id	End_log_pos	Info
 slave-bin.000002	#	Format_desc	2	#	Server ver: VERSION, Binlog ver: 4
 slave-bin.000002	#	Query	1	#	use `test`; create table t2 (n int)ENGINE=NDB
-slave-bin.000002	#	Query	1	#	BEGIN
+slave-bin.000002	#	Query	2	#	BEGIN
 slave-bin.000002	#	Table_map	2	#	table_id: # (test.t2)
 slave-bin.000002	#	Table_map	2	#	table_id: # (mysql.ndb_apply_status)
 slave-bin.000002	#	Write_rows	2	#	table_id: #
 slave-bin.000002	#	Write_rows	1	#	table_id: # flags: STMT_END_F
-slave-bin.000002	#	Query	1	#	COMMIT
+slave-bin.000002	#	Query	2	#	COMMIT
 show slave status;
 Slave_IO_State	Master_Host	Master_User	Master_Port	Connect_Retry	Master_Log_File	Read_Master_Log_Pos	Relay_Log_File	Relay_Log_Pos	Relay_Master_Log_File	Slave_IO_Running	Slave_SQL_Running	Replicate_Do_DB	Replicate_Ignore_DB	Replicate_Do_Table	Replicate_Ignore_Table	Replicate_Wild_Do_Table	Replicate_Wild_Ignore_Table	Last_Errno	Last_Error	Skip_Counter	Exec_Master_Log_Pos	Relay_Log_Space	Until_Condition	Until_Log_File	Until_Log_Pos	Master_SSL_Allowed	Master_SSL_CA_File	Master_SSL_CA_Path	Master_SSL_Cert	Master_SSL_Cipher	Master_SSL_Key	Seconds_Behind_Master	Master_SSL_Verify_Server_Cert
 #	127.0.0.1	root	MASTER_PORT	1	master-bin.000002	617	#	#	master-bin.000002	Yes	Yes				#			0		0	617	#	None		0	No						#	No

--- 1.280/mysql-test/mysql-test-run.pl	2007-04-16 19:17:19 +02:00
+++ 1.281/mysql-test/mysql-test-run.pl	2007-04-23 20:45:22 +02:00
@@ -3275,9 +3275,9 @@
 }
 
 
-sub run_testcase_mark_logs($)
+sub run_testcase_mark_logs($$)
 {
-  my ($log_msg)= @_;
+  my ($tinfo, $log_msg)= @_;
 
   # Write a marker to all log files
 
@@ -3290,6 +3290,12 @@
     mtr_tofile($mysqld->{path_myerr}, $log_msg);
   }
 
+  if ( $tinfo->{'component_id'} eq 'im')
+  {
+    mtr_tofile($instance_manager->{path_err}, $log_msg);
+    mtr_tofile($instance_manager->{path_log}, $log_msg);
+  }
+
   # ndbcluster log file
   mtr_tofile($path_ndb_testrun_log, $log_msg);
 
@@ -3416,7 +3422,7 @@
   }
 
   # Write to all log files to indicate start of testcase
-  run_testcase_mark_logs("CURRENT_TEST: $tinfo->{name}\n");
+  run_testcase_mark_logs($tinfo, "CURRENT_TEST: $tinfo->{name}\n");
 
   my $died= mtr_record_dead_children();
   if ($died or $master_restart or $slave_restart)
@@ -3490,7 +3496,7 @@
   # Stop Instance Manager if we are processing an IM-test case.
   # ----------------------------------------------------------------------
   if ( $tinfo->{'component_id'} eq 'im' and
-       !mtr_im_stop($instance_manager, $tinfo->{'name'}) )
+       !mtr_im_stop($instance_manager, $tinfo->{'name'}))
   {
     mtr_error("Failed to stop Instance Manager.")
   }

--- 1.246/mysql-test/t/disabled.def	2007-04-18 08:41:04 +02:00
+++ 1.247/mysql-test/t/disabled.def	2007-04-23 20:45:22 +02:00
@@ -12,6 +12,7 @@
 user_limits     : Bug#23921 random failure of user_limits.test
 
 im_options                : Bug#20294 2006-07-24 stewart   Instance manager test im_options fails randomly
+im_life_cycle            : BUG#27851 Instance manager dies on ASSERT in ~Thread_registry() or from not being able to close a mysqld instance.
 concurrent_innodb        : BUG#21579 2006-08-11 mleich innodb_concurrent random failures with varying differences
 ndb_single_user          : Bug will not be fixed in this release (not yet atleast)
 ndb_autodiscover         : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog
@@ -19,6 +20,7 @@
 ndb_load                 : BUG#17233 2006-05-04 tomas failed load data from infile causes mysqld dbug_assert, binlog not flushed
 
 partition_03ndb          : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table
+rpl_ndb_circular_simplex : BUG#27972 2007-04-20 mats Slave cannot start where it stopped
 rpl_ndb_2innodb          : BUG#19227 2006-04-20 pekka pk delete apparently not replicated
 rpl_ndb_2myisam          : BUG#19227 Seems to pass currently
 rpl_ndb_dd_partitions    : BUG#19259 2006-04-21 rpl_ndb_dd_partitions fails on s/AMD

--- 1.67/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2007-04-23 14:23:13 +02:00
+++ 1.68/storage/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp	2007-04-23 20:45:23 +02:00
@@ -314,6 +314,8 @@
     regOperPtr.p->saved_change_mask[0] = prevOpPtr.p->saved_change_mask[0];
     regOperPtr.p->saved_change_mask[1] = prevOpPtr.p->saved_change_mask[1];
 
+    regOperPtr.p->m_any_value = prevOpPtr.p->m_any_value;
+
     prevOpPtr.p->op_struct.m_wait_log_buffer= 0;
     prevOpPtr.p->op_struct.m_load_diskpage_on_commit= 0;
 

--- 1.448/sql/ha_ndbcluster.cc	2007-04-23 13:07:19 +02:00
+++ 1.449/sql/ha_ndbcluster.cc	2007-04-23 20:45:22 +02:00
@@ -24,6 +24,7 @@
 #endif
 
 #include "mysql_priv.h"
+#include "rpl_mi.h"
 
 #include <my_dir.h>
 #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
@@ -135,7 +136,7 @@
 }
 
 static int ndbcluster_inited= 0;
-static int ndbcluster_terminating= 0;
+int ndbcluster_terminating= 0;
 
 static Ndb* g_ndb= NULL;
 Ndb_cluster_connection* g_ndb_cluster_connection= NULL;
@@ -369,7 +370,7 @@
   count= 0;
   all= NULL;
   stmt= NULL;
-  error= 0;
+  m_error= FALSE;
   query_state&= NDB_QUERY_NORMAL;
   options= 0;
   (void) hash_init(&open_tables, &my_charset_bin, 5, 0, 0,
@@ -404,7 +405,7 @@
 Thd_ndb::init_open_tables()
 {
   count= 0;
-  error= 0;
+  m_error= FALSE;
   my_hash_reset(&open_tables);
 }
 
@@ -480,7 +481,7 @@
   }
 
   THD *thd= current_thd;
-  if (get_thd_ndb(thd)->error)
+  if (get_thd_ndb(thd)->m_error)
     local_info->no_uncommitted_rows_count= 0;
 
   DBUG_RETURN(retval + local_info->no_uncommitted_rows_count);
@@ -514,7 +515,7 @@
   }
   {
     THD *thd= current_thd;
-    if (get_thd_ndb(thd)->error)
+    if (get_thd_ndb(thd)->m_error)
       local_info->no_uncommitted_rows_count= 0;
   }
   if (result == 0)
@@ -527,7 +528,7 @@
   if (m_ha_not_exact_count)
     return;
   DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_execute_failure");
-  get_thd_ndb(current_thd)->error= 1;
+  get_thd_ndb(current_thd)->m_error= TRUE;
   DBUG_VOID_RETURN;
 }
 
@@ -551,7 +552,7 @@
   DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_reset");
   Thd_ndb *thd_ndb= get_thd_ndb(thd);
   thd_ndb->count++;
-  thd_ndb->error= 0;
+  thd_ndb->m_error= FALSE;
   DBUG_VOID_RETURN;
 }
 
@@ -2745,9 +2746,13 @@
     op->setValue(no_fields, part_func_value);
   }
 
-  if (thd->slave_thread)
-    op->setAnyValue(thd->server_id);
-
+  if (unlikely(m_slow_path))
+  {
+    if (!(thd->options & OPTION_BIN_LOG))
+      op->setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING);
+    else if (thd->slave_thread)
+      op->setAnyValue(thd->server_id);
+  }
   m_rows_changed++;
 
   /*
@@ -3029,9 +3034,13 @@
     op->setValue(no_fields, part_func_value);
   }
 
-  if (thd->slave_thread)
-    op->setAnyValue(thd->server_id);
-
+  if (unlikely(m_slow_path))
+  {
+    if (!(thd->options & OPTION_BIN_LOG))
+      op->setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING);
+    else if (thd->slave_thread)
+      op->setAnyValue(thd->server_id);
+  }
   /*
     Execute update operation if we are not doing a scan for update
     and there exist UPDATE AFTER triggers
@@ -3092,9 +3101,15 @@
 
     no_uncommitted_rows_update(-1);
 
-    if (thd->slave_thread)
-      ((NdbOperation *)trans->getLastDefinedOperation())->setAnyValue(thd->server_id);
-
+    if (unlikely(m_slow_path))
+    {
+      if (!(thd->options & OPTION_BIN_LOG))
+        ((NdbOperation *)trans->getLastDefinedOperation())->
+          setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING);
+      else if (thd->slave_thread)
+        ((NdbOperation *)trans->getLastDefinedOperation())->
+          setAnyValue(thd->server_id);
+    }
     if (!(m_primary_key_update || m_delete_cannot_batch))
       // If deleting from cursor, NoCommit will be handled in next_result
       DBUG_RETURN(0);
@@ -3125,8 +3140,13 @@
         DBUG_RETURN(error);
     }
 
-    if (thd->slave_thread)
-      op->setAnyValue(thd->server_id);
+    if (unlikely(m_slow_path))
+    {
+      if (!(thd->options & OPTION_BIN_LOG))
+        op->setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING);
+      else if (thd->slave_thread)
+        op->setAnyValue(thd->server_id);
+    }
   }
 
   // Execute delete operation
@@ -4278,8 +4298,7 @@
     {
       m_transaction_on= FALSE;
       /* Would be simpler if has_transactions() didn't always say "yes" */
-      thd->options|= OPTION_STATUS_NO_TRANS_UPDATE;
-      thd->no_trans_update= TRUE;
+      thd->no_trans_update.all= thd->no_trans_update.stmt= TRUE;
     }
     else if (!thd->transaction.on)
       m_transaction_on= FALSE;
@@ -4301,6 +4320,10 @@
         thd_ndb->stmt= trans;
 	thd_ndb->query_state&= NDB_QUERY_NORMAL;
         thd_ndb->trans_options= 0;
+        thd_ndb->m_slow_path= FALSE;
+        if (thd->slave_thread ||
+            !(thd->options & OPTION_BIN_LOG))
+          thd_ndb->m_slow_path= TRUE;
         trans_register_ha(thd, FALSE, ndbcluster_hton);
       } 
       else 
@@ -4318,6 +4341,10 @@
           thd_ndb->all= trans; 
 	  thd_ndb->query_state&= NDB_QUERY_NORMAL;
           thd_ndb->trans_options= 0;
+          thd_ndb->m_slow_path= FALSE;
+          if (thd->slave_thread ||
+              !(thd->options & OPTION_BIN_LOG))
+            thd_ndb->m_slow_path= TRUE;
           trans_register_ha(thd, TRUE, ndbcluster_hton);
 
           /*
@@ -4358,9 +4385,13 @@
     // Start of transaction
     m_rows_changed= 0;
     m_ops_pending= 0;
+    m_slow_path= thd_ndb->m_slow_path;
 #ifdef HAVE_NDB_BINLOG
-    if (m_share == ndb_apply_status_share && thd->slave_thread)
-      thd_ndb->trans_options|= TNTO_INJECTED_APPLY_STATUS;
+    if (unlikely(m_slow_path))
+    {
+      if (m_share == ndb_apply_status_share && thd->slave_thread)
+        thd_ndb->trans_options|= TNTO_INJECTED_APPLY_STATUS;
+    }
 #endif
     // TODO remove double pointers...
     m_thd_ndb_share= thd_ndb->get_open_table(thd, m_table);
@@ -4507,8 +4538,12 @@
     DBUG_RETURN(0);
 
 #ifdef HAVE_NDB_BINLOG
-  if (thd->slave_thread)
-    ndbcluster_update_apply_status(thd, thd_ndb->trans_options & TNTO_INJECTED_APPLY_STATUS);
+  if (unlikely(thd_ndb->m_slow_path))
+  {
+    if (thd->slave_thread)
+      ndbcluster_update_apply_status
+        (thd, thd_ndb->trans_options & TNTO_INJECTED_APPLY_STATUS);
+  }
 #endif /* HAVE_NDB_BINLOG */
 
   if (execute_commit(thd,trans) != 0)
@@ -5020,7 +5055,7 @@
   for (i= 0; i < form->s->fields; i++) 
   {
     Field *field= form->field[i];
-    DBUG_PRINT("info", ("name: %s, type: %u, pack_length: %d", 
+    DBUG_PRINT("info", ("name: %s  type: %u  pack_length: %d", 
                         field->field_name, field->real_type(),
                         field->pack_length()));
     if ((my_errno= create_ndb_column(col, field, create_info)))

--- 1.175/sql/ha_ndbcluster.h	2007-04-05 15:00:13 +02:00
+++ 1.176/sql/ha_ndbcluster.h	2007-04-23 20:45:22 +02:00
@@ -624,7 +624,8 @@
   uint lock_count;
   NdbTransaction *all;
   NdbTransaction *stmt;
-  int error;
+  bool m_error;
+  bool m_slow_path;
   uint32 options;
   uint32 trans_options;
   List<NDB_SHARE> changed_tables;
@@ -970,6 +971,7 @@
   ha_rows m_ops_pending;
   bool m_skip_auto_increment;
   bool m_blobs_pending;
+  bool m_slow_path;
   my_ptrdiff_t m_blobs_offset;
   // memory for blobs in one tuple
   char *m_blobs_buffer;
@@ -1005,4 +1007,6 @@
 
 static const char ndbcluster_hton_name[]= "ndbcluster";
 static const int ndbcluster_hton_name_length=sizeof(ndbcluster_hton_name)-1;
-
+extern int ndbcluster_terminating;
+extern int ndb_util_thread_running;
+extern pthread_cond_t COND_ndb_util_ready;

--- 1.40/sql/protocol.h	2007-03-22 18:14:21 +01:00
+++ 1.41/sql/protocol.h	2007-04-23 20:45:23 +02:00
@@ -88,9 +88,9 @@
   		     CHARSET_INFO *fromcs, CHARSET_INFO *tocs)=0;
   virtual bool store(float from, uint32 decimals, String *buffer)=0;
   virtual bool store(double from, uint32 decimals, String *buffer)=0;
-  virtual bool store(TIME *time)=0;
-  virtual bool store_date(TIME *time)=0;
-  virtual bool store_time(TIME *time)=0;
+  virtual bool store(MYSQL_TIME *time)=0;
+  virtual bool store_date(MYSQL_TIME *time)=0;
+  virtual bool store_time(MYSQL_TIME *time)=0;
   virtual bool store(Field *field)=0;
 #ifdef EMBEDDED_LIBRARY
   int begin_dataset();
@@ -127,9 +127,9 @@
   virtual bool store(const char *from, uint length, CHARSET_INFO *cs);
   virtual bool store(const char *from, uint length,
   		     CHARSET_INFO *fromcs, CHARSET_INFO *tocs);
-  virtual bool store(TIME *time);
-  virtual bool store_date(TIME *time);
-  virtual bool store_time(TIME *time);
+  virtual bool store(MYSQL_TIME *time);
+  virtual bool store_date(MYSQL_TIME *time);
+  virtual bool store_time(MYSQL_TIME *time);
   virtual bool store(float nr, uint32 decimals, String *buffer);
   virtual bool store(double from, uint32 decimals, String *buffer);
   virtual bool store(Field *field);
@@ -162,9 +162,9 @@
   virtual bool store(const char *from,uint length, CHARSET_INFO *cs);
   virtual bool store(const char *from, uint length,
   		     CHARSET_INFO *fromcs, CHARSET_INFO *tocs);
-  virtual bool store(TIME *time);
-  virtual bool store_date(TIME *time);
-  virtual bool store_time(TIME *time);
+  virtual bool store(MYSQL_TIME *time);
+  virtual bool store_date(MYSQL_TIME *time);
+  virtual bool store_time(MYSQL_TIME *time);
   virtual bool store(float nr, uint32 decimals, String *buffer);
   virtual bool store(double from, uint32 decimals, String *buffer);
   virtual bool store(Field *field);

--- 1.284/BitKeeper/etc/ignore	2007-04-16 19:17:14 +02:00
+++ 1.285/BitKeeper/etc/ignore	2007-04-23 20:45:17 +02:00
@@ -355,6 +355,8 @@
 client/link_sources
 client/log_event.cc
 client/log_event.h
+client/log_event_old.cc
+client/log_event_old.h
 client/mf_iocache.c
 client/mf_iocache.cc
 client/my_decimal.cc
@@ -381,6 +383,8 @@
 client/mysys_priv.h
 client/readline.cpp
 client/rpl_constants.h
+client/rpl_record_old.cc
+client/rpl_record_old.h
 client/select_test
 client/sql_string.cpp
 client/ssl_test
@@ -1092,6 +1096,7 @@
 libmysqld/lock.cc
 libmysqld/log.cc
 libmysqld/log_event.cc
+libmysqld/log_event_old.cc
 libmysqld/md5.c
 libmysqld/mf_iocache.cc
 libmysqld/mini_client.cc
@@ -1114,6 +1119,8 @@
 libmysqld/repl_failsafe.cc
 libmysqld/rpl_filter.cc
 libmysqld/rpl_injector.cc
+libmysqld/rpl_record.cc
+libmysqld/rpl_record_old.cc
 libmysqld/set_var.cc
 libmysqld/simple-test
 libmysqld/slave.cc
@@ -1810,6 +1817,7 @@
 repl-tests/test-repl/sum-wlen-slave.master.reje
 replace/*.ds?
 replace/*.vcproj
+scripts/comp_sql
 scripts/fill_func_tables
 scripts/fill_func_tables.sql
 scripts/fill_help_tables
@@ -1827,6 +1835,7 @@
 scripts/mysql_fix_extensions
 scripts/mysql_fix_privilege_tables
 scripts/mysql_fix_privilege_tables.sql
+scripts/mysql_fix_privilege_tables_sql.c
 scripts/mysql_install_db
 scripts/mysql_secure_installation
 scripts/mysql_setpermission
@@ -2631,6 +2640,16 @@
 storage/ndb/lib/libREP_API.so
 storage/ndb/lib/libndbclient.so
 storage/ndb/lib/libndbclient_extra.so
+storage/ndb/ndbapi-examples/mgmapi_logevent/mgmapi_logevent
+storage/ndb/ndbapi-examples/mgmapi_logevent2/mgmapi_logevent2
+storage/ndb/ndbapi-examples/ndbapi_async/ndbapi_async
+storage/ndb/ndbapi-examples/ndbapi_async1/ndbapi_async1
+storage/ndb/ndbapi-examples/ndbapi_event/ndbapi_event
+storage/ndb/ndbapi-examples/ndbapi_retries/ndbapi_retries
+storage/ndb/ndbapi-examples/ndbapi_scan/ndbapi_scan
+storage/ndb/ndbapi-examples/ndbapi_simple/ndbapi_simple
+storage/ndb/ndbapi-examples/ndbapi_simple_dual/ndbapi_simple_dual
+storage/ndb/ndbapi-examples/ndbapi_simple_index/ndbapi_simple_index
 storage/ndb/src/common/debugger/libtrace.dsp
 storage/ndb/src/common/debugger/signaldata/libsignaldataprint.dsp
 storage/ndb/src/common/logger/liblogger.dsp
@@ -2746,6 +2765,7 @@
 storage/ndb/test/tools/hugoScanUpdate
 storage/ndb/test/tools/listen_event
 storage/ndb/test/tools/ndb_cpcc
+storage/ndb/test/tools/rep_latency
 storage/ndb/test/tools/restart
 storage/ndb/test/tools/verify_index
 storage/ndb/tools/ndb_config

--- 1.18/storage/ndb/test/tools/Makefile.am	2007-04-10 10:34:23 +02:00
+++ 1.19/storage/ndb/test/tools/Makefile.am	2007-04-23 20:45:23 +02:00
@@ -13,7 +13,7 @@
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 
-ndbtest_PROGRAMS = hugoLoad hugoFill hugoLockRecords hugoPkDelete hugoPkRead hugoPkReadRecord  hugoPkUpdate hugoScanRead hugoScanUpdate restart verify_index copy_tab create_index  ndb_cpcc listen_event eventlog
+ndbtest_PROGRAMS = hugoLoad hugoFill hugoLockRecords hugoPkDelete hugoPkRead hugoPkReadRecord  hugoPkUpdate hugoScanRead hugoScanUpdate restart verify_index copy_tab create_index  ndb_cpcc listen_event eventlog rep_latency
 
 # transproxy 
 
@@ -34,6 +34,7 @@
 ndb_cpcc_SOURCES = cpcc.cpp
 listen_event_SOURCES = listen.cpp
 eventlog_SOURCES = log_listner.cpp
+rep_latency_SOURCES = rep_latency.cpp
 
 include $(top_srcdir)/storage/ndb/config/common.mk.am
 include $(top_srcdir)/storage/ndb/config/type_ndbapitest.mk.am

--- 1.112/sql/ha_ndbcluster_binlog.cc	2007-04-10 10:34:22 +02:00
+++ 1.113/sql/ha_ndbcluster_binlog.cc	2007-04-23 20:45:22 +02:00
@@ -602,10 +602,30 @@
   ndbcluster_binlog_inited= 0;
 
 #ifdef HAVE_NDB_BINLOG
+  if (ndb_util_thread_running > 0)
+  {
+    /*
+      Wait for util thread to die (as this uses the injector mutex)
+      There is a very small chance that ndb_util_thread dies and the
+      following mutex is freed before it's accessed. This shouldn't
+      however be a likely case as the ndbcluster_binlog_end is supposed to
+      be called before ndb_cluster_end().
+    */
+    pthread_mutex_lock(&LOCK_ndb_util_thread);
+    /* Ensure mutexes are not freed if ndb_cluster_end is running at same time */
+    ndb_util_thread_running++;
+    ndbcluster_terminating= 1;
+    pthread_cond_signal(&COND_ndb_util_thread);
+    while (ndb_util_thread_running > 1)
+      pthread_cond_wait(&COND_ndb_util_ready, &LOCK_ndb_util_thread);
+    ndb_util_thread_running--;
+    pthread_mutex_unlock(&LOCK_ndb_util_thread);
+  }
+
   /* wait for injector thread to finish */
   ndbcluster_binlog_terminating= 1;
-  pthread_cond_signal(&injector_cond);
   pthread_mutex_lock(&injector_mutex);
+  pthread_cond_signal(&injector_cond);
   while (ndb_binlog_thread_running > 0)
     pthread_cond_wait(&injector_cond, &injector_mutex);
   pthread_mutex_unlock(&injector_mutex);
@@ -892,6 +912,7 @@
   uint32 id;
   uint32 version;
   uint32 type;
+  uint32 any_value;
 };
 
 /*
@@ -1412,6 +1433,12 @@
       /* type */
       r|= op->setValue(SCHEMA_TYPE_I, log_type);
       DBUG_ASSERT(r == 0);
+      /* any value */
+      if (!(thd->options & OPTION_BIN_LOG))
+        r|= op->setAnyValue(NDB_ANYVALUE_FOR_NOLOGGING);
+      else
+        r|= op->setAnyValue(thd->server_id);
+      DBUG_ASSERT(r == 0);
       if (log_db != new_db && new_db && new_table_name)
       {
         log_db= new_db;
@@ -1757,6 +1784,31 @@
   DBUG_RETURN(0);
 }
 
+static void ndb_binlog_query(THD *thd, Cluster_schema *schema)
+{
+  if (schema->any_value & NDB_ANYVALUE_RESERVED)
+  {
+    if (schema->any_value != NDB_ANYVALUE_FOR_NOLOGGING)
+      sql_print_warning("NDB: unknown value for binlog signalling 0x%X, "
+                        "query not logged",
+                        schema->any_value);
+    return;
+  }
+  uint32 thd_server_id_save= thd->server_id;
+  DBUG_ASSERT(sizeof(thd_server_id_save) == sizeof(thd->server_id));
+  char *thd_db_save= thd->db;
+  if (schema->any_value == 0)
+    thd->server_id= ::server_id;
+  else
+    thd->server_id= schema->any_value;
+  thd->db= schema->db;
+  thd->binlog_query(THD::STMT_QUERY_TYPE, schema->query,
+                    schema->query_length, FALSE,
+                    schema->name[0] == 0 || thd->db[0] == 0);
+  thd->server_id= thd_server_id_save;
+  thd->db= thd_db_save;
+}
+
 static int
 ndb_binlog_thread_handle_schema_event(THD *thd, Ndb *ndb,
                                       NdbEventOperation *pOp,
@@ -1781,7 +1833,10 @@
       MY_BITMAP slock;
       bitmap_init(&slock, schema->slock, 8*SCHEMA_SLOCK_SIZE, FALSE);
       uint node_id= g_ndb_cluster_connection->node_id();
-      ndbcluster_get_schema(tmp_share, schema);
+      {
+        ndbcluster_get_schema(tmp_share, schema);
+        schema->any_value= pOp->getAnyValue();
+      }
       enum SCHEMA_OP_TYPE schema_type= (enum SCHEMA_OP_TYPE)schema->type;
       DBUG_PRINT("info",
                  ("%s.%s: log query_length: %d  query: '%s'  type: %d",
@@ -1905,7 +1960,8 @@
           run_query(thd, schema->query,
                     schema->query + schema->query_length,
                     TRUE,    /* print error */
-                    FALSE);  /* binlog the query */
+                    TRUE);   /* don't binlog the query */
+          log_query= 1;
           break;
         case SOT_TABLESPACE:
         case SOT_LOGFILE_GROUP:
@@ -1915,14 +1971,7 @@
           abort();
         }
         if (log_query && ndb_binlog_running)
-        {
-          char *thd_db_save= thd->db;
-          thd->db= schema->db;
-          thd->binlog_query(THD::STMT_QUERY_TYPE, schema->query,
-                            schema->query_length, FALSE,
-                            schema->name[0] == 0 || thd->db[0] == 0);
-          thd->db= thd_db_save;
-        }
+          ndb_binlog_query(thd, schema);
         /* signal that schema operation has been handled */
         DBUG_DUMP("slock", (char*)schema->slock, schema->slock_length);
         if (bitmap_is_set(&slock, node_id))
@@ -2099,10 +2148,10 @@
         log_query= 1;
         break;
       case SOT_DROP_TABLE:
+        log_query= 1;
         // invalidation already handled by binlog thread
         if (share && share->op)
         {
-          log_query= 1;
           break;
         }
         // fall through
@@ -2180,14 +2229,7 @@
       }
     }
     if (ndb_binlog_running && log_query)
-    {
-      char *thd_db_save= thd->db;
-      thd->db= schema->db;
-      thd->binlog_query(THD::STMT_QUERY_TYPE, schema->query,
-                        schema->query_length, FALSE,
-                        schema->name[0] == 0);
-      thd->db= thd_db_save;
-    }
+      ndb_binlog_query(thd, schema);
   }
   while ((schema= post_epoch_unlock_list->pop()))
   {
@@ -2343,6 +2385,18 @@
 {
   DBUG_ENTER("ndbcluster_binlog_start");
 
+  if (::server_id == 0)
+  {
+    sql_print_warning("NDB: server id set to zero will cause any other mysqld "
+                      "with bin log to log with wrong server id");
+  }
+  else if (::server_id & 0x1 << 31)
+  {
+    sql_print_error("NDB: server id's with high bit set is reserved for internal "
+                    "purposes");
+    DBUG_RETURN(-1);
+  }
+
   pthread_mutex_init(&injector_mutex, MY_MUTEX_INIT_FAST);
   pthread_cond_init(&injector_cond, NULL);
   pthread_mutex_init(&ndb_schema_share_mutex, MY_MUTEX_INIT_FAST);
@@ -3212,9 +3266,17 @@
   if (share == ndb_apply_status_share)
     return 0;
 
-  uint originating_server_id= pOp->getAnyValue();
+  uint32 originating_server_id= pOp->getAnyValue();
   if (originating_server_id == 0)
     originating_server_id= ::server_id;
+  else if (originating_server_id & NDB_ANYVALUE_RESERVED)
+  {
+    if (originating_server_id != NDB_ANYVALUE_FOR_NOLOGGING)
+      sql_print_warning("NDB: unknown value for binlog signalling 0x%X, "
+                        "event not logged",
+                        originating_server_id);
+    return 0;
+  }
 
   TABLE *table= share->table;
   DBUG_ASSERT(trans.good());

--- 1.25/sql/ha_ndbcluster_binlog.h	2007-04-03 07:25:48 +02:00
+++ 1.26/sql/ha_ndbcluster_binlog.h	2007-04-23 20:45:22 +02:00
@@ -30,6 +30,10 @@
 
 #define NDB_INVALID_SCHEMA_OBJECT 241
 
+/* server ids with the high bit set are reserved */
+#define NDB_ANYVALUE_FOR_NOLOGGING 0xFFFFFFFF
+#define NDB_ANYVALUE_RESERVED      0x80000000
+
 extern handlerton *ndbcluster_hton;
 
 /*

--- 1.7/storage/ndb/test/tools/listen.cpp	2007-03-16 11:14:06 +01:00
+++ 1.8/storage/ndb/test/tools/listen.cpp	2007-04-23 20:45:23 +02:00
@@ -22,6 +22,128 @@
 #include <getarg.h>
 
 
+#define BATCH_SIZE 128
+struct Table_info
+{
+  Uint32 id;
+};
+
+struct Trans_arg
+{
+  Ndb *ndb;
+  NdbTransaction *trans;
+  Uint32 bytes_batched;
+};
+
+Vector< Vector<NdbRecAttr*> > event_values;
+Vector< Vector<NdbRecAttr*> > event_pre_values;
+Vector<struct Table_info> table_infos;
+
+static void do_begin(Ndb *ndb, struct Trans_arg &trans_arg)
+{
+  trans_arg.ndb =  ndb;
+  trans_arg.trans = ndb->startTransaction();
+  trans_arg.bytes_batched = 0;
+}
+
+static void do_equal(NdbOperation *op,
+                     NdbEventOperation *pOp)
+{
+  struct Table_info *ti = (struct Table_info *)pOp->getCustomData();
+  Vector<NdbRecAttr*> &ev = event_values[ti->id];
+  const NdbDictionary::Table *tab= pOp->getTable();
+  unsigned i, n_columns = tab->getNoOfColumns();
+  for (i= 0; i < n_columns; i++)
+  {
+    if (tab->getColumn(i)->getPrimaryKey() &&
+        op->equal(i, ev[i]->aRef()))
+    {
+      abort();
+    }
+  }
+}
+
+static void do_set_value(NdbOperation *op,
+                         NdbEventOperation *pOp)
+{
+  struct Table_info *ti = (struct Table_info *)pOp->getCustomData();
+  Vector<NdbRecAttr*> &ev = event_values[ti->id];
+  const NdbDictionary::Table *tab= pOp->getTable();
+  unsigned i, n_columns = tab->getNoOfColumns();
+  for (i= 0; i < n_columns; i++)
+  {
+    if (!tab->getColumn(i)->getPrimaryKey() &&
+        op->setValue(i, ev[i]->aRef()))
+    {
+      abort();
+    }
+  }
+}
+
+static void do_insert(struct Trans_arg &trans_arg, NdbEventOperation *pOp)
+{
+  if (!trans_arg.trans)
+    return;
+
+  NdbOperation *op =
+    trans_arg.trans->getNdbOperation(pOp->getEvent()->getTableName());
+  op->writeTuple();
+
+  do_equal(op, pOp);
+  do_set_value(op, pOp);
+
+  trans_arg.bytes_batched++;
+  if (trans_arg.bytes_batched > BATCH_SIZE)
+  {
+    trans_arg.trans->execute(NdbTransaction::NoCommit);
+    trans_arg.bytes_batched = 0; 
+  }
+}
+static void do_update(struct Trans_arg &trans_arg, NdbEventOperation *pOp)
+{
+  if (!trans_arg.trans)
+    return;
+
+  NdbOperation *op =
+    trans_arg.trans->getNdbOperation(pOp->getEvent()->getTableName());
+  op->writeTuple();
+
+  do_equal(op, pOp);
+  do_set_value(op, pOp);
+
+  trans_arg.bytes_batched++;
+  if (trans_arg.bytes_batched > BATCH_SIZE)
+  {
+    trans_arg.trans->execute(NdbTransaction::NoCommit);
+    trans_arg.bytes_batched = 0; 
+  }
+}
+static void do_delete(struct Trans_arg &trans_arg, NdbEventOperation *pOp)
+{
+  if (!trans_arg.trans)
+    return;
+
+  NdbOperation *op =
+    trans_arg.trans->getNdbOperation(pOp->getEvent()->getTableName());
+  op->deleteTuple();
+
+  do_equal(op, pOp);
+
+  trans_arg.bytes_batched++;
+  if (trans_arg.bytes_batched > BATCH_SIZE)
+  {
+    trans_arg.trans->execute(NdbTransaction::NoCommit);
+    trans_arg.bytes_batched = 0; 
+  }
+}
+static void do_commit(struct Trans_arg &trans_arg)
+{
+  if (!trans_arg.trans)
+    return;
+  trans_arg.trans->execute(NdbTransaction::Commit);
+  trans_arg.ndb->closeTransaction(trans_arg.trans);
+}
+
 int 
 main(int argc, const char** argv){
   ndb_init();
@@ -29,8 +151,14 @@
   
   int _help = 0;
   const char* db = 0;
+  const char* connectstring1 = 0;
+  const char* connectstring2 = 0;
 
   struct getargs args[] = {
+    { "connectstring1", 'c',
+      arg_string, &connectstring1, "connectstring1", "" },
+    { "connectstring2", 'C',
+      arg_string, &connectstring2, "connectstring2", "" },
     { "database", 'd', arg_string, &db, "Database", "" },
     { "usage", '?', arg_flag, &_help, "Print help", "" }
   };
@@ -46,7 +174,7 @@
   }
 
   // Connect to Ndb
-  Ndb_cluster_connection con;
+  Ndb_cluster_connection con(connectstring1);
   if(con.connect(12, 5, 1) != 0)
   {
     return NDBT_ProgramExit(NDBT_FAILED);
@@ -61,12 +189,35 @@
   // Connect to Ndb and wait for it to become ready
   while(MyNdb.waitUntilReady() != 0)
     ndbout << "Waiting for ndb to become ready..." << endl;
-   
+
+  Ndb_cluster_connection *con2 = NULL;
+  Ndb *ndb2 =  NULL;
+  if (connectstring2)
+  {
+    con2 = new Ndb_cluster_connection(connectstring2);
+
+    if(con2->connect(12, 5, 1) != 0)
+    {
+      return NDBT_ProgramExit(NDBT_FAILED);
+    }
+    ndb2 = new Ndb( con2, db ? db : "TEST_DB" );
+
+    if(ndb2->init() != 0){
+      ERR(ndb2->getNdbError());
+      return NDBT_ProgramExit(NDBT_FAILED);
+    }
+
+    // Connect to Ndb and wait for it to become ready
+    while(ndb2->waitUntilReady() != 0)
+      ndbout << "Waiting for ndb to become ready..." << endl;
+  }
+
   int result = 0;
   
   NdbDictionary::Dictionary *myDict = MyNdb.getDictionary();
   Vector<NdbDictionary::Event*> events;
   Vector<NdbEventOperation*> event_ops;
+  int sz = 0;
   for(i= optind; i<argc; i++)
   {
     const NdbDictionary::Table* table= myDict->getTable(argv[i]);
@@ -121,12 +272,23 @@
       goto end;
     }
 
+    event_values.push_back(Vector<NdbRecAttr *>());
+    event_pre_values.push_back(Vector<NdbRecAttr *>());
     for (int a = 0; a < table->getNoOfColumns(); a++) 
     {
-      pOp->getValue(table->getColumn(a)->getName());
-      pOp->getPreValue(table->getColumn(a)->getName());
+      event_values[sz].
+        push_back(pOp->getValue(table->getColumn(a)->getName()));
+      event_pre_values[sz].
+        push_back(pOp->getPreValue(table->getColumn(a)->getName()));
     }
     event_ops.push_back(pOp);
+    {
+      struct Table_info ti;
+      ti.id = sz;
+      table_infos.push_back(ti);
+    }
+    pOp->setCustomData((void *)&table_infos[sz]);
+    sz++;
   }
 
   for(i= 0; i<(int)event_ops.size(); i++)
@@ -140,6 +302,7 @@
     }
   }
 
+  struct Trans_arg trans_arg;
   while(true)
   {
     while(MyNdb.pollEvents(100) == 0);
@@ -149,18 +312,26 @@
     {
       Uint64 gci= pOp->getGCI();
       Uint64 cnt_i= 0, cnt_u= 0, cnt_d= 0;
+      if (ndb2)
+        do_begin(ndb2, trans_arg);
       do
       {
 	switch(pOp->getEventType())
 	{
 	case NdbDictionary::Event::TE_INSERT:
 	  cnt_i++;
+          if (ndb2)
+            do_insert(trans_arg, pOp);
 	  break;
 	case NdbDictionary::Event::TE_DELETE:
 	  cnt_d++;
+          if (ndb2)
+            do_delete(trans_arg, pOp);
 	  break;
 	case NdbDictionary::Event::TE_UPDATE:
 	  cnt_u++;
+          if (ndb2)
+            do_update(trans_arg, pOp);
 	  break;
 	case NdbDictionary::Event::TE_CLUSTER_FAILURE:
 	  break;
@@ -180,6 +351,8 @@
 	  abort();
 	}
       } while ((pOp= MyNdb.nextEvent()) && gci == pOp->getGCI());
+      if (ndb2)
+        do_commit(trans_arg);
       ndbout_c("GCI: %lld events: %lld(I) %lld(U) %lld(D)", gci, cnt_i, cnt_u, cnt_d);
     }
   }
@@ -187,8 +360,15 @@
   for(i= 0; i<(int)event_ops.size(); i++)
     MyNdb.dropEventOperation(event_ops[i]);
 
+  if (ndb2)
+    delete ndb2;
+  if (con2)
+    delete con2;
   return NDBT_ProgramExit(NDBT_OK);
 }
 
+template class Vector<struct Table_info>;
+template class Vector<NdbRecAttr*>;
+template class Vector< Vector<NdbRecAttr*> >;
 template class Vector<NdbDictionary::Event*>;
 template class Vector<NdbEventOperation*>;
Thread
bk commit into 5.1 tree (tomas:1.2546)tomas23 Apr