List: Commits
From: tomas  Date: January 4 2006 3:39pm
Subject: bk commit into 5.1 tree (tomas:1.1990)
Below is the list of changes that have just been committed into a local
5.1 repository of tomas. When tomas does a push, these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository, see
http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet
  1.1990 06/01/04 16:39:20 tomas@stripped +1 -0
  Merge tulin@stripped:/home/bk/mysql-5.1-wl2325-v5
  into  poseidon.ndb.mysql.com:/home/tomas/mysql-5.1-wl2325-repl

  sql/ha_ndbcluster.cc
    1.232 06/01/04 16:39:14 tomas@stripped +0 -0
    SCCS merged
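
  The bulk of the patch below moves the NDB binlog machinery out of
  sql/ha_ndbcluster.cc (now reached through ha_ndbcluster_binlog.h) and
  trims the static ndbcluster_hton initializer so that most of its members
  are assigned at runtime inside ndbcluster_init(). A minimal sketch of
  that init-time wiring pattern follows; the names and types are
  hypothetical illustrations, not the actual MySQL handlerton or NDB APIs.

  // Hypothetical sketch only; names and types are illustrative and not
  // the real MySQL handlerton definition.
  #include <cstdio>

  struct example_hton
  {
    const char *name;
    bool (*init)(void);
    int  (*commit)(int all);
    int  (*rollback)(int all);
    unsigned flags;
  };

  static bool example_init(void);
  static int  example_commit(int)   { return 0; }
  static int  example_rollback(int) { return 0; }

  // The static initializer only fills in what is known up front; the rest
  // is wired up at init time, as the patch does for ndbcluster_hton.
  static example_hton hton= { "example", example_init, 0, 0, 0 };

  static bool example_init(void)
  {
    hton.commit=   example_commit;
    hton.rollback= example_rollback;
    hton.flags=    0;    /* stands in for HTON_NO_FLAGS */
    return false;        /* false means success in this sketch */
  }

  int main()
  {
    example_init();
    std::printf("%s: commit handler set: %d\n", hton.name, hton.commit != 0);
    return 0;
  }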

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	tomas
# Host:	poseidon.ndb.mysql.com
# Root:	/home/tomas/mysql-5.1-wl2325-repl/RESYNC

--- 1.231/sql/ha_ndbcluster.cc	2006-01-04 15:22:06 +01:00
+++ 1.232/sql/ha_ndbcluster.cc	2006-01-04 16:39:14 +01:00
@@ -33,10 +33,7 @@
 #include <../util/Bitmask.hpp>
 #include <ndbapi/NdbIndexStat.hpp>
 
-#ifdef HAVE_NDB_BINLOG
-#include "rpl_injector.h"
-#include "slave.h"
-#endif
+#include "ha_ndbcluster_binlog.h"
 
 // options from from mysqld.cc
 extern my_bool opt_ndb_optimized_node_selection;
@@ -55,13 +52,9 @@
 // createable against NDB from this handler
 static const int max_transactions= 3; // should really be 2 but there is a transaction to much allocated when loch table is used
 
-static const char *ha_ndb_ext=".ndb";
-static const char share_prefix[]= "./";
-
-static int ndbcluster_close_connection(THD *thd);
-static int ndbcluster_commit(THD *thd, bool all);
-static int ndbcluster_rollback(THD *thd, bool all);
-static handler* ndbcluster_create_handler(TABLE_SHARE *table);
+static bool ndbcluster_init(void);
+static int ndbcluster_end(ha_panic_function flag);
+static bool ndbcluster_show_status(THD*,stat_print_fn *,enum ha_stat_type);
 
 handlerton ndbcluster_hton = {
   "ndbcluster",
@@ -69,31 +62,7 @@
   "Clustered, fault-tolerant, memory-based tables", 
   DB_TYPE_NDBCLUSTER,
   ndbcluster_init,
-  0, /* slot */
-  0, /* savepoint size */
-  ndbcluster_close_connection,
-  NULL, /* savepoint_set */
-  NULL, /* savepoint_rollback */
-  NULL, /* savepoint_release */
-  ndbcluster_commit,
-  ndbcluster_rollback,
-  NULL, /* prepare */
-  NULL, /* recover */
-  NULL, /* commit_by_xid */
-  NULL, /* rollback_by_xid */
-  NULL, /* create_cursor_read_view */
-  NULL, /* set_cursor_read_view */
-  NULL, /* close_cursor_read_view */
-  ndbcluster_create_handler, /* Create a new handler */
-  ndbcluster_drop_database, /* Drop a database */
-  ndbcluster_end, /* Panic call */
-  NULL, /* Release temporary latches */
-  NULL, /* Update Statistics */
-  NULL, /* Start Consistent Snapshot */
-  NULL, /* Flush logs */
-  ndbcluster_show_status, /* Show status */
-  NULL, /* Replication Report Sent Binlog */
-  HTON_NO_FLAGS
+  ~(uint)0, /* slot */
 };
 
 static handler *ndbcluster_create_handler(TABLE_SHARE *table)
@@ -126,38 +95,24 @@
   break;                                 \
 }
 
-// Typedefs for long names
-typedef NdbDictionary::Object NDBOBJ;
-typedef NdbDictionary::Column NDBCOL;
-typedef NdbDictionary::Table NDBTAB;
-typedef NdbDictionary::Index  NDBINDEX;
-typedef NdbDictionary::Dictionary  NDBDICT;
-typedef NdbDictionary::Event NDBEVENT;
-
 static int ndbcluster_inited= 0;
-static int ndbcluster_util_inited= 0;
+int ndbcluster_util_inited= 0;
 
 static Ndb* g_ndb= NULL;
-static Ndb_cluster_connection* g_ndb_cluster_connection= NULL;
+Ndb_cluster_connection* g_ndb_cluster_connection= NULL;
+unsigned char g_node_id_map[max_ndb_nodes];
 
 // Handler synchronization
 pthread_mutex_t ndbcluster_mutex;
 
 // Table lock handling
-static HASH ndbcluster_open_tables;
+HASH ndbcluster_open_tables;
 
 static byte *ndbcluster_get_key(NDB_SHARE *share,uint *length,
                                 my_bool not_used __attribute__((unused)));
-static NDB_SHARE *get_share(const char *key,
-                            bool create_if_not_exists= TRUE,
-                            bool have_lock= FALSE);
 #ifdef HAVE_NDB_BINLOG
-/* you should have lock on ndbcluster_mutex when calling */
-static int handle_trailing_share(NDB_SHARE *share);
-static int rename_share(NDB_SHARE *share, const char *new_key);
+static int rename_share(NDB_SHARE *share, const char *new_key, bool have_lock);
 #endif
-static void free_share(NDB_SHARE **share, bool have_lock= FALSE);
-static void real_free_share(NDB_SHARE **share);
 static void ndb_set_fragmentation(NDBTAB &tab, TABLE *table, uint pk_len);
 
 static int packfrm(const void *data, uint len, const void **pack_data, uint *pack_len);
@@ -167,35 +122,9 @@
 static int ndb_get_table_statistics(Ndb*, const char *, 
                                     struct Ndb_statistics *);
 
-#ifndef DBUG_OFF
-void print_records(TABLE *table, const char *record)
-{
-  if (_db_on_)
-  {
-    for (uint j= 0; j < table->s->fields; j++)
-    {
-      char buf[40];
-      int pos= 0;
-      Field *field= table->field[j];
-      const byte* field_ptr= field->ptr - table->record[0] + record;
-      int pack_len= field->pack_length();
-      int n= pack_len < 10 ? pack_len : 10;
-      
-      for (int i= 0; i < n && pos < 20; i++)
-      {
-	pos+= sprintf(&buf[pos]," %x", (int) (unsigned char) field_ptr[i]);
-      }
-      buf[pos]= 0;
-      DBUG_PRINT("info",("[%u]field_ptr[0->%d]: %s", j, n, buf));
-    }
-  }
-}
-#else
-#define print_records(a,b)
-#endif
 
 // Util thread variables
-static pthread_t ndb_util_thread;
+pthread_t ndb_util_thread;
 pthread_mutex_t LOCK_ndb_util_thread;
 pthread_cond_t COND_ndb_util_thread;
 pthread_handler_t ndb_util_thread_func(void *arg);
@@ -207,75 +136,6 @@
 */
 static uint32 dummy_buf;
 
-#ifdef HAVE_NDB_BINLOG
-#define INJECTOR_EVENT_LEN 200
-/* NDB Injector thread (used for binlog creation) */
-ulong ndb_report_thresh_binlog_epoch_slip;
-ulong ndb_report_thresh_binlog_mem_usage;
-static ulonglong ndb_latest_applied_binlog_epoch= 0;
-static ulonglong ndb_latest_handled_binlog_epoch= 0;
-static ulonglong ndb_latest_received_binlog_epoch= 0;
-static pthread_t ndb_binlog_thread;
-static int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key,
-                                          const char *db,
-                                          const char *table_name,
-                                          bool do_binlog,
-                                          NDB_SHARE *share= 0);
-static int ndbcluster_create_event(Ndb *ndb, const NDBTAB *table,
-                                   const char *event_name, NDB_SHARE *share);
-static int ndbcluster_create_event_ops(NDB_SHARE *share,
-                                       const NDBTAB *ndbtab,
-                                       const char *event_name);
-static int ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name,
-                                        NDB_SHARE *share);
-static void ndb_rep_event_name(String *event_name,
-                               const char *db, const char *tbl);
-#ifndef DBUG_OFF
-static void dbug_print_table(const char *info, TABLE *table);
-#endif
-static int ndbcluster_binlog_start();
-pthread_handler_t ndb_binlog_thread_func(void *arg);
-
-/*
-  Mutex and condition used for interacting between client sql thread
-  and injector thread
-*/
-pthread_mutex_t injector_mutex;
-pthread_cond_t  injector_cond;
-/*
-  Flag showing if the ndb injector thread is running, if so == 1
-*/
-static int ndb_binlog_thread_running= 0;
-
-/*
-  table cluster_replication.apply_status
-*/
-static int ndbcluster_create_apply_status_table(THD *thd);
-static NDB_SHARE *ndbcluster_check_apply_status_share();
-static NDB_SHARE *ndbcluster_get_apply_status_share();
-static NDB_SHARE *apply_status_share= 0;
-
-/*
-  Global reference to the ndb injector thread THD oject
-
-  Has one sole purpose, for setting the in_use table member variable
-  in get_share(...)
-*/
-static THD *injector_thd= 0;
-
-/*
-  Global reference to ndb injector thd object.
-
-  Used mainly by the binlog index thread, but exposed to the client sql
-  thread for one reason; to setup the events operations for a table
-  to enable ndb injector thread receiving events.
-
-  Must therefore always be used with a surrounding
-  pthread_mutex_lock(&injector_mutex), when doing create/dropEventOperation
-*/
-static Ndb *injector_ndb= 0;
-#endif /* HAVE_NDB_BINLOG */
-
 /*
   Stats that can be retrieved from ndb
 */
@@ -293,7 +153,7 @@
 static const char * ndb_connected_host= 0;
 static long ndb_connected_port= 0;
 static long ndb_number_of_replicas= 0;
-static long ndb_number_of_storage_nodes= 0;
+long ndb_number_of_storage_nodes= 0;
 
 static int update_status_variables(Ndb_cluster_connection *c)
 {
@@ -314,9 +174,6 @@
   {NullS, NullS, SHOW_LONG}
 };
 
-/* instantiated in storage/ndb/src/ndbapi/Ndbif.cpp */
-extern Uint64 g_latest_trans_gci;
-
 /*
   Error handling functions
 */
@@ -444,6 +301,7 @@
   all= NULL;
   stmt= NULL;
   error= 0;
+  options= 0;
 }
 
 Thd_ndb::~Thd_ndb()
@@ -469,14 +327,6 @@
 }
 
 inline
-Thd_ndb *
-get_thd_ndb(THD *thd) { return (Thd_ndb *) thd->ha_data[ndbcluster_hton.slot]; }
-
-inline
-void
-set_thd_ndb(THD *thd, Thd_ndb *thd_ndb) { thd->ha_data[ndbcluster_hton.slot]= thd_ndb; }
-
-inline
 Ndb *ha_ndbcluster::get_ndb()
 {
   return get_thd_ndb(current_thd)->ndb;
@@ -2595,8 +2445,8 @@
     set to null.
 */
 
-static void ndb_unpack_record(TABLE *table, NdbValue *value,
-                              MY_BITMAP *defined, byte *buf)
+void ndb_unpack_record(TABLE *table, NdbValue *value,
+                       MY_BITMAP *defined, byte *buf)
 {
   Field **p_field= table->field, *field= *p_field;
   uint row_offset= (uint) (buf - table->record[0]);
@@ -3669,7 +3519,7 @@
   Commit a transaction started in NDB
  */
 
-int ndbcluster_commit(THD *thd, bool all)
+static int ndbcluster_commit(THD *thd, bool all)
 {
   int res= 0;
   Thd_ndb *thd_ndb= get_thd_ndb(thd);
@@ -3720,7 +3570,7 @@
   Rollback a transaction started in NDB
  */
 
-int ndbcluster_rollback(THD *thd, bool all)
+static int ndbcluster_rollback(THD *thd, bool all)
 {
   int res= 0;
   Thd_ndb *thd_ndb= get_thd_ndb(thd);
@@ -4069,7 +3919,8 @@
 #ifdef HAVE_NDB_BINLOG
     ndbcluster_create_binlog_setup(get_ndb(), name2, m_dbname, m_tabname,
                                    ndb_binlog_thread_running > 0 &&
-                                   !is_prefix(m_tabname, tmp_file_prefix));
+                                   !is_prefix(m_tabname, tmp_file_prefix),
+                                   0, TRUE);
 #endif /* HAVE_NDB_BINLOG */
     DBUG_RETURN(my_errno);
   }
@@ -4230,7 +4081,7 @@
       uint length= (uint) strlen(key);
       if ((share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
                                            (byte*) key, length)))
-        handle_trailing_share(share);
+        handle_trailing_share(share, TRUE);
     }
     /*
       get a new share
@@ -4273,8 +4124,12 @@
         sql_print_error("NDB Binlog: FAILED CREATE TABLE event operations."
                         " Event: %s", name2);
         /* a warning has been issued to the client */
-        break;
       }
+      ndbcluster_log_schema_op(current_thd, share,
+                               current_thd->query, current_thd->query_length,
+                               share->db, share->table_name,
+                               0, 0,
+                               SOT_CREATE_TABLE);
       break;
     }
   }
@@ -4379,7 +4234,7 @@
   if (ndb_binlog_thread_running > 0 &&
       (share= get_share(from, false)))
   {
-    int r= rename_share(share, to);
+    int r= rename_share(share, to, TRUE);
     DBUG_ASSERT(r == 0);
   }
 #endif
@@ -4393,7 +4248,7 @@
 #ifdef HAVE_NDB_BINLOG
     if (share)
     {
-      int r= rename_share(share, from);
+      int r= rename_share(share, from, TRUE);
       DBUG_ASSERT(r == 0);
       free_share(&share);
     }
@@ -4413,12 +4268,14 @@
   }
 
 #ifdef HAVE_NDB_BINLOG
+  int is_old_table_tmpfile= 1;
   if (share && share->op)
     dict->forceGCPWait();
 
   /* handle old table */
   if (!is_prefix(m_tabname, tmp_file_prefix))
   {
+    is_old_table_tmpfile= 0;
     String event_name(INJECTOR_EVENT_LEN);
     ndb_rep_event_name(&event_name, from + sizeof(share_prefix) - 1, 0);
     ndbcluster_handle_drop_table(ndb, event_name.c_ptr(), share);
@@ -4455,6 +4312,18 @@
                           "Creating event for logging table failed. "
                           "See error log for details.");
     }
+    if (is_old_table_tmpfile)
+      ndbcluster_log_schema_op(current_thd, share,
+                               current_thd->query, current_thd->query_length,
+                               share->db, share->table_name,
+                               0, 0,
+                               SOT_ALTER_TABLE);
+    else
+      ndbcluster_log_schema_op(current_thd, share,
+                               current_thd->query, current_thd->query_length,
+                               share->db, share->table_name,
+                               0, 0,
+                               SOT_RENAME_TABLE);
   }
   if (share)
     free_share(&share);
@@ -4553,7 +4422,16 @@
   */
   int table_dropped= dict->getNdbError().code != 709;
 
-  if (table_dropped && share && share->op)
+  if (!is_prefix(table_name, tmp_file_prefix) && share)
+  {
+    ndbcluster_log_schema_op(current_thd, share,
+                             current_thd->query, current_thd->query_length,
+                             share->db, share->table_name,
+                             0, 0,
+                             SOT_DROP_TABLE);
+  }
+  else if (table_dropped && share && share->op) /* ndbcluster_log_schema_op
+                                                   will do a force GCP */
     dict->forceGCPWait();
 
   if (!is_prefix(table_name, tmp_file_prefix))
@@ -4906,7 +4784,7 @@
 }
 
 
-int ndbcluster_close_connection(THD *thd)
+static int ndbcluster_close_connection(THD *thd)
 {
   Thd_ndb *thd_ndb= get_thd_ndb(thd);
   DBUG_ENTER("ndbcluster_close_connection");
@@ -5069,14 +4947,21 @@
   DBUG_RETURN(ret);      
 }
 
-void ndbcluster_drop_database(char *path)
+static void ndbcluster_drop_database(char *path)
 {
   ndbcluster_drop_database_impl(path);
+#ifdef HAVE_NDB_BINLOG
+  char db[FN_REFLEN];
+  ha_ndbcluster::set_dbname(path, db);
+  ndbcluster_log_schema_op(current_thd, 0,
+                           current_thd->query, current_thd->query_length,
+                           db, "", 0, 0, SOT_DROP_DB);
+#endif
 }
 /*
   find all tables in ndb and discover those needed
 */
-static int ndbcluster_find_all_files(THD *thd)
+int ndbcluster_find_all_files(THD *thd)
 {
   DBUG_ENTER("ndbcluster_find_all_files");
   Ndb* ndb;
@@ -5111,10 +4996,11 @@
 
       if (!(ndbtab= dict->getTable(elmt.name)))
       {
-        sql_print_error("NDB: failed to setup table %s.%s, error: %d, %s",
-                        elmt.database, elmt.name,
-                        dict->getNdbError().code,
-                        dict->getNdbError().message);
+        if (elmt.state == NDBOBJ::StateOnline)
+          sql_print_error("NDB: failed to setup table %s.%s, error: %d, %s",
+                          elmt.database, elmt.name,
+                          dict->getNdbError().code,
+                          dict->getNdbError().message);
         unhandled++;
         continue;
       }
@@ -5174,7 +5060,7 @@
                                          ndb_binlog_thread_running > 0 &&
                                          !is_prefix(elmt.name,
                                                     tmp_file_prefix),
-                                         share);
+                                         share, FALSE);
           pthread_mutex_unlock(&LOCK_open);
         }
         else
@@ -5314,7 +5200,7 @@
         pthread_mutex_lock(&LOCK_open);
         ndbcluster_create_binlog_setup(ndb, name, db, file_name,
                                        !is_prefix(file_name, tmp_file_prefix),
-                                       share);
+                                       share, FALSE);
         pthread_mutex_unlock(&LOCK_open);
         pthread_mutex_lock(&ndbcluster_mutex);
       }
@@ -5395,11 +5281,18 @@
 static int connect_callback()
 {
   update_status_variables(g_ndb_cluster_connection);
+
+  uint node_id, i= 0;
+  Ndb_cluster_connection_node_iter node_iter;
+  memset((void *)g_node_id_map, 0xFFFF, sizeof(g_node_id_map));
+  while ((node_id= g_ndb_cluster_connection->get_next_node(node_iter)))
+    g_node_id_map[node_id]= i++;
+
   pthread_cond_signal(&COND_ndb_util_thread);
   return 0;
 }
 
-bool ndbcluster_init()
+static bool ndbcluster_init()
 {
   int res;
   DBUG_ENTER("ndbcluster_init");
@@ -5407,6 +5300,21 @@
   if (have_ndbcluster != SHOW_OPTION_YES)
     goto ndbcluster_init_error;
 
+  {
+    handlerton &h= ndbcluster_hton;
+    h.close_connection= ndbcluster_close_connection;
+    h.commit=           ndbcluster_commit;
+    h.rollback=         ndbcluster_rollback;
+    h.create=           ndbcluster_create_handler; /* Create a new handler */
+    h.drop_database=    ndbcluster_drop_database;  /* Drop a database */
+    h.panic=            ndbcluster_end;            /* Panic call */
+    h.show_status=      ndbcluster_show_status;    /* Show status */
+#ifdef HAVE_NDB_BINLOG
+    ndbcluster_binlog_init_handlerton();
+#endif
+    h.flags=            HTON_NO_FLAGS;
+  }
+
   // Set connectstring if specified
   if (opt_ndbcluster_connectstring != 0)
     DBUG_PRINT("connectstring", ("%s", opt_ndbcluster_connectstring));     
@@ -5516,72 +5424,7 @@
   DBUG_RETURN(TRUE);
 }
 
-
-/*
-  End use of the NDB Cluster table handler
-  - free all global variables allocated by 
-    ndbcluster_init()
-*/
-
-int ndbcluster_binlog_end()
-{
-  DBUG_ENTER("ndb_binlog_end");
-
-  if (!ndbcluster_util_inited)
-    DBUG_RETURN(0);
-
-  // Kill ndb utility thread
-  (void) pthread_mutex_lock(&LOCK_ndb_util_thread);  
-  DBUG_PRINT("exit",("killing ndb util thread: %lx", ndb_util_thread));
-  (void) pthread_cond_signal(&COND_ndb_util_thread);
-  (void) pthread_mutex_unlock(&LOCK_ndb_util_thread);
-
-#ifdef HAVE_NDB_BINLOG
-  /* wait for injector thread to finish */
-  if (ndb_binlog_thread_running > 0)
-  {
-    pthread_mutex_lock(&injector_mutex);
-    while (ndb_binlog_thread_running > 0)
-    {
-      struct timespec abstime;
-      set_timespec(abstime, 1);
-      pthread_cond_timedwait(&injector_cond, &injector_mutex, &abstime);
-    }
-    pthread_mutex_unlock(&injector_mutex);
-  }
-
-  /* remove all shares */
-  {
-    pthread_mutex_lock(&ndbcluster_mutex);
-    for (uint i= 0; i < ndbcluster_open_tables.records; i++)
-    {
-      NDB_SHARE *share=
-        (NDB_SHARE*) hash_element(&ndbcluster_open_tables, i);
-      if (share->table)
-        DBUG_PRINT("share",
-                   ("table->s->db.table_name: %s.%s",
-                    share->table->s->db.str, share->table->s->table_name.str));
-      if (share->state != NSS_DROPPED && !--share->use_count)
-        real_free_share(&share);
-      else
-      {
-        DBUG_PRINT("share",
-                   ("[%d] 0x%lx  key: %s  key_length: %d",
-                    i, share, share->key, share->key_length));
-        DBUG_PRINT("share",
-                   ("db.tablename: %s.%s  use_count: %d  commit_count: %d",
-                    share->db, share->table_name,
-                    share->use_count, share->commit_count));
-      }
-    }
-    pthread_mutex_unlock(&ndbcluster_mutex);
-  }
-#endif
-  ndbcluster_util_inited= 0;
-  DBUG_RETURN(0);
-}
-
-int ndbcluster_end(ha_panic_function type)
+static int ndbcluster_end(ha_panic_function type)
 {
   DBUG_ENTER("ndbcluster_end");
 
@@ -6080,60 +5923,6 @@
 }
 
 
-#ifndef DBUG_OFF
-static void dbug_print_table(const char *info, TABLE *table)
-{
-  if (table == 0)
-  {
-    DBUG_PRINT("info",("%s: (null)", info));
-    return;
-  }
-  DBUG_PRINT("info",
-             ("%s: %s.%s s->fields: %d  "
-              "reclength: %d  rec_buff_length: %d  record[0]: %lx  "
-              "record[1]: %lx",
-              info,
-              table->s->db.str,
-              table->s->table_name.str,
-              table->s->fields,
-              table->s->reclength,
-              table->s->rec_buff_length,
-              table->record[0],
-              table->record[1]));
-
-  for (unsigned int i= 0; i < table->s->fields; i++) 
-  {
-    Field *f= table->field[i];
-    DBUG_PRINT("info",
-               ("[%d] \"%s\"(0x%lx:%s%s%s%s%s%s) type: %d  pack_length: %d  "
-                "ptr: 0x%lx[+%d]  null_bit: %u  null_ptr: 0x%lx[+%d]",
-                i,
-                f->field_name,
-                f->flags,
-                (f->flags & PRI_KEY_FLAG)  ? "pri"       : "attr",
-                (f->flags & NOT_NULL_FLAG) ? ""          : ",nullable",
-                (f->flags & UNSIGNED_FLAG) ? ",unsigned" : ",signed",
-                (f->flags & ZEROFILL_FLAG) ? ",zerofill" : "",
-                (f->flags & BLOB_FLAG)     ? ",blob"     : "",
-                (f->flags & BINARY_FLAG)   ? ",binary"   : "",
-                f->real_type(),
-                f->pack_length(),
-                f->ptr, f->ptr - table->record[0],
-                f->null_bit,
-                f->null_ptr, (byte*) f->null_ptr - table->record[0]));
-    if (f->type() == MYSQL_TYPE_BIT)
-    {
-      Field_bit *g= (Field_bit*) f;
-      DBUG_PRINT("MYSQL_TYPE_BIT",("field_length: %d  bit_ptr: 0x%lx[+%d] "
-                                   "bit_ofs: %u  bit_len: %u",
-                                   g->field_length, g->bit_ptr,
-                                   (byte*) g->bit_ptr-table->record[0],
-                                   g->bit_ofs, g->bit_len));
-    }
-  }
-}
-#endif
-
 /*
   Handling the shared NDB_SHARE structure that is needed to
   provide table locking.
@@ -6187,7 +5976,7 @@
   
   Must be called with previous pthread_mutex_lock(&ndbcluster_mutex)
 */
-static int handle_trailing_share(NDB_SHARE *share)
+int handle_trailing_share(NDB_SHARE *share, bool have_lock)
 {
   static ulong trailing_share_id= 0;
   DBUG_ENTER("handle_trailing_share");
@@ -6195,7 +5984,7 @@
   ++share->use_count;
   pthread_mutex_unlock(&ndbcluster_mutex);
 
-  close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0);
+  close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0, have_lock);
 
   pthread_mutex_lock(&ndbcluster_mutex);
   if (!--share->use_count)
@@ -6257,7 +6046,7 @@
 /*
   Rename share is used during rename table.
 */
-static int rename_share(NDB_SHARE *share, const char *new_key)
+static int rename_share(NDB_SHARE *share, const char *new_key, bool have_lock)
 {
   NDB_SHARE *tmp;
   pthread_mutex_lock(&ndbcluster_mutex);
@@ -6266,7 +6055,7 @@
                               share->key, share->key_length));
   if ((tmp= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
                                      (byte*) new_key, new_length)))
-    handle_trailing_share(tmp);
+    handle_trailing_share(tmp, have_lock);
 
   /* remove the share from hash */
   hash_delete(&ndbcluster_open_tables, (byte*) share);
@@ -6339,7 +6128,7 @@
   Increase refcount on existing share.
   Always returns share and cannot fail.
 */
-static NDB_SHARE *get_share(NDB_SHARE *share)
+NDB_SHARE *ndbcluster_get_share(NDB_SHARE *share)
 {
   pthread_mutex_lock(&ndbcluster_mutex);
   share->use_count++;
@@ -6371,8 +6160,8 @@
 
   have_lock == TRUE, pthread_mutex_lock(&ndbcluster_mutex) already taken
 */
-static NDB_SHARE *get_share(const char *key, bool create_if_not_exists,
-                            bool have_lock)
+NDB_SHARE *ndbcluster_get_share(const char *key, bool create_if_not_exists,
+                                bool have_lock)
 {
   DBUG_ENTER("get_share");
   DBUG_PRINT("info", ("get_share: key %s", key));
@@ -6423,70 +6212,7 @@
       share->table_name= share->db + strlen(share->db) + 1;
       ha_ndbcluster::set_tabname(key, share->table_name);
 #ifdef HAVE_NDB_BINLOG
-      share->op= 0;
-      share->table= 0;
-      while (ndb_binlog_thread_running > 0)
-      {
-        TABLE_SHARE *table_share= 
-          (TABLE_SHARE *) my_malloc(sizeof(*table_share), MYF(MY_WME));
-        TABLE *table= (TABLE*) my_malloc(sizeof(*table), MYF(MY_WME));
-        int error;
-
-        init_tmp_table_share(table_share, share->db, 0, share->table_name, 
-                             share->key);
-        if ((error= open_table_def(thd, table_share, 0)))
-        {
-          sql_print_error("Unable to get table share for %s, error=%d",
-                          share->key, error);
-          DBUG_PRINT("error", ("open_table_def failed %d", error));
-          my_free((gptr) table_share, MYF(0));
-          table_share= 0;
-          my_free((gptr) table, MYF(0));
-          table= 0;
-          break;
-        }
-        if ((error= open_table_from_share(thd, table_share, "", 0, 
-                                          (uint) READ_ALL, 0, table)))
-        {
-          sql_print_error("Unable to open table for %s, error=%d(%d)",
-                          share->key, error, my_errno);
-          DBUG_PRINT("error", ("open_table_from_share failed %d", error));
-          my_free((gptr) table_share, MYF(0));
-          table_share= 0;
-          my_free((gptr) table, MYF(0));
-          table= 0;
-          break;
-        }
-        assign_new_table_id(table);
-        if (!table->record[1] || table->record[1] == table->record[0])
-        {
-          table->record[1]= alloc_root(&table->mem_root,
-                                       table->s->rec_buff_length);
-        }
-        table->in_use= injector_thd;
-        
-        table->s->db.str= share->db;
-        table->s->db.length= strlen(share->db);
-        table->s->table_name.str= share->table_name;
-        table->s->table_name.length= strlen(share->table_name);
- 
-        share->table_share= table_share;
-        share->table= table;
-#ifndef DBUG_OFF
-        dbug_print_table("table", table);
-#endif
-        /*
-          ! do not touch the contents of the table
-          it may be in use by the injector thread
-	*/
-        share->ndb_value[0]= (NdbValue*)
-          alloc_root(*root_ptr, sizeof(NdbValue) * table->s->fields
-                     + 1 /*extra for hidden key*/);
-        share->ndb_value[1]= (NdbValue*)
-          alloc_root(*root_ptr, sizeof(NdbValue) * table->s->fields
-                     +1 /*extra for hidden key*/);
-        break;
-      }
+      ndbcluster_binlog_init_share(share);
 #endif
       *root_ptr= old_root;
     }
@@ -6515,7 +6241,7 @@
   return share;
 }
 
-static void real_free_share(NDB_SHARE **share)
+void ndbcluster_real_free_share(NDB_SHARE **share)
 {
   DBUG_PRINT("real_free_share",
              ("0x%lx key: %s  key_length: %d",
@@ -6562,7 +6288,7 @@
 
   have_lock == TRUE, pthread_mutex_lock(&ndbcluster_mutex) already taken
 */
-static void free_share(NDB_SHARE **share, bool have_lock)
+void ndbcluster_free_share(NDB_SHARE **share, bool have_lock)
 {
   if (!have_lock)
     pthread_mutex_lock(&ndbcluster_mutex);
@@ -6588,7 +6314,6 @@
 }
 
 
-
 /*
   Internal representation of the frm blob
    
@@ -7262,7 +6987,7 @@
     Wait for cluster to start
   */
   pthread_mutex_lock(&LOCK_ndb_util_thread);
-  while (!ndb_cluster_node_id)
+  while (!ndb_cluster_node_id && (ndbcluster_hton.slot != ~(uint)0))
   {
     /* ndb not connected yet */
     set_timespec(abstime, 1);
@@ -7277,14 +7002,25 @@
   }
   pthread_mutex_unlock(&LOCK_ndb_util_thread);
 
+  {
+    Thd_ndb *thd_ndb;
+    if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+    {
+      sql_print_error("Could not allocate Thd_ndb object");
+      goto ndb_util_thread_end;
+    }
+    set_thd_ndb(thd, thd_ndb);
+    thd_ndb->options|= TNO_NO_LOG_SCHEMA_OP;
+  }
+
+#ifdef HAVE_NDB_BINLOG
+  /* create tables needed by the replication */
+  ndbcluster_setup_binlog_table_shares(thd);
+#else
   /*
     Get all table definitions from the storage node
   */
   ndbcluster_find_all_files(thd);
-
-#ifdef HAVE_NDB_BINLOG
-  /* create tables needed by the replication */
-  ndbcluster_create_apply_status_table(thd);
 #endif
 
   ndbcluster_util_inited= 1;
@@ -7313,15 +7049,11 @@
 
 #ifdef HAVE_NDB_BINLOG
     /*
-      Check that the apply_status_share has been created.
+      Check that the apply_status_share and schema_share has been created.
       If not try to create it
     */
-    if (!apply_status_share &&
-        ndbcluster_check_apply_status_share() == 0)
-    {
-      ndbcluster_find_all_files(thd);
-      ndbcluster_create_apply_status_table(thd);
-    }
+    if (!apply_status_share || !schema_share)
+      ndbcluster_setup_binlog_table_shares(thd);
 #endif
 
     if (ndb_cache_check_time == 0)
@@ -7429,6 +7161,7 @@
     }
   }
 ndb_util_thread_end:
+  sql_print_information("Stopping Cluster Utility thread");
   net_end(&thd->net);
   thd->cleanup();
   delete thd;
@@ -8774,7 +8507,6 @@
                        enum ha_stat_type stat_type)
 {
   char buf[IO_SIZE];
-  ulonglong ndb_latest_epoch= 0;
   DBUG_ENTER("ndbcluster_show_status");
   
   if (have_ndbcluster != SHOW_OPTION_YES) 
@@ -8813,1449 +8545,11 @@
     }
   }
 #ifdef HAVE_NDB_BINLOG
-  pthread_mutex_lock(&injector_mutex);
-  if (injector_ndb)
-  {
-    ndb_latest_epoch= injector_ndb->getLatestGCI();
-    pthread_mutex_unlock(&injector_mutex);
-
-    snprintf(buf, sizeof(buf),
-             "latest_epoch=%llu, "
-             "latest_trans_epoch=%llu, "
-             "latest_received_binlog_epoch=%llu, "
-             "latest_handled_binlog_epoch=%llu, "
-             "latest_applied_binlog_epoch=%llu",
-             ndb_latest_epoch,
-             g_latest_trans_gci,
-             ndb_latest_received_binlog_epoch,
-             ndb_latest_handled_binlog_epoch,
-             ndb_latest_applied_binlog_epoch);
-    if (stat_print(thd, ndbcluster_hton.name, "binlog", buf))
-      DBUG_RETURN(TRUE);
-  }
-  else
-    pthread_mutex_unlock(&injector_mutex);
+  ndbcluster_show_status_binlog(thd, stat_print, stat_type);
 #endif
 
   DBUG_RETURN(FALSE);
 }
-
-#ifdef HAVE_NDB_BINLOG
-
-/*
-  Run a query through mysql_parse
-
-  Used to:
-  - purging the cluster_replication.binlog_index
-  - creating the cluster_replication.apply_status table
-*/
-static void run_query(THD *thd, char *buf, char *end, my_bool print_error)
-{
-  ulong save_query_length= thd->query_length;
-  char *save_query= thd->query;
-  ulong save_thread_id= thd->variables.pseudo_thread_id;
-  NET save_net= thd->net;
-
-  bzero((char*) &thd->net, sizeof(NET));
-  thd->query_length= end - buf;
-  thd->query= buf;
-  thd->variables.pseudo_thread_id= thread_id;
-  DBUG_PRINT("query", ("%s", thd->query));
-
-  mysql_parse(thd, thd->query, thd->query_length);
-
-  if (print_error && thd->query_error)
-  {
-    sql_print_error("NDB: %s: error %s %d %d %d",
-                    buf, thd->net.last_error, thd->net.last_errno,
-                    thd->net.report_error, thd->query_error);
-  }
-
-  thd->query_length= save_query_length;
-  thd->query= save_query;
-  thd->variables.pseudo_thread_id= save_thread_id;
-  thd->net= save_net;
-}
-
-
-/*********************************************************************
-  Internal helper functions for handeling of the cluster replication tables
-  - cluster_replication.binlog_index
-  - cluster_replication.apply_status
-*********************************************************************/
-
-/*
-  defines for cluster replication table names
-*/
-#define NDB_REP_DB      "cluster_replication"
-#define NDB_REP_TABLE   "binlog_index"
-#define NDB_APPLY_TABLE "apply_status"
-#define NDB_APPLY_TABLE_FILE "./" NDB_REP_DB "/" NDB_APPLY_TABLE
-
-/*
-  Global variables for holding the binlog_index table reference
-*/
-TABLE *binlog_index= 0;
-TABLE_LIST binlog_tables;
-
-/*
-  struct to hold the data to be inserted into the
-  cluster_replication.binlog_index table
-*/
-struct Binlog_index_row {
-  longlong gci;
-  const char *master_log_file;
-  longlong master_log_pos;
-  longlong n_inserts;
-  longlong n_updates;
-  longlong n_deletes;
-  longlong n_schemaops;
-};
-
-/*
-  Open the cluster_replication.binlog_index table
-*/
-static int open_binlog_index(THD *thd, TABLE_LIST *tables,
-                             TABLE **binlog_index)
-{
-  static char repdb[]= NDB_REP_DB;
-  static char reptable[]= NDB_REP_TABLE;
-  const char *save_proc_info= thd->proc_info;
-
-  bzero((char*) tables, sizeof(*tables));
-  tables->db= repdb;
-  tables->alias= tables->table_name= reptable;
-  tables->lock_type= TL_WRITE;
-  thd->proc_info= "Opening " NDB_REP_DB "." NDB_REP_TABLE;
-  tables->required_type= FRMTYPE_TABLE;
-  uint counter;
-  thd->clear_error();
-  if (open_tables(thd, &tables, &counter, MYSQL_LOCK_IGNORE_FLUSH))
-  {
-    sql_print_error("NDB Binlog: Opening binlog_index: %d, '%s'",
-                    thd->net.last_errno,
-                    thd->net.last_error ? thd->net.last_error : "");
-    thd->proc_info= save_proc_info;
-    return -1;
-  }
-  *binlog_index= tables->table;
-  thd->proc_info= save_proc_info;
-  return 0;
-}
-
-/*
-  Insert one row in the cluster_replication.binlog_index
-
-  declared friend in handler.h to be able to call write_row directly
-  so that this insert is not replicated
-*/
-int ndb_add_binlog_index(THD *thd, void *_row)
-{
-  Binlog_index_row &row= *(Binlog_index_row *) _row;
-  int error= 0;
-  bool need_reopen;
-  for ( ; ; ) /* loop for need_reopen */
-  {
-    if (!binlog_index && open_binlog_index(thd, &binlog_tables, &binlog_index))
-    {
-      error= -1;
-      goto add_binlog_index_err;
-    }
-
-    if (lock_tables(thd, &binlog_tables, 1, &need_reopen))
-    {
-      if (need_reopen)
-      {
-        close_tables_for_reopen(thd, &binlog_tables);
-	binlog_index= 0;
-        continue;
-      }
-      sql_print_error("NDB Binlog: Unable to lock table binlog_index");
-      error= -1;
-      goto add_binlog_index_err;
-    }
-    break;
-  }
-
-  binlog_index->field[0]->store(row.master_log_pos);
-  binlog_index->field[1]->store(row.master_log_file,
-                                strlen(row.master_log_file),
-                                &my_charset_bin);
-  binlog_index->field[2]->store(row.gci);
-  binlog_index->field[3]->store(row.n_inserts);
-  binlog_index->field[4]->store(row.n_updates);
-  binlog_index->field[5]->store(row.n_deletes);
-  binlog_index->field[6]->store(row.n_schemaops);
-
-  int r;
-  if ((r= binlog_index->file->write_row(binlog_index->record[0])))
-  {
-    sql_print_error("NDB Binlog: Writing row to binlog_index: %d", r);
-    error= -1;
-    goto add_binlog_index_err;
-  }
-
-  mysql_unlock_tables(thd, thd->lock);
-  thd->lock= 0;
-  return 0;
-add_binlog_index_err:
-  close_thread_tables(thd);
-  binlog_index= 0;
-  return error;
-}
-
-/*
-  check the availability af the cluster_replication.apply_status share
-  - return share, but do not increase refcount
-  - return 0 if there is no share
-*/
-static NDB_SHARE *ndbcluster_check_apply_status_share()
-{
-  pthread_mutex_lock(&ndbcluster_mutex);
-
-  void *share= hash_search(&ndbcluster_open_tables, 
-                           NDB_APPLY_TABLE_FILE,
-                           sizeof(NDB_APPLY_TABLE_FILE) - 1);
-  DBUG_PRINT("info",("ndbcluster_check_apply_status_share %s %p",
-                     NDB_APPLY_TABLE_FILE, share));
-  pthread_mutex_unlock(&ndbcluster_mutex);
-  return (NDB_SHARE*) share;
-}
-
-/*
-  Get the share for the cluster_replication.apply_status share
-
-  - return 0 if share does not exist
-*/
-static NDB_SHARE *ndbcluster_get_apply_status_share()
-{
-  return get_share(NDB_APPLY_TABLE_FILE, false);
-}
-
-/*
-  Create the cluster_replication.apply_status table
-*/
-static int ndbcluster_create_apply_status_table(THD *thd)
-{
-  DBUG_ENTER("ndbcluster_create_apply_status_table");
-
-  /*
-    Check if we already have the apply status table.
-    If so it should have been discovered at startup
-    and thus have a share
-  */
-
-  if (ndbcluster_check_apply_status_share())
-    DBUG_RETURN(0);
-
-  if (g_ndb_cluster_connection->get_no_ready() <= 0)
-    DBUG_RETURN(0);
-
-  char buf[1024], *end;
-
-  sql_print_information("NDB: Creating " NDB_REP_DB "." NDB_APPLY_TABLE);
-
-  /*
-    Check if apply status table exists in MySQL "dictionary"
-    if so, remove it since there is none in Ndb
-  */
-  {
-    strxnmov(buf, sizeof(buf),
-             mysql_data_home,
-             "/" NDB_REP_DB "/" NDB_APPLY_TABLE,
-             reg_ext, NullS);
-    unpack_filename(buf,buf);
-    my_delete(buf, MYF(0));
-  }
-
-  /*
-    Note, updating this table schema must be reflected in ndb_restore
-  */
-  end= strmov(buf, "CREATE TABLE IF NOT EXISTS "
-                   NDB_REP_DB "." NDB_APPLY_TABLE
-                   " ( server_id INT UNSIGNED NOT NULL,"
-                   " epoch BIGINT UNSIGNED NOT NULL, "
-                   " PRIMARY KEY USING HASH (server_id) ) ENGINE=NDB");
-
-  run_query(thd, buf, end, TRUE);
-
-  DBUG_RETURN(0);
-}
-
-
-/*********************************************************************
-  Functions for start, stop, wait for ndbcluster binlog thread
-*********************************************************************/
-
-static int do_ndbcluster_binlog_close_connection= 0;
-
-static int ndbcluster_binlog_start()
-{
-  DBUG_ENTER("ndbcluster_binlog_start");
-
-  pthread_mutex_init(&injector_mutex, MY_MUTEX_INIT_FAST);
-  pthread_cond_init(&injector_cond, NULL);
-
-  /* Create injector thread */
-  if (pthread_create(&ndb_binlog_thread, &connection_attrib,
-                     ndb_binlog_thread_func, 0))
-  {
-    DBUG_PRINT("error", ("Could not create ndb injector thread"));
-    pthread_cond_destroy(&injector_cond);
-    pthread_mutex_destroy(&injector_mutex);
-    DBUG_RETURN(-1);
-  }
-
-  /*
-    Wait for the ndb injector thread to finish starting up.
-  */
-  pthread_mutex_lock(&injector_mutex);
-  while (!ndb_binlog_thread_running)
-    pthread_cond_wait(&injector_cond, &injector_mutex);
-  pthread_mutex_unlock(&injector_mutex);
-  
-  if (ndb_binlog_thread_running < 0)
-    DBUG_RETURN(-1);
-
-  DBUG_RETURN(0);
-}
-
-static void ndbcluster_binlog_close_connection(THD *thd)
-{
-  DBUG_ENTER("ndbcluster_binlog_close_connection");
-  const char *save_info= thd->proc_info;
-  thd->proc_info= "ndbcluster_binlog_close_connection";
-  do_ndbcluster_binlog_close_connection= 1;
-  while (ndb_binlog_thread_running > 0)
-    sleep(1);
-  thd->proc_info= save_info;
-  DBUG_VOID_RETURN;
-}
-
-/*
-  called in mysql_show_binlog_events and reset_logs to make sure we wait for
-  all events originating from this mysql server to arrive in the binlog
-
-  Wait for the last epoch in which the last transaction is a part of.
-
-  Wait a maximum of 30 seconds.
-*/
-void ndbcluster_binlog_wait(THD *thd)
-{
-  if (ndb_binlog_thread_running > 0)
-  {
-    DBUG_ENTER("ndbcluster_binlog_wait");
-    const char *save_info= thd ? thd->proc_info : 0;
-    ulonglong wait_epoch= g_latest_trans_gci;
-    int count= 30;
-    if (thd)
-      thd->proc_info= "Waiting for ndbcluster binlog update to "
-	"reach current position";
-    while (count && ndb_binlog_thread_running > 0 &&
-           ndb_latest_handled_binlog_epoch < wait_epoch)
-    {
-      count--;
-      sleep(1);
-    }
-    if (thd)
-      thd->proc_info= save_info;
-    DBUG_VOID_RETURN;
-  }
-}
-
-/*****************************************************************
-  functions called from master sql client threads
-****************************************************************/
-
-/*
- Called from MYSQL_LOG::reset_logs in log.cc when binlog is emptied
-*/
-int ndbcluster_reset_logs(THD *thd)
-{
-  if (ndb_binlog_thread_running <= 0)
-    return 0;
-
-  DBUG_ENTER("ndbcluster_reset_logs");
-
-  /*
-    Wait for all events orifinating from this mysql server has
-    reached the binlog before continuing to reset
-  */
-  ndbcluster_binlog_wait(thd);
-
-  char buf[1024];
-  char *end= strmov(buf, "DELETE FROM " NDB_REP_DB "." NDB_REP_TABLE);
-
-  run_query(thd, buf, end, FALSE);
-
-  DBUG_RETURN(0);
-}
-
-/*
-  Called from MYSQL_LOG::purge_logs in log.cc when the binlog "file"
-  is removed
-*/
-
-int ndbcluster_binlog_index_purge_file(THD *thd, const char *file)
-{
-  if (ndb_binlog_thread_running <= 0)
-    return 0;
-
-  DBUG_ENTER("ndbcluster_binlog_index_purge_file");
-  DBUG_PRINT("enter", ("file: %s", file));
-
-  char buf[1024];
-  char *end= strmov(strmov(strmov(buf,
-                                  "DELETE FROM "
-                                  NDB_REP_DB "." NDB_REP_TABLE
-                                  " WHERE File='"), file), "'");
-
-  run_query(thd, buf, end, FALSE);
-
-  DBUG_RETURN(0);
-}
-
-/*****************************************************************
-  functions called from slave sql client threads
-****************************************************************/
-void ndbcluster_reset_slave(THD *thd)
-{
-  if (ndb_binlog_thread_running <= 0)
-    return;
-
-  DBUG_ENTER("ndbcluster_reset_slave");
-  char buf[1024];
-  char *end= strmov(buf, "DELETE FROM " NDB_REP_DB "." NDB_APPLY_TABLE);
-  run_query(thd, buf, end, FALSE);
-  DBUG_VOID_RETURN;
-}
-
-
-/**************************************************************
-  Internal helper functions for creating/dropping ndb events
-  used by the client sql threads
-**************************************************************/
-static void
-ndb_rep_event_name(String *event_name,const char *db, const char *tbl)
-{
-  event_name->set_ascii("REPL$", 5);
-  event_name->append(db);
-  if (tbl)
-  {
-    event_name->append('/');
-    event_name->append(tbl);
-  }
-}
-
-/*
-  Common function for setting up everything for logging a table at
-  create/discover.
-*/
-static int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key,
-                                          const char *db,
-                                          const char *table_name,
-                                          bool do_binlog,
-                                          NDB_SHARE *share)
-{
-  DBUG_ENTER("ndbcluster_create_binlog_setup");
-
-  pthread_mutex_lock(&ndbcluster_mutex);
-
-  /* Handle any trailing share */
-  if (share == 0)
-  {
-    share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
-                                    (byte*) key, strlen(key));
-    if (share)
-      handle_trailing_share(share);
-  }
-  else
-    handle_trailing_share(share);
-  
-  /* Create share which is needed to hold replication information */
-  if (!(share= get_share(key, true, true)))
-  {
-    sql_print_error("NDB Binlog: "
-                    "allocating table share for %s failed", key);
-  }
-  pthread_mutex_unlock(&ndbcluster_mutex);
-
-  while (share && do_binlog)
-  {
-    /*
-      ToDo make sanity check of share so that the table is actually the same
-      I.e. we need to do open file from frm in this case
-      Currently awaiting this to be fixed in the 4.1 tree in the general
-      case
-    */
-
-    /* Create the event in NDB */
-    ndb->setDatabaseName(db);
-
-    NDBDICT *dict= ndb->getDictionary();
-    const NDBTAB *ndbtab= dict->getTable(table_name);
-    if (ndbtab == 0)
-    {
-      sql_print_information("NDB Binlog: Failed to get table %s from ndb: "
-                            "%s, %d", key, dict->getNdbError().message,
-                            dict->getNdbError().code);
-      break; // error
-    }
-    String event_name(INJECTOR_EVENT_LEN);
-    ndb_rep_event_name(&event_name, db, table_name);
-    /*
-      event should have been created by someone else,
-      but let's make sure, and create if it doesn't exist
-    */
-    if (!dict->getEvent(event_name.c_ptr()))
-    {
-      if (ndbcluster_create_event(ndb, ndbtab, event_name.c_ptr(), share))
-      {
-        sql_print_error("NDB Binlog: "
-                        "FAILED CREATE (DISCOVER) TABLE Event: %s",
-                        event_name.c_ptr());
-        break; // error
-      }
-      sql_print_information("NDB Binlog: "
-                            "CREATE (DISCOVER) TABLE Event: %s",
-                            event_name.c_ptr());
-    }
-    else
-      sql_print_information("NDB Binlog: DISCOVER TABLE Event: %s",
-                            event_name.c_ptr());
-
-    /*
-      create the event operations for receiving logging events
-    */
-    if (ndbcluster_create_event_ops(share, ndbtab,
-                                    event_name.c_ptr()) < 0)
-    {
-      sql_print_error("NDB Binlog:"
-                      "FAILED CREATE (DISCOVER) EVENT OPERATIONS Event: %s",
-                      event_name.c_ptr());
-      /* a warning has been issued to the client */
-      DBUG_RETURN(0);
-    }
-    DBUG_RETURN(0);
-  }
-  DBUG_RETURN(-1);
-}
-
-static int
-ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
-                        const char *event_name, 
-                        NDB_SHARE *share)
-{
-  DBUG_ENTER("ndbcluster_create_event");
-  NDBDICT *dict= ndb->getDictionary();
-
-  if (!dict)
-  {
-    sql_print_error("NDB Binlog: could not setup binlog, "
-                    "Invalid NdbDictionary");
-    DBUG_RETURN(-1);
-  }
-
-  NDBEVENT my_event(event_name);
-  my_event.setTable(*ndbtab);
-  my_event.addTableEvent(NDBEVENT::TE_ALL);
-  if (share->table->s->primary_key == MAX_KEY)
-    /* No primary key, susbscribe for all attributes */
-    my_event.setReport(NDBEVENT::ER_ALL);
-  else
-    my_event.setReport(NDBEVENT::ER_UPDATED);
-  /* add all columns to the event */
-  int n_cols= ndbtab->getNoOfColumns();
-  for(int a= 0; a < n_cols; a++)
-    my_event.addEventColumn(a);
-
-  if (dict->createEvent(my_event)) // Add event to database
-  {
-#ifdef NDB_BINLOG_EXTRA_WARNINGS
-    /*
-      failed, print a warning
-    */
-    push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
-                        ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
-                        dict->getNdbError().code,
-                        dict->getNdbError().message, "NDB");
-#endif
-    if (dict->getNdbError().classification != NdbError::SchemaObjectExists)
-    {
-      sql_print_error("NDB Binlog: Unable to create event in database. "
-                      "Event: %s  Error Code: %d  Message: %s", event_name,
-                      dict->getNdbError().code, dict->getNdbError().message);
-      DBUG_RETURN(-1);
-    }
-
-    /*
-      trailing event from before; an error, but try to correct it
-    */
-    if (dict->dropEvent(my_event.getName()))
-    {
-      sql_print_error("NDB Binlog: Unable to create event in database. "
-                      " Attempt to correct with drop failed. "
-                      "Event: %s Error Code: %d Message: %s",
-                      event_name,
-                      dict->getNdbError().code,
-                      dict->getNdbError().message);
-      DBUG_RETURN(-1);
-    }
-
-    /*
-      try to add the event again
-    */
-    if (dict->createEvent(my_event))
-    {
-      sql_print_error("NDB Binlog: Unable to create event in database. "
-                      " Attempt to correct with drop ok, but create failed. "
-                      "Event: %s Error Code: %d Message: %s",
-                      event_name,
-                      dict->getNdbError().code,
-                      dict->getNdbError().message);
-      DBUG_RETURN(-1);
-    }
-#ifdef NDB_BINLOG_EXTRA_WARNINGS
-    push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
-                        ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
-                        0, "NDB Binlog: Removed trailing event",
-                        "NDB");
-#endif
-  }
-
-  DBUG_RETURN(0);
-}
-
-inline int is_ndb_compatible_type(Field *field)
-{
-  return
-    !(field->flags & BLOB_FLAG) &&
-    field->type() != MYSQL_TYPE_BIT &&
-    field->pack_length() != 0;
-}
-
-/*
-  - create eventOperations for receiving log events
-  - setup ndb recattrs for reception of log event data
-  - "start" the event operation
-
-  used at create/discover of tables
-*/
-static int
-ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
-                            const char *event_name)
-{
-  /*
-    we are in either create table or rename table so table should be
-    locked, hence we can work with the share without locks
-  */
-
-  DBUG_ENTER("ndbcluster_create_event_ops");
-
-  DBUG_ASSERT(share != 0);
-
-  if (share->op)
-  {
-    assert(share->op->getCustomData() == (void *) share);
-
-    DBUG_ASSERT(share->use_count > 1);
-    sql_print_error("NDB Binlog: discover reusing old ev op");
-    free_share(&share); // old event op already has reference
-    DBUG_RETURN(0);
-  }
-
-  TABLE *table= share->table;
-  if (table)
-  {
-    /*
-      Logging of blob tables is not yet implemented, it would require:
-      1. setup of events also on the blob attribute tables
-      2. collect the pieces of the blob into one from an epoch to
-         provide a full blob to binlog
-    */
-    if (table->s->blob_fields)
-    {
-      sql_print_error("NDB Binlog: logging of blob table %s "
-                      "is not supported", share->key);
-      DBUG_RETURN(0);
-    }
-  }
-
-  pthread_mutex_lock(&injector_mutex);
-  if (injector_ndb == 0)
-  {
-    pthread_mutex_unlock(&injector_mutex);
-    DBUG_RETURN(-1);
-  }
-
-  NdbEventOperation *op= injector_ndb->createEventOperation(event_name);
-  if (!op)
-  {
-    pthread_mutex_unlock(&injector_mutex);
-    sql_print_error("NDB Binlog: Creating NdbEventOperation failed for"
-                    " %s",event_name);
-    push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
-                        ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
-                        injector_ndb->getNdbError().code,
-                        injector_ndb->getNdbError().message,
-                        "NDB");
-    DBUG_RETURN(-1);
-  }
-
-  int n_columns= ndbtab->getNoOfColumns();
-  int n_fields= table ? table->s->fields : 0;
-  for (int j= 0; j < n_columns; j++)
-  {
-    const char *col_name= ndbtab->getColumn(j)->getName();
-    NdbRecAttr *attr0, *attr1;
-    if (j < n_fields)
-    {
-      Field *f= share->table->field[j];
-      if (is_ndb_compatible_type(f))
-      {
-        DBUG_PRINT("info", ("%s compatible", col_name));
-        attr0= op->getValue(col_name, f->ptr);
-        attr1= op->getPreValue(col_name, (f->ptr-share->table->record[0]) +
-                               share->table->record[1]);
-      }
-      else
-      {
-        DBUG_PRINT("info", ("%s non compatible", col_name));
-        attr0= op->getValue(col_name);
-        attr1= op->getPreValue(col_name);
-      }
-    }
-    else
-    {
-      DBUG_PRINT("info", ("%s hidden key", col_name));
-      attr0= op->getValue(col_name);
-      attr1= op->getPreValue(col_name);
-    }
-    share->ndb_value[0][j].rec= attr0;
-    share->ndb_value[1][j].rec= attr1;
-  }
-  op->setCustomData((void *) share); // set before execute
-  share->op= op; // assign op in NDB_SHARE
-  if (op->execute())
-  {
-    share->op= NULL;
-    push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
-                        ER_GET_ERRMSG, ER(ER_GET_ERRMSG), 
-                        op->getNdbError().code, op->getNdbError().message,
-                        "NDB");
-    sql_print_error("NDB Binlog: ndbevent->execute failed for %s; %d %s",
-                    event_name,
-                    op->getNdbError().code, op->getNdbError().message);
-    injector_ndb->dropEventOperation(op);
-    pthread_mutex_unlock(&injector_mutex);
-    DBUG_RETURN(-1);
-  }
-  pthread_mutex_unlock(&injector_mutex);
-
-  get_share(share);
-
-  DBUG_PRINT("info",("%s share->op: 0x%lx, share->use_count: %u",
-                     share->key, share->op, share->use_count));
-
-  sql_print_information("NDB Binlog: logging %s", share->key);
-  DBUG_RETURN(0);
-}
-
-/*
-  when entering the calling thread should have a share lock id share != 0
-  then the injector thread will have  one as well, i.e. share->use_count == 0
-  (unless it has already dropped... then share->op == 0)
-*/
-static int
-ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name,
-                             NDB_SHARE *share)
-{
-  DBUG_ENTER("ndbcluster_handle_drop_table");
-
-  NDBDICT *dict= ndb->getDictionary();
-  if (event_name && dict->dropEvent(event_name))
-  {
-    /* drop event failed for some reason, issue a warning */
-    push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
-                        ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
-                        dict->getNdbError().code,
-                        dict->getNdbError().message, "NDB");
-    if (dict->getNdbError().code != 4710)
-    {
-      /* error is not that the event did not exist */
-      sql_print_error("NDB Binlog: Unable to drop event in database. "
-                      "Event: %s Error Code: %d Message: %s",
-                      event_name,
-                      dict->getNdbError().code,
-                      dict->getNdbError().message);
-      /* ToDo; handle error? */
-      if (share && share->op &&
-          share->op->getState() == NdbEventOperation::EO_EXECUTING &&
-          dict->getNdbError().code != 4009)
-      {  
-        DBUG_ASSERT(false);
-        DBUG_RETURN(-1);
-      }
-    }
-  }
-
-  if (share == 0 || share->op == 0)
-  {
-    DBUG_RETURN(0);
-  }
-
-/*
-  Syncronized drop between client thread and injector thread is
-  neccessary in order to maintain ordering in the binlog,
-  such that the drop occurs _after_ any inserts/updates/deletes.
-
-  The penalty for this is that the drop table becomes slow.
-
-  This wait is however not strictly neccessary to produce a binlog
-  that is usable.  However the slave does not currently handle
-  these out of order, thus we are keeping the SYNC_DROP_ defined
-  for now.
-*/
-#define SYNC_DROP_
-#ifdef SYNC_DROP_
-  (void) pthread_mutex_lock(&share->mutex);
-  int max_timeout= 10;
-  while (share->op)
-  {
-    struct timespec abstime;
-    set_timespec(abstime, 1);
-    (void) pthread_cond_timedwait(&injector_cond,
-                                  &share->mutex,
-                                  &abstime);
-    max_timeout--;
-    if (share->op == 0)
-      break;
-    if (max_timeout == 0)
-    {
-      sql_print_error("NDB delete table: timed out. Ignoring...");
-      break;
-    }
-    sql_print_information("NDB delete table: "
-                          "waiting max %u sec for drop table %s.",
-                          max_timeout, share->key);
-  }
-  (void) pthread_mutex_unlock(&share->mutex);
-#else
-  (void) pthread_mutex_lock(&share->mutex);
-  share->op_old= share->op;
-  share->op= 0;
-  (void) pthread_mutex_unlock(&share->mutex);
-#endif
-
-  DBUG_RETURN(0);
-}
-
-
-/********************************************************************
-  Internal helper functions for differentd events from the stoarage nodes
-  used by the ndb injector thread
-********************************************************************/
-
-/*
-  Handle error states on events from the storage nodes
-*/
-static int ndb_binlog_thread_handle_error(Ndb *ndb, NdbEventOperation *pOp,
-                                          Binlog_index_row &row)
-{
-  NDB_SHARE *share= (NDB_SHARE *)pOp->getCustomData();
-  DBUG_ENTER("ndb_binlog_thread_handle_error");
-
-  int overrun= pOp->isOverrun();
-  if (overrun)
-  {
-    /*
-      ToDo: this error should rather clear the binlog_index...
-      and continue
-    */
-    sql_print_error("NDB Binlog: Overrun in event buffer, "
-                    "this means we have dropped events. Cannot "
-                    "continue binlog for %s", share->key);
-    pOp->clearError();
-    DBUG_RETURN(-1);
-  }
-
-  if (!pOp->isConsistent())
-  {
-    /*
-      ToDo: this error should rather clear the binlog_index...
-      and continue
-    */
-    sql_print_error("NDB Binlog: Not Consistent. Cannot "
-                    "continue binlog for %s. Error code: %d"
-                    " Message: %s", share->key,
-                    pOp->getNdbError().code,
-                    pOp->getNdbError().message);
-    pOp->clearError();
-    DBUG_RETURN(-1);
-  }
-  sql_print_error("NDB Binlog: unhandled error %d for table %s",
-                  pOp->hasError(), share->key);
-  pOp->clearError();
-  DBUG_RETURN(0);
-}
-
-/*
-  Handle _non_ data events from the storage nodes
-*/
-static int
-ndb_binlog_thread_handle_non_data_event(Ndb *ndb, NdbEventOperation *pOp,
-                                        Binlog_index_row &row)
-{
-  NDB_SHARE *share= (NDB_SHARE *)pOp->getCustomData();
-  NDBEVENT::TableEvent type= pOp->getEventType();
-  int remote_drop_table= 0, do_close_cached_tables= 0;
-
-  /* make sure to flush any pending events as they can be dependent
-     on one of the tables being changed below
-  */
-  injector_thd->binlog_flush_pending_rows_event(true);
-
-  switch (type)
-  {
-  case NDBEVENT::TE_CLUSTER_FAILURE:
-    sql_print_information("NDB Binlog: cluster failure for %s.", share->key);
-
-    DBUG_PRINT("info", ("CLUSTER FAILURE EVENT: "
-                        "%s  received share: 0x%lx  op: %lx  share op: %lx  "
-                        "op_old: %lx",
-                       share->key, share, pOp, share->op, share->op_old));
-    if (apply_status_share)
-    {
-      free_share(&apply_status_share);
-      apply_status_share= 0;
-    }
-    break;
-  case NDBEVENT::TE_ALTER:
-    /* ToDo: remove printout */
-    sql_print_information("NDB Binlog: rename table %s%s/%s -> %s.",
-                          share_prefix, share->table->s->db.str,
-                          share->table->s->table_name.str,
-                          share->key);
-    /* do the rename of the table in the share */
-    share->table->s->db.str= share->db;
-    share->table->s->db.length= strlen(share->db);
-    share->table->s->table_name.str= share->table_name;
-    share->table->s->table_name.length= strlen(share->table_name);
-    goto drop_alter_common;
-  case NDBEVENT::TE_DROP:
-    /* ToDo: remove printout */
-    sql_print_information("NDB Binlog: drop table %s.",
-                          share->key);
-drop_alter_common:
-    row.n_schemaops++;
-    DBUG_PRINT("info", ("TABLE %s EVENT: %s  received share: 0x%lx  op: %lx  "
-                        "share op: %lx  op_old: %lx",
-                       type == NDBEVENT::TE_DROP ? "DROP" : "ALTER",
-                       share->key, share, pOp, share->op, share->op_old));
-    if (pOp->getReqNodeId() != ndb_cluster_node_id)
-    {
-      ndb->setDatabaseName(share->table->s->db.str);
-      ha_ndbcluster::invalidate_dictionary_cache(share->table,
-                                                 ndb,
-                                                 share->table->s->table_name.str,
-                                                 TRUE);
-      remote_drop_table= 1;
-    }
-    break;
-  default:
-    sql_print_error("NDB Binlog: unknown non data event %d for %s. "
-                    "Ignoring...", (unsigned) type, share->key);
-    return 0;
-  }
-
-  (void) pthread_mutex_lock(&share->mutex);
-  DBUG_ASSERT(share->op == pOp || share->op_old == pOp);
-  if (share->op_old == pOp)
-    share->op_old= 0;
-  else
-    share->op= 0;
-  // either just us or drop table handling as well
-      
-  /* Signal ha_ndbcluster::delete/rename_table that drop is done */
-  (void) pthread_mutex_unlock(&share->mutex);
-  (void) pthread_cond_signal(&injector_cond);
-
-  pthread_mutex_lock(&ndbcluster_mutex);
-  free_share(&share, TRUE);
-  if (remote_drop_table && share && share->state != NSS_DROPPED)
-  {
-    DBUG_PRINT("info", ("remote drop table"));
-    if (share->use_count != 1)
-      do_close_cached_tables= 1;
-    share->state= NSS_DROPPED;
-    free_share(&share, TRUE);
-  }
-  pthread_mutex_unlock(&ndbcluster_mutex);
-
-  share= 0;
-  pOp->setCustomData(0);
-          
-  pthread_mutex_lock(&injector_mutex);
-  injector_ndb->dropEventOperation(pOp);
-  pOp= 0;
-  pthread_mutex_unlock(&injector_mutex);
-
-  if (do_close_cached_tables)
-    close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0);
-
-  return 0;
-}
-
-/*
-  Handle data events from the storage nodes
-*/
-static int
-ndb_binlog_thread_handle_data_event(Ndb *ndb, NdbEventOperation *pOp,
-                                    Binlog_index_row &row,
-                                    injector::transaction &trans)
-{
-  NDB_SHARE *share= (NDB_SHARE*) pOp->getCustomData();
-  TABLE *table= share->table;
-  
-  assert(table != 0);
-#ifndef DBUG_OFF
-  dbug_print_table("table", table);
-#endif
-  TABLE_SHARE *table_s= table->s;
-  uint n_fields= table_s->fields;
-  MY_BITMAP b;
-  /* Potential buffer for the bitmap */
-  uint32 bitbuf[128 / (sizeof(uint32) * 8)];
-  bitmap_init(&b, n_fields <= sizeof(bitbuf) * 8 ? bitbuf : NULL, 
-              n_fields, false);
-  bitmap_set_all(&b);
-
-  /*
-   Row data is already in table->record[0], since we told the
-   NdbEventOperation to place it there
-   (saves moving the data around many times)
-  */
-
-  switch(pOp->getEventType())
-  {
-  case NDBEVENT::TE_INSERT:
-    row.n_inserts++;
-    DBUG_PRINT("info", ("INSERT INTO %s", share->key));
-    {
-      ndb_unpack_record(table, share->ndb_value[0], &b, table->record[0]);
-      trans.write_row(::server_id, injector::transaction::table(table, true),
-                      &b, n_fields, table->record[0]);
-    }
-    break;
-  case NDBEVENT::TE_DELETE:
-    row.n_deletes++;
-    DBUG_PRINT("info",("DELETE FROM %s", share->key));
-    {
-      /*
-        table->record[0] contains only the primary key in this case
-        since we do not have an after image
-      */
-      int n;
-      if (table->s->primary_key != MAX_KEY)
-        n= 0; /*
-                use the primary key only, as it saves time and space and
-                it is the only thing needed to log the delete
-              */
-      else
-        n= 1; /*
-                use the before values, since there is no usable primary key;
-                the mysql server does not handle the hidden primary
-                key
-              */
-
-      ndb_unpack_record(table, share->ndb_value[n], &b, table->record[n]);
-      print_records(table, table->record[n]);
-      trans.delete_row(::server_id, injector::transaction::table(table, true),
-                       &b, n_fields, table->record[n]);
-    }
-    break;
-  case NDBEVENT::TE_UPDATE:
-    row.n_updates++;
-    DBUG_PRINT("info", ("UPDATE %s", share->key));
-    {
-      ndb_unpack_record(table, share->ndb_value[0],
-                        &b, table->record[0]);
-      print_records(table, table->record[0]);
-      if (table->s->primary_key != MAX_KEY) 
-      {
-        /*
-          since the table has a primary key, we can do a write
-          using only the after values
-        */
-        trans.write_row(::server_id, injector::transaction::table(table, true),
-                        &b, n_fields, table->record[0]);// after values
-      }
-      else
-      {
-        /*
-          mysql server cannot handle the ndb hidden key and
-          therefore needs the before image as well
-	*/
-        ndb_unpack_record(table, share->ndb_value[1], &b, table->record[1]);
-        print_records(table, table->record[1]);
-        trans.update_row(::server_id,
-                         injector::transaction::table(table, true),
-                         &b, n_fields,
-                         table->record[1], // before values
-                         table->record[0]);// after values
-      }
-    }
-    break;
-  default:
-    /* We should REALLY never get here. */
-    DBUG_PRINT("info", ("default - uh oh, a brain exploded."));
-    break;
-  }
-
-  return 0;
-}
-
-/*
-  Timer class for doing performance measurements
-*/
-//#define RUN_NDB_BINLOG_TIMER
-#ifdef RUN_NDB_BINLOG_TIMER
-class Timer
-{
-public:
-  Timer() { start(); }
-  void start() { gettimeofday(&m_start, 0); }
-  void stop() { gettimeofday(&m_stop, 0); }
-  ulong elapsed_ms()
-  {
-    return (ulong)
-      (((longlong) m_stop.tv_sec - (longlong) m_start.tv_sec) * 1000 +
-       ((longlong) m_stop.tv_usec -
-        (longlong) m_start.tv_usec + 999) / 1000);
-  }
-private:
-  struct timeval m_start,m_stop;
-};
-#endif
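A minimal usage sketch for the Timer above, assuming it is compiled with RUN_NDB_BINLOG_TIMER defined; process_one_epoch() is a hypothetical placeholder for the work being measured.

#ifdef RUN_NDB_BINLOG_TIMER
  Timer epoch_timer;                  // constructor calls start()
  process_one_epoch();                // hypothetical: the timed section
  epoch_timer.stop();
  sql_print_information("epoch took %lu ms", epoch_timer.elapsed_ms());
#endif

This mirrors how main_timer, gci_timer and write_timer are used in the injector loop below.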
-
-
-/****************************************************************
-  Injector thread main loop
-****************************************************************/
-
-pthread_handler_t ndb_binlog_thread_func(void *arg)
-{
-  THD *thd; /* needs to be first for thread_stack */
-  Ndb *ndb= 0;
-  int ndb_update_binlog_index= 1;
-  injector *inj= injector::instance();
-
-  pthread_mutex_lock(&injector_mutex);
-  /*
-    Set up the Thread
-  */
-  my_thread_init();
-  DBUG_ENTER("ndb_binlog_thread");
-
-  thd= new THD; /* note that constructor of THD uses DBUG_ */
-  THD_CHECK_SENTRY(thd);
-
-  thd->thread_stack= (char*) &thd; /* remember where our stack is */
-  if (thd->store_globals())
-  {
-    thd->cleanup();
-    delete thd;
-    ndb_binlog_thread_running= -1;
-    pthread_mutex_unlock(&injector_mutex);
-    pthread_cond_signal(&injector_cond);
-    my_thread_end();
-    pthread_exit(0);
-    DBUG_RETURN(NULL);
-  }
-
-  thd->init_for_queries();
-  thd->command= COM_DAEMON;
-  injector_thd= thd;
-
-  /*
-    Set up ndb binlog
-  */
-  sql_print_information("Starting Cluster Binlog");
-
-  pthread_detach_this_thread();
-  thd->real_id= pthread_self();
-  pthread_mutex_lock(&LOCK_thread_count);
-  thd->thread_id= thread_id++;
-  threads.append(thd);
-  pthread_mutex_unlock(&LOCK_thread_count);
-  thd->lex->start_transaction_opt= 0;
-
-  if (!(ndb= new Ndb(g_ndb_cluster_connection, "")) ||
-      ndb->init())
-  {
-    sql_print_error("NDB Binlog: Getting Ndb object failed");
-    ndb_binlog_thread_running= -1;
-    pthread_mutex_unlock(&injector_mutex);
-    pthread_cond_signal(&injector_cond);
-    goto err;
-  }
-
-  /*
-    Expose global reference to our ndb object.
-
-    Used by both the sql client thread and the binlog thread to interact
-    with the storage nodes. Access is protected by injector_mutex,
-    which is already held here.
-  */
-  injector_ndb= ndb;
-  ndb_binlog_thread_running= 1;
-
-  /*
-    We signal the thread that started us that we've finished
-    starting up.
-  */
-  pthread_mutex_unlock(&injector_mutex);
-  pthread_cond_signal(&injector_cond);
-
-  thd->system_thread= SYSTEM_THREAD_NDBCLUSTER_BINLOG;
-  thd->version= refresh_version;
-  thd->set_time();
-  thd->main_security_ctx.host_or_ip= "";
-  thd->client_capabilities= 0;
-  my_net_init(&thd->net, 0);
-  thd->main_security_ctx.master_access= ~0;
-  thd->main_security_ctx.priv_user= 0;
-
-  thd->proc_info= "Waiting for ndbcluster to start";
-
-  pthread_mutex_lock(&injector_mutex);
-  while (!ndbcluster_util_inited)
-  {
-    /* ndb not connected yet */
-    struct timespec abstime;
-    set_timespec(abstime, 1);
-    pthread_cond_timedwait(&injector_cond, &injector_mutex, &abstime);
-    if (abort_loop)
-    {
-      pthread_mutex_unlock(&injector_mutex);
-      goto err;
-    }
-  }
-  pthread_mutex_unlock(&injector_mutex);
-
-  /*
-    Main NDB Injector loop
-  */
-
-  thd->query_id= 0; // to keep valgrind quiet
-  {
-    static char db[]= "";
-    thd->db= db;
-    open_binlog_index(thd, &binlog_tables, &binlog_index);
-    if (!(apply_status_share= ndbcluster_get_apply_status_share()))
-    {
-      sql_print_error("NDB: Could not get apply status share");
-    }
-    thd->db= db;
-  }
-
-#ifdef RUN_NDB_BINLOG_TIMER
-  Timer main_timer;
-#endif
-  for ( ; !((abort_loop || do_ndbcluster_binlog_close_connection) &&
-            ndb_latest_handled_binlog_epoch >= g_latest_trans_gci); )
-  {
-
-#ifdef RUN_NDB_BINLOG_TIMER
-    main_timer.stop();
-    sql_print_information("main_timer %ld ms",  main_timer.elapsed_ms());
-    main_timer.start();
-#endif
-
-    /*
-      now we don't want any events before next gci is complete
-    */
-    thd->proc_info= "Waiting for event from ndbcluster";
-    thd->set_time();
-    
-    /* wait for event or 1000 ms */
-    Uint64 gci;
-    int res= ndb->pollEvents(1000, &gci);
-    ndb_latest_received_binlog_epoch= gci;
-
-    if ((abort_loop || do_ndbcluster_binlog_close_connection) &&
-        ndb_latest_handled_binlog_epoch >= g_latest_trans_gci)
-      break; /* Shutting down server */
-
-    if (binlog_index && binlog_index->s->version < refresh_version)
-    {
-      close_thread_tables(thd);
-      binlog_index= 0;
-    }
-
-    if (res > 0)
-    {
-      DBUG_PRINT("info", ("pollEvents res: %d", res));
-#ifdef RUN_NDB_BINLOG_TIMER
-      Timer gci_timer, write_timer;
-      int event_count= 0;
-#endif
-      thd->proc_info= "Processing events";
-      NdbEventOperation *pOp= ndb->nextEvent();
-      Binlog_index_row row;
-      while (pOp != NULL)
-      {
-        ndb->setReportThreshEventGCISlip(ndb_report_thresh_binlog_epoch_slip);
-        ndb->setReportThreshEventFreeMem(ndb_report_thresh_binlog_mem_usage);
-
-        assert(pOp->getGCI() <= ndb_latest_received_binlog_epoch);
-        if (!apply_status_share)
-        {
-          if (!(apply_status_share= ndbcluster_get_apply_status_share()))
-            sql_print_error("NDB: Could not get apply status share");
-        }
-        bzero((char*) &row, sizeof(row));
-        injector::transaction trans= inj->new_trans(thd);
-        gci= pOp->getGCI();
-        if (apply_status_share)
-        {
-          TABLE *table= apply_status_share->table;
-          MY_BITMAP b;
-          uint32 bitbuf;
-          DBUG_ASSERT(table->s->fields <= sizeof(bitbuf) * 8);
-          bitmap_init(&b, &bitbuf, table->s->fields, false);
-          bitmap_set_all(&b);
-          table->field[0]->store((longlong)::server_id);
-          table->field[1]->store((longlong)gci);
-          trans.write_row(::server_id,
-                          injector::transaction::table(table, true),
-                          &b, table->s->fields,
-                          table->record[0]);
-        }
-#ifdef RUN_NDB_BINLOG_TIMER
-        write_timer.start();
-#endif
-        do
-        {
-#ifdef RUN_NDB_BINLOG_TIMER
-          event_count++;
-#endif
-          if (pOp->hasError() &&
-              ndb_binlog_thread_handle_error(ndb, pOp, row) < 0)
-            goto err;
-
-#ifndef DBUG_OFF
-          {
-            NDB_SHARE *share= (NDB_SHARE*) pOp->getCustomData();
-            DBUG_PRINT("info",
-                       ("EVENT TYPE:%d  GCI:%lld  last applied: %lld  "
-                        "share: 0x%lx", pOp->getEventType(), gci,
-                        ndb_latest_applied_binlog_epoch, share));
-            DBUG_ASSERT(share != 0);
-          }
-#endif
-          if ((unsigned) pOp->getEventType() <
-              (unsigned) NDBEVENT::TE_FIRST_NON_DATA_EVENT)
-            ndb_binlog_thread_handle_data_event(ndb, pOp, row, trans);
-          else
-            ndb_binlog_thread_handle_non_data_event(ndb, pOp, row);
-
-          pOp= ndb->nextEvent();
-        } while (pOp && pOp->getGCI() == gci);
-
-        /*
-          note! pOp is now referring to an event in the next epoch
-          or is == 0
-        */
-#ifdef RUN_NDB_BINLOG_TIMER
-        write_timer.stop();
-#endif
-
-        if (row.n_inserts || row.n_updates
-            || row.n_deletes || row.n_schemaops)
-        {
-          injector::transaction::binlog_pos start= trans.start_pos();
-          if (int r= trans.commit())
-          {
-            sql_print_error("NDB binlog:"
-                            "Error during COMMIT of GCI. Error: %d",
-                            r);
-            /* TODO: Further handling? */
-          }
-          row.gci= gci;
-          row.master_log_file= start.file_name();
-          row.master_log_pos= start.file_pos();
-
-          DBUG_PRINT("info",("COMMIT gci %lld",gci));
-          if (ndb_update_binlog_index)
-            ndb_add_binlog_index(thd, &row);
-          ndb_latest_applied_binlog_epoch= gci;
-        }
-        else
-          trans.commit();
-        ndb_latest_handled_binlog_epoch= gci;
-#ifdef RUN_NDB_BINLOG_TIMER
-        gci_timer.stop();
-        sql_print_information("gci %ld event_count %d write time "
-                              "%ld(%d e/s), total time %ld(%d e/s)",
-                              (ulong)gci, event_count,
-                              write_timer.elapsed_ms(),
-                              event_count / write_timer.elapsed_ms(),
-                              gci_timer.elapsed_ms(),
-                              event_count / gci_timer.elapsed_ms());
-#endif
-      }
-    }
-    ndb_latest_handled_binlog_epoch= ndb_latest_received_binlog_epoch;
-  }
-err:
-  DBUG_PRINT("info",("Shutting down cluster binlog thread"));
-  close_thread_tables(thd);
-  pthread_mutex_lock(&injector_mutex);
-  /* don't mess with the injector_ndb anymore from other threads */
-  injector_ndb= 0;
-  pthread_mutex_unlock(&injector_mutex);
-  thd->db= 0; // so as not to try to free memory
-  sql_print_information("Stopping Cluster Binlog");
-
-  if (apply_status_share)
-    free_share(&apply_status_share);
-
-  /* remove all event operations */
-  if (ndb)
-  {
-    NdbEventOperation *op;
-    DBUG_PRINT("info",("removing all event operations"));
-    while ((op= ndb->getEventOperation()))
-    {
-      DBUG_PRINT("info",("removing event operation on %s",
-                         op->getEvent()->getName()));
-      NDB_SHARE *share= (NDB_SHARE*) op->getCustomData();
-      free_share(&share);
-      ndb->dropEventOperation(op);
-    }
-    delete ndb;
-    ndb= 0;
-  }
-
-  net_end(&thd->net);
-  thd->cleanup();
-  delete thd;
-
-  ndb_binlog_thread_running= -1;
-  (void) pthread_cond_signal(&injector_cond);
-
-  DBUG_PRINT("exit", ("ndb_binlog_thread"));
-  my_thread_end();
-
-  pthread_exit(0);
-  DBUG_RETURN(NULL);
-}
-#endif /* HAVE_NDB_BINLOG */
 
 
 /*