List: Commits — « Previous Message | Next Message »
From: tomas    Date: January 5 2006, 11:58am
Subject:bk commit into 5.1 tree (tomas:1.2039)
View as plain text  
Below is the list of changes that have just been committed into a local
5.1 repository of tomas. When tomas does a push these changes will
be propagated to the main repository and, within 24 hours after the
push, to the public repository.
For information on how to access the public repository
see http://dev.mysql.com/doc/mysql/en/installing-source-tree.html

ChangeSet
  1.2039 06/01/05 12:58:40 tomas@stripped +22 -0
  Merge tulin@stripped:/home/bk/mysql-5.1-new
  into  poseidon.ndb.mysql.com:/home/tomas/mysql-5.1-wl2325-v6

  mysql-test/r/information_schema.result
    1.98 06/01/05 12:58:34 tomas@stripped +0 -0
    SCCS merged

  libmysqld/Makefile.am
    1.77 06/01/05 12:58:34 tomas@stripped +0 -0
    SCCS merged

  storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
    1.27 06/01/05 12:55:44 tomas@stripped +0 -0
    Auto merged

  storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
    1.17 06/01/05 12:55:44 tomas@stripped +0 -0
    Auto merged

  storage/ndb/src/kernel/blocks/backup/Backup.cpp
    1.30 06/01/05 12:55:44 tomas@stripped +0 -0
    Auto merged

  sql/sql_parse.cc
    1.503 06/01/05 12:55:44 tomas@stripped +0 -0
    Auto merged

  sql/sql_db.cc
    1.124 06/01/05 12:55:43 tomas@stripped +0 -0
    Auto merged

  sql/sql_base.cc
    1.294 06/01/05 12:55:43 tomas@stripped +0 -0
    Auto merged

  sql/set_var.cc
    1.155 06/01/05 12:55:43 tomas@stripped +0 -0
    Auto merged

  sql/mysqld.cc
    1.512 06/01/05 12:55:43 tomas@stripped +0 -0
    Auto merged

  sql/mysql_priv.h
    1.363 06/01/05 12:55:42 tomas@stripped +0 -0
    Auto merged

  sql/log.cc
    1.182 06/01/05 12:55:42 tomas@stripped +0 -0
    Auto merged

  sql/handler.h
    1.175 06/01/05 12:55:42 tomas@stripped +0 -0
    Auto merged

  sql/handler.cc
    1.202 06/01/05 12:55:42 tomas@stripped +0 -0
    Auto merged

  sql/ha_ndbcluster.cc
    1.229 06/01/05 12:55:42 tomas@stripped +0 -0
    Auto merged

  sql/Makefile.am
    1.130 06/01/05 12:55:41 tomas@stripped +0 -0
    Auto merged

  mysql-test/t/information_schema.test
    1.68 06/01/05 12:55:41 tomas@stripped +0 -0
    Auto merged

  mysql-test/r/show_check.result
    1.88 06/01/05 12:55:41 tomas@stripped +0 -0
    Auto merged

  mysql-test/mysql-test-run.sh
    1.285 06/01/05 12:55:41 tomas@stripped +0 -0
    Auto merged

  mysql-test/mysql-test-run.pl
    1.51 06/01/05 12:55:41 tomas@stripped +0 -0
    Auto merged

  include/my_base.h
    1.79 06/01/05 12:55:41 tomas@stripped +0 -0
    Auto merged

  client/mysqltest.c
    1.167 06/01/05 12:55:41 tomas@stripped +0 -0
    Auto merged

# This is a BitKeeper patch.  What follows are the unified diffs for the
# set of deltas contained in the patch.  The rest of the patch, the part
# that BitKeeper cares about, is below these diffs.
# User:	tomas
# Host:	poseidon.ndb.mysql.com
# Root:	/home/tomas/mysql-5.1-wl2325-v6/RESYNC

--- 1.78/include/my_base.h	2005-12-28 13:05:19 +01:00
+++ 1.79/include/my_base.h	2006-01-05 12:55:41 +01:00
@@ -154,7 +154,9 @@
     to overwrite entire row.
   */
   HA_EXTRA_KEYREAD_PRESERVE_FIELDS,
-  HA_EXTRA_MMAP
+  HA_EXTRA_MMAP,
+  HA_EXTRA_IGNORE_NO_KEY,		/* Tuple not found don't rollback everything*/
+  HA_EXTRA_NO_IGNORE_NO_KEY
 };
 
 	/* The following is parameter to ha_panic() */

--- 1.284/mysql-test/mysql-test-run.sh	2005-12-29 10:55:03 +01:00
+++ 1.285/mysql-test/mysql-test-run.sh	2006-01-05 12:55:41 +01:00
@@ -208,11 +208,14 @@
 
 MASTER_RUNNING=0
 MASTER1_RUNNING=0
+MASTER_MYHOST=127.0.0.1
 MASTER_MYPORT=9306
 SLAVE_RUNNING=0
+SLAVE_MYHOST=127.0.0.1
 SLAVE_MYPORT=9308 # leave room for 2 masters for cluster tests
 MYSQL_MANAGER_PORT=9305 # needs to be out of the way of slaves
 NDBCLUSTER_PORT=9350
+NDBCLUSTER_PORT_SLAVE=9358
 MYSQL_MANAGER_PW_FILE=$MYSQL_TEST_DIR/var/tmp/manager.pwd
 MYSQL_MANAGER_LOG=$MYSQL_TEST_DIR/var/log/manager.log
 MYSQL_MANAGER_USER=root
@@ -248,7 +251,11 @@
 EXTRA_MYSQLBINLOG_OPT=""
 USE_RUNNING_SERVER=0
 USE_NDBCLUSTER=@USE_NDBCLUSTER@
+USE_NDBCLUSTER_SLAVE=@USE_NDBCLUSTER@
 USE_RUNNING_NDBCLUSTER=""
+USE_RUNNING_NDBCLUSTER_SLAVE=""
+NDB_EXTRA_TEST=0
+NDBCLUSTER_EXTRA_OPTS=""
 USE_PURIFY=""
 PURIFY_LOGS=""
 DO_GCOV=""
@@ -275,6 +282,7 @@
 NDB_MGM_EXTRA_OPTS=
 NDB_MGMD_EXTRA_OPTS=
 NDBD_EXTRA_OPTS=
+SLAVE_MYSQLDBINLOG=1
 
 DO_STRESS=""
 STRESS_SUITE="main"
@@ -312,9 +320,18 @@
     --extern)  USE_RUNNING_SERVER=1 ;;
     --with-ndbcluster)
       USE_NDBCLUSTER="--ndbcluster" ;;
+    --with-ndbcluster-slave)
+      USE_NDBCLUSTER_SLAVE="--ndbcluster" ;;
     --ndb-connectstring=*)
       USE_NDBCLUSTER="--ndbcluster" ;
       USE_RUNNING_NDBCLUSTER=`$ECHO "$1" | $SED -e "s;--ndb-connectstring=;;"` ;;
+    --ndb-connectstring-slave=*)
+      USE_NDBCLUSTER_SLAVE="--ndbcluster" ;
+      USE_RUNNING_NDBCLUSTER_SLAVE=`$ECHO "$1" | $SED -e "s;--ndb-connectstring-slave=;;"` ;;
+    --ndb-extra-test)
+      NDBCLUSTER_EXTRA_OPTS=" "
+      NDB_EXTRA_TEST=1 ;
+      ;;
     --ndb_mgm-extra-opts=*)
       NDB_MGM_EXTRA_OPTS=`$ECHO "$1" | $SED -e "s;--ndb_mgm-extra-opts=;;"` ;;
     --ndb_mgmd-extra-opts=*)
@@ -331,6 +348,8 @@
     --slave_port=*) SLAVE_MYPORT=`$ECHO "$1" | $SED -e "s;--slave_port=;;"` ;;
     --manager-port=*) MYSQL_MANAGER_PORT=`$ECHO "$1" | $SED -e "s;--manager_port=;;"` ;;
     --ndbcluster_port=*) NDBCLUSTER_PORT=`$ECHO "$1" | $SED -e "s;--ndbcluster_port=;;"` ;;
+    --ndbcluster-port=*) NDBCLUSTER_PORT=`$ECHO "$1" | $SED -e "s;--ndbcluster-port=;;"` ;;
+    --ndbcluster-port-slave=*) NDBCLUSTER_PORT_SLAVE=`$ECHO "$1" | $SED -e "s;--ndbcluster-port-slave=;;"` ;;
     --with-openssl)
      EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT \
      --ssl-ca=$MYSQL_TEST_DIR/std_data/cacert.pem \
@@ -494,14 +513,20 @@
       TMP=`$ECHO "$1" | $SED -e "s;--valgrind-options=;;"`
       VALGRIND="$VALGRIND $TMP"
       ;;
+    --skip-ndbcluster-slave | --skip-ndb-slave)
+      USE_NDBCLUSTER_SLAVE=""
+      EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT --skip-ndbcluster"
+      ;;
     --valgrind-mysqltest)
       DO_VALGRIND_MYSQL_TEST=1
       ;;
     --skip-ndbcluster | --skip-ndb)
       USE_NDBCLUSTER=""
+      USE_NDBCLUSTER_SLAVE=""
       EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT --skip-ndbcluster"
       EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT --skip-ndbcluster"
       ;;
+    --skip-slave-binlog) SLAVE_MYSQLDBINLOG=0 ;;
     --skip-*)
       EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT $1"
       EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT $1"
@@ -571,9 +596,13 @@
 CURRENT_TEST="$MYSQL_TEST_DIR/var/log/current_test"
 SMALL_SERVER="--key_buffer_size=1M --sort_buffer=256K --max_heap_table_size=1M"
 
-export MASTER_MYPORT SLAVE_MYPORT MYSQL_TCP_PORT MASTER_MYSOCK MASTER_MYSOCK1
+export MASTER_MYHOST MASTER_MYPORT SLAVE_MYHOST SLAVE_MYPORT MYSQL_TCP_PORT MASTER_MYSOCK MASTER_MYSOCK1
 
 NDBCLUSTER_OPTS="--port=$NDBCLUSTER_PORT --data-dir=$MYSQL_TEST_DIR/var --ndb_mgm-extra-opts=$NDB_MGM_EXTRA_OPTS --ndb_mgmd-extra-opts=$NDB_MGMD_EXTRA_OPTS --ndbd-extra-opts=$NDBD_EXTRA_OPTS"
+NDBCLUSTER_OPTS_SLAVE="--port=$NDBCLUSTER_PORT_SLAVE --data-dir=$MYSQL_TEST_DIR/var"
+if [ -n "$USE_NDBCLUSTER_SLAVE" ] ; then
+  USE_NDBCLUSTER_SLAVE="$USE_NDBCLUSTER_SLAVE --ndb-connectstring=localhost:$NDBCLUSTER_PORT_SLAVE"
+fi
 NDB_BACKUP_DIR=$MYSQL_TEST_DIR/var/ndbcluster-$NDBCLUSTER_PORT
 NDB_TOOLS_OUTPUT=$MYSQL_TEST_DIR/var/log/ndb_tools.log
 
@@ -716,7 +745,6 @@
  fi
  MYSQL_TEST="$CLIENT_BINDIR/mysqltest"
  MYSQL_CHECK="$CLIENT_BINDIR/mysqlcheck"
- MYSQL_DUMP="$CLIENT_BINDIR/mysqldump"
  MYSQL_SLAP="$CLIENT_BINDIR/mysqlslap"
  MYSQL_SHOW="$CLIENT_BINDIR/mysqlshow"
  MYSQL_IMPORT="$CLIENT_BINDIR/mysqlimport"
@@ -756,6 +784,13 @@
    MYSQL_TEST="$VALGRIND_MYSQLTEST $CLIENT_BINDIR/mysqltest"
    MYSQL_CLIENT_TEST="$CLIENT_BINDIR/mysql_client_test"
  fi
+ if [ -f "$BASEDIR/client/.libs/mysqldump" ] ; then
+   MYSQL_DUMP="$BASEDIR/client/.libs/mysqldump"
+ elif  [ -f "$BASEDIR/client/mysqldump" ] ; then
+   MYSQL_DUMP="$BASEDIR/client/mysqldump"
+ else
+   MYSQL_DUMP="$BASEDIR/bin/mysqldump"
+ fi
 fi
 
 if [ -z "$MASTER_MYSQLD" ]
@@ -822,6 +857,9 @@
 export PURIFYOPTIONS
 NDB_STATUS_OK=1
 export NDB_STATUS_OK
+NDB_SLAVE_STATUS_OK=1
+export NDB_SLAVE_STATUS_OK
+export NDB_EXTRA_TEST NDBCLUSTER_PORT NDBCLUSTER_PORT_SLAVE
 
 MYSQL_TEST_ARGS="--no-defaults --socket=$MASTER_MYSOCK --database=$DB \
  --user=$DBUSER --password=$DBPASSWD --silent -v --skip-safemalloc \
@@ -1184,24 +1222,30 @@
 
 start_ndbcluster()
 {
-  if [ ! -z "$USE_NDBCLUSTER" ]
+  if [ ! -n "$USE_NDBCLUSTER" ] ;
+  then
+    USING_NDBCLUSTER=0
+    USE_NDBCLUSTER_OPT=
+  fi
+  
+  if [ x$USING_NDBCLUSTER = x1 -a -z "$USE_NDBCLUSTER_OPT" ]
   then
   rm -f $NDB_TOOLS_OUTPUT
   if [ -z "$USE_RUNNING_NDBCLUSTER" ]
   then
-    echo "Starting ndbcluster"
-    if [ "$DO_BENCH" = 1 ]
+    if [ "$DO_BENCH" != 1 -a -z "$NDBCLUSTER_EXTRA_OPTS" ]
     then
-      NDBCLUSTER_EXTRA_OPTS=""
-    else
       NDBCLUSTER_EXTRA_OPTS="--small"
     fi
-    ./ndb/ndbcluster $NDBCLUSTER_OPTS $NDBCLUSTER_EXTRA_OPTS --initial || NDB_STATUS_OK=0
+    OPTS="$NDBCLUSTER_OPTS $NDBCLUSTER_EXTRA_OPTS --verbose=2 --initial"
+    echo "Starting master ndbcluster " $OPTS
+    ./ndb/ndbcluster $OPTS || NDB_STATUS_OK=0
     if [ x$NDB_STATUS_OK != x1 ] ; then
       if [ x$FORCE != x1 ] ; then
         exit 1
       fi
-      USE_NDBCLUSTER=
+      USING_NDBCLUSTER=0
+      USE_NDBCLUSTER_OPT=
       return
     fi
 
@@ -1210,15 +1254,16 @@
     NDB_CONNECTSTRING="$USE_RUNNING_NDBCLUSTER"
     echo "Using ndbcluster at $NDB_CONNECTSTRING"
   fi
-  USE_NDBCLUSTER="$USE_NDBCLUSTER --ndb-connectstring=\"$NDB_CONNECTSTRING\""
+  USE_NDBCLUSTER_OPT="$USE_NDBCLUSTER --ndb-connectstring=\"$NDB_CONNECTSTRING\""
   export NDB_CONNECTSTRING
   fi
 }
 
 stop_ndbcluster()
 {
- if [ ! -z "$USE_NDBCLUSTER" ]
+ if [ -n "$USE_NDBCLUSTER_OPT" ]
  then
+ USE_NDBCLUSTER_OPT=
  if [ -z "$USE_RUNNING_NDBCLUSTER" ]
  then
    # Kill any running ndbcluster stuff
@@ -1277,7 +1322,7 @@
           --local-infile \
           --exit-info=256 \
           --core \
-          $USE_NDBCLUSTER \
+          $USE_NDBCLUSTER_OPT \
           --datadir=$MASTER_MYDDIR$1 \
           --pid-file=$MASTER_MYPID$1 \
           --socket=$MASTER_MYSOCK$1 \
@@ -1307,7 +1352,7 @@
           --character-sets-dir=$CHARSETSDIR \
           --default-character-set=$CHARACTER_SET \
           --core \
-          $USE_NDBCLUSTER \
+          $USE_NDBCLUSTER_OPT \
           --tmpdir=$MYSQL_TMP_DIR \
           --language=$LANGUAGE \
           --innodb_data_file_path=ibdata1:128M:autoextend \
@@ -1374,6 +1419,7 @@
   [ x$SKIP_SLAVE = x1 ] && return
   eval "this_slave_running=\$SLAVE$1_RUNNING"
   [ x$this_slave_running = 1 ] && return
+
   # When testing fail-safe replication, we will have more than one slave
   # in this case, we start secondary slaves with an argument
   slave_ident="slave$1"
@@ -1397,6 +1443,34 @@
    slave_pid=$SLAVE_MYPID
    slave_sock="$SLAVE_MYSOCK"
  fi
+
+  #
+  if [ x$USING_NDBCLUSTER = x1 -a -n "$USE_NDBCLUSTER_SLAVE" ] ; then
+    if [ $slave_server_id -eq 2 ] ; then
+      savedir=`pwd`
+      cd $MYSQL_TEST_DIR
+      if [ "$DO_BENCH" != 1 -a -z "$NDBCLUSTER_EXTRA_OPTS" ]
+       then
+         NDBCLUSTER_EXTRA_OPTS="--small"
+      fi
+
+      OPTS="$NDBCLUSTER_OPTS_SLAVE --initial $NDBCLUSTER_EXTRA_OPTS --diskless --ndbd-nodes=1 --verbose=2"
+      echo "Starting slave ndbcluster " $OPTS
+      ./ndb/ndbcluster $OPTS \
+                      || NDB_SLAVE_STATUS_OK=0
+      #                > /dev/null 2>&1 || NDB_SLAVE_STATUS_OK=0
+      cd $savedir
+      if [ x$NDB_SLAVE_STATUS_OK != x1 ] ; then
+        if [ x$FORCE != x1 ] ; then
+          exit 1
+        fi
+        USE_NDBCLUSTER_SLAVE_OPT=
+        USE_NDBCLUSTER_SLAVE=
+      fi
+      USE_NDBCLUSTER_SLAVE_OPT=$USE_NDBCLUSTER_SLAVE
+    fi
+  fi
+
   # Remove stale binary logs and old master.info files
   # except for too tests which need them
   if [ "$tname" != "rpl_crash_binlog_ib_1b" ] && [ "$tname" != "rpl_crash_binlog_ib_2b" ] && [ "$tname" != "rpl_crash_binlog_ib_3b" ]
@@ -1422,12 +1496,16 @@
    master_info=$SLAVE_MASTER_INFO
  fi
 
+  if [ x$SLAVE_MYSQLDBINLOG = x1 ]
+  then
+    EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT --log-bin=$MYSQL_TEST_DIR/var/log/$slave_ident-bin --log-slave-updates"
+  fi
+
   $RM -f $slave_datadir/log.*
   slave_args="--no-defaults $master_info \
   	    --exit-info=256 \
-          --log-bin=$MYSQL_TEST_DIR/var/log/$slave_ident-bin \
+          $SLAVE_MYSQLDBINLOG_OPT \
           --relay-log=$MYSQL_TEST_DIR/var/log/$slave_ident-relay-bin \
-          --log-slave-updates \
           --log=$slave_log \
           --basedir=$MY_BASEDIR \
           --datadir=$slave_datadir \
@@ -1449,7 +1527,8 @@
           --log-bin-trust-function-creators \
           --loose-binlog-show-xid=0 \
            $SMALL_SERVER \
-           $EXTRA_SLAVE_MYSQLD_OPT $EXTRA_SLAVE_OPT"
+           $EXTRA_SLAVE_MYSQLD_OPT $EXTRA_SLAVE_OPT \
+           $USE_NDBCLUSTER_SLAVE_OPT"
   CUR_MYERR=$slave_err
   CUR_MYSOCK=$slave_sock
 
@@ -1497,7 +1576,6 @@
 #  start_master
 #  start_slave
   cd $MYSQL_TEST_DIR
-  start_ndbcluster
   return 1
 }
 
@@ -1531,6 +1609,12 @@
       sleep $SLEEP_TIME_AFTER_RESTART
     fi
     eval "SLAVE$1_RUNNING=0"
+    if [ -n "$USE_NDBCLUSTER_SLAVE_OPT" ] ; then
+      savedir=`pwd`
+      cd $MYSQL_TEST_DIR
+      ./ndb/ndbcluster $NDBCLUSTER_OPTS_SLAVE --stop
+      cd $savedir
+    fi
   fi
 }
 
@@ -1619,6 +1703,7 @@
  result_file="r/$tname.result"
  echo $tname > $CURRENT_TEST
  SKIP_SLAVE=`$EXPR \( $tname : rpl \) = 0 \& \( $tname : federated \) = 0`
+ NDBCLUSTER_TEST=`$EXPR \( $tname : '.*ndb.*' \) != 0`
  if [ "$USE_MANAGER" = 1 ] ; then
   many_slaves=`$EXPR \( \( $tname : rpl_failsafe \) != 0 \) \| \( \( $tname : rpl_chain_temp_table \) != 0 \)`
  fi
@@ -1708,9 +1793,19 @@
      esac
      stop_master
      stop_master 1
+
+     # only stop the cluster if this test will not use cluster
+     if [ x$NDBCLUSTER_TEST != x1 ] ;
+     then
+       stop_ndbcluster
+     fi
+
      report_current_test $tname
+     USING_NDBCLUSTER=$NDBCLUSTER_TEST
+     # start_ndbcluster knows if cluster is already started
+     start_ndbcluster
      start_master
-     if [ -n "$USE_NDBCLUSTER" -a -z "$DO_BENCH" -a -z "$DO_STRESS" ] ; then
+     if [ x$USING_NDBCLUSTER = x1 -a -z "$DO_BENCH" -a -z "$DO_STRESS" ] ; then
        start_master 1
      fi
      TZ=$MY_TZ; export TZ
@@ -1719,14 +1814,25 @@
      # or there is no master running (FIXME strange.....)
      # or there is a master init script
      if [ ! -z "$EXTRA_MASTER_OPT" ] || [ x$MASTER_RUNNING != x1 ] || \
-	[ -f $master_init_script ]
+	[ -f $master_init_script ] || \
+        [ -n "$USE_NDBCLUSTER" -a x$NDBCLUSTER_TEST != x$USING_NDBCLUSTER ]
      then
        EXTRA_MASTER_OPT=""
        stop_master
        stop_master 1
+
+       # only stop the cluster if this test will not use cluster
+       if [ x$NDBCLUSTER_TEST != x1 ] ;
+       then
+         stop_ndbcluster
+       fi
+
        report_current_test $tname
+       USING_NDBCLUSTER=$NDBCLUSTER_TEST
+       # start_ndbcluster knows if cluster is already started
+       start_ndbcluster
        start_master
-       if [ -n "$USE_NDBCLUSTER"  -a -z "$DO_BENCH" -a -z "$DO_STRESS" ] ; then
+       if [ x$USING_NDBCLUSTER = x1  -a -z "$DO_BENCH" -a -z "$DO_STRESS" ] ; then
          start_master 1
        fi
      else
@@ -2014,6 +2120,8 @@
     fi
   fi
 
+  # just to force stopping anything from previous runs
+  USE_NDBCLUSTER_OPT=$USE_NDBCLUSTER
   stop_ndbcluster
 
   # Remove files that can cause problems
@@ -2031,7 +2139,8 @@
 
   if [ -n "$1" -a `expr "X$*" : '.*ndb'` -eq 0 ]
   then
-    USE_NDBCLUSTER=""
+    USING_NDBCLUSTER=0
+    USE_NDBCLUSTER_OPT=
   fi
 
   start_manager
@@ -2063,7 +2172,7 @@
     EXTRA_BENCH_ARGS="--small-test --small-tables"
   fi
 
-  if [ ! -z "$USE_NDBCLUSTER" ]
+  if [ x$USING_NDBCLUSTER = x1 ]
   then
     EXTRA_BENCH_ARGS="--create-options=TYPE=ndb $EXTRA_BENCH_ARGS"
   fi 

--- 1.129/sql/Makefile.am	2005-12-31 10:13:33 +01:00
+++ 1.130/sql/Makefile.am	2006-01-05 12:55:41 +01:00
@@ -58,6 +58,7 @@
 			sql_select.h structs.h table.h sql_udf.h hash_filo.h\
 			lex.h lex_symbol.h sql_acl.h sql_crypt.h  \
 			log_event.h sql_repl.h slave.h rpl_filter.h \
+			rpl_injector.h \
 			stacktrace.h sql_sort.h sql_cache.h set_var.h \
 			spatial.h gstream.h client_settings.h tzfile.h \
                         tztime.h my_decimal.h\
@@ -89,6 +90,7 @@
 			sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \
 			sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \
 			slave.cc sql_repl.cc rpl_filter.cc rpl_tblmap.cc \
+			rpl_injector.cc \
                         sql_union.cc sql_derived.cc \
 			client.c sql_client.cc mini_client_errors.c pack.c\
 			stacktrace.c repl_failsafe.h repl_failsafe.cc \
@@ -103,6 +105,7 @@
 			ha_innodb.h  ha_berkeley.h  ha_archive.h \
 			ha_blackhole.cc ha_federated.cc ha_ndbcluster.cc \
 			ha_blackhole.h  ha_federated.h  ha_ndbcluster.h \
+			ha_ndbcluster_binlog.cc ha_ndbcluster_binlog.h \
 			ha_partition.cc ha_partition.h
 mysqld_DEPENDENCIES =	@mysql_se_objs@
 gen_lex_hash_SOURCES =	gen_lex_hash.cc
@@ -157,6 +160,9 @@
 		$(CXXCOMPILE) @bdb_includes@ $(LM_CFLAGS) -c $<
 
 ha_ndbcluster.o:ha_ndbcluster.cc ha_ndbcluster.h
+		$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
+
+ha_ndbcluster_binlog.o:ha_ndbcluster_binlog.cc ha_ndbcluster_binlog.h
 		$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
 
 #Until we can get rid of dependencies on ha_ndbcluster.h

--- 1.201/sql/handler.cc	2006-01-04 13:56:53 +01:00
+++ 1.202/sql/handler.cc	2006-01-05 12:55:42 +01:00
@@ -2398,6 +2398,116 @@
   DBUG_RETURN(error);
 }
 
+#ifdef HAVE_ROW_BASED_REPLICATION
+int ha_reset_logs(THD *thd)
+{
+  bool result= 0;
+  handlerton **types;
+  for (types= sys_table_types; *types; types++)
+  {
+    if ((*types)->state == SHOW_OPTION_YES && 
+        (*types)->reset_logs)
+    {
+      if ((*types)->reset_logs(thd))
+        result= 1;
+    }
+  }
+  return result;
+}
+
+int ha_binlog_index_purge_file(THD *thd, const char *file)
+{
+  bool result= 0;
+  handlerton **types;
+  for (types= sys_table_types; *types; types++)
+  {
+    if ((*types)->state == SHOW_OPTION_YES && 
+        (*types)->binlog_index_purge_file)
+    {
+      if ((*types)->binlog_index_purge_file(thd, file))
+        result= 1;
+    }
+  }
+  return result;
+}
+
+void ha_reset_slave(THD* thd)
+{
+  handlerton **types;
+  for (types= sys_table_types; *types; types++)
+  {
+    if ((*types)->state == SHOW_OPTION_YES && 
+        (*types)->reset_slave)
+    {
+      (*types)->reset_slave(thd);
+    }
+  }
+}
+
+void ha_binlog_wait(THD* thd)
+{
+  handlerton **types;
+  for (types= sys_table_types; *types; types++)
+  {
+    if ((*types)->state == SHOW_OPTION_YES && 
+        (*types)->binlog_wait)
+    {
+      (*types)->binlog_wait(thd);
+    }
+  }
+}
+
+int ha_binlog_end(THD* thd)
+{
+  bool result= 0;
+  handlerton **types;
+  for (types= sys_table_types; *types; types++)
+  {
+    if ((*types)->state == SHOW_OPTION_YES && 
+        (*types)->binlog_end)
+    {
+      if ((*types)->binlog_end(thd))
+        result= 1;
+    }
+  }
+  return result;
+}
+
+int ha_create_database(THD *thd, const char *db, const char *query,
+                       int query_length)
+{
+  bool result= 0;
+  handlerton **types;
+  for (types= sys_table_types; *types; types++)
+  {
+    if ((*types)->state == SHOW_OPTION_YES && 
+        (*types)->create_database)
+    {
+      fprintf(stderr, "%s\n", (*types)->name);
+      if ((*types)->create_database(thd, db, query, query_length))
+        result= 1;
+    }
+  }
+  return result;
+}
+
+int ha_alter_database(THD *thd, const char *db, const char *query,
+                      int query_length)
+{
+  bool result= 0;
+  handlerton **types;
+  for (types= sys_table_types; *types; types++)
+  {
+    if ((*types)->state == SHOW_OPTION_YES && 
+        (*types)->alter_database)
+    {
+      if ((*types)->alter_database(thd, db, query, query_length))
+        result = 1;
+    }
+  }
+  return result;
+}
+#endif
 
 /*
   Read the first row of a multi-range set.
@@ -2810,10 +2920,12 @@
     binlog_filter->db_ok(table->s->db.str);
 }
 
-template<class RowsEventT> int binlog_log_row(TABLE* table,
+template<class RowsEventT> int binlog_log_row(handler *h, TABLE* table,
                                               const byte *before_record,
                                               const byte *after_record)
 {
+  if (h->is_injective())
+    return 0;
   bool error= 0;
   THD *const thd= current_thd;
 
@@ -2848,9 +2960,9 @@
   have -fno-implicit-template as compiling option.
 */
 
-template int binlog_log_row<Write_rows_log_event>(TABLE *, const byte *, const byte *);
-template int binlog_log_row<Delete_rows_log_event>(TABLE *, const byte *, const byte *);
-template int binlog_log_row<Update_rows_log_event>(TABLE *, const byte *, const byte *);
+template int binlog_log_row<Write_rows_log_event>(handler *, TABLE *, const byte *, const byte *);
+template int binlog_log_row<Delete_rows_log_event>(handler *, TABLE *, const byte *, const byte *);
+template int binlog_log_row<Update_rows_log_event>(handler *, TABLE *, const byte *, const byte *);
 
 #endif /* HAVE_ROW_BASED_REPLICATION */
 
@@ -2860,7 +2972,7 @@
   if (likely(!(error= write_row(buf))))
   {
 #ifdef HAVE_ROW_BASED_REPLICATION
-    error= binlog_log_row<Write_rows_log_event>(table, 0, buf);
+    error= binlog_log_row<Write_rows_log_event>(this, table, 0, buf);
 #endif
   }
   return error;
@@ -2872,7 +2984,7 @@
   if (likely(!(error= update_row(old_data, new_data))))
   {
 #ifdef HAVE_ROW_BASED_REPLICATION
-    error= binlog_log_row<Update_rows_log_event>(table, old_data, new_data);
+    error= binlog_log_row<Update_rows_log_event>(this, table, old_data, new_data);
 #endif
   }
   return error;
@@ -2884,7 +2996,7 @@
   if (likely(!(error= delete_row(buf))))
   {
 #ifdef HAVE_ROW_BASED_REPLICATION
-    error= binlog_log_row<Delete_rows_log_event>(table, buf, 0);
+    error= binlog_log_row<Delete_rows_log_event>(this, table, buf, 0);
 #endif
   }
   return error;

--- 1.174/sql/handler.h	2005-12-26 12:54:47 +01:00
+++ 1.175/sql/handler.h	2006-01-05 12:55:42 +01:00
@@ -435,6 +435,15 @@
    bool (*flush_logs)();
    bool (*show_status)(THD *thd, stat_print_fn *print, enum ha_stat_type stat);
    uint32 flags;                                /* global handler flags */
+   int (*reset_logs)(THD *thd);
+   int (*binlog_index_purge_file)(THD *thd, const char *file);
+   void (*reset_slave)(THD *thd);
+   void (*binlog_wait)(THD *thd);
+   int (*binlog_end)(THD *thd);
+   int (*create_database)(THD *thd, const char *db, const char *query,
+                          int query_length);
+   int (*alter_database)(THD *thd, const char *db, const char *query,
+                         int query_length);
 } handlerton;
 
 extern const handlerton default_hton;
@@ -1105,6 +1114,12 @@
   virtual int ha_update_row(const byte * old_data, byte * new_data);
   virtual int ha_delete_row(const byte * buf);
   /*
+    If the handler does it's own injection of the rows, this member function
+    should return 'true'.
+  */
+  virtual bool is_injective() const { return false; }
+  
+  /*
     SYNOPSIS
       start_bulk_update()
     RETURN
@@ -1614,3 +1629,23 @@
 int ha_repl_report_sent_binlog(THD *thd, char *log_file_name,
                                my_off_t end_offset);
 int ha_repl_report_replication_stop(THD *thd);
+
+#ifdef HAVE_ROW_BASED_REPLICATION
+int ha_reset_logs(THD *thd);
+int ha_binlog_index_purge_file(THD *thd, const char *file);
+void ha_reset_slave(THD *thd);
+void ha_binlog_wait(THD *thd);
+int ha_binlog_end(THD *thd);
+int ha_create_database(THD *thd, const char *db, const char *query,
+                       int query_length);
+int ha_alter_database(THD *thd, const char *db, const char *query,
+                      int query_length);
+#else
+#define ha_reset_logs(a) 0
+#define ha_binlog_index_purge_file(a,b) 0
+#define ha_reset_slave(a)
+#define ha_binlog_wait(a)
+#define ha_binlog_end(a) 0
+#define ha_create_database(a,b,c,d) 0
+#define ha_alter_database(a,b,c,d) 0
+#endif

--- 1.181/sql/log.cc	2005-12-24 12:06:57 +01:00
+++ 1.182/sql/log.cc	2006-01-05 12:55:42 +01:00
@@ -987,6 +987,7 @@
   enum_log_type save_log_type;
   DBUG_ENTER("reset_logs");
 
+  ha_reset_logs(thd);
   /*
     We need to get both locks to be sure that no one is trying to
     write to the index log file.
@@ -1236,6 +1237,9 @@
     DBUG_PRINT("info",("purging %s",log_info.log_file_name));
     if (!my_delete(log_info.log_file_name, MYF(0)) && decrease_log_space)
       *decrease_log_space-= file_size;
+
+    ha_binlog_index_purge_file(current_thd, log_info.log_file_name);
+
     if (find_next_log(&log_info, 0) || exit_loop)
       break;
   }
@@ -1296,6 +1300,9 @@
 	stat_area.st_mtime >= purge_time)
       break;
     my_delete(log_info.log_file_name, MYF(0));
+
+    ha_binlog_index_purge_file(current_thd, log_info.log_file_name);
+
     if (find_next_log(&log_info, 0))
       break;
   }

--- 1.362/sql/mysql_priv.h	2005-12-31 09:23:08 +01:00
+++ 1.363/sql/mysql_priv.h	2006-01-05 12:55:42 +01:00
@@ -540,6 +540,7 @@
   COMMIT_RELEASE=-1,   COMMIT=0,    COMMIT_AND_CHAIN=6
 };
 
+bool begin_trans(THD *thd);
 int end_trans(THD *thd, enum enum_mysql_completiontype completion);
 
 Item *negate_expression(THD *thd, Item *expr);
@@ -642,6 +643,7 @@
 void table_cache_free(void);
 bool table_def_init(void);
 void table_def_free(void);
+void assign_new_table_id(TABLE *table);
 uint cached_open_tables(void);
 uint cached_table_definitions(void);
 void kill_mysql(void);
@@ -1037,7 +1039,7 @@
 bool remove_table_from_cache(THD *thd, const char *db, const char *table,
                              uint flags);
 
-bool close_cached_tables(THD *thd, bool wait_for_refresh, TABLE_LIST *tables);
+bool close_cached_tables(THD *thd, bool wait_for_refresh, TABLE_LIST *tables, bool have_lock = FALSE);
 void copy_field_from_tmp_record(Field *field,int offset);
 bool fill_record(THD *thd, Field **field, List<Item> &values,
                  bool ignore_errors);

--- 1.511/sql/mysqld.cc	2005-12-31 06:02:47 +01:00
+++ 1.512/sql/mysqld.cc	2006-01-05 12:55:43 +01:00
@@ -416,6 +416,8 @@
 ulong opt_ndb_cache_check_time;
 const char *opt_ndb_mgmd;
 ulong opt_ndb_nodeid;
+ulong ndb_report_thresh_binlog_epoch_slip;
+ulong ndb_report_thresh_binlog_mem_usage;
 
 extern struct show_var_st ndb_status_variables[];
 extern const char *ndb_distribution_names[];
@@ -1133,6 +1135,10 @@
 
   mysql_log.cleanup();
   mysql_slow_log.cleanup();
+  /* make sure that handlers finish up
+     what they have that is dependent on the binlog
+  */
+  ha_binlog_end(current_thd);
   mysql_bin_log.cleanup();
 
 #ifdef HAVE_REPLICATION
@@ -3095,11 +3101,15 @@
   }
   if (opt_binlog_format_id == BF_UNSPECIFIED)
   {
-    /*
-      We use statement-based by default, but could change this to be row-based
-      if this is a cluster build (i.e. have_ndbcluster is true)...
-    */
-    opt_binlog_format_id= BF_STMT;
+#ifdef HAVE_ROW_BASED_REPLICATION
+    if (have_ndbcluster == SHOW_OPTION_YES)
+    {
+      rpl_filter->add_ignore_table("cluster_replication.binlog_index");
+      opt_binlog_format_id= BF_ROW;
+    }
+    else
+#endif
+      opt_binlog_format_id= BF_STMT;
   }
 #ifdef HAVE_ROW_BASED_REPLICATION
   if (opt_binlog_format_id == BF_ROW) 
@@ -4631,6 +4641,8 @@
   OPT_NDB_DISTRIBUTION,
   OPT_NDB_INDEX_STAT_ENABLE,
   OPT_NDB_INDEX_STAT_CACHE_ENTRIES, OPT_NDB_INDEX_STAT_UPDATE_FREQ,
+  OPT_NDB_REPORT_THRESH_BINLOG_EPOCH_SLIP,
+  OPT_NDB_REPORT_THRESH_BINLOG_MEM_USAGE,
   OPT_SKIP_SAFEMALLOC,
   OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_COMPLETION_TYPE,
   OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS,
@@ -5284,6 +5296,22 @@
    (gptr*) &global_system_variables.ndb_force_send,
    (gptr*) &global_system_variables.ndb_force_send,
    0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},
+  {"ndb-report-thresh-binlog-epoch-slip", OPT_NDB_REPORT_THRESH_BINLOG_EPOCH_SLIP,
+   "Threshold on number of epochs to be behind before reporting binlog status. "
+   "E.g. 3 means that if the difference between what epoch has been received "
+   "from the storage nodes and what has been applied to the binlog is 3 or more, "
+   "a status message will be sent to the cluster log.",
+   (gptr*) &ndb_report_thresh_binlog_epoch_slip,
+   (gptr*) &ndb_report_thresh_binlog_epoch_slip,
+   0, GET_ULONG, REQUIRED_ARG, 3, 0, 256, 0, 0, 0},
+  {"ndb-report-thresh-binlog-mem-usage", OPT_NDB_REPORT_THRESH_BINLOG_MEM_USAGE,
+   "Threshold on percentage of free memory before reporting binlog status. E.g. "
+   "10 means that if amount of available memory for receiving binlog data from "
+   "the storage nodes goes below 10%, "
+   "a status message will be sent to the cluster log.",
+   (gptr*) &ndb_report_thresh_binlog_mem_usage,
+   (gptr*) &ndb_report_thresh_binlog_mem_usage,
+   0, GET_ULONG, REQUIRED_ARG, 10, 0, 100, 0, 0, 0},
   {"ndb-use-exact-count", OPT_NDB_USE_EXACT_COUNT,
    "Use exact records count during query planning and for fast "
    "select count(*), disable for faster queries.",

--- 1.293/sql/sql_base.cc	2006-01-04 22:36:22 +01:00
+++ 1.294/sql/sql_base.cc	2006-01-05 12:55:43 +01:00
@@ -803,13 +803,14 @@
 */
 
 bool close_cached_tables(THD *thd, bool if_wait_for_refresh,
-			 TABLE_LIST *tables)
+			 TABLE_LIST *tables, bool have_lock)
 {
   bool result=0;
   DBUG_ENTER("close_cached_tables");
   DBUG_ASSERT(thd || (!if_wait_for_refresh && !tables));
 
-  VOID(pthread_mutex_lock(&LOCK_open));
+  if (!have_lock)
+    VOID(pthread_mutex_lock(&LOCK_open));
   if (!tables)
   {
     refresh_version++;				// Force close of open tables
@@ -888,7 +889,8 @@
     for (TABLE *table=thd->open_tables; table ; table= table->next)
       table->s->version= refresh_version;
   }
-  VOID(pthread_mutex_unlock(&LOCK_open));
+  if (!have_lock)
+    VOID(pthread_mutex_unlock(&LOCK_open));
   if (if_wait_for_refresh)
   {
     pthread_mutex_lock(&thd->mysys_var->mutex);
@@ -2383,7 +2385,7 @@
 
     table->s->table_map_id is not ULONG_MAX.
  */
-static void assign_new_table_id(TABLE *table)
+void assign_new_table_id(TABLE *table)
 {
   static ulong last_table_id= ULONG_MAX;
 

--- 1.123/sql/sql_db.cc	2005-12-31 05:54:32 +01:00
+++ 1.124/sql/sql_db.cc	2006-01-05 12:55:43 +01:00
@@ -401,6 +401,7 @@
                      bool silent)
 {
   char	 path[FN_REFLEN+16];
+  char	 tmp_query[FN_REFLEN+16];
   long result= 1;
   int error= 0;
   MY_STAT stat_info;
@@ -486,15 +487,18 @@
 
     if (!thd->query)				// Only in replication
     {
-      query= 	     path;
-      query_length= (uint) (strxmov(path,"create database `", db, "`", NullS) -
-			    path);
+      query= 	     tmp_query;
+      query_length= (uint) (strxmov(tmp_query,"create database `",
+                                    db, "`", NullS) - tmp_query);
     }
     else
     {
       query= 	    thd->query;
       query_length= thd->query_length;
     }
+
+    ha_create_database(thd, db, query, query_length);
+
     if (mysql_bin_log.is_open())
     {
       Query_log_event qinfo(thd, query, query_length, 0, 
@@ -568,6 +572,8 @@
 		     thd->variables.collation_server;
     thd->variables.collation_database= thd->db_charset;
   }
+
+  ha_alter_database(thd, db, thd->query, thd->query_length);
 
   if (mysql_bin_log.is_open())
   {

--- 1.502/sql/sql_parse.cc	2005-12-28 17:47:55 +01:00
+++ 1.503/sql/sql_parse.cc	2006-01-05 12:55:44 +01:00
@@ -78,7 +78,7 @@
   "Connect","Kill","Debug","Ping","Time","Delayed insert","Change user",
   "Binlog Dump","Table Dump",  "Connect Out", "Register Slave",
   "Prepare", "Execute", "Long Data", "Close stmt",
-  "Reset stmt", "Set option", "Fetch",
+  "Reset stmt", "Set option", "Fetch", "Daemon",
   "Error"					// Last command number
 };
 
@@ -148,7 +148,7 @@
   DBUG_RETURN(error);
 }
 
-static bool begin_trans(THD *thd)
+bool begin_trans(THD *thd)
 {
   int error=0;
   if (unlikely(thd->in_sub_stmt))
@@ -6609,6 +6609,8 @@
   I_List_iterator<THD> it(threads);
   while ((tmp=it++))
   {
+    if (tmp->command == COM_DAEMON)
+      continue;
     if (tmp->thread_id == id)
     {
       pthread_mutex_lock(&tmp->LOCK_delete);	// Lock from delete

--- 1.97/mysql-test/r/information_schema.result	2006-01-04 22:38:46 +01:00
+++ 1.98/mysql-test/r/information_schema.result	2006-01-05 12:58:34 +01:00
@@ -14,6 +14,7 @@
 select schema_name from information_schema.schemata;
 schema_name
 information_schema
+cluster_replication
 mysql
 test
 show databases like 't%';
@@ -22,6 +23,7 @@
 show databases;
 Database
 information_schema
+cluster_replication
 mysql
 test
 show databases where `database` = 't%';
@@ -34,7 +36,7 @@
 create table t5 (id int auto_increment primary key);
 insert into t5 values (10);
 create view v1 (c) as select table_name from information_schema.TABLES;
-select * from v1;
+select * from v1 where c not in ('apply_status');
 c
 CHARACTER_SETS
 COLLATIONS
@@ -54,6 +56,7 @@
 TRIGGERS
 VIEWS
 USER_PRIVILEGES
+binlog_index
 columns_priv
 db
 func
@@ -329,6 +332,7 @@
 select * from v0;
 c
 information_schema
+cluster_replication
 mysql
 test
 explain select * from v0;
@@ -796,8 +800,9 @@
 flush privileges;
 SELECT table_schema, count(*) FROM information_schema.TABLES GROUP BY TABLE_SCHEMA;
 table_schema	count(*)
-information_schema	18
-mysql	18
+cluster_replication	<count>
+information_schema	<count>
+mysql	<count>
 create table t1 (i int, j int);
 create trigger trg1 before insert on t1 for each row
 begin

--- 1.67/mysql-test/t/information_schema.test	2006-01-04 22:38:46 +01:00
+++ 1.68/mysql-test/t/information_schema.test	2006-01-05 12:55:41 +01:00
@@ -34,7 +34,8 @@
 create table t5 (id int auto_increment primary key);
 insert into t5 values (10);
 create view v1 (c) as select table_name from information_schema.TABLES;
-select * from v1;
+# the presence of 'apply_status' will depend on whether the cluster is running or not, so skip
+select * from v1 where c not in ('apply_status');
 
 select c,table_name from v1 
 inner join information_schema.TABLES v2 on (v1.c=v2.table_name)
@@ -448,7 +449,8 @@
 --disable_result_log
 SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES;
 --enable_result_log
-SELECT count(*) FROM INFORMATION_SCHEMA.TABLES;
+# the presence of 'apply_status' will depend on whether the cluster is running or not, so skip
+SELECT count(*) FROM INFORMATION_SCHEMA.TABLES WHERE table_schema NOT IN ('cluster_replication');
 let $tab_count= 65;
 while ($tab_count)
 {
@@ -514,6 +516,7 @@
 # Bug #9404  information_schema: Weird error messages
 # with SELECT SUM() ... GROUP BY queries
 #
+-- replace_column 2 <count>
 SELECT table_schema, count(*) FROM information_schema.TABLES GROUP BY TABLE_SCHEMA;
 
 

--- 1.50/mysql-test/mysql-test-run.pl	2006-01-04 10:36:49 +01:00
+++ 1.51/mysql-test/mysql-test-run.pl	2006-01-05 12:55:41 +01:00
@@ -140,6 +140,7 @@
 our $glob_timers=                 undef;
 our $glob_use_running_server=     0;
 our $glob_use_running_ndbcluster= 0;
+our $glob_use_running_ndbcluster_slave= 0;
 our $glob_use_embedded_server=    0;
 our @glob_test_mode;
 
@@ -229,6 +230,8 @@
 
 our $opt_ndbcluster_port;
 our $opt_ndbconnectstring;
+our $opt_ndbcluster_port_slave;
+our $opt_ndbconnectstring_slave;
 
 our $opt_no_manager;            # Does nothing now, we never use manager
 our $opt_manager_port;          # Does nothing now, we never use manager
@@ -295,12 +298,16 @@
 
 our $opt_skip_ndbcluster;
 our $opt_with_ndbcluster;
+our $opt_skip_ndbcluster_slave;
+our $opt_with_ndbcluster_slave;
+our $opt_ndb_extra_test;
 
 our $exe_ndb_mgm;
 our $path_ndb_tools_dir;
 our $path_ndb_backup_dir;
 our $file_ndb_testrun_log;
 our $flag_ndb_status_ok= 1;
+our $flag_ndb_slave_status_ok= 1;
 
 ######################################################################
 #
@@ -320,6 +327,9 @@
 sub ndbcluster_install ();
 sub ndbcluster_start ();
 sub ndbcluster_stop ();
+sub ndbcluster_install_slave ();
+sub ndbcluster_start_slave ();
+sub ndbcluster_stop_slave ();
 sub run_benchmarks ($);
 sub run_tests ();
 sub mysql_install_db ();
@@ -488,6 +498,7 @@
   my $opt_master_myport= 9306;
   my $opt_slave_myport=  9308;
   $opt_ndbcluster_port=  9350;
+  $opt_ndbcluster_port_slave=  9358;
   my $im_port=           9310;
   my $im_mysqld1_port=   9312;
   my $im_mysqld2_port=   9314;
@@ -521,6 +532,10 @@
              'force'                    => \$opt_force,
              'with-ndbcluster'          => \$opt_with_ndbcluster,
              'skip-ndbcluster|skip-ndb' => \$opt_skip_ndbcluster,
+             'with-ndbcluster-slave'    => \$opt_with_ndbcluster_slave,
+             'skip-ndbcluster-slave|skip-ndb-slave'
+                                        => \$opt_skip_ndbcluster_slave,
+             'ndb-extra-test'           => \$opt_ndb_extra_test,
              'do-test=s'                => \$opt_do_test,
              'suite=s'                  => \$opt_suite,
              'skip-rpl'                 => \$opt_skip_rpl,
@@ -531,6 +546,7 @@
              'master_port=i'            => \$opt_master_myport,
              'slave_port=i'             => \$opt_slave_myport,
              'ndbcluster_port=i'        => \$opt_ndbcluster_port,
+             'ndbcluster_port_slave=i'  => \$opt_ndbcluster_port_slave,
              'manager-port=i'           => \$opt_manager_port, # Currently not used
              'im-port=i'                => \$im_port, # Instance Manager port.
              'im-mysqld1-port=i'        => \$im_mysqld1_port, # Port of mysqld, controlled by IM
@@ -545,6 +561,7 @@
              # Run test on running server
              'extern'                   => \$opt_extern,
              'ndbconnectstring=s'       => \$opt_ndbconnectstring,
+             'ndbconnectstring-slave=s' => \$opt_ndbconnectstring_slave,
 
              # Debugging
              'gdb'                      => \$opt_gdb,
@@ -751,9 +768,25 @@
     $opt_ndbconnectstring= "host=localhost:$opt_ndbcluster_port";
   }
 
+  if ( $opt_ndbconnectstring_slave )
+  {
+    $glob_use_running_ndbcluster_slave= 1;
+    $opt_with_ndbcluster_slave= 1;
+  }
+  else
+  {
+    $opt_ndbconnectstring_slave= "host=localhost:$opt_ndbcluster_port_slave";
+  }
+
   if ( $opt_skip_ndbcluster )
   {
     $opt_with_ndbcluster= 0;
+    $opt_skip_ndbcluster_slave= 1;
+  }
+
+  if ( $opt_skip_ndbcluster_slave )
+  {
+    $opt_with_ndbcluster_slave= 0;
   }
 
   # The ":s" in the argument spec, means we have three different cases
@@ -846,6 +879,8 @@
    path_mysock   => "$sockdir/slave.sock",
    path_myport   => $opt_slave_myport,
    start_timeout => 400,
+
+   ndbcluster    =>  1, # ndbcluster not started
   };
 
   $slave->[1]=
@@ -1166,6 +1201,8 @@
 
     ndbcluster_stop();
     $master->[0]->{'ndbcluster'}= 1;
+    ndbcluster_stop_slave();
+    $slave->[0]->{'ndbcluster'}= 1;
   }
 }
 
@@ -1352,6 +1389,68 @@
   return;
 }
 
+sub ndbcluster_install_slave () {
+
+  if ( ! $opt_with_ndbcluster_slave or $glob_use_running_ndbcluster_slave )
+  {
+    return 0;
+  }
+  mtr_report("Install ndbcluster slave");
+  if (  mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
+		["--port=$opt_ndbcluster_port_slave",
+		 "--data-dir=$opt_vardir",
+		 "--small",
+		 "--diskless",
+		 "--ndbd-nodes=1",
+		 "--initial"],
+		"", "", "", "") )
+  {
+    mtr_error("Error ndbcluster_install_slave");
+    return 1;
+  }
+
+  ndbcluster_stop_slave();
+  $slave->[0]->{'ndbcluster'}= 1;
+
+  return 0;
+}
+
+sub ndbcluster_start_slave () {
+
+  if ( ! $opt_with_ndbcluster_slave or $glob_use_running_ndbcluster_slave )
+  {
+    return 0;
+  }
+  # FIXME, we want to _append_ output to file $file_ndb_testrun_log instead of /dev/null
+  if ( mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
+	       ["--port=$opt_ndbcluster_port_slave",
+		"--data-dir=$opt_vardir",
+		"--ndbd-nodes=1"],
+	       "", "/dev/null", "", "") )
+  {
+    mtr_error("Error ndbcluster_start_slave");
+    return 1;
+  }
+
+  return 0;
+}
+
+sub ndbcluster_stop_slave () {
+
+  if ( ! $opt_with_ndbcluster_slave or $glob_use_running_ndbcluster_slave )
+  {
+    return;
+  }
+  # FIXME, we want to _append_ output to file $file_ndb_testrun_log instead of /dev/null
+  mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
+          ["--port=$opt_ndbcluster_port_slave",
+           "--data-dir=$opt_vardir",
+           "--stop"],
+          "", "/dev/null", "", "");
+
+  return;
+}
+
 
 ##############################################################################
 #
@@ -1503,6 +1602,13 @@
     $flag_ndb_status_ok= 0;
   }
 
+  if ( ndbcluster_install_slave() )
+  {
+    # failed to install, disable usage but flag that it's not ok
+    $opt_with_ndbcluster_slave= 0;
+    $flag_ndb_slave_status_ok= 0;
+  }
+
   return 0;
 }
 
@@ -1826,6 +1932,18 @@
       {
         if ( ! $slave->[$idx]->{'pid'} )
         {
+          if ( $idx == 0)
+	  {
+	    if ( $slave->[0]->{'ndbcluster'} )
+	    {
+	      $slave->[0]->{'ndbcluster'}= ndbcluster_start_slave();
+	      if ( $slave->[0]->{'ndbcluster'} )
+	      {
+		report_failure_and_restart($tinfo);
+		return;
+	      }
+	    }
+	  }
           $slave->[$idx]->{'pid'}=
             mysqld_start('slave',$idx,
                          $tinfo->{'slave_opt'}, $tinfo->{'slave_mi'});
@@ -2088,6 +2206,12 @@
     {
       mtr_add_arg($args, "%s--skip-ndbcluster", $prefix);
     }
+    if ( $opt_with_ndbcluster )
+    {
+      mtr_add_arg($args, "%s--ndbcluster", $prefix);
+      mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix,
+                  $opt_ndbconnectstring);
+    }
   }
 
   if ( $type eq 'slave' )
@@ -2144,6 +2268,17 @@
       mtr_add_arg($args, "%s--server-id=%d", $prefix, $slave_server_id);
       mtr_add_arg($args, "%s--rpl-recovery-rank=%d", $prefix, $slave_rpl_rank);
     }
+    
+    if ( $opt_skip_ndbcluster_slave )
+    {
+      mtr_add_arg($args, "%s--skip-ndbcluster", $prefix);
+    }
+    if ( $idx == 0 and $opt_with_ndbcluster_slave )
+    {
+      mtr_add_arg($args, "%s--ndbcluster", $prefix);
+      mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix,
+                  $opt_ndbconnectstring_slave);
+    }
   } # end slave
 
   if ( $opt_debug )
@@ -2160,13 +2295,6 @@
     }
   }
 
-  if ( $opt_with_ndbcluster )
-  {
-    mtr_add_arg($args, "%s--ndbcluster", $prefix);
-    mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix,
-                $opt_ndbconnectstring);
-  }
-
   # FIXME always set nowdays??? SMALL_SERVER
   mtr_add_arg($args, "%s--key_buffer_size=1M", $prefix);
   mtr_add_arg($args, "%s--sort_buffer=256K", $prefix);
@@ -2381,6 +2509,12 @@
     }
   }
 
+  if ( ! $slave->[0]->{'ndbcluster'} )
+  {
+    ndbcluster_stop_slave();
+    $slave->[0]->{'ndbcluster'}= 1;
+  }
+
   mtr_stop_mysqld_servers(\@args);
 }
 
@@ -2590,6 +2724,8 @@
   $ENV{'MYSQL_MY_PRINT_DEFAULTS'}=  $exe_my_print_defaults;
 
   $ENV{'NDB_STATUS_OK'}=            $flag_ndb_status_ok;
+  $ENV{'NDB_SLAVE_STATUS_OK'}=      $flag_ndb_slave_status_ok;
+  $ENV{'NDB_EXTRA_TEST'}=           $opt_ndb_extra_test;
   $ENV{'NDB_MGM'}=                  $exe_ndb_mgm;
   $ENV{'NDB_BACKUP_DIR'}=           $path_ndb_backup_dir;
   $ENV{'NDB_TOOLS_DIR'}=            $path_ndb_tools_dir;

--- 1.29/storage/ndb/src/kernel/blocks/backup/Backup.cpp	2005-12-28 03:43:33 +01:00
+++ 1.30/storage/ndb/src/kernel/blocks/backup/Backup.cpp	2006-01-05 12:55:44 +01:00
@@ -1301,6 +1301,7 @@
   
   for (int i=0; i < 3; i++) {
     req->setTriggerEvent(triggerEventValues[i]);
+    req->setReportAllMonitoredAttributes(false);
     BaseString::snprintf(triggerName, sizeof(triggerName), triggerNameFormat[i],
 	     ptr.p->backupId, tabPtr.p->tableId);
     w.reset();

--- 1.16/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp	2006-01-01 21:30:03 +01:00
+++ 1.17/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp	2006-01-05 12:55:44 +01:00
@@ -266,6 +266,7 @@
     ljam();
     tptr.p->sendBeforeValues = false;
   }
+  /*
   tptr.p->sendOnlyChangedAttributes = false;
   if (((tptr.p->triggerType == TriggerType::SUBSCRIPTION) ||
       (tptr.p->triggerType == TriggerType::SUBSCRIPTION_BEFORE)) &&
@@ -273,7 +274,8 @@
     ljam();
     tptr.p->sendOnlyChangedAttributes = true;
   }
-
+  */
+  tptr.p->sendOnlyChangedAttributes = !req->getReportAllMonitoredAttributes();
   // Set monitor all
   tptr.p->monitorAllAttributes = req->getMonitorAllAttributes();
   tptr.p->monitorReplicas = req->getMonitorReplicas();

--- 1.26/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp	2006-01-01 21:25:52 +01:00
+++ 1.27/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp	2006-01-05 12:55:44 +01:00
@@ -268,6 +268,7 @@
   m_state= EO_ERROR;
   mi_type= 0;
   m_magic_number= 0;
+  m_error.code= myDict->getNdbError().code;
   m_ndb->theEventBuffer->remove_op();
   m_ndb->theEventBuffer->add_drop_unlock();
   DBUG_RETURN(r);
@@ -670,7 +671,7 @@
 
   NdbMutex_Lock(m_mutex);
   NdbEventOperationImpl *ev_op= move_data();
-  if (unlikely(ev_op == 0))
+  if (unlikely(ev_op == 0 && aMillisecondNumber))
   {
     NdbCondition_WaitTimeout(p_cond, m_mutex, aMillisecondNumber);
     ev_op= move_data();
@@ -994,6 +995,33 @@
 }
 
 void
+NdbEventBuffer::report_node_failure(Uint32 node_id)
+{
+  DBUG_ENTER("NdbEventBuffer::report_node_failure");
+  SubTableData data;
+  LinearSectionPtr ptr[3];
+  bzero(&data, sizeof(data));
+  bzero(ptr, sizeof(ptr));
+
+  data.tableId = ~0;
+  data.operation = NdbDictionary::Event::_TE_NODE_FAILURE;
+  data.req_nodeid = (Uint8)node_id;
+  data.ndbd_nodeid = (Uint8)node_id;
+  data.logType = SubTableData::LOG;
+  /**
+   * Insert this event for each operation
+   */
+  NdbEventOperation* op= 0;
+  while((op = m_ndb->getEventOperation(op)))
+  {
+    NdbEventOperationImpl* impl= &op->m_impl;
+    data.senderData = impl->m_oid;
+    insertDataL(impl, &data, ptr); 
+  }
+  DBUG_VOID_RETURN;
+}
+
+void
 NdbEventBuffer::completeClusterFailed()
 {
   DBUG_ENTER("NdbEventBuffer::completeClusterFailed");
@@ -1376,7 +1404,7 @@
     m_min_free_thresh= m_free_thresh;
     m_max_free_thresh= 100;
     goto send_report;
- }
+  }
   if (latest_gci-apply_gci >=  m_gci_slip_thresh)
   {
     goto send_report;

--- 1.228/sql/ha_ndbcluster.cc	2005-12-31 05:53:52 +01:00
+++ 1.229/sql/ha_ndbcluster.cc	2006-01-05 12:55:42 +01:00
@@ -33,6 +33,8 @@
 #include <../util/Bitmask.hpp>
 #include <ndbapi/NdbIndexStat.hpp>
 
+#include "ha_ndbcluster_binlog.h"
+
 // options from from mysqld.cc
 extern my_bool opt_ndb_optimized_node_selection;
 extern const char *opt_ndbcluster_connectstring;
@@ -50,13 +52,9 @@
 // createable against NDB from this handler
 static const int max_transactions= 3; // should really be 2 but there is a transaction to much allocated when loch table is used
 
-static const char *ha_ndb_ext=".ndb";
-static const char share_prefix[]= "./";
-
-static int ndbcluster_close_connection(THD *thd);
-static int ndbcluster_commit(THD *thd, bool all);
-static int ndbcluster_rollback(THD *thd, bool all);
-static handler* ndbcluster_create_handler(TABLE_SHARE *table);
+static bool ndbcluster_init(void);
+static int ndbcluster_end(ha_panic_function flag);
+static bool ndbcluster_show_status(THD*,stat_print_fn *,enum ha_stat_type);
 
 handlerton ndbcluster_hton = {
   MYSQL_HANDLERTON_INTERFACE_VERSION,
@@ -65,28 +63,7 @@
   "Clustered, fault-tolerant, memory-based tables", 
   DB_TYPE_NDBCLUSTER,
   ndbcluster_init,
-  0, /* slot */
-  0, /* savepoint size */
-  ndbcluster_close_connection,
-  NULL, /* savepoint_set */
-  NULL, /* savepoint_rollback */
-  NULL, /* savepoint_release */
-  ndbcluster_commit,
-  ndbcluster_rollback,
-  NULL, /* prepare */
-  NULL, /* recover */
-  NULL, /* commit_by_xid */
-  NULL, /* rollback_by_xid */
-  NULL, /* create_cursor_read_view */
-  NULL, /* set_cursor_read_view */
-  NULL, /* close_cursor_read_view */
-  ndbcluster_create_handler, /* Create a new handler */
-  ndbcluster_drop_database, /* Drop a database */
-  ndbcluster_end, /* Panic call */
-  NULL, /* Start Consistent Snapshot */
-  NULL, /* Flush logs */
-  ndbcluster_show_status, /* Show status */
-  HTON_NO_FLAGS
+  ~(uint)0, /* slot */
 };
 
 static handler *ndbcluster_create_handler(TABLE_SHARE *table)
@@ -119,33 +96,24 @@
   break;                                 \
 }
 
-// Typedefs for long names
-typedef NdbDictionary::Object NDBOBJ;
-typedef NdbDictionary::Column NDBCOL;
-typedef NdbDictionary::Table NDBTAB;
-typedef NdbDictionary::Index  NDBINDEX;
-typedef NdbDictionary::Dictionary  NDBDICT;
-typedef NdbDictionary::Event  NDBEVENT;
-
 static int ndbcluster_inited= 0;
-static int ndbcluster_util_inited= 0;
+int ndbcluster_util_inited= 0;
 
 static Ndb* g_ndb= NULL;
-static Ndb_cluster_connection* g_ndb_cluster_connection= NULL;
+Ndb_cluster_connection* g_ndb_cluster_connection= NULL;
+unsigned char g_node_id_map[max_ndb_nodes];
 
 // Handler synchronization
 pthread_mutex_t ndbcluster_mutex;
 
 // Table lock handling
-static HASH ndbcluster_open_tables;
+HASH ndbcluster_open_tables;
 
 static byte *ndbcluster_get_key(NDB_SHARE *share,uint *length,
                                 my_bool not_used __attribute__((unused)));
-static NDB_SHARE *get_share(const char *key,
-                            bool create_if_not_exists= TRUE,
-                            bool have_lock= FALSE);
-static void free_share(NDB_SHARE **share, bool have_lock= FALSE);
-static void real_free_share(NDB_SHARE **share);
+#ifdef HAVE_NDB_BINLOG
+static int rename_share(NDB_SHARE *share, const char *new_key, bool have_lock);
+#endif
 static void ndb_set_fragmentation(NDBTAB &tab, TABLE *table, uint pk_len);
 
 static int packfrm(const void *data, uint len, const void **pack_data, uint *pack_len);
@@ -155,35 +123,9 @@
 static int ndb_get_table_statistics(Ndb*, const char *, 
                                     struct Ndb_statistics *);
 
-#ifndef DBUG_OFF
-void print_records(TABLE *table, const char *record)
-{
-  if (_db_on_)
-  {
-    for (uint j= 0; j < table->s->fields; j++)
-    {
-      char buf[40];
-      int pos= 0;
-      Field *field= table->field[j];
-      const byte* field_ptr= field->ptr - table->record[0] + record;
-      int pack_len= field->pack_length();
-      int n= pack_len < 10 ? pack_len : 10;
-      
-      for (int i= 0; i < n && pos < 20; i++)
-      {
-	pos+= sprintf(&buf[pos]," %x", (int) (unsigned char) field_ptr[i]);
-      }
-      buf[pos]= 0;
-      DBUG_PRINT("info",("[%u]field_ptr[0->%d]: %s", j, n, buf));
-    }
-  }
-}
-#else
-#define print_records(a,b)
-#endif
 
 // Util thread variables
-static pthread_t ndb_util_thread;
+pthread_t ndb_util_thread;
 pthread_mutex_t LOCK_ndb_util_thread;
 pthread_cond_t COND_ndb_util_thread;
 pthread_handler_t ndb_util_thread_func(void *arg);
@@ -212,7 +154,7 @@
 static const char * ndb_connected_host= 0;
 static long ndb_connected_port= 0;
 static long ndb_number_of_replicas= 0;
-static long ndb_number_of_storage_nodes= 0;
+long ndb_number_of_storage_nodes= 0;
 
 static int update_status_variables(Ndb_cluster_connection *c)
 {
@@ -233,9 +175,6 @@
   {NullS, NullS, SHOW_LONG}
 };
 
-/* instantiated in storage/ndb/src/ndbapi/Ndbif.cpp */
-extern Uint64 g_latest_trans_gci;
-
 /*
   Error handling functions
 */
@@ -363,6 +302,7 @@
   all= NULL;
   stmt= NULL;
   error= 0;
+  options= 0;
 }
 
 Thd_ndb::~Thd_ndb()
@@ -388,14 +328,6 @@
 }
 
 inline
-Thd_ndb *
-get_thd_ndb(THD *thd) { return (Thd_ndb *) thd->ha_data[ndbcluster_hton.slot]; }
-
-inline
-void
-set_thd_ndb(THD *thd, Thd_ndb *thd_ndb) { thd->ha_data[ndbcluster_hton.slot]= thd_ndb; }
-
-inline
 Ndb *ha_ndbcluster::get_ndb()
 {
   return get_thd_ndb(current_thd)->ndb;
@@ -2514,8 +2446,8 @@
     set to null.
 */
 
-static void ndb_unpack_record(TABLE *table, NdbValue *value,
-                              MY_BITMAP *defined, byte *buf)
+void ndb_unpack_record(TABLE *table, NdbValue *value,
+                       MY_BITMAP *defined, byte *buf)
 {
   Field **p_field= table->field, *field= *p_field;
   uint row_offset= (uint) (buf - table->record[0]);
@@ -2753,6 +2685,7 @@
   statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status);
   DBUG_ENTER("ha_ndbcluster::index_read_idx");
   DBUG_PRINT("enter", ("index_no: %u, key_len: %u", index_no, key_len));  
+  close_scan();
   index_init(index_no, 0);  
   DBUG_RETURN(index_read(buf, key, key_len, find_flag));
 }
@@ -3150,6 +3083,16 @@
     m_use_write= FALSE;
     m_ignore_dup_key= FALSE;
     break;
+  case HA_EXTRA_IGNORE_NO_KEY:
+    DBUG_PRINT("info", ("HA_EXTRA_IGNORE_NO_KEY"));
+    DBUG_PRINT("info", ("Turning on AO_IgnoreError at Commit/NoCommit"));
+    m_ignore_no_key= TRUE;
+    break;
+  case HA_EXTRA_NO_IGNORE_NO_KEY:
+    DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_NO_KEY"));
+    DBUG_PRINT("info", ("Turning off AO_IgnoreError at Commit/NoCommit"));
+    m_ignore_no_key= FALSE;
+    break;
   default:
     break;
   }
@@ -3578,7 +3521,7 @@
   Commit a transaction started in NDB
  */
 
-int ndbcluster_commit(THD *thd, bool all)
+static int ndbcluster_commit(THD *thd, bool all)
 {
   int res= 0;
   Thd_ndb *thd_ndb= get_thd_ndb(thd);
@@ -3629,7 +3572,7 @@
   Rollback a transaction started in NDB
  */
 
-int ndbcluster_rollback(THD *thd, bool all)
+static int ndbcluster_rollback(THD *thd, bool all)
 {
   int res= 0;
   Thd_ndb *thd_ndb= get_thd_ndb(thd);
@@ -3976,6 +3919,12 @@
     */
     if ((my_errno= write_ndb_file()))
       DBUG_RETURN(my_errno);
+#ifdef HAVE_NDB_BINLOG
+    ndbcluster_create_binlog_setup(get_ndb(), name2, m_dbname, m_tabname,
+                                   ndb_binlog_thread_running > 0 &&
+                                   !is_prefix(m_tabname, tmp_file_prefix),
+                                   0, TRUE);
+#endif /* HAVE_NDB_BINLOG */
     DBUG_RETURN(my_errno);
   }
 
@@ -4122,6 +4071,73 @@
   if (!my_errno)
     my_errno= write_ndb_file();
 
+#ifdef HAVE_NDB_BINLOG
+  if (!my_errno)
+  {
+    NDB_SHARE *share= 0;
+    pthread_mutex_lock(&ndbcluster_mutex);
+    /*
+      First make sure we get a "fresh" share here, not an old trailing one...
+    */
+    {
+      const char *key= name2;
+      uint length= (uint) strlen(key);
+      if ((share= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
+                                           (byte*) key, length)))
+        handle_trailing_share(share, TRUE);
+    }
+    /*
+      get a new share
+    */
+    if (!(share= get_share(name2, true, true)))
+    {
+      sql_print_error("NDB: allocating table share for %s failed", name2);
+      /* my_errno is set */
+    }
+    pthread_mutex_unlock(&ndbcluster_mutex);
+
+    while (!is_prefix(m_tabname, tmp_file_prefix))
+    {
+      const NDBTAB *t= dict->getTable(m_tabname);
+      String event_name(INJECTOR_EVENT_LEN);
+      ndb_rep_event_name(&event_name,m_dbname,m_tabname);
+
+      /*
+        Always create an event for the table, as other mysql servers
+        expect it to be there.
+      */
+      if (ndbcluster_create_event(ndb, t, event_name.c_ptr(), share) < 0)
+      {
+        /* this is only a serious error if the binlog is on */
+	if (share && ndb_binlog_thread_running > 0)
+	{
+          push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+                              ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
+                              "Creating event for logging table failed. "
+                              "See error log for details.");
+	}
+        break;
+      }
+      sql_print_information("NDB Binlog: CREATE TABLE Event: %s",
+                            event_name.c_ptr());
+
+      if (share && ndb_binlog_thread_running > 0 &&
+          ndbcluster_create_event_ops(share, t, event_name.c_ptr()) < 0)
+      {
+        sql_print_error("NDB Binlog: FAILED CREATE TABLE event operations."
+                        " Event: %s", name2);
+        /* a warning has been issued to the client */
+      }
+      ndbcluster_log_schema_op(current_thd, share,
+                               current_thd->query, current_thd->query_length,
+                               share->db, share->table_name,
+                               0, 0,
+                               SOT_CREATE_TABLE);
+      break;
+    }
+  }
+#endif /* HAVE_NDB_BINLOG */
+
   DBUG_RETURN(my_errno);
 }
 
@@ -4216,6 +4232,15 @@
     if (!(orig_tab= dict->getTable(m_tabname)))
       ERR_RETURN(dict->getNdbError());
   }
+#ifdef HAVE_NDB_BINLOG
+  NDB_SHARE *share= 0;
+  if (ndb_binlog_thread_running > 0 &&
+      (share= get_share(from, false)))
+  {
+    int r= rename_share(share, to, TRUE);
+    DBUG_ASSERT(r == 0);
+  }
+#endif
   m_table= (void *)orig_tab;
   // Change current database to that of target table
   set_dbname(to);
@@ -4223,6 +4248,14 @@
 
   if ((result= alter_table_name(new_tabname)))
   {
+#ifdef HAVE_NDB_BINLOG
+    if (share)
+    {
+      int r= rename_share(share, from, TRUE);
+      DBUG_ASSERT(r == 0);
+      free_share(&share);
+    }
+#endif
     DBUG_RETURN(result);
   }
   
@@ -4230,9 +4263,75 @@
   if ((result= handler::rename_table(from, to)))
   {
     // ToDo in 4.1 should rollback alter table...
+#ifdef HAVE_NDB_BINLOG
+    if (share)
+      free_share(&share);
+#endif
     DBUG_RETURN(result);
   }
 
+#ifdef HAVE_NDB_BINLOG
+  int is_old_table_tmpfile= 1;
+  if (share && share->op)
+    dict->forceGCPWait();
+
+  /* handle old table */
+  if (!is_prefix(m_tabname, tmp_file_prefix))
+  {
+    is_old_table_tmpfile= 0;
+    String event_name(INJECTOR_EVENT_LEN);
+    ndb_rep_event_name(&event_name, from + sizeof(share_prefix) - 1, 0);
+    ndbcluster_handle_drop_table(ndb, event_name.c_ptr(), share);
+  }
+
+  if (!result && !is_prefix(new_tabname, tmp_file_prefix))
+  {
+    /* always create an event for the table */
+    String event_name(INJECTOR_EVENT_LEN);
+    ndb_rep_event_name(&event_name, to + sizeof(share_prefix) - 1, 0);
+    const NDBTAB *ndbtab= dict->getTable(new_tabname);
+
+    if (ndbcluster_create_event(ndb, ndbtab, event_name.c_ptr(), share) >= 0)
+    {
+      sql_print_information("NDB Binlog: RENAME Event: %s",
+                            event_name.c_ptr());
+      if (share)
+      {
+        if (ndbcluster_create_event_ops(share, ndbtab,
+                                        event_name.c_ptr()) < 0)
+        {
+          sql_print_error("NDB Binlog: FAILED create event operations "
+                          "during RENAME. Event %s", event_name.c_ptr());
+          /* a warning has been issued to the client */
+        }
+      }
+    }
+    else
+    {
+      sql_print_error("NDB Binlog: FAILED create event during RENAME."
+                      "Event: %s", event_name.c_ptr());
+      push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR,
+                          ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
+                          "Creating event for logging table failed. "
+                          "See error log for details.");
+    }
+    if (is_old_table_tmpfile)
+      ndbcluster_log_schema_op(current_thd, share,
+                               current_thd->query, current_thd->query_length,
+                               share->db, share->table_name,
+                               0, 0,
+                               SOT_ALTER_TABLE);
+    else
+      ndbcluster_log_schema_op(current_thd, share,
+                               current_thd->query, current_thd->query_length,
+                               share->db, share->table_name,
+                               0, 0,
+                               SOT_RENAME_TABLE);
+  }
+  if (share)
+    free_share(&share);
+#endif
+
   DBUG_RETURN(result);
 }
 
@@ -4275,6 +4374,9 @@
 {
   DBUG_ENTER("ha_ndbcluster::ndbcluster_delete_table");
   NDBDICT *dict= ndb->getDictionary();
+#ifdef HAVE_NDB_BINLOG
+  NDB_SHARE *share= get_share(path, false);
+#endif
 
   /* Drop the table from NDB */
   
@@ -4291,9 +4393,75 @@
 
   if (res)
   {
+#ifdef HAVE_NDB_BINLOG
+    /* the drop table failed for some reason, drop the share anyways */
+    if (share)
+    {
+      pthread_mutex_lock(&ndbcluster_mutex);
+      if (share->state != NSS_DROPPED)
+      {
+        /*
+          The share kept by the server has not been freed, free it
+        */
+        share->state= NSS_DROPPED;
+        free_share(&share, TRUE);
+      }
+      /* free the share taken above */
+      free_share(&share, TRUE);
+      pthread_mutex_unlock(&ndbcluster_mutex);
+    }
+#endif
     DBUG_RETURN(res);
   }
 
+#ifdef HAVE_NDB_BINLOG
+  /* stop the logging of the dropped table, and cleanup */
+
+  /*
+    drop table is successful even if table does not exist in ndb
+    and in case table was actually not dropped, there is no need
+    to force a gcp, and setting the event_name to null will indicate
+    that there is no event to be dropped
+  */
+  int table_dropped= dict->getNdbError().code != 709;
+
+  if (!is_prefix(table_name, tmp_file_prefix) && share)
+  {
+    ndbcluster_log_schema_op(current_thd, share,
+                             current_thd->query, current_thd->query_length,
+                             share->db, share->table_name,
+                             0, 0,
+                             SOT_DROP_TABLE);
+  }
+  else if (table_dropped && share && share->op) /* ndbcluster_log_schema_op
+                                                   will do a force GCP */
+    dict->forceGCPWait();
+
+  if (!is_prefix(table_name, tmp_file_prefix))
+  {
+    String event_name(INJECTOR_EVENT_LEN);
+    ndb_rep_event_name(&event_name, path + sizeof(share_prefix) - 1, 0);
+    ndbcluster_handle_drop_table(ndb,
+                                 table_dropped ? event_name.c_ptr() : 0,
+                                 share);
+  }
+
+  if (share)
+  {
+    pthread_mutex_lock(&ndbcluster_mutex);
+    if (share->state != NSS_DROPPED)
+    {
+      /*
+        The share kept by the server has not been freed, free it
+      */
+      share->state= NSS_DROPPED;
+      free_share(&share, TRUE);
+    }
+    /* free the share taken above */
+    free_share(&share, TRUE);
+    pthread_mutex_unlock(&ndbcluster_mutex);
+  }
+#endif
   DBUG_RETURN(0);
 }
 
@@ -4382,7 +4550,8 @@
                 HA_NO_PREFIX_CHAR_KEYS | \
                 HA_NEED_READ_RANGE_BUFFER | \
                 HA_CAN_GEOMETRY | \
-                HA_CAN_BIT_FIELD
+                HA_CAN_BIT_FIELD | \
+                HA_PRIMARY_KEY_ALLOW_RANDOM_ACCESS
 
 ha_ndbcluster::ha_ndbcluster(TABLE_SHARE *table_arg):
   handler(&ndbcluster_hton, table_arg),
@@ -4618,7 +4787,7 @@
 }
 
 
-int ndbcluster_close_connection(THD *thd)
+static int ndbcluster_close_connection(THD *thd)
 {
   Thd_ndb *thd_ndb= get_thd_ndb(thd);
   DBUG_ENTER("ndbcluster_close_connection");
@@ -4781,14 +4950,21 @@
   DBUG_RETURN(ret);      
 }
 
-void ndbcluster_drop_database(char *path)
+static void ndbcluster_drop_database(char *path)
 {
   ndbcluster_drop_database_impl(path);
+#ifdef HAVE_NDB_BINLOG
+  char db[FN_REFLEN];
+  ha_ndbcluster::set_dbname(path, db);
+  ndbcluster_log_schema_op(current_thd, 0,
+                           current_thd->query, current_thd->query_length,
+                           db, "", 0, 0, SOT_DROP_DB);
+#endif
 }
 /*
   find all tables in ndb and discover those needed
 */
-static int ndbcluster_find_all_files(THD *thd)
+int ndbcluster_find_all_files(THD *thd)
 {
   DBUG_ENTER("ndbcluster_find_all_files");
   Ndb* ndb;
@@ -4823,10 +4999,11 @@
 
       if (!(ndbtab= dict->getTable(elmt.name)))
       {
-        sql_print_error("NDB: failed to setup table %s.%s, error: %d, %s",
-                        elmt.database, elmt.name,
-                        dict->getNdbError().code,
-                        dict->getNdbError().message);
+        if (elmt.state == NDBOBJ::StateOnline)
+          sql_print_error("NDB: failed to setup table %s.%s, error: %d, %s",
+                          elmt.database, elmt.name,
+                          dict->getNdbError().code,
+                          dict->getNdbError().message);
         unhandled++;
         continue;
       }
@@ -4865,6 +5042,34 @@
         }
         pthread_mutex_unlock(&LOCK_open);
       }
+#ifdef HAVE_NDB_BINLOG
+      else
+      {
+        /* set up replication for this table */
+        NDB_SHARE *share;
+        pthread_mutex_lock(&ndbcluster_mutex);
+        if (((share= (NDB_SHARE*)hash_search(&ndbcluster_open_tables,
+                                            (byte*) key, strlen(key)))
+              && share->op == 0 && share->op_old == 0)
+            || share == 0)
+        {
+          /*
+            there is no binlog creation setup for this table
+            attempt to do it
+          */
+          pthread_mutex_unlock(&ndbcluster_mutex);
+          pthread_mutex_lock(&LOCK_open);
+          ndbcluster_create_binlog_setup(ndb, key, elmt.database, elmt.name,
+                                         ndb_binlog_thread_running > 0 &&
+                                         !is_prefix(elmt.name,
+                                                    tmp_file_prefix),
+                                         share, TRUE);
+          pthread_mutex_unlock(&LOCK_open);
+        }
+        else
+          pthread_mutex_unlock(&ndbcluster_mutex);
+      }
+#endif
     }
   }
   while (unhandled && retries--);
@@ -4972,6 +5177,41 @@
     }
   }
 
+#ifdef HAVE_NDB_BINLOG
+  /* setup logging to binlog for all discovered tables */
+  if (ndb_binlog_thread_running > 0)
+  {
+    char *end;
+    char *end1=
+      strxnmov(name, sizeof(name), mysql_data_home, "/", db, "/", NullS);
+    NDB_SHARE *share;
+    pthread_mutex_lock(&ndbcluster_mutex);
+    for (i= 0; i < ok_tables.records; i++)
+    {
+      file_name= (char*)hash_element(&ok_tables, i);
+      end= strxnmov(end1, sizeof(name) - (end1 - name), file_name, NullS);
+      if ((share= (NDB_SHARE*)hash_search(&ndbcluster_open_tables,
+                                          (byte*)name, end - name))
+          && share->op == 0 && share->op_old == 0)
+      {
+        /*
+          there is no binlog creation setup for this table
+          attempt to do it
+	*/
+        
+        pthread_mutex_unlock(&ndbcluster_mutex);
+        pthread_mutex_lock(&LOCK_open);
+        ndbcluster_create_binlog_setup(ndb, name, db, file_name,
+                                       !is_prefix(file_name, tmp_file_prefix),
+                                       share, TRUE);
+        pthread_mutex_unlock(&LOCK_open);
+        pthread_mutex_lock(&ndbcluster_mutex);
+      }
+    }
+    pthread_mutex_unlock(&ndbcluster_mutex);
+  }
+#endif
+
   // Check for new files to discover
   DBUG_PRINT("info", ("Checking for new files to discover"));       
   List<char> create_list;
@@ -5044,11 +5284,18 @@
 static int connect_callback()
 {
   update_status_variables(g_ndb_cluster_connection);
+
+  uint node_id, i= 0;
+  Ndb_cluster_connection_node_iter node_iter;
+  memset((void *)g_node_id_map, 0xFFFF, sizeof(g_node_id_map));
+  while ((node_id= g_ndb_cluster_connection->get_next_node(node_iter)))
+    g_node_id_map[node_id]= i++;
+
   pthread_cond_signal(&COND_ndb_util_thread);
   return 0;
 }
 
-bool ndbcluster_init()
+static bool ndbcluster_init()
 {
   int res;
   DBUG_ENTER("ndbcluster_init");
@@ -5056,6 +5303,21 @@
   if (have_ndbcluster != SHOW_OPTION_YES)
     goto ndbcluster_init_error;
 
+  {
+    handlerton &h= ndbcluster_hton;
+    h.close_connection= ndbcluster_close_connection;
+    h.commit=           ndbcluster_commit;
+    h.rollback=         ndbcluster_rollback;
+    h.create=           ndbcluster_create_handler; /* Create a new handler */
+    h.drop_database=    ndbcluster_drop_database;  /* Drop a database */
+    h.panic=            ndbcluster_end;            /* Panic call */
+    h.show_status=      ndbcluster_show_status;    /* Show status */
+#ifdef HAVE_NDB_BINLOG
+    ndbcluster_binlog_init_handlerton();
+#endif
+    h.flags=            HTON_NO_FLAGS;
+  }
+
   // Set connectstring if specified
   if (opt_ndbcluster_connectstring != 0)
     DBUG_PRINT("connectstring", ("%s", opt_ndbcluster_connectstring));     
@@ -5119,6 +5381,22 @@
   (void) hash_init(&ndbcluster_open_tables,system_charset_info,32,0,0,
                    (hash_get_key) ndbcluster_get_key,0,0);
   pthread_mutex_init(&ndbcluster_mutex,MY_MUTEX_INIT_FAST);
+#ifdef HAVE_NDB_BINLOG
+  /* start the ndb injector thread */
+  if (opt_bin_log)
+  {
+    if (binlog_row_based)
+    {
+      if (ndbcluster_binlog_start())
+        goto ndbcluster_init_error;
+    }
+    else
+    {
+      sql_print_error("NDB: only row based binary logging is supported");
+    }
+  }
+#endif /* HAVE_NDB_BINLOG */
+  
   pthread_mutex_init(&LOCK_ndb_util_thread, MY_MUTEX_INIT_FAST);
   pthread_cond_init(&COND_ndb_util_thread, NULL);
 
@@ -5149,26 +5427,13 @@
   DBUG_RETURN(TRUE);
 }
 
-
-/*
-  End use of the NDB Cluster table handler
-  - free all global variables allocated by 
-    ndbcluster_init()
-*/
-
-int ndbcluster_end(ha_panic_function type)
+static int ndbcluster_end(ha_panic_function type)
 {
   DBUG_ENTER("ndbcluster_end");
 
   if (!ndbcluster_inited)
     DBUG_RETURN(0);
 
-  // Kill ndb utility thread
-  (void) pthread_mutex_lock(&LOCK_ndb_util_thread);  
-  DBUG_PRINT("exit",("killing ndb util thread: %lx", ndb_util_thread));
-  (void) pthread_cond_signal(&COND_ndb_util_thread);
-  (void) pthread_mutex_unlock(&LOCK_ndb_util_thread);
-
   if (g_ndb)
   {
 #ifndef DBUG_OFF
@@ -5195,7 +5460,6 @@
   pthread_mutex_destroy(&LOCK_ndb_util_thread);
   pthread_cond_destroy(&COND_ndb_util_thread);
   ndbcluster_inited= 0;
-  ndbcluster_util_inited= 0;
   DBUG_RETURN(0);
 }
 
@@ -5662,60 +5926,6 @@
 }
 
 
-#ifndef DBUG_OFF
-static void dbug_print_table(const char *info, TABLE *table)
-{
-  if (table == 0)
-  {
-    DBUG_PRINT("info",("%s: (null)", info));
-    return;
-  }
-  DBUG_PRINT("info",
-             ("%s: %s.%s s->fields: %d  "
-              "reclength: %d  rec_buff_length: %d  record[0]: %lx  "
-              "record[1]: %lx",
-              info,
-              table->s->db,
-              table->s->table_name,
-              table->s->fields,
-              table->s->reclength,
-              table->s->rec_buff_length,
-              table->record[0],
-              table->record[1]));
-
-  for (unsigned int i= 0; i < table->s->fields; i++) 
-  {
-    Field *f= table->field[i];
-    DBUG_PRINT("info",
-               ("[%d] \"%s\"(0x%lx:%s%s%s%s%s%s) type: %d  pack_length: %d  "
-                "ptr: 0x%lx[+%d]  null_bit: %u  null_ptr: 0x%lx[+%d]",
-                i,
-                f->field_name,
-                f->flags,
-                (f->flags & PRI_KEY_FLAG)  ? "pri"       : "attr",
-                (f->flags & NOT_NULL_FLAG) ? ""          : ",nullable",
-                (f->flags & UNSIGNED_FLAG) ? ",unsigned" : ",signed",
-                (f->flags & ZEROFILL_FLAG) ? ",zerofill" : "",
-                (f->flags & BLOB_FLAG)     ? ",blob"     : "",
-                (f->flags & BINARY_FLAG)   ? ",binary"   : "",
-                f->real_type(),
-                f->pack_length(),
-                f->ptr, f->ptr - table->record[0],
-                f->null_bit,
-                f->null_ptr, (byte*) f->null_ptr - table->record[0]));
-    if (f->type() == MYSQL_TYPE_BIT)
-    {
-      Field_bit *g= (Field_bit*) f;
-      DBUG_PRINT("MYSQL_TYPE_BIT",("field_length: %d  bit_ptr: 0x%lx[+%d] "
-                                   "bit_ofs: %u  bit_len: %u",
-                                   g->field_length, g->bit_ptr,
-                                   (byte*) g->bit_ptr-table->record[0],
-                                   g->bit_ofs, g->bit_len));
-    }
-  }
-}
-#endif
-
 /*
   Handling the shared NDB_SHARE structure that is needed to
   provide table locking.
@@ -5745,6 +5955,12 @@
                ("db.tablename: %s.%s  use_count: %d  commit_count: %d",
                 share->db, share->table_name,
                 share->use_count, share->commit_count));
+#ifdef HAVE_NDB_BINLOG
+    if (share->table)
+      DBUG_PRINT("share",
+                 ("table->s->db.table_name: %s.%s",
+                  share->table->s->db.str, share->table->s->table_name.str));
+#endif
   }
   DBUG_VOID_RETURN;
 }
@@ -5752,11 +5968,170 @@
 #define dbug_print_open_tables()
 #endif
 
+#ifdef HAVE_NDB_BINLOG
+/*
+  For some reason a share is still around, try to salvage the situation
+  by closing all cached tables. If the share still exists, there is an
+  error somewhere but only report this to the error log.  Keep this
+  "trailing share" but rename it since there are still references to it
+  to avoid segmentation faults.  There is a risk that the memory for
+  this trailing share leaks.
+  
+  Must be called with previous pthread_mutex_lock(&ndbcluster_mutex)
+*/
+int handle_trailing_share(NDB_SHARE *share, bool have_lock)
+{
+  static ulong trailing_share_id= 0;
+  DBUG_ENTER("handle_trailing_share");
+
+  ++share->use_count;
+  pthread_mutex_unlock(&ndbcluster_mutex);
+
+  close_cached_tables((THD*) 0, 0, (TABLE_LIST*) 0, have_lock);
+
+  pthread_mutex_lock(&ndbcluster_mutex);
+  if (!--share->use_count)
+  {
+    DBUG_PRINT("info", ("NDB_SHARE: close_cached_tables %s freed share.",
+               share->key)); 
+    real_free_share(&share);
+    DBUG_RETURN(0);
+  }
+
+  /*
+    share still exists, if share has not been dropped by server
+    release that share
+  */
+  if (share->state != NSS_DROPPED && !--share->use_count)
+  {
+    DBUG_PRINT("info", ("NDB_SHARE: %s already exists, "
+                        "use_count=%d  state != NSS_DROPPED.",
+                        share->key, share->use_count)); 
+    real_free_share(&share);
+    DBUG_RETURN(0);
+  }
+  DBUG_PRINT("error", ("NDB_SHARE: %s already exists  use_count=%d.",
+                       share->key, share->use_count));
+
+  sql_print_error("NDB_SHARE: %s already exists  use_count=%d."
+                  " Moving away for safety, but possible memleak.",
+                  share->key, share->use_count);
+  dbug_print_open_tables();
+
+  /*
+    This is probably an error.  We can however save the situation
+    at the cost of a possible mem leak, by "renaming" the share
+    - First remove from hash
+  */
+  hash_delete(&ndbcluster_open_tables, (byte*) share);
+
+  /*
+    now give it a new name, just a running number
+    if space is not enough allocate some more
+  */
+  {
+    const uint min_key_length= 10;
+    if (share->key_length < min_key_length)
+    {
+      share->key= alloc_root(&share->mem_root, min_key_length + 1);
+      share->key_length= min_key_length;
+    }
+    share->key_length=
+      my_snprintf(share->key, min_key_length + 1, "#leak%d",
+                  trailing_share_id++);
+  }
+  /* Keep it for possible the future trailing free */
+  my_hash_insert(&ndbcluster_open_tables, (byte*) share);
+
+  DBUG_RETURN(0);
+}
+
+/*
+  Rename share is used during rename table.
+*/
+static int rename_share(NDB_SHARE *share, const char *new_key, bool have_lock)
+{
+  NDB_SHARE *tmp;
+  pthread_mutex_lock(&ndbcluster_mutex);
+  uint new_length= (uint) strlen(new_key);
+  DBUG_PRINT("rename_share", ("old_key: %s  old_key_length: %d",
+                              share->key, share->key_length));
+  if ((tmp= (NDB_SHARE*) hash_search(&ndbcluster_open_tables,
+                                     (byte*) new_key, new_length)))
+    handle_trailing_share(tmp, have_lock);
+
+  /* remove the share from hash */
+  hash_delete(&ndbcluster_open_tables, (byte*) share);
+  dbug_print_open_tables();
+
+  /* save old stuff if insert should fail */
+  uint old_length= share->key_length;
+  char *old_key= share->key;
+
+  /*
+    now allocate and set the new key, db etc
+    enough space for key, db, and table_name
+  */
+  share->key= alloc_root(&share->mem_root, 2 * (new_length + 1));
+  strmov(share->key, new_key);
+  share->key_length= new_length;
+
+  if (my_hash_insert(&ndbcluster_open_tables, (byte*) share))
+  {
+    // ToDo free the allocated stuff above?
+    DBUG_PRINT("error", ("rename_share: my_hash_insert %s failed",
+                         share->key));
+    share->key= old_key;
+    share->key_length= old_length;
+    if (my_hash_insert(&ndbcluster_open_tables, (byte*) share))
+    {
+      sql_print_error("rename_share: failed to recover %s", share->key);
+      DBUG_PRINT("error", ("rename_share: my_hash_insert %s failed",
+                           share->key));
+    }
+    dbug_print_open_tables();
+    pthread_mutex_unlock(&ndbcluster_mutex);
+    return -1;
+  }
+  dbug_print_open_tables();
+
+  share->db= share->key + new_length + 1;
+  ha_ndbcluster::set_dbname(new_key, share->db);
+  share->table_name= share->db + strlen(share->db) + 1;
+  ha_ndbcluster::set_tabname(new_key, share->table_name);
+
+  DBUG_PRINT("rename_share",
+             ("0x%lx key: %s  key_length: %d",
+              share, share->key, share->key_length));
+  DBUG_PRINT("rename_share",
+             ("db.tablename: %s.%s  use_count: %d  commit_count: %d",
+              share->db, share->table_name,
+              share->use_count, share->commit_count));
+  DBUG_PRINT("rename_share",
+             ("table->s->db.table_name: %s.%s",
+              share->table->s->db.str, share->table->s->table_name.str));
+
+  if (share->op == 0)
+  {
+    share->table->s->db.str= share->db;
+    share->table->s->db.length= strlen(share->db);
+    share->table->s->table_name.str= share->table_name;
+    share->table->s->table_name.length= strlen(share->table_name);
+  }
+  /* else rename will be handled when the ALTER event comes */
+  share->old_names= old_key;
+  // ToDo free old_names after ALTER EVENT
+
+  pthread_mutex_unlock(&ndbcluster_mutex);
+  return 0;
+}
+#endif
+
 /*
   Increase refcount on existing share.
   Always returns share and cannot fail.
 */
-static NDB_SHARE *get_share(NDB_SHARE *share)
+NDB_SHARE *ndbcluster_get_share(NDB_SHARE *share)
 {
   pthread_mutex_lock(&ndbcluster_mutex);
   share->use_count++;
@@ -5788,9 +6163,12 @@
 
   have_lock == TRUE, pthread_mutex_lock(&ndbcluster_mutex) already taken
 */
-static NDB_SHARE *get_share(const char *key, bool create_if_not_exists,
-                            bool have_lock)
+NDB_SHARE *ndbcluster_get_share(const char *key, bool create_if_not_exists,
+                                bool have_lock)
 {
+  DBUG_ENTER("get_share");
+  DBUG_PRINT("info", ("get_share: key %s", key));
+  THD *thd= current_thd;
   NDB_SHARE *share;
   if (!have_lock)
     pthread_mutex_lock(&ndbcluster_mutex);
@@ -5836,6 +6214,9 @@
       ha_ndbcluster::set_dbname(key, share->db);
       share->table_name= share->db + strlen(share->db) + 1;
       ha_ndbcluster::set_tabname(key, share->table_name);
+#ifdef HAVE_NDB_BINLOG
+      ndbcluster_binlog_init_share(share);
+#endif
       *root_ptr= old_root;
     }
     else
@@ -5863,7 +6244,7 @@
   return share;
 }
 
-static void real_free_share(NDB_SHARE **share)
+void ndbcluster_real_free_share(NDB_SHARE **share)
 {
   DBUG_PRINT("real_free_share",
              ("0x%lx key: %s  key_length: %d",
@@ -5878,6 +6259,26 @@
   pthread_mutex_destroy(&(*share)->mutex);
   free_root(&(*share)->mem_root, MYF(0));
 
+#ifdef HAVE_NDB_BINLOG
+  if ((*share)->table)
+  {
+    closefrm((*share)->table, 0);
+#if 0 // todo ?
+    free_root(&(*share)->table->mem_root, MYF(0));
+#endif
+
+#ifndef DBUG_OFF
+    bzero((gptr)(*share)->table_share, sizeof(*(*share)->table_share));
+    bzero((gptr)(*share)->table, sizeof(*(*share)->table));
+#endif
+    my_free((gptr) (*share)->table_share, MYF(0));
+    my_free((gptr) (*share)->table, MYF(0));
+#ifndef DBUG_OFF
+    (*share)->table_share= 0;
+    (*share)->table= 0;
+#endif
+  }
+#endif
   my_free((gptr) *share, MYF(0));
   *share= 0;
 
@@ -5890,7 +6291,7 @@
 
   have_lock == TRUE, pthread_mutex_lock(&ndbcluster_mutex) already taken
 */
-static void free_share(NDB_SHARE **share, bool have_lock)
+void ndbcluster_free_share(NDB_SHARE **share, bool have_lock)
 {
   if (!have_lock)
     pthread_mutex_lock(&ndbcluster_mutex);
@@ -5916,7 +6317,6 @@
 }
 
 
-
 /*
   Internal representation of the frm blob
    
@@ -6590,7 +6990,7 @@
     Wait for cluster to start
   */
   pthread_mutex_lock(&LOCK_ndb_util_thread);
-  while (!ndb_cluster_node_id)
+  while (!ndb_cluster_node_id && (ndbcluster_hton.slot != ~(uint)0))
   {
     /* ndb not connected yet */
     set_timespec(abstime, 1);
@@ -6605,13 +7005,35 @@
   }
   pthread_mutex_unlock(&LOCK_ndb_util_thread);
 
+  {
+    Thd_ndb *thd_ndb;
+    if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb()))
+    {
+      sql_print_error("Could not allocate Thd_ndb object");
+      goto ndb_util_thread_end;
+    }
+    set_thd_ndb(thd, thd_ndb);
+    thd_ndb->options|= TNO_NO_LOG_SCHEMA_OP;
+  }
+
+#ifdef HAVE_NDB_BINLOG
+  /* create tables needed by the replication */
+  ndbcluster_setup_binlog_table_shares(thd);
+#else
   /*
     Get all table definitions from the storage node
   */
   ndbcluster_find_all_files(thd);
+#endif
 
   ndbcluster_util_inited= 1;
 
+#ifdef HAVE_NDB_BINLOG
+  /* If running, signal injector thread that all is setup */
+  if (ndb_binlog_thread_running > 0)
+    pthread_cond_signal(&injector_cond);
+#endif
+
   set_timespec(abstime, 0);
   for (;!abort_loop;)
   {
@@ -6628,6 +7050,15 @@
     if (abort_loop)
       break; /* Shutting down server */
 
+#ifdef HAVE_NDB_BINLOG
+    /*
+      Check that the apply_status_share and schema_share has been created.
+      If not try to create it
+    */
+    if (!apply_status_share || !schema_share)
+      ndbcluster_setup_binlog_table_shares(thd);
+#endif
+
     if (ndb_cache_check_time == 0)
     {
       /* Wake up in 1 second to check if value has changed */
@@ -6641,6 +7072,12 @@
     for (uint i= 0; i < ndbcluster_open_tables.records; i++)
     {
       share= (NDB_SHARE *)hash_element(&ndbcluster_open_tables, i);
+#ifdef HAVE_NDB_BINLOG
+      if ((share->use_count - (int) (share->op != 0) - (int) (share->op != 0))
+          <= 0)
+        continue; // injector thread is the only user, skip statistics
+      share->util_lock= current_thd; // Mark that util thread has lock
+#endif /* HAVE_NDB_BINLOG */
       share->use_count++; /* Make sure the table can't be closed */
       DBUG_PRINT("ndb_util_thread",
                  ("Found open table[%d]: %s, use_count: %d",
@@ -6655,6 +7092,17 @@
     List_iterator_fast<NDB_SHARE> it(util_open_tables);
     while ((share= it++))
     {
+#ifdef HAVE_NDB_BINLOG
+      if ((share->use_count - (int) (share->op != 0) - (int) (share->op != 0))
+          <= 1)
+      {
+        /*
+          Util thread and injector thread is the only user, skip statistics
+	*/
+        free_share(&share);
+        continue;
+      }
+#endif /* HAVE_NDB_BINLOG */
       DBUG_PRINT("ndb_util_thread",
                  ("Fetching commit count for: %s",
                   share->key));
@@ -6716,6 +7164,7 @@
     }
   }
 ndb_util_thread_end:
+  sql_print_information("Stopping Cluster Utility thread");
   net_end(&thd->net);
   thd->cleanup();
   delete thd;
@@ -8071,7 +8520,20 @@
   {
     DBUG_RETURN(FALSE);
   }
-  
+
+  update_status_variables(g_ndb_cluster_connection);
+  my_snprintf(buf, sizeof(buf),
+              "cluster_node_id=%u, "
+              "connected_host=%s, "
+              "connected_port=%u, "
+              "number_of_storage_nodes=%u",
+              ndb_cluster_node_id,
+              ndb_connected_host,
+              ndb_connected_port,
+              ndb_number_of_storage_nodes);
+  if (stat_print(thd, ndbcluster_hton.name, "connection", buf))
+    DBUG_RETURN(TRUE);
+
   if (get_thd_ndb(thd) && get_thd_ndb(thd)->ndb)
   {
     Ndb* ndb= (get_thd_ndb(thd))->ndb;
@@ -8087,10 +8549,13 @@
         DBUG_RETURN(TRUE);
     }
   }
-  send_eof(thd);
-  
+#ifdef HAVE_NDB_BINLOG
+  ndbcluster_show_status_binlog(thd, stat_print, stat_type);
+#endif
+
   DBUG_RETURN(FALSE);
 }
+
 
 /*
   Create a table in NDB Cluster

--- 1.87/mysql-test/r/show_check.result	2005-12-31 05:51:24 +01:00
+++ 1.88/mysql-test/r/show_check.result	2006-01-05 12:55:41 +01:00
@@ -53,6 +53,7 @@
 show databases;
 Database
 information_schema
+cluster_replication
 mysql
 test
 show databases like "test%";

--- 1.154/sql/set_var.cc	2006-01-04 20:39:50 +01:00
+++ 1.155/sql/set_var.cc	2006-01-05 12:55:43 +01:00
@@ -478,6 +478,14 @@
 				  &SV::ndb_autoincrement_prefetch_sz);
 sys_var_thd_bool
 sys_ndb_force_send("ndb_force_send", &SV::ndb_force_send);
+#ifdef HAVE_NDB_BINLOG
+sys_var_long_ptr
+sys_ndb_report_thresh_binlog_epoch_slip("ndb_report_thresh_binlog_epoch_slip",
+                                        &ndb_report_thresh_binlog_epoch_slip);
+sys_var_long_ptr
+sys_ndb_report_thresh_binlog_mem_usage("ndb_report_thresh_binlog_mem_usage",
+                                       &ndb_report_thresh_binlog_mem_usage);
+#endif
 sys_var_thd_bool
 sys_ndb_use_exact_count("ndb_use_exact_count", &SV::ndb_use_exact_count);
 sys_var_thd_bool
@@ -808,6 +816,12 @@
   {sys_ndb_index_stat_cache_entries.name, (char*) &sys_ndb_index_stat_cache_entries, SHOW_SYS},
   {sys_ndb_index_stat_enable.name, (char*) &sys_ndb_index_stat_enable, SHOW_SYS},
   {sys_ndb_index_stat_update_freq.name, (char*) &sys_ndb_index_stat_update_freq, SHOW_SYS},
+#ifdef HAVE_NDB_BINLOG
+  {sys_ndb_report_thresh_binlog_epoch_slip.name,
+   (char*) &sys_ndb_report_thresh_binlog_epoch_slip,                SHOW_SYS},
+  {sys_ndb_report_thresh_binlog_mem_usage.name,
+   (char*) &sys_ndb_report_thresh_binlog_mem_usage,                 SHOW_SYS},
+#endif
   {sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count,   SHOW_SYS},
   {sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS},
   {sys_net_buffer_length.name,(char*) &sys_net_buffer_length,       SHOW_SYS},

--- 1.76/libmysqld/Makefile.am	2005-12-23 05:04:23 +01:00
+++ 1.77/libmysqld/Makefile.am	2006-01-05 12:58:34 +01:00
@@ -65,11 +65,13 @@
 	sp_head.cc sp_pcontext.cc sp.cc sp_cache.cc sp_rcontext.cc \
 	parse_file.cc sql_view.cc sql_trigger.cc my_decimal.cc \
 	item_xmlfunc.cc \
-        rpl_filter.cc sql_partition.cc handlerton.cc sql_plugin.cc
+        rpl_filter.cc sql_partition.cc handlerton.cc sql_plugin.cc \
+        rpl_injector.cc
 
 libmysqld_int_a_SOURCES= $(libmysqld_sources) $(libmysqlsources) $(sqlsources)
 EXTRA_libmysqld_a_SOURCES =	ha_innodb.cc ha_berkeley.cc ha_archive.cc \
 			ha_blackhole.cc ha_federated.cc ha_ndbcluster.cc \
+			ha_ndbcluster_binlog.cc \
 			ha_partition.cc
 libmysqld_a_DEPENDENCIES= @mysql_se_objs@
 libmysqld_a_SOURCES=
@@ -99,6 +101,9 @@
 		$(CXXCOMPILE) @bdb_includes@ $(LM_CFLAGS) -c $<
 
 ha_ndbcluster.o:ha_ndbcluster.cc
+		$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
+
+ha_ndbcluster_binlog.o: ha_ndbcluster_binlog.cc
 		$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
 
 # Until we can remove dependency on ha_ndbcluster.h

--- 1.166/client/mysqltest.c	2005-12-28 03:43:32 +01:00
+++ 1.167/client/mysqltest.c	2006-01-05 12:55:41 +01:00
@@ -827,7 +827,7 @@
 int var_set(const char *var_name, const char *var_name_end,
             const char *var_val, const char *var_val_end)
 {
-  int digit;
+  int digit, result, env_var= 0;
   VAR* v;
   DBUG_ENTER("var_set");
   DBUG_PRINT("enter", ("var_name: '%.*s' = '%.*s' (length: %d)",
@@ -835,11 +835,11 @@
                        (int) (var_val_end - var_val), var_val,
                        (int) (var_val_end - var_val)));
 
-  if (*var_name++ != '$')
-  {
-    var_name--;
-    die("Variable name in %s does not start with '$'", var_name);
-  }
+  if (*var_name != '$')
+    env_var= 1;
+  else
+    var_name++;
+
   digit = *var_name - '0';
   if (!(digit < 10 && digit >= 0))
   {
@@ -847,7 +847,23 @@
   }
   else
     v = var_reg + digit;
-  DBUG_RETURN(eval_expr(v, var_val, (const char**)&var_val_end));
+
+  result= eval_expr(v, var_val, (const char**) &var_val_end);
+
+  if (env_var)
+  {
+    char buf[1024];
+    memcpy(buf, v->name, v->name_len);
+    buf[v->name_len]= 0;
+    if (v->int_dirty)
+    {
+      sprintf(v->str_val, "%d", v->int_val);
+      v->int_dirty= 0;
+      v->str_val_len= strlen(v->str_val);
+    }
+    setenv(buf, v->str_val, 1);
+  }
+  DBUG_RETURN(result);
 }
 
 
@@ -1452,6 +1468,91 @@
   rpl_parse = mysql_rpl_parse_enabled(mysql);
   mysql_disable_rpl_parse(mysql);
 
+#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
+  /*
+     Wait for ndb binlog to be up-to-date with all changes
+     done on the local mysql server
+  */
+  {
+    ulong have_ndbcluster;
+    if (mysql_query(mysql, query= "show variables like 'have_ndbcluster'"))
+      die("At line %u: failed in %s: %d: %s", start_lineno, query,
+          mysql_errno(mysql), mysql_error(mysql));
+    if (!(last_result= res= mysql_store_result(mysql)))
+      die("line %u: mysql_store_result() returned NULL for '%s'", start_lineno,
+          query);
+    if (!(row= mysql_fetch_row(res)))
+      die("line %u: empty result in %s", start_lineno, query);
+
+    have_ndbcluster= strcmp("YES", row[1]) == 0;
+    mysql_free_result(res);
+    last_result= 0;
+
+    if (have_ndbcluster)
+    {
+      ulonglong epoch, tmp_epoch= 0;
+      int count= 0;
+
+      do
+      {
+        const char binlog[]= "binlog";
+        const char latest_trans_epoch[]=
+          "latest_trans_epoch=";
+        const char latest_applied_binlog_epoch[]=
+          "latest_applied_binlog_epoch=";
+        if (count)
+          sleep(1);
+        if (mysql_query(mysql, query= "show engine ndb status"))
+          die("At line %u: failed in '%s': %d: %s", start_lineno, query,
+              mysql_errno(mysql), mysql_error(mysql));
+        if (!(last_result= res= mysql_store_result(mysql)))
+          die("line %u: mysql_store_result() returned NULL for '%s'",
+              start_lineno, query);
+        while ((row= mysql_fetch_row(res)))
+        {
+          if (strcmp(row[1], binlog) == 0)
+          {
+            const char *status= row[2];
+            /* latest_trans_epoch */
+            if (count == 0)
+            {
+              while (*status && strncmp(status, latest_trans_epoch,
+                                        sizeof(latest_trans_epoch)-1))
+                status++;
+              if (*status)
+              {
+                status+= sizeof(latest_trans_epoch)-1;
+                epoch= strtoull(status, (char**) 0, 10);
+              }
+              else
+                die("line %u: result does not contain '%s' in '%s'",
+                    start_lineno, latest_trans_epoch, query);
+            }
+            /* latest_applied_binlog_epoch */
+            while (*status && strncmp(status, latest_applied_binlog_epoch,
+                                      sizeof(latest_applied_binlog_epoch)-1))
+              status++;
+            if (*status)
+            {
+              status+= sizeof(latest_applied_binlog_epoch)-1;
+              tmp_epoch= strtoull(status, (char**) 0, 10);
+            }
+            else
+              die("line %u: result does not contain '%s' in '%s'",
+                  start_lineno, latest_applied_binlog_epoch, query);
+            break;
+          }
+        }
+        mysql_free_result(res);
+        if (!row)
+          die("line %u: result does not contain '%s' in '%s'",
+              start_lineno, binlog, query);
+        last_result=0;
+        count++;
+      } while (tmp_epoch < epoch && count <= 3);
+    }
+  }
+#endif
   if (mysql_query(mysql, query= "show master status"))
     die("failed in show master status: %d: %s",
 	mysql_errno(mysql), mysql_error(mysql));
@@ -1502,7 +1603,8 @@
   while (*p && (*p != '=') && !my_isspace(charset_info,*p))
     p++;
   var_name_end= p;
-  if (var_name+1 == var_name_end)
+  if (var_name == var_name_end ||
+      (var_name+1 == var_name_end && *var_name == '$'))
     die("Missing variable name in let");
   while (my_isspace(charset_info,*p))
     p++;
Thread
bk commit into 5.1 tree (tomas:1.2039) — tomas, 5 Jan